#!/usr/bin/env python
# coding: utf-8

# # Clean-Clean ER
#
# In this notebook we present the pyJedAI approach on the well-known Abt-Buy dataset. Clean-Clean ER is the link discovery/deduplication task between two sets of entities.
#
# Dataset: __Abt-Buy dataset__ (D1)
#
# The Abt-Buy dataset for entity resolution derives from the online retailers Abt.com and Buy.com. The dataset contains 1076 entities from abt.com and 1076 entities from buy.com, as well as a gold standard (perfect mapping) with 1076 matching record pairs between the two data sources. The common attributes between the two data sources are: product name, product description and product price.

# ## How to install?
#
# pyJedAI is an open-source library that can be installed from PyPI.
#
# For more: [pypi.org/project/pyjedai/](https://pypi.org/project/pyjedai/)

# In[ ]:

get_ipython().system('pip install pyjedai -U')

# In[2]:

get_ipython().system('pip show pyjedai')

# Imports

# In[1]:

import os
import sys
import pandas as pd
import networkx
from networkx import draw, Graph

# In[2]:

import pyjedai
from pyjedai.utils import (
    text_cleaning_method,
    print_clusters,
    print_blocks,
    print_candidate_pairs
)
from pyjedai.evaluation import Evaluation

# # Workflow Architecture
#
# ![workflow-example.png](https://github.com/AI-team-UoA/pyJedAI/blob/main/docs/img/workflow-example.png?raw=true)

# # Data Reading
#
# To perform ER, pyJedAI only needs the initial data to be transformed into pandas DataFrames. Hence, pyJedAI can operate on any structured or semi-structured data (a tiny in-memory example follows the cleaning step below). In this case, the Abt-Buy dataset is provided as .csv files.

# In[3]:

from pyjedai.datamodel import Data
from pyjedai.evaluation import Evaluation

# In[4]:

d1 = pd.read_csv("./../data/ccer/D2/abt.csv", sep='|', engine='python', na_filter=False)
d2 = pd.read_csv("./../data/ccer/D2/buy.csv", sep='|', engine='python', na_filter=False)
gt = pd.read_csv("./../data/ccer/D2/gt.csv", sep='|', engine='python')

data = Data(dataset_1=d1,
            id_column_name_1='id',
            dataset_2=d2,
            id_column_name_2='id',
            ground_truth=gt)

# pyJedAI also offers dataset analysis methods (more will be developed)

# In[5]:

data.print_specs()

# In[6]:

data.dataset_1.head(5)

# In[7]:

data.dataset_2.head(5)

# In[8]:

data.ground_truth.head(3)

# ### Data cleaning step (optional)
#
# pyJedAI offers 4 types of text cleaning/processing:
#
# - Stopwords removal
# - Punctuation removal
# - Numbers removal
# - Unicodes removal

# In[9]:

data.clean_dataset(remove_stopwords = False,
                   remove_punctuation = False,
                   remove_numbers = False,
                   remove_unicodes = False)
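# As noted above, pyJedAI only needs DataFrames, so any source pandas can read (JSON, SQL, parquet, or plain Python objects) works the same way. Below is a minimal, hypothetical sketch with toy in-memory records; the attribute values and the two-column ground-truth layout (first column: D1 ids, second column: D2 ids) are illustrative assumptions, not part of the Abt-Buy data.

# In[ ]:

# Hypothetical toy example: build a Data object without any files.
toy_d1 = pd.DataFrame({'id': [0, 1], 'name': ['sony television 42"', 'apple ipod nano']})
toy_d2 = pd.DataFrame({'id': [0, 1], 'name': ['sony tv 42 inch', 'ipod nano by apple']})
toy_gt = pd.DataFrame({'D1': [0, 1], 'D2': [0, 1]})  # placeholder column names

toy_data = Data(dataset_1=toy_d1, id_column_name_1='id',
                dataset_2=toy_d2, id_column_name_2='id',
                ground_truth=toy_gt)
toy_data.print_specs()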
# ## Block Building
#
# It clusters entities into overlapping blocks in a lazy manner that relies on unsupervised blocking keys: every token in an attribute value forms a key. Blocks are then extracted, possibly using a transformation, based on the equality or similarity of these keys.
#
# The following methods are currently supported:
#
# - Standard/Token Blocking
# - Sorted Neighborhood
# - Extended Sorted Neighborhood
# - Q-Grams Blocking
# - Extended Q-Grams Blocking
# - Suffix Arrays Blocking
# - Extended Suffix Arrays Blocking

# In[10]:

from pyjedai.block_building import (
    StandardBlocking,
    QGramsBlocking,
    ExtendedQGramsBlocking,
    SuffixArraysBlocking,
    ExtendedSuffixArraysBlocking,
)

# In[11]:

bb = StandardBlocking()
blocks = bb.build_blocks(data, attributes_1=['name'], attributes_2=['name'])

# In[12]:

bb.report()

# In[13]:

_ = bb.evaluate(blocks, with_classification_report=True)

# ## Block Purging
#
# __Optional step__
#
# Discards the blocks exceeding a certain number of comparisons.

# In[14]:

from pyjedai.block_cleaning import BlockPurging

# In[15]:

bp = BlockPurging()
cleaned_blocks = bp.process(blocks, data, tqdm_disable=False)

# In[16]:

bp.report()

# In[17]:

_ = bp.evaluate(cleaned_blocks)

# ## Block Cleaning
#
# ___Optional step___
#
# Its goal is to clean a set of overlapping blocks from unnecessary comparisons, which can be either redundant (i.e., repeated comparisons that have already been executed in a previously examined block) or superfluous (i.e., comparisons that involve non-matching entities). Its methods operate on the coarse level of individual blocks or entities.

# In[18]:

from pyjedai.block_cleaning import BlockFiltering

# In[19]:

bf = BlockFiltering(ratio=0.8)
filtered_blocks = bf.process(cleaned_blocks, data, tqdm_disable=False)

# In[20]:

bf.evaluate(filtered_blocks)

# ## Comparison Cleaning - Meta Blocking
#
# ___Optional step___
#
# Similar to Block Cleaning, this step aims to clean a set of blocks from both redundant and superfluous comparisons. Unlike Block Cleaning, its methods operate on the finer granularity of individual comparisons.
#
# The following methods are currently supported:
#
# - Comparison Propagation
# - Cardinality Edge Pruning (CEP)
# - Cardinality Node Pruning (CNP)
# - Weighted Edge Pruning (WEP)
# - Weighted Node Pruning (WNP)
# - Reciprocal Cardinality Node Pruning (ReCNP)
# - Reciprocal Weighted Node Pruning (ReWNP)
# - BLAST
#
# Most of these methods are Meta-blocking techniques. All methods are optional, but competitive, in the sense that only one of them can be part of an ER workflow. For more details on the functionality of these methods, see the pyJedAI documentation. They can be combined with one of the following weighting schemes:
#
# - Aggregate Reciprocal Comparisons Scheme (ARCS)
# - Common Blocks Scheme (CBS)
# - Enhanced Common Blocks Scheme (ECBS)
# - Jaccard Scheme (JS)
# - Enhanced Jaccard Scheme (EJS)

# In[21]:

from pyjedai.comparison_cleaning import (
    WeightedEdgePruning,
    WeightedNodePruning,
    CardinalityEdgePruning,
    CardinalityNodePruning,
    BLAST,
    ReciprocalCardinalityNodePruning,
    ReciprocalWeightedNodePruning,
    ComparisonPropagation
)

# In[22]:

mb = WeightedEdgePruning(weighting_scheme='EJS')
candidate_pairs_blocks = mb.process(filtered_blocks, data, tqdm_disable=True)

# In[23]:

_ = mb.evaluate(candidate_pairs_blocks)

# ### Want to export pairs in this step?
#
# Every step provides a method named `export_to_df` that exports all pairs in a DataFrame. If you wish to export them to a file, use `.to_csv` from pandas, as in the sketch below.

# In[24]:

pairs_df = mb.export_to_df(candidate_pairs_blocks)

# In[25]:

pairs_df.head(5)
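# For instance, a minimal sketch of persisting the candidate pairs; the file name here is a placeholder.

# In[ ]:

# Write the exported pairs to disk -- plain pandas, nothing pyJedAI-specific.
pairs_df.to_csv("./candidate_pairs.csv", index=False)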
# ## Entity Matching
#
# It compares pairs of entity profiles, associating every pair with a similarity in [0,1]. Its output comprises the similarity graph, i.e., an undirected, weighted graph where the nodes correspond to entities and the edges connect pairs of compared entities.

# In[26]:

from pyjedai.matching import EntityMatching

# In[27]:

em = EntityMatching(
    metric='cosine',
    tokenizer='char_tokenizer',
    vectorizer='tfidf',
    qgram=3,
    similarity_threshold=0.0
)

pairs_graph = em.predict(candidate_pairs_blocks, data, tqdm_disable=True)

# In[28]:

draw(pairs_graph)

# In[29]:

_ = em.evaluate(pairs_graph)

# ### How to set a valid similarity threshold?
#
# Configure the similarity threshold with a grid search or with an Optuna search (a minimal grid-search sketch is given after the clustering step below). pyJedAI also provides some visualizations of the score distributions.
#
# For example, with a classic histogram:

# In[30]:

em.plot_distribution_of_all_weights()

# Or grouped in bins of width 0.1, from 0.0 to 1.0:

# In[31]:

em.plot_distribution_of_scores()

# ## Entity Clustering
#
# It takes as input the similarity graph produced by Entity Matching and partitions it into a set of equivalence clusters, with every cluster corresponding to a distinct real-world object.

# In[32]:

from pyjedai.clustering import ConnectedComponentsClustering, UniqueMappingClustering

# In[33]:

ccc = UniqueMappingClustering()
clusters = ccc.process(pairs_graph, data, similarity_threshold=0.17)

# In[34]:

ccc.report()

# In[35]:

_ = ccc.evaluate(clusters)
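# As promised above, a minimal grid-search sketch for the clustering similarity threshold. It assumes that `evaluate` returns a dict of metrics containing an 'F1 %' entry (check the return format in your pyJedAI version); the threshold grid itself is arbitrary.

# In[ ]:

# Hypothetical grid search over the clustering threshold.
best_t, best_f1 = None, -1.0
for t in [0.05, 0.10, 0.15, 0.17, 0.20, 0.25, 0.30]:
    cc = UniqueMappingClustering()
    t_clusters = cc.process(pairs_graph, data, similarity_threshold=t)
    results = cc.evaluate(t_clusters)  # prints and (assumed) returns the metrics
    f1 = results.get('F1 %', -1.0) if isinstance(results, dict) else -1.0
    if f1 > best_f1:
        best_t, best_f1 = t, f1

print(f"Best threshold: {best_t} (F1 = {best_f1:.2f}%)")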
# ---
#
# K. Nikoletos, J. Maciejewski, G. Papadakis & M. Koubarakis
#
# Apache License 2.0