#!/usr/bin/env python
# coding: utf-8

# # Dirty ER
# 
# In this notebook we present the pyJedAI approach on the well-known Cora dataset. Dirty ER is the process of deduplicating a single set of entities.
# 
# How to install?
# 
# pyJedAI is an open-source library that can be installed from PyPI.
# 
# For more: [pypi.org/project/pyjedai/](https://pypi.org/project/pyjedai/)

# In[ ]:

get_ipython().system('python --version')

# In[ ]:

get_ipython().run_line_magic('pip', 'install pyjedai -U')

# In[ ]:

get_ipython().run_line_magic('pip', 'show pyjedai')

# Imports

# In[1]:

import os
import sys
import pandas as pd
import networkx
from networkx import draw, Graph

from pyjedai.utils import print_clusters, print_blocks, print_candidate_pairs
from pyjedai.evaluation import Evaluation

# ## Reading the dataset
# 
# pyJedAI only requires the initial data to be transformed into a pandas DataFrame, so it can operate on any structured or semi-structured data. In this case the Cora dataset is provided as .csv files.
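# As a quick sanity check (not part of the original workflow, and assuming the same file path and '|' separator used in the cells below), the raw CSV can be inspected directly with pandas before wrapping it in pyJedAI's data model:

# In[ ]:

# Hypothetical preview cell: peek at the raw Cora records and their columns.
raw_cora = pd.read_csv("./../data/der/cora/cora.csv", sep='|')
print(raw_cora.columns.tolist())
raw_cora.head()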
# ### pyjedai module
# 
# The Data module offers a number of options:
# 
# - Selecting the parameters (columns) of the dataframe, in D1 (and in D2)
# - Printing a detailed text analysis
# - Storing a hidden mapping of the ids, creating it if it does not exist

# In[2]:

from pyjedai.datamodel import Data

d1 = pd.read_csv("./../data/der/cora/cora.csv", sep='|')
gt = pd.read_csv("./../data/der/cora/cora_gt.csv", sep='|', header=None)
attr = ['author', 'title']

# Data is the connecting module of all steps of the workflow.

# In[3]:

data = Data(
    dataset_1=d1,
    id_column_name_1='Entity Id',
    ground_truth=gt,
    attributes_1=attr,
    dataset_name_1="CORA"
)

# ## Workflow with Block Cleaning Methods
# 
# In this notebook we created the below architecture:
# 
# ![workflow1-cora.png](https://github.com/AI-team-UoA/pyJedAI/blob/main/docs/img/workflow1-cora.png?raw=true)

# ## Block Building
# 
# It clusters entities into overlapping blocks in a lazy manner that relies on unsupervised blocking keys: every token in an attribute value forms a key. Blocks are then extracted, possibly using a transformation, based on the equality or the similarity of their keys.
# 
# The following methods are currently supported:
# 
# - Standard/Token Blocking
# - Sorted Neighborhood
# - Extended Sorted Neighborhood
# - Q-Grams Blocking
# - Extended Q-Grams Blocking
# - Suffix Arrays Blocking
# - Extended Suffix Arrays Blocking

# In[4]:

from pyjedai.block_building import (
    StandardBlocking,
    QGramsBlocking,
    SuffixArraysBlocking,
    ExtendedSuffixArraysBlocking,
    ExtendedQGramsBlocking
)

# In[5]:

bb = SuffixArraysBlocking(suffix_length=2)
blocks = bb.build_blocks(data)

# In[6]:

_ = bb.evaluate(blocks)
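# The block-building method is interchangeable. As a minimal sketch (not part of the original notebook), the classes imported above can be swapped in and their evaluation reports compared; default constructor arguments are assumed for the method that is not configured in this notebook.

# In[ ]:

# Hypothetical comparison cell: build blocks with alternative methods and evaluate each.
for builder in [StandardBlocking(), SuffixArraysBlocking(suffix_length=2)]:
    print(builder.__class__.__name__)
    candidate_blocks = builder.build_blocks(data)
    _ = builder.evaluate(candidate_blocks)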
# ## Block Purging
# 
# __Optional step__
# 
# Discards the blocks exceeding a certain number of comparisons.

# In[7]:

from pyjedai.block_cleaning import BlockPurging

# In[8]:

bp = BlockPurging()
cleaned_blocks = bp.process(blocks, data, tqdm_disable=False)

# In[9]:

bp.report()

# In[10]:

_ = bp.evaluate(cleaned_blocks)

# ## Block Cleaning
# 
# ___Optional step___
# 
# Its goal is to clean a set of overlapping blocks from unnecessary comparisons, which can be either redundant (i.e., repeated comparisons that have already been executed in a previously examined block) or superfluous (i.e., comparisons that involve non-matching entities). Its methods operate on the coarse level of individual blocks or entities.

# In[11]:

from pyjedai.block_cleaning import BlockFiltering

# In[12]:

bc = BlockFiltering(ratio=0.9)
blocks = bc.process(blocks, data)

# In[13]:

_ = bc.evaluate(blocks)

# ## Comparison Cleaning - Meta Blocking
# 
# ___Optional step___
# 
# Similar to Block Cleaning, this step aims to clean a set of blocks from both redundant and superfluous comparisons. Unlike Block Cleaning, its methods operate on the finer granularity of individual comparisons.
# 
# The following methods are currently supported:
# 
# - Comparison Propagation
# - Cardinality Edge Pruning (CEP)
# - Cardinality Node Pruning (CNP)
# - Weighted Edge Pruning (WEP)
# - Weighted Node Pruning (WNP)
# - Reciprocal Cardinality Node Pruning (ReCNP)
# - Reciprocal Weighted Node Pruning (ReWNP)
# - BLAST
# 
# Most of these methods are Meta-blocking techniques. All methods are optional, but competitive, in the sense that only one of them can be part of an ER workflow. For more details on the functionality of these methods, see here.
# They can be combined with one of the following weighting schemes:
# 
# - Aggregate Reciprocal Comparisons Scheme (ARCS)
# - Common Blocks Scheme (CBS)
# - Enhanced Common Blocks Scheme (ECBS)
# - Jaccard Scheme (JS)
# - Enhanced Jaccard Scheme (EJS)

# In[14]:

from pyjedai.comparison_cleaning import (
    WeightedEdgePruning,
    WeightedNodePruning,
    CardinalityEdgePruning,
    CardinalityNodePruning,
    BLAST,
    ReciprocalCardinalityNodePruning,
    ComparisonPropagation
)

# In[15]:

mb = WeightedEdgePruning(weighting_scheme='CBS')
blocks = mb.process(blocks, data)

# In[16]:

_ = mb.evaluate(blocks)

# ## Entity Matching
# 
# It compares pairs of entity profiles, associating every pair with a similarity in [0,1]. Its output comprises the similarity graph, i.e., an undirected, weighted graph where the nodes correspond to entities and the edges connect pairs of compared entities.

# In[17]:

from pyjedai.matching import EntityMatching

# In[18]:

em = EntityMatching(
    metric='jaccard',
    similarity_threshold=0.0
)

pairs_graph = em.predict(blocks, data)

# In[19]:

draw(pairs_graph)

# In[20]:

_ = em.evaluate(pairs_graph)

# ### Experimenting with the attributes selected in the Matching step
# 
# Given a `list` of attributes (a subset of the initial ones), the user can experiment with which attributes are used in the matching step.

# In[21]:

em = EntityMatching(
    metric='jaccard',
    similarity_threshold=0.0,
    attributes=['author']
)

authors_pairs_graph = em.predict(blocks, data)
_ = em.evaluate(authors_pairs_graph)

# Giving weights as a `dict`, a weight factor is added to each attribute.

# In[22]:

weights = {
    'author': 0.2,
    'title': 0.8
}

em = EntityMatching(
    metric='jaccard',
    similarity_threshold=0.0,
    attributes=weights
)

weights_pairs_graph = em.predict(blocks, data)
_ = em.evaluate(weights_pairs_graph)

# ### How to set a valid similarity threshold?
# 
# Configure the similarity threshold with a Grid Search or with an Optuna search. pyJedAI also provides some visualizations of the distributions of the scores.
# 
# For example, with a classic histogram:

# In[23]:

em.plot_distribution_of_all_weights()

# Or grouped into bins of width 0.1 from 0.0 to 1.0:

# In[24]:

em.plot_distribution_of_scores()
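# A minimal grid-search sketch (not from the original notebook): re-run the matching step for a few candidate thresholds, using only the EntityMatching calls shown above, and compare the printed evaluation reports. The candidate values below are arbitrary.

# In[ ]:

# Hypothetical grid-search cell over the similarity threshold.
for t in [0.0, 0.2, 0.4, 0.6]:
    em_t = EntityMatching(metric='jaccard', similarity_threshold=t)
    graph_t = em_t.predict(blocks, data)
    print(f"similarity_threshold = {t}")
    _ = em_t.evaluate(graph_t)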
# ## Entity Clustering
# 
# It takes as input the similarity graph produced by Entity Matching and partitions it into a set of equivalence clusters, with every cluster corresponding to a distinct real-world object.

# In[25]:

from pyjedai.clustering import ConnectedComponentsClustering

# In[26]:

ec = ConnectedComponentsClustering()
clusters = ec.process(pairs_graph, data, similarity_threshold=0.3)

# In[27]:

_ = ec.evaluate(clusters)

# # Workflow with Similarity Joins
# 
# In this notebook we created the below architecture:
# 
# ![workflow2-cora.png](https://github.com/AI-team-UoA/pyJedAI/blob/main/documentation/workflow2-cora.png?raw=true)

# ## Data Reading
# 
# Data is the connecting module of all steps of the workflow.

# In[28]:

from pyjedai.datamodel import Data

d1 = pd.read_csv("./../data/der/cora/cora.csv", sep='|')
gt = pd.read_csv("./../data/der/cora/cora_gt.csv", sep='|', header=None)
attr = ['Entity Id', 'author', 'title']

data = Data(
    dataset_1=d1,
    id_column_name_1='Entity Id',
    ground_truth=gt,
    attributes_1=attr
)

# ## Similarity Joins

# In[29]:

from pyjedai.joins import EJoin, TopKJoin

# In[30]:

join = EJoin(similarity_threshold=0.5,
             metric='jaccard',
             tokenization='qgrams_multiset',
             qgrams=2)

g = join.fit(data)

# In[31]:

_ = join.evaluate(g)

# In[32]:

topk_join = TopKJoin(K=20,
                     metric='jaccard',
                     tokenization='qgrams',
                     qgrams=3)

g = topk_join.fit(data)

# In[33]:

draw(g)

# In[34]:

topk_join.evaluate(g)

# ## Entity Clustering

# In[35]:

from pyjedai.clustering import ConnectedComponentsClustering

# In[36]:

ccc = ConnectedComponentsClustering()
clusters = ccc.process(g, data)

# In[37]:

_ = ccc.evaluate(clusters)
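# As a final experiment (not part of the original notebook), the K parameter of TopKJoin can be swept over a few values, reusing exactly the call shown above, to see how the evaluation changes before settling on a value such as K=20. The candidate values are arbitrary.

# In[ ]:

# Hypothetical sweep over K for the top-K similarity join.
for k in [5, 10, 20, 50]:
    join_k = TopKJoin(K=k, metric='jaccard', tokenization='qgrams', qgrams=3)
    graph_k = join_k.fit(data)
    print(f"K = {k}")
    _ = join_k.evaluate(graph_k)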
# K. Nikoletos, J. Maciejewski, G. Papadakis & M. Koubarakis
# 
# Apache License 2.0