import functools
from functools import partial
import itertools
import logging
import math
import os
import pickle
import sys
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pingouin as pg
import seaborn as sns
import yaml
# --- IPython notebook setup: auto-reload edited modules, inline figures ---
%load_ext autoreload
%autoreload 2
%matplotlib inline
# seaborn styling
# NOTE(review): sns.set(...) may reset the "poster" context set on the line
# above back to defaults — verify the intended ordering of these three calls
sns.set_context("poster")
sns.set(rc={"figure.figsize": (16, 12.0)})
sns.set_style("whitegrid")
import numpy as np
import pandas as pd
from scipy.stats import kendalltau, rankdata, spearmanr, pearsonr, ttest_rel
import torch.nn.functional as F
# show wide/long dataframes without truncation in notebook output
pd.set_option("display.max_rows", 120)
pd.set_option("display.max_columns", 120)
# route INFO-level logging to stdout so it appears in the notebook cells
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
from lda4rec.datasets import Interactions, DataLoader, random_train_test_split, MOVIELENS_1M,MetaData, get_dataset, items_per_user_train_test_split
import lda4rec.evaluations as lda_eval
from lda4rec.estimators import MFEst, PopEst, LDA4RecEst, SNMFEst
from lda4rec.utils import process_ids, cmp_ranks, Config, split_along_dim_apply, plot_cat
from lda4rec import lda
import pyro
import pyro.distributions as dist
import pyro.optim as optim
import torch
from pyro.distributions import constraints
from pyro.infer import SVI, Predictive, Trace_ELBO, TraceEnum_ELBO, config_enumerate
import neptune.new as neptune
# init dummy neptune to avoid problems with logging
# (offline mode: run metadata is written locally, nothing is sent to Neptune)
neptune.init(mode='offline')
WARNING:neptune.internal.hardware.gpu.gpu_monitor:Info (NVML): NVML Shared Library Not Found. GPU usage metrics may not be reported. For more information, see https://docs-legacy.neptune.ai/logging-and-managing-experiment-results/logging-experiment-data.html#hardware-consumption offline/817d0465-9ab1-4c54-9245-7b263d3083e3 Remember to stop your run once you’ve finished logging your metadata (https://docs.neptune.ai/api-reference/run#stop). It will be stopped automatically only when the notebook kernel/interactive console is terminated.
<neptune.new.run.Run at 0x7f9dcc3a4790>
from icecream import ic, install
# make ic() available as a builtin in every module without importing it
install()
# configure icecream
def ic_str(obj):
    """Stringify *obj* for icecream output.

    Objects exposing a ``shape`` attribute (numpy arrays, torch tensors, ...)
    are rendered with a trailing space; everything else falls back to ``str``.
    """
    if not hasattr(obj, "shape"):
        return str(obj)
    return f"{obj} "
# use the custom stringifier above for all ic() argument rendering
ic.configureOutput(argToStringFunction=ic_str)
# load one experiment config to inspect which dataset it targets
cfg = Config(Path('../configs/exp_2.yaml'))
cfg['experiment']['dataset']
'goodbooks'
# Per experiment config: load the trained model, take the LDA parameters and
# record the median of 1/t, then average per dataset.
# NOTE(review): est.get_lda_params() returns four arrays/tensors (v, t, h, b);
# only t is used here — confirm its semantics against lda4rec.estimators.
results = []
for cfg in lda_eval.get_cfgs_from_path(Path('../configs')):
    data, train, test, data_rng = lda_eval.get_train_test_data(cfg)
    est = lda_eval.load_model(cfg, train)
    v, t, h, b = est.get_lda_params()
    med = np.median((1/t).flatten())
    results.append({
        "dataset": cfg['experiment']['dataset'],
        "rnd": cfg['experiment']['model_seed'],
        "median": med,
    })
df = pd.DataFrame(results)
df.groupby('dataset').mean()
rnd | median | |
---|---|---|
dataset | ||
amazon | 18517721.4 | 0.010686 |
goodbooks | 18517721.4 | 0.003384 |
movielens-1m | 18517721.4 | 0.016350 |
results
[{'dataset': 'goodbooks', 'rnd': 1729, 'median': 0.003220169}, {'dataset': 'goodbooks', 'rnd': 234829, 'median': 0.0037773054}, {'dataset': 'movielens-1m', 'rnd': 234829, 'median': 0.016483523}, {'dataset': 'amazon', 'rnd': 1981, 'median': 0.011081425}, {'dataset': 'goodbooks', 'rnd': 666, 'median': 0.0033622752}, {'dataset': 'movielens-1m', 'rnd': 1981, 'median': 0.0152998455}, {'dataset': 'goodbooks', 'rnd': 1981, 'median': 0.0034285225}, {'dataset': 'amazon', 'rnd': 234829, 'median': 0.01030039}, {'dataset': 'amazon', 'rnd': 666, 'median': 0.010790214}, {'dataset': 'movielens-1m', 'rnd': 666, 'median': 0.017283749}, {'dataset': 'goodbooks', 'rnd': 92349402, 'median': 0.0031326886}, {'dataset': 'amazon', 'rnd': 1729, 'median': 0.010667719}, {'dataset': 'movielens-1m', 'rnd': 92349402, 'median': 0.016401894}, {'dataset': 'amazon', 'rnd': 92349402, 'median': 0.010588221}, {'dataset': 'movielens-1m', 'rnd': 1729, 'median': 0.01627946}]
def get_results(path):
    """Lazily yield unpickled experiment results found under *path*.

    Only files matching ``result_exp_*.pickle`` are considered.
    NOTE(review): pickle deserialization assumes these are trusted local
    artifacts — never point this at untrusted files.
    """
    for result_file in path.glob("result_exp_*.pickle"):
        yield pickle.loads(result_file.read_bytes())
# Load all pickled evaluation results from disk.
results = list(get_results(Path('../evaluations/')))
# Experiment 1: paired t-test statistics (p-value and Cohen's d) of cohort
# user-interaction similarity, on train and test splits, per run.
exp1_df = pd.DataFrame(
    [
        dict(
            dataset=res['experiment']['dataset'],
            dim=res['experiment']['est_params']['embedding_dim'],
            train_p=res['result']['ttest_cohort_user_interaction_train']['p-val'].loc['T-test'],
            train_d=res['result']['ttest_cohort_user_interaction_train']['cohen-d'].loc['T-test'],
            test_p=res['result']['ttest_cohort_user_interaction_test']['p-val'].loc['T-test'],
            test_d=res['result']['ttest_cohort_user_interaction_test']['cohen-d'].loc['T-test'],
        )
        for res in results
    ]
)
# Aggregate over random seeds. Use string aliases instead of np.mean/np.std:
# passing numpy callables to .agg is deprecated in pandas, and np.std would be
# silently remapped to pandas' sample std (ddof=1) anyway — "std" makes that
# explicit while producing identical columns ("mean", "std").
exp1_df = exp1_df.groupby(['dataset', 'dim']).agg(["mean", "std"])
#exp1_df.loc['goodbooks'] = exp1_df.loc['goodbooks'] / (2*calc_pairs(GB_DIM))
#exp1_df.loc['movielens-1m'] = exp1_df.loc['movielens-1m'] / (2*calc_pairs(ML_DIM))
exp1_df
train_p | train_d | test_p | test_d | ||||||
---|---|---|---|---|---|---|---|---|---|
mean | std | mean | std | mean | std | mean | std | ||
dataset | dim | ||||||||
amazon | 256 | 7.739793e-41 | 8.916809e-41 | 0.124953 | 0.004170 | 5.608193e-10 | 1.130118e-09 | 0.059283 | 0.003940 |
goodbooks | 256 | 3.321596e-161 | 7.425478e-161 | 0.178114 | 0.010057 | 1.010080e-11 | 2.245296e-11 | 0.046596 | 0.004882 |
movielens-1m | 64 | 6.663359e-213 | 0.000000e+00 | 0.608852 | 0.022047 | 2.440486e-15 | 5.454175e-15 | 0.162705 | 0.017907 |
# Experiment 2: correlation of item popularity with model scores
# (correlation coefficient at index 0, p-value at index 1 of each result).
exp2_df = pd.DataFrame(
    [
        dict(
            dataset=res['experiment']['dataset'],
            dim=res['experiment']['est_params']['embedding_dim'],
            corr_pop_train=res['result']['corr_popularity_train'][0],
            p_val_train=res['result']['corr_popularity_train'][1],
            corr_pop_data=res['result']['corr_popularity_data'][0],
            p_val_data=res['result']['corr_popularity_data'][1],
        )
        for res in results
    ]
)
# String aliases instead of np.mean/np.std: numpy callables in .agg are
# deprecated in pandas; "std" is pandas' sample std (ddof=1), matching the
# previous (remapped) behavior.
exp2_df = exp2_df.groupby(['dataset', 'dim']).agg(["mean", "std"])
exp2_df
corr_pop_train | p_val_train | corr_pop_data | p_val_data | ||||||
---|---|---|---|---|---|---|---|---|---|
mean | std | mean | std | mean | std | mean | std | ||
dataset | dim | ||||||||
amazon | 256 | 0.377344 | 0.003264 | 0.0 | 0.0 | 0.372699 | 0.003339 | 0.0 | 0.0 |
goodbooks | 256 | 0.264629 | 0.004193 | 0.0 | 0.0 | 0.267341 | 0.004364 | 0.0 | 0.0 |
movielens-1m | 64 | 0.519759 | 0.005958 | 0.0 | 0.0 | 0.520511 | 0.005910 | 0.0 | 0.0 |
# Experiment 3: correlations between the conformity parameter and popularity
# (train/data/all) plus the bias term b; each entry is (coefficient, p-value).
exp3_df = pd.DataFrame(
    [
        dict(
            dataset=res['experiment']['dataset'],
            dim=res['experiment']['est_params']['embedding_dim'],
            corr_conf_pop_train=res['result']['corr_conformity_pop_train'][0],
            p_val_conf_pop_train=res['result']['corr_conformity_pop_train'][1],
            corr_conf_pop_data=res['result']['corr_conformity_pop_data'][0],
            p_val_conf_pop_data=res['result']['corr_conformity_pop_data'][1],
            corr_conf_pop_data_all=res['result']['corr_conformity_pop_data_all'][0],
            p_val_conf_pop_data_all=res['result']['corr_conformity_pop_data_all'][1],
            corr_conf_b=res['result']['corr_conformity_b'][0],
            p_val_conf_b=res['result']['corr_conformity_b'][1],
        )
        for res in results
    ]
)
# String aliases instead of np.mean/np.std: numpy callables in .agg are
# deprecated in pandas; "std" is pandas' sample std (ddof=1), matching the
# previous (remapped) behavior.
exp3_df = exp3_df.groupby(['dataset', 'dim']).agg(["mean", "std"])
exp3_df
corr_conf_pop_train | p_val_conf_pop_train | corr_conf_pop_data | p_val_conf_pop_data | corr_conf_pop_data_all | p_val_conf_pop_data_all | corr_conf_b | p_val_conf_b | ||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | ||
dataset | dim | ||||||||||||||||
amazon | 256 | 0.081838 | 0.002615 | 2.916868e-73 | 6.522311e-73 | 0.091750 | 0.002602 | 1.663145e-92 | 3.718905e-92 | 0.091463 | 0.002512 | 1.850760e-92 | 4.138412e-92 | -0.004453 | 0.005992 | 1.554960e-01 | 1.111974e-01 |
goodbooks | 256 | -0.001633 | 0.001303 | 6.055143e-01 | 2.563741e-01 | -0.000374 | 0.001304 | 7.789111e-01 | 2.305083e-01 | -0.001573 | 0.001312 | 6.198387e-01 | 2.608295e-01 | -0.044962 | 0.002004 | 1.608840e-48 | 3.597471e-48 |
movielens-1m | 64 | 0.374866 | 0.008969 | 0.000000e+00 | 0.000000e+00 | 0.383584 | 0.009038 | 0.000000e+00 | 0.000000e+00 | 0.394357 | 0.009267 | 0.000000e+00 | 0.000000e+00 | 0.523747 | 0.009208 | 0.000000e+00 | 0.000000e+00 |
# Experiment 4: paired t-tests of user interactions for good-vs-bad and
# good-vs-random item groups, on train and test splits.
exp4_df = pd.DataFrame(
    [
        dict(
            dataset=res['experiment']['dataset'],
            dim=res['experiment']['est_params']['embedding_dim'],
            good_bad_train_p=res['result']['ttest_user_interaction_good_bad_train']['p-val'].loc['T-test'],
            good_bad_train_d=res['result']['ttest_user_interaction_good_bad_train']['cohen-d'].loc['T-test'],
            good_bad_test_p=res['result']['ttest_user_interaction_good_bad_test']['p-val'].loc['T-test'],
            good_bad_test_d=res['result']['ttest_user_interaction_good_bad_test']['cohen-d'].loc['T-test'],
            good_rnd_train_p=res['result']['ttest_user_interaction_good_rnd_train']['p-val'].loc['T-test'],
            good_rnd_train_d=res['result']['ttest_user_interaction_good_rnd_train']['cohen-d'].loc['T-test'],
            good_rnd_test_p=res['result']['ttest_user_interaction_good_rnd_test']['p-val'].loc['T-test'],
            good_rnd_test_d=res['result']['ttest_user_interaction_good_rnd_test']['cohen-d'].loc['T-test'],
        )
        for res in results
    ]
)
# String aliases instead of np.mean/np.std: numpy callables in .agg are
# deprecated in pandas; "std" is pandas' sample std (ddof=1), matching the
# previous (remapped) behavior.
exp4_df = exp4_df.groupby(['dataset', 'dim']).agg(["mean", "std"])
exp4_df
good_bad_train_p | good_bad_train_d | good_bad_test_p | good_bad_test_d | good_rnd_train_p | good_rnd_train_d | good_rnd_test_p | good_rnd_test_d | ||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | mean | std | ||
dataset | dim | ||||||||||||||||
amazon | 256 | 3.332443e-312 | 0.0 | 1.478724 | 0.027948 | 7.705007e-130 | 1.722892e-129 | 0.862463 | 0.029791 | 1.306924e-299 | 0.0 | 1.435556 | 0.027294 | 1.353733e-124 | 3.027038e-124 | 0.842361 | 0.028882 |
goodbooks | 256 | 0.000000e+00 | 0.0 | 2.884263 | 0.058301 | 2.748834e-68 | 6.146576e-68 | 0.618658 | 0.036337 | 0.000000e+00 | 0.0 | 2.118543 | 0.041310 | 6.762792e-38 | 1.372516e-37 | 0.434077 | 0.022393 |
movielens-1m | 64 | 0.000000e+00 | 0.0 | 1.881296 | 0.030967 | 6.777133e-71 | 1.370053e-70 | 0.607651 | 0.021897 | 2.630566e-255 | 0.0 | 1.172604 | 0.012945 | 4.412468e-43 | 7.926921e-43 | 0.449772 | 0.016349 |