import numpy as np
import pandas as pd
import keras
from keras import Model
from keras.regularizers import l2
from keras.optimizers import (
    Adam,
    Adamax,
    Adagrad,
    SGD,
    RMSprop
)
from keras.layers import (
    Embedding,
    Input,
    Flatten,
    Multiply,
    Concatenate,
    Dense
)
import sys
sys.path.append('../')
from cf_ec2 import (
    GMF,
    MLP,
    NCF,
    Data,
    evaluation,
    evaluation_grouped
)
Using TensorFlow backend.
train = pd.read_csv('../data/ml-1m.train.rating', sep='\t', header=None,
                    names=['user','item','rating','event_ts'])
test = pd.read_csv('../data/ml-1m.test.rating', sep='\t', header=None,
                   names=['user','item','rating','event_ts'])
test.user.nunique(), test.shape
(6040, (6040, 4))
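The test split is leave-one-out: 6,040 distinct users across 6,040 rows, i.e. exactly one held-out rating per user. A quick assertion to that effect (added here; not part of the original run):

assert test.groupby('user').size().eq(1).all()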
dataset = Data(
    train=train,
    test=test,
    col_user='user',
    col_item='item',
    col_rating='rating',
    col_time='event_ts',
    binary=True,       # binarize explicit ratings into implicit 0/1 feedback
    n_neg=4,           # negatives sampled per positive for training
    n_neg_test=100     # negatives sampled per user for the test set
)
dataset.prepTrainDNN(negSample=True)
dataset.prepTestDNN(group=False)
dataset.interaction_train.head(3)
| | user | item_interacted | item_negative |
|---|---|---|---|
| 0 | 0 | {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... | {52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 6... |
| 1 | 1 | {15, 22, 31, 34, 35, 42, 43, 52, 53, 54, 55, 5... | {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... |
| 2 | 2 | {2, 135, 136, 14, 18, 147, 159, 163, 36, 40, 1... | {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15... |
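Each row pairs the items a user interacted with against items they never touched. With n_neg=4, every positive is paired with four sampled negatives, which is exactly where the 4,970,845 training samples in the log below come from (994,169 train ratings x 5). A minimal sketch of the sampling idea, with sample_negatives as a hypothetical helper rather than cf_ec2's actual code:

def sample_negatives(items_interacted, all_items, n_neg=4):
    # candidates = items this user never interacted with
    candidates = list(all_items - items_interacted)
    # draw n_neg distinct negatives for each positive interaction
    return {
        item: np.random.choice(candidates, size=n_neg, replace=False)
        for item in items_interacted
    }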
# Items that occur only in the test split were never seen in training, so
# drop their rows from the three index-aligned test lists.
newItems = set(dataset.items_test) - set(dataset.items)
idx2del = set()
for idx, item in enumerate(dataset.items_test):
    if item in newItems:
        idx2del.add(idx)
length_test_original = len(dataset.users_test)
dataset.users_test = [
    dataset.users_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
dataset.items_test = [
    dataset.items_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
dataset.ratings_test = [
    dataset.ratings_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
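This filtering is needed because the embedding tables below are sized to the 3,704 items seen in training; a test-only item would hit an out-of-range or untrained embedding row. Since the three test lists are index-aligned, the same positions are removed from each.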
n_users = 6040
n_items = 3704
n_factors_gmf = 32
layers_mlp = [64, 32, 16, 8]        # MLP tower sizes (used by MLP/NCF, not by the GMF run below)
reg_gmf = 0.
reg_layers_mlp = [0., 0., 0., 0.]
learning_rate = 0.01
flg_pretrain = ''
filepath = ''
filepath_gmf_pretrain = ''
filepath_mlp_pretrain = ''
num_epochs = 20
batch_size = 100
gmf = GMF(
    n_users=n_users,
    n_items=n_items,
    n_factors_gmf=n_factors_gmf
)
gmf.create_model()
gmf.compile(learning_rate=learning_rate)   # learning_rate = 0.01, as defined above
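For reference, the GMF tower from He et al.'s NCF paper is just two embedding lookups joined by an element-wise product and squashed through a single sigmoid unit. A minimal Keras sketch of what create_model presumably builds, reusing the imports and hyperparameters above (an assumption about cf_ec2's internals, not its actual code):

# hypothetical re-implementation of the GMF graph, for illustration only
user_input = Input(shape=(1,), dtype='int32', name='user_input')
item_input = Input(shape=(1,), dtype='int32', name='item_input')

# one k-dimensional latent vector per user / item, flattened from (batch, 1, k) to (batch, k)
user_latent = Flatten()(Embedding(n_users, n_factors_gmf,
                                  embeddings_regularizer=l2(reg_gmf))(user_input))
item_latent = Flatten()(Embedding(n_items, n_factors_gmf,
                                  embeddings_regularizer=l2(reg_gmf))(item_input))

# element-wise product of the two latent vectors -> single sigmoid score
pred = Dense(1, activation='sigmoid', name='prediction')(Multiply()([user_latent, item_latent]))

gmf_sketch = Model(inputs=[user_input, item_input], outputs=pred)
gmf_sketch.compile(optimizer=Adam(lr=learning_rate),
                   loss='binary_crossentropy', metrics=['accuracy'])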
hist = gmf.fit(
    dataset=dataset,
    batch_size=batch_size,
    num_epochs=num_epochs,
    path_model_weights='/Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-{epoch:02d}-{val_loss:.4f}.hdf5',
    path_csvlog='/Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf_log.csv'
)
/Users/xyin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/framework/indexed_slices.py:433: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory. "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
Train on 4970845 samples, validate on 610038 samples
Epoch 1/20 - 140s - loss: 0.3344 - accuracy: 0.8515 - val_loss: 0.1743 - val_accuracy: 0.9315
Epoch 00001: val_loss improved from inf to 0.17435, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-01-0.1743.hdf5
Epoch 2/20 - 134s - loss: 0.3026 - accuracy: 0.8685 - val_loss: 0.1670 - val_accuracy: 0.9302
Epoch 00002: val_loss improved from 0.17435 to 0.16697, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-02-0.1670.hdf5
Epoch 3/20 - 139s - loss: 0.3021 - accuracy: 0.8699 - val_loss: 0.1932 - val_accuracy: 0.9173
Epoch 00003: val_loss did not improve from 0.16697
Epoch 4/20 - 133s - loss: 0.3051 - accuracy: 0.8699 - val_loss: 0.1588 - val_accuracy: 0.9335
Epoch 00004: val_loss improved from 0.16697 to 0.15884, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-04-0.1588.hdf5
Epoch 5/20 - 131s - loss: 0.3075 - accuracy: 0.8702 - val_loss: 0.1795 - val_accuracy: 0.9208
Epoch 00005: val_loss did not improve from 0.15884
Epoch 6/20 - 132s - loss: 0.3101 - accuracy: 0.8698 - val_loss: 0.1624 - val_accuracy: 0.9293
Epoch 00006: val_loss did not improve from 0.15884
Epoch 7/20 - 127s - loss: 0.3130 - accuracy: 0.8695 - val_loss: 0.2141 - val_accuracy: 0.9046
Epoch 00007: val_loss did not improve from 0.15884
Epoch 8/20 - 126s - loss: 0.3158 - accuracy: 0.8690 - val_loss: 0.2003 - val_accuracy: 0.9094
Epoch 00008: val_loss did not improve from 0.15884
Epoch 00008: ReduceLROnPlateau reducing learning rate to 0.0029999999329447745.
Epoch 9/20 - 135s - loss: 0.2802 - accuracy: 0.8809 - val_loss: 0.1479 - val_accuracy: 0.9366
Epoch 00009: val_loss improved from 0.15884 to 0.14790, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-09-0.1479.hdf5
Epoch 10/20 - 128s - loss: 0.2759 - accuracy: 0.8829 - val_loss: 0.1420 - val_accuracy: 0.9403
Epoch 00010: val_loss improved from 0.14790 to 0.14199, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-10-0.1420.hdf5
Epoch 11/20 - 126s - loss: 0.2711 - accuracy: 0.8854 - val_loss: 0.1357 - val_accuracy: 0.9434
Epoch 00011: val_loss improved from 0.14199 to 0.13571, saving model to /Users/xyin/Documents/work/projects/rec_utils/metadata/gmf/gmf-weights-improvement-11-0.1357.hdf5
Epoch 12/20 - 137s - loss: 0.2657 - accuracy: 0.8884 - val_loss: 0.1365 - val_accuracy: 0.9438
Epoch 00012: val_loss did not improve from 0.13571
Epoch 13/20 - 126s - loss: 0.2604 - accuracy: 0.8912 - val_loss: 0.1747 - val_accuracy: 0.9252
Epoch 00013: val_loss did not improve from 0.13571
Epoch 14/20 - 124s - loss: 0.2561 - accuracy: 0.8932 - val_loss: 0.1606 - val_accuracy: 0.9323
Epoch 00014: val_loss did not improve from 0.13571
Epoch 15/20 - 133s - loss: 0.2533 - accuracy: 0.8949 - val_loss: 0.1698 - val_accuracy: 0.9282
Epoch 00015: val_loss did not improve from 0.13571
Epoch 00015: ReduceLROnPlateau reducing learning rate to 0.0009000000078231095.
Epoch 16/20 - 130s - loss: 0.2304 - accuracy: 0.9041 - val_loss: 0.1834 - val_accuracy: 0.9216
Epoch 00016: val_loss did not improve from 0.13571
Epoch 17/20 - 128s - loss: 0.2288 - accuracy: 0.9049 - val_loss: 0.1515 - val_accuracy: 0.9374
Epoch 00017: val_loss did not improve from 0.13571
Epoch 18/20 - 127s - loss: 0.2277 - accuracy: 0.9056 - val_loss: 0.1380 - val_accuracy: 0.9438
Epoch 00018: val_loss did not improve from 0.13571
Epoch 19/20 - 128s - loss: 0.2268 - accuracy: 0.9060 - val_loss: 0.2042 - val_accuracy: 0.9120
Epoch 00019: val_loss did not improve from 0.13571
Epoch 00019: ReduceLROnPlateau reducing learning rate to 0.00026999999536201356.
Epoch 20/20 - 132s - loss: 0.2190 - accuracy: 0.9094 - val_loss: 0.1693 - val_accuracy: 0.9287
Epoch 00020: val_loss did not improve from 0.13571
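The checkpoint messages, the CSV log path, and the stepwise LR drops (0.01 to 0.003 to 0.0009 to 0.00027, i.e. a factor of 0.3) suggest fit assembles a callback stack roughly like the one below. This is inferred from the log, not read from cf_ec2, and the patience value in particular is a guess:

from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau

# path_model_weights / path_csvlog stand for the two paths passed to gmf.fit above
callbacks = [
    ModelCheckpoint(path_model_weights, monitor='val_loss', verbose=1,
                    save_best_only=True, save_weights_only=True),
    CSVLogger(path_csvlog),
    ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=4, verbose=1)
]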
import importlib
importlib.reload(evaluation_grouped)
<module 'cf_ec2.evaluation_grouped' from '../cf_ec2/evaluation_grouped.py'>
evaluator = evaluation_grouped.metricsEval(
    model=gmf.model,
    users=dataset.users,
    items=dataset.items
)
evaluator.getRecs()
100%|██████████| 6040/6040 [02:11<00:00, 46.00it/s]
evaluator.all_predictions.head(3)
| | userID | itemID | prediction |
|---|---|---|---|
| 0 | 0 | 0 | 0.956932 |
| 1 | 0 | 1 | 0.991923 |
| 2 | 0 | 2 | 0.988913 |
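getRecs appears to score every (user, item) pair, about 6,040 x 3,704 ≈ 22.4 million predictions, which explains the roughly two-minute runtime above.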
rmse, auc, logloss = evaluator.getOverlapBasedMetrics(
    dataset.users_test,
    dataset.items_test,
    dataset.ratings_test
)
rmse, auc, logloss
(0.22678508081596974, 0.8910193560266216, 0.16930384419089145)
The log loss (0.1693) equals the final epoch's val_loss, which shows the in-memory model is still in its last-epoch state, not at the best checkpoint.
The same numbers can be confirmed directly with model.evaluate:
scores = gmf.model.evaluate(
    x=[
        np.array(dataset.users_test),
        np.array(dataset.items_test)
    ],
    y=np.array(dataset.ratings_test),
    verbose=0
)
scores
[0.16930384472775314, 0.9286995530128479]
gmf.model.metrics_names
['loss', 'accuracy']
# restore the best checkpoint (epoch 11, val_loss 0.1357)
gmf.model.load_weights('../metadata/gmf/gmf-weights-improvement-11-0.1357.hdf5')
scores = gmf.model.evaluate(
    x=[
        np.array(dataset.users_test),
        np.array(dataset.items_test)
    ],
    y=np.array(dataset.ratings_test),
    verbose=0
)
scores
[0.13571090596280566, 0.9433576464653015]
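These are exactly epoch 11's val_loss and val_accuracy, so the best checkpoint is now the model in memory.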
evaluator = evaluation_grouped.metricsEval(
    model=gmf.model,
    users=dataset.users,
    items=dataset.items
)
evaluator.getRecs()
rmse, auc, logloss = evaluator.getOverlapBasedMetrics(
    dataset.users_test,
    dataset.items_test,
    dataset.ratings_test
)
rmse, auc, logloss
100%|██████████| 6040/6040 [02:15<00:00, 44.54it/s]
(0.20208704972160227, 0.8827614048663103, 0.13571090694790497)
gmf2 = GMF(
    n_users=n_users,
    n_items=n_items,
    n_factors_gmf=n_factors_gmf
)
gmf2.create_model(path_pretrain='../metadata/gmf/gmf-weights-improvement-11-0.1357.hdf5')
gmf2.compile(learning_rate=learning_rate)
scores = gmf2.model.evaluate(
    x=[
        np.array(dataset.users_test),
        np.array(dataset.items_test)
    ],
    y=np.array(dataset.ratings_test),
    verbose=0
)
scores
[0.13571090596280566, 0.9433576464653015]
evaluator = evaluation_grouped.metricsEval(
    model=gmf2.model,
    users=dataset.users,
    items=dataset.items
)
evaluator.getRecs()
rmse, auc, logloss = evaluator.getOverlapBasedMetrics(
    dataset.users_test,
    dataset.items_test,
    dataset.ratings_test
)
rmse, auc, logloss
100%|██████████| 6040/6040 [02:18<00:00, 43.61it/s]
(0.20208704972160227, 0.8827614048663103, 0.13571090694790497)
Confirmed: loading the best weights into the existing model and building a fresh GMF with path_pretrain give identical metrics.
# alternatively, persist the whole model (architecture + weights + optimizer state) in one file
gmf.model.save('../metadata/gmf/gmf-best.hdf5')
model3 = keras.models.load_model('../metadata/gmf/gmf-best.hdf5')
evaluator = evaluation_grouped.metricsEval(
    model=model3,
    users=dataset.users,
    items=dataset.items
)
evaluator.getRecs()
rmse, auc, logloss = evaluator.getOverlapBasedMetrics(
    dataset.users_test,
    dataset.items_test,
    dataset.ratings_test
)
rmse, auc, logloss
100%|██████████| 6040/6040 [02:17<00:00, 44.01it/s]
(0.20208704972160227, 0.8827614048663103, 0.13571090694790497)
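Unlike load_weights, keras.models.load_model reconstructs the architecture, weights, and optimizer state from the single HDF5 file, so model3 can be used (or trained further) without the GMF wrapper class, and it reproduces the checkpoint's metrics exactly.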