import numpy as np
import pandas as pd
import keras
from keras import Model
from keras.regularizers import l2
from keras.optimizers import (
    Adam,
    Adamax,
    Adagrad,
    SGD,
    RMSprop
)
from keras.layers import (
    Embedding,
    Input,
    Flatten,
    Multiply,
    Concatenate,
    Dense
)
import sys
sys.path.append('../')
from cf_ec2 import (
    GMF,
    MLP,
    NCF,
    Data,
    evaluation
)
Using TensorFlow backend.
train = pd.read_csv('../data/ml-1m.train.rating', sep='\t', header=None,
                    names=['user', 'item', 'rating', 'event_ts'])
test = pd.read_csv('../data/ml-1m.test.rating', sep='\t', header=None,
                   names=['user', 'item', 'rating', 'event_ts'])
train.head(3)
|   | user | item | rating | event_ts |
|---|------|------|--------|----------|
| 0 | 0 | 32 | 4 | 978824330 |
| 1 | 0 | 34 | 4 | 978824330 |
| 2 | 0 | 4 | 5 | 978824291 |
test.head(3)
|   | user | item | rating | event_ts |
|---|------|------|--------|----------|
| 0 | 0 | 25 | 5 | 978824351 |
| 1 | 1 | 133 | 3 | 978300174 |
| 2 | 2 | 207 | 4 | 978298504 |
test.user.nunique(), test.shape
(6040, (6040, 4))
dataset = Data(
    train=train,
    test=test,
    col_user='user',
    col_item='item',
    col_rating='rating',
    col_time='event_ts',
    binary=True,
    n_neg=4,
    n_neg_test=100
)
dataset.prepTrainDNN(negSample=True)  # 4 sampled negatives per positive: 994,169 x 5 = 4,970,845 training rows
dataset.prepTestDNN(group=True)       # 1 held-out positive + 100 sampled negatives per user: 6,040 x 101 = 610,040 test rows
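Under the hood, `prepTrainDNN(negSample=True)` has to pair every observed interaction with a few items the user never touched. A minimal sketch of that idea (an illustration, not the actual `cf_ec2` implementation; the column names match the `interaction_train` frame inspected further down):

import random

def sample_training_rows(interaction_df, n_neg=4):
    # one label-1 row per observed item, plus n_neg label-0 rows
    # drawn from the user's pool of never-interacted items
    users, items, labels = [], [], []
    for _, row in interaction_df.iterrows():
        negatives = list(row['item_negative'])
        for pos_item in row['item_interacted']:
            users.append(row['user']); items.append(pos_item); labels.append(1.0)
            for neg_item in random.sample(negatives, n_neg):
                users.append(row['user']); items.append(neg_item); labels.append(0.0)
    return users, items, labels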
Pickling offers a quick way to save the prepared `dataset` object to disk so the preprocessing does not have to be re-run:
import pickle

## pickle data
with open('../metadata/datasetGmf', 'wb') as fp:
    pickle.dump(dataset, fp)

## pickle data with compression (smaller file, slower I/O)
import bz2
with bz2.BZ2File('../metadata/datasetGmfSmaller', 'w') as fp:
    pickle.dump(dataset, fp)

## unpickle data
with open('../metadata/datasetGmf', 'rb') as fp:
    dataset2 = pickle.load(fp)

## unpickle the compressed version
with bz2.BZ2File('../metadata/datasetGmfSmaller', 'r') as fp:
    dataset2 = pickle.load(fp)
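To see what the compression buys, the two files can be compared on disk:

import os

for path in ['../metadata/datasetGmf', '../metadata/datasetGmfSmaller']:
    print(path, round(os.path.getsize(path) / 1e6, 1), 'MB')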
len(dataset.users),train.shape
(4970845, (994169, 6))
len(dataset.users_test),test.shape
(610040, (6040, 6))
train.user.nunique(), test.user.nunique()
(6040, 6040)
train.item.nunique(), test.item.nunique()
(3704, 1921)
dataset.interaction_train.head(3)
|   | user | item_interacted | item_negative |
|---|------|-----------------|---------------|
| 0 | 0 | {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... | {52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 6... |
| 1 | 1 | {15, 22, 31, 34, 35, 42, 43, 52, 53, 54, 55, 5... | {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,... |
| 2 | 2 | {2, 135, 136, 14, 18, 147, 159, 163, 36, 40, 1... | {0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15... |
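A quick sanity check that the two item pools are disjoint for every user, using the column names shown above:

overlap = dataset.interaction_train.apply(
    lambda row: len(row['item_interacted'] & row['item_negative']), axis=1
)
assert (overlap == 0).all(), 'positive and negative item pools overlap'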
## drop test rows whose item never appears in training: those item embeddings
## were never trained, so scoring them would be meaningless
newItems = set(dataset.items_test) - set(dataset.items)
idx2del = set()  # a set makes the membership tests below O(1)
for idx, item in enumerate(dataset.items_test):
    if item in newItems:
        idx2del.add(idx)
length_test_original = len(dataset.users_test)
dataset.users_test = [
    dataset.users_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
dataset.items_test = [
    dataset.items_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
dataset.ratings_test = [
    dataset.ratings_test[idx]
    for idx in range(length_test_original) if idx not in idx2del
]
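The same filter can be written more compactly with a boolean mask; like the loop above, this sketch relies on the three `_test` lists staying index-aligned:

trained_items = set(dataset.items)
keep = [item in trained_items for item in dataset.items_test]
dataset.users_test, dataset.items_test, dataset.ratings_test = (
    [v for v, k in zip(seq, keep) if k]
    for seq in (dataset.users_test, dataset.items_test, dataset.ratings_test)
)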
n_users = 6040                  # distinct users in MovieLens-1M
n_items = 3704                  # distinct items seen in training
n_factors_gmf = 32              # GMF embedding dimension
layers_mlp = [64, 32, 16, 8]    # MLP tower sizes (unused in this pure-GMF run)
reg_gmf = 0.
reg_layers_mlp = [0., 0., 0., 0.]
learning_rate = 0.01
flg_pretrain = ''               # pretraining flags/paths, unused here
filepath = ''
filepath_gmf_pretrain = ''
filepath_mlp_pretrain = ''
num_epochs = 20
batch_size = 100
gmf = GMF(
    n_users=n_users,
    n_items=n_items,
    n_factors_gmf=n_factors_gmf
)
model = gmf.create_model()
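`create_model()` builds the classic GMF tower: user and item embeddings combined with an element-wise product and fed to a single sigmoid unit. A minimal re-implementation sketch (the actual `GMF` class may also wire in `l2(reg_gmf)` regularizers; the layer names follow the `model.summary()` output at the end of this section):

def create_gmf_model(n_users, n_items, n_factors):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    user_vec = Flatten(name='flatten_gmf_User')(
        Embedding(n_users, n_factors, name='embedding_gmf_User')(user_input))
    item_vec = Flatten(name='flatten_gmf_Item')(
        Embedding(n_items, n_factors, name='embedding_gmf_Item')(item_input))
    dot = Multiply(name='multiply_gmf_UserItem')([user_vec, item_vec])
    pred = Dense(1, activation='sigmoid', name='output')(dot)
    return Model(inputs=[user_input, item_input], outputs=pred)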
#### compile the model
model.compile(
    optimizer=Adam(lr=learning_rate),
    loss='binary_crossentropy',
    metrics=['accuracy']
)
#### create the callback metrics
filepath = "../metadata/gmf/gmf-weights-improvement-{epoch:02d}-{val_loss:.4f}.hdf5"
checkpoint = keras.callbacks.ModelCheckpoint(
    filepath=filepath,
    verbose=1,
    save_best_only=True
)
csvlog = keras.callbacks.CSVLogger(
    '../metadata/gmf/gmf_log.csv',
    separator=',',
    append=False
)
earlystop = keras.callbacks.EarlyStopping(patience=12)
lrreduce = keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss",
    factor=0.3,
    patience=4,
    verbose=1
)
class newMetrics(keras.callbacks.Callback):
    """Append the validation AUC to the epoch logs so CSVLogger picks it up."""
    def on_epoch_end(self, epoch, logs=None):
        # validation_data holds [user_array, item_array, labels, sample_weights]
        X_val = [self.validation_data[0], self.validation_data[1]]
        y_val = self.validation_data[2]
        y_predict = self.model.predict(x=X_val)
        logs['val_auc'] = evaluation.auc(y_val, y_predict)
metrics2 = newMetrics()
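`evaluation.auc` comes from the local `cf_ec2` helpers; a plausible stand-in (an assumption about its behavior, not the package's actual code) is a thin wrapper over scikit-learn:

from sklearn.metrics import roc_auc_score

def auc(y_true, y_pred):
    # ROC-AUC over the flattened prediction scores
    return roc_auc_score(y_true, np.ravel(y_pred))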
#### train
hist = model.fit(
    x=[
        np.array(dataset.users),
        np.array(dataset.items)
    ],
    y=np.array(dataset.ratings),
    batch_size=batch_size,
    epochs=num_epochs,
    verbose=2,
    shuffle=True,
    callbacks=[metrics2, checkpoint, csvlog, earlystop, lrreduce],
    # the held-out test split doubles as the validation set here
    validation_data=(
        [
            np.array(dataset.users_test),
            np.array(dataset.items_test)
        ],
        np.array(dataset.ratings_test)
    )
)
Train on 4970845 samples, validate on 610038 samples
Epoch 1/20
 - 141s - loss: 0.3309 - accuracy: 0.8533 - val_loss: 0.1642 - val_accuracy: 0.9342
Epoch 00001: val_loss improved from inf to 0.16419, saving model to ../metadata/gmf/gmf-weights-improvement-01-0.1642.hdf5
Epoch 2/20
 - 127s - loss: 0.2979 - accuracy: 0.8706 - val_loss: 0.1629 - val_accuracy: 0.9331
Epoch 00002: val_loss improved from 0.16419 to 0.16291, saving model to ../metadata/gmf/gmf-weights-improvement-02-0.1629.hdf5
Epoch 3/20
 - 133s - loss: 0.2978 - accuracy: 0.8721 - val_loss: 0.1971 - val_accuracy: 0.9157
Epoch 00003: val_loss did not improve from 0.16291
Epoch 4/20
 - 141s - loss: 0.3012 - accuracy: 0.8717 - val_loss: 0.1345 - val_accuracy: 0.9442
Epoch 00004: val_loss improved from 0.16291 to 0.13455, saving model to ../metadata/gmf/gmf-weights-improvement-04-0.1345.hdf5
Epoch 5/20
 - 138s - loss: 0.3032 - accuracy: 0.8722 - val_loss: 0.1558 - val_accuracy: 0.9334
Epoch 00005: val_loss did not improve from 0.13455
Epoch 6/20
 - 138s - loss: 0.3065 - accuracy: 0.8717 - val_loss: 0.1512 - val_accuracy: 0.9353
Epoch 00006: val_loss did not improve from 0.13455
Epoch 7/20
 - 140s - loss: 0.3101 - accuracy: 0.8711 - val_loss: 0.2357 - val_accuracy: 0.8980
Epoch 00007: val_loss did not improve from 0.13455
Epoch 8/20
 - 136s - loss: 0.3128 - accuracy: 0.8706 - val_loss: 0.1754 - val_accuracy: 0.9287
Epoch 00008: val_loss did not improve from 0.13455
Epoch 00008: ReduceLROnPlateau reducing learning rate to 0.0029999999329447745.
Epoch 9/20
 - 142s - loss: 0.2770 - accuracy: 0.8824 - val_loss: 0.1284 - val_accuracy: 0.9473
Epoch 00009: val_loss improved from 0.13455 to 0.12836, saving model to ../metadata/gmf/gmf-weights-improvement-09-0.1284.hdf5
Epoch 10/20
 - 143s - loss: 0.2738 - accuracy: 0.8839 - val_loss: 0.1577 - val_accuracy: 0.9319
Epoch 00010: val_loss did not improve from 0.12836
Epoch 11/20
 - 146s - loss: 0.2700 - accuracy: 0.8859 - val_loss: 0.1619 - val_accuracy: 0.9309
Epoch 00011: val_loss did not improve from 0.12836
Epoch 12/20
 - 143s - loss: 0.2650 - accuracy: 0.8885 - val_loss: 0.1763 - val_accuracy: 0.9251
Epoch 00012: val_loss did not improve from 0.12836
Epoch 13/20
 - 177s - loss: 0.2601 - accuracy: 0.8912 - val_loss: 0.1537 - val_accuracy: 0.9356
Epoch 00013: val_loss did not improve from 0.12836
Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.0009000000078231095.
Epoch 14/20
 - 135s - loss: 0.2380 - accuracy: 0.9007 - val_loss: 0.1668 - val_accuracy: 0.9293
Epoch 00014: val_loss did not improve from 0.12836
Epoch 15/20
 - 137s - loss: 0.2358 - accuracy: 0.9017 - val_loss: 0.1608 - val_accuracy: 0.9320
Epoch 00015: val_loss did not improve from 0.12836
Epoch 16/20
 - 131s - loss: 0.2340 - accuracy: 0.9026 - val_loss: 0.1760 - val_accuracy: 0.9249
Epoch 00016: val_loss did not improve from 0.12836
Epoch 17/20
 - 125s - loss: 0.2325 - accuracy: 0.9033 - val_loss: 0.1782 - val_accuracy: 0.9243
Epoch 00017: val_loss did not improve from 0.12836
Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.00026999999536201356.
Epoch 18/20
 - 133s - loss: 0.2248 - accuracy: 0.9068 - val_loss: 0.1582 - val_accuracy: 0.9338
Epoch 00018: val_loss did not improve from 0.12836
Epoch 19/20
 - 143s - loss: 0.2243 - accuracy: 0.9070 - val_loss: 0.1655 - val_accuracy: 0.9303
Epoch 00019: val_loss did not improve from 0.12836
Epoch 20/20
 - 143s - loss: 0.2238 - accuracy: 0.9072 - val_loss: 0.1588 - val_accuracy: 0.9338
Epoch 00020: val_loss did not improve from 0.12836
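Because `save_best_only=True`, the checkpoint from epoch 9 (val_loss 0.1284) is the best model on disk, while the in-memory model holds the epoch-20 weights. The better weights can be restored before evaluation:

model.load_weights('../metadata/gmf/gmf-weights-improvement-09-0.1284.hdf5')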
dataset.users_test[:5], dataset.items_test[:5], dataset.ratings_test[:5], dataset.ratings[:5]
([0, 0, 0, 0, 0], [398, 1981, 873, 752, 1481], [1.0, 0.0, 0.0, 0.0, 0.0], array([1., 0., 0., 0., 0.]))
model.summary()
Model: "model_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
user_input (InputLayer)         (None, 1)            0
__________________________________________________________________________________________________
item_input (InputLayer)         (None, 1)            0
__________________________________________________________________________________________________
embedding_gmf_User (Embedding)  (None, 1, 32)        193280      user_input[0][0]
__________________________________________________________________________________________________
embedding_gmf_Item (Embedding)  (None, 1, 32)        118528      item_input[0][0]
__________________________________________________________________________________________________
flatten_gmf_User (Flatten)      (None, 32)           0           embedding_gmf_User[0][0]
__________________________________________________________________________________________________
flatten_gmf_Item (Flatten)      (None, 32)           0           embedding_gmf_Item[0][0]
__________________________________________________________________________________________________
multiply_gmf_UserItem (Multiply (None, 32)           0           flatten_gmf_User[0][0]
                                                                 flatten_gmf_Item[0][0]
__________________________________________________________________________________________________
output (Dense)                  (None, 1)            33          multiply_gmf_UserItem[0][0]
==================================================================================================
Total params: 311,841
Trainable params: 311,841
Non-trainable params: 0
__________________________________________________________________________________________________
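The parameter count can be verified directly from the hyperparameters:

user_emb = 6040 * 32                     # embedding_gmf_User -> 193,280
item_emb = 3704 * 32                     # embedding_gmf_Item -> 118,528
out_dense = 32 + 1                       # 32 weights + 1 bias -> 33
print(user_emb + item_emb + out_dense)   # 311,841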