HERS: Modeling Influential Contexts with Heterogeneous Relations for Sparse and Cold-Start Recommendation.
HERS consists of three heterogeneous relations: user-user, item-item, and user-item. Each user’s choice is relevant to the corresponding user’s and item’s influential contexts.
The architecture of HERS for modeling user-item interaction with user’s and item’s influential contexts.
Influential-Context Aggregation Unit (ICAU): A two-stage aggregation model to construct ICE.
!pip install fastFM==0.2.9
%tensorflow_version 1.x
TensorFlow 1.x selected.
!apt-get -qq install tree
Selecting previously unselected package tree. (Reading database ... 155062 files and directories currently installed.) Preparing to unpack .../tree_1.7.0-5_amd64.deb ... Unpacking tree (1.7.0-5) ... Setting up tree (1.7.0-5) ... Processing triggers for man-db (2.8.3-2ubuntu0.1) ...
!git clone https://github.com/rainmilk/aaai19hers.git
Cloning into 'aaai19hers'... remote: Enumerating objects: 70, done. remote: Counting objects: 100% (13/13), done. remote: Compressing objects: 100% (10/10), done. remote: Total 70 (delta 3), reused 9 (delta 3), pack-reused 57 Unpacking objects: 100% (70/70), done.
%cd aaai19hers
/content/aaai19hers
!tree --du -h -C .
. ├── [ 40M] datasets │ ├── [ 23M] book │ │ ├── [7.6M] book_itemNet.txt │ │ ├── [1.2M] book_rating_test_cold_item_neg.txt │ │ ├── [1.3M] book_rating_test_cold_item.txt │ │ ├── [573K] book_rating_test_cold_user_neg.txt │ │ ├── [490K] book_rating_test_cold_user.txt │ │ ├── [1014K] book_rating_test.txt │ │ ├── [757K] book_rating_train_cold_item_reverse.txt │ │ ├── [757K] book_rating_train_cold_item.txt │ │ ├── [4.5M] book_rating_train_cold_user.txt │ │ ├── [4.0M] book_rating_train.txt │ │ ├── [1022K] book_rating.txt │ │ └── [134K] book_userNet.txt │ └── [ 17M] lastfm │ ├── [1.2M] lastfm_itemNet.txt │ ├── [2.3M] lastfm_rating_test_cold_item_neg.txt │ ├── [2.9M] lastfm_rating_test_cold_item.txt │ ├── [792K] lastfm_rating_test_cold_user_neg.txt │ ├── [864K] lastfm_rating_test_cold_user.txt │ ├── [844K] lastfm_rating_test.txt │ ├── [230K] lastfm_rating_train_cold_item_reverse.txt │ ├── [230K] lastfm_rating_train_cold_item.txt │ ├── [3.3M] lastfm_rating_train_cold_user.txt │ ├── [3.3M] lastfm_rating_train.txt │ ├── [749K] lastfm_rating.txt │ └── [219K] lastfm_userNet.txt ├── [ 18K] evaluation │ ├── [ 0] __init__.py │ ├── [3.9K] test_FM.py │ └── [9.9K] test_hers.py ├── [ 79K] model │ ├── [ 25K] attentionlayer.py │ ├── [8.3K] batch_generator_np.py │ ├── [10.0K] construct_RS_train.py │ ├── [ 626] data_utilities.py │ ├── [1.4K] graph_utilities.py │ ├── [ 0] __init__.py │ ├── [ 515] losses.py │ ├── [ 905] masklayers.py │ ├── [1.5K] mlmr.py │ ├── [2.7K] ranking.py │ ├── [4.1K] RSbatch.py │ ├── [2.5K] scorer.py │ ├── [2.6K] socialRC.py │ ├── [8.3K] srs_model.py │ └── [6.2K] timedistributed.py └── [ 119] README.md 40M used in 5 directories, 43 files
import sys
sys.path.insert(0,'.')
from model.graph_utilities import read_graph
from model.losses import infinite_margin_loss, max_margin_loss
from keras.regularizers import l2
from model.srs_model import NetworkRS
import numpy as np
import math
from model.RSbatch import ItemGenerator,TripletGenerator
from sklearn.utils import shuffle
from model.socialRC import test_recommendation
from model.mlmr import mlmf
from model.scorer import nn_scoremodel, inner_prod_scoremodel, fm_scoremodel
# --- Dataset selection and file paths ------------------------------------
# Set data_name to 'book' to run on the other bundled dataset.
data_name='lastfm'
user_net_path='datasets/%s/%s_userNet.txt'%(data_name,data_name)  # user-user social network
ui_net_path ='datasets/%s/%s_rating.txt'%(data_name,data_name)    # full user-item rating file
item_path = 'datasets/%s/%s_itemNet.txt'%(data_name,data_name)    # item-item network
#
# Alternative configuration: ordinary (non-cold-start) split.
# train_path = "networkRS/%s_rating_train.txt"%data_name
# test_path = "networkRS/%s_rating_test.txt"%data_name
# neg_test_path = "networkRS/%s_rating_test_neg.csv"%data_name
# item_rep_path = "networkRS/%s_item_rep_user.txt"%data_name
# user_rep_path = "networkRS/%s_user_rep_user.txt"%data_name
# Active configuration: cold-start *user* evaluation split.
neg_test_path= "datasets/%s/%s_rating_test_cold_user_neg.txt"%(data_name,data_name)
train_path = "datasets/%s/%s_rating_train_cold_user.txt"%(data_name,data_name)
test_path = "datasets/%s/%s_rating_test_cold_user.txt"%(data_name,data_name)
item_rep_path = "datasets/%s/%s_item_rep_user_cold.txt"%(data_name,data_name)
user_rep_path = "datasets/%s/%s_user_rep_user_cold.txt"%(data_name,data_name)
#
# Alternative configuration: cold-start *item* evaluation split.
# neg_test_path= "networkRS/%s_rating_test_cold_item_neg.txt"%data_name
# train_path = "networkRS/%s_rating_train_cold_item.txt"%data_name
# test_path = "networkRS/%s_rating_test_cold_item.txt"%data_name
#
# item_rep_path = "networkRS/%s_item_rep_item_cold.txt"%data_name
# user_rep_path = "networkRS/%s_user_rep_item_cold.txt"%data_name
def get_user_rep(model, nx_G, embed_len, user_rep_path, batch_size=100, save=False):
    """Compute context-aware representations for every user node.

    Runs ``model.user_model`` over the nodes of the user graph in
    mini-batches, gathering first- and second-order neighbour contexts
    from the module-level ``batchGenerator`` (with the global ``topK``).
    Row 0 of the internal buffer is a padding slot, so the returned
    matrix is indexed by node id minus one.

    Optionally saves the matrix to ``user_rep_path`` when ``save`` is set.
    """
    nodes = list(nx_G.nodes())
    total = len(nodes)
    # +1 row so (1-based) node ids can be used directly as row indices.
    reps = np.zeros((nx_G.number_of_nodes() + 1, embed_len))
    for start in range(0, total, batch_size):
        chunk = nodes[start:start + batch_size]
        first_ctx, second_ctx = batchGenerator.get_batch_data_topk(batch_node=chunk, topK=topK)
        reps[chunk, :] = model.user_model.predict_on_batch(
            [np.array(chunk), first_ctx, second_ctx])
    if save:
        np.savetxt(user_rep_path, reps[1:])
        print("save memory successfully")
    return reps[1:]
def get_cold_start_user_rep(model, embed_len, test_users, batch_size=100):
    """Build representations for cold-start users from context alone.

    Uses ``model.first_model`` (context-only path, no learned user
    embedding) on the given ``test_users``.  Relies on the module-level
    ``user_size``, ``batchGenerator`` and ``topK``.  Row 0 of the
    internal buffer is padding; rows for ids 1..user_size are returned.
    """
    reps = np.zeros((user_size + 1, embed_len))
    total = len(test_users)
    for start in range(0, total, batch_size):
        chunk = test_users[start:start + batch_size]
        first_ctx, second_ctx = batchGenerator.get_batch_data_topk(batch_node=chunk, topK=topK)
        out = model.first_model.predict_on_batch(
            [np.array(chunk), first_ctx, second_ctx])
        # first_model emits shape (batch, 1, embed_len); drop the middle axis.
        reps[chunk, :] = np.squeeze(out, axis=1)
    return reps[1:]
def get_item_rep(model, G_item, embed_len, item_rep_path, batch_size=100, save=False):
    """Compute item representations from the item-item graph.

    Runs ``model.item_model`` in mini-batches, fetching only first-order
    (direct neighbour) context through the module-level
    ``batchGenerator.itemGenerate`` (with the global ``topK``).  Row 0 of
    the internal buffer is padding; the returned matrix starts at id 1.

    Optionally saves the matrix to ``item_rep_path`` when ``save`` is set.
    """
    items = list(G_item.nodes())
    total = len(items)
    reps = np.zeros((total + 1, embed_len))
    for start in range(0, total, batch_size):
        chunk = items[start:start + batch_size]
        first_ctx, _ = batchGenerator.itemGenerate.get_batch_data_topk(
            batch_node=chunk, topK=topK, predict_batch_size=100, order=1)
        reps[chunk, :] = model.item_model.predict_on_batch(
            [np.array(chunk), first_ctx])
    if save:
        np.savetxt(item_rep_path, reps[1:])
        print("save item representation successfully")
    return reps[1:]
def model_testembed_zero(model, test_path):
    """Zero out the embedding rows of every user that appears in the test split.

    Intended for cold-start evaluation: test users must not retain any
    trained embedding signal.

    model: an object exposing a Keras-style ``user_embed`` embedding layer
           (e.g. NetworkRS, whose ``user_embed`` is used elsewhere in this
           script).
    test_path: whitespace-separated file whose first column is the user id.

    Bug fixes vs. the original:
    - it read ``model.user_emb`` but wrote ``model.user_embed`` (attribute
      mismatch); both now use ``user_embed``;
    - ``set_weights`` requires a *list* of arrays, but the bare matrix was
      passed, which raises in Keras.
    """
    # atleast_2d keeps `[:, 0]` valid even for a single-line test file.
    test_data = np.atleast_2d(np.loadtxt(test_path, dtype=np.int32))
    test_user_list = list(set(test_data[:, 0]))
    user_embed = model.user_embed.get_weights()[0]
    user_embed[test_user_list] = 0
    model.user_embed.set_weights([user_embed])
# --- Load graphs, splits, and hyper-parameters ---------------------------
test_data=np.loadtxt(test_path,dtype=np.int32)
test_user_list = list(set(test_data[:,0]))  # distinct user ids in the test split
G_user=read_graph(user_net_path)  # user-user social graph
G_item=read_graph(item_path)      # item-item graph
G_ui= np.loadtxt(train_path, dtype=np.int32)  # (user, item) training pairs
directed=False
user_list=list(G_user.nodes())
item_list=list(G_item.nodes())
user_size=len(user_list)
item_size = len(item_list)
edges = G_ui  # alias: the training edges iterated over below
num_edges = len(edges)
embed_len=128  # embedding / representation dimension
topK=10        # neighbours kept per node when building influential context
fliter_theta=16  # passed to NetworkRS as mem_filt_alpha (context filtering)
aggre_theta=64   # passed to NetworkRS as mem_agg_alpha (context aggregation)
batch_size = 400
samples = 3    # sampling rounds per training iteration
margin=20      # margin for the max-margin triplet loss
iter_without_att = 5   # warm-up iterations before attention-based sampling
iter_with_att = 25     # iterations with attention-based neighbour sampling
max_iter = iter_without_att + iter_with_att
batch_num = math.ceil(num_edges / batch_size)
loss = max_margin_loss
# Scoring head: inner product between user and item representations
# (alternative NN / FM scorers kept below for reference).
# score_model = nn_scoremodel((embed_len,), embed_len, score_act=None)
score_model = inner_prod_scoremodel((embed_len,), score_rep_norm=False)
# score_model = fm_scoremodel((embed_len,), score_rep_norm=False, score_act=None)
# --- Stage 1: pretrain plain matrix-factorisation embeddings -------------
# The MF (mlmf) user/item embeddings warm-start the full HERS model below.
pretrain_model=mlmf(nb_user=user_size+1, nb_item=item_size+1, embed_dim=embed_len,
                    score_model=score_model, reg=l2(1e-7))
pretrain_model.contrast_model.compile(loss=loss, optimizer='adam')
pretrain_model.contrast_model.summary()
pretrain_samples = 3
pretrain_batch_sz = 200
pretrain_batch_num = math.ceil(num_edges / pretrain_batch_sz)
pretrain_iter = 3
for i in range(pretrain_iter):
    # BUG FIX: sklearn.utils.shuffle returns shuffled *copies* and does not
    # shuffle in place, so the original bare `shuffle(edges)` was a no-op
    # and pretraining always saw the same edge order.  Rebind the result
    # (as the main training loop below already does).
    edges = shuffle(edges)
    train_loss = 0
    for s in range(pretrain_samples):
        for j in range(pretrain_batch_num):
            edge_batch = np.array(edges[j * pretrain_batch_sz:min(num_edges, (j + 1) * pretrain_batch_sz)])
            # Triplet: observed (user, pos item) plus a uniformly drawn negative item.
            batch_node_array, positive_batch_array, negative_batch_array = \
                (edge_batch[:,0], edge_batch[:,1], np.random.randint(low=1,high=item_size,size=len(edge_batch)))
            train_loss_temp = pretrain_model.contrast_model.train_on_batch(
                x=[batch_node_array, positive_batch_array, negative_batch_array,], y=margin * np.ones([len(edge_batch)]))
            train_loss += train_loss_temp
        print("Training on sample %d and iter %d" % (s + 1, i + 1))
    print("Finish iteration %d/%d with loss: %f" % (i + 1, pretrain_iter, train_loss / (pretrain_batch_num * pretrain_samples)))
# Rows from id 1 onward (row 0 is the padding slot in the embedding tables).
user_rep = pretrain_model.user_emb.get_weights()[0][1:]
item_rep = pretrain_model.item_emb.get_weights()[0][1:]
test_recommendation(user_rep, item_rep, pretrain_model.score_model, test_path, neg_test_path)
#test_recommendation(item_rep, user_rep, test_path, neg_test_path) #for cold start item
# --- Stage 2: build the full HERS model ----------------------------------
model = NetworkRS(user_size, item_size, embed_len, score_model,
                  topK, topK, embed_regularizer=l2(5e-7), directed=directed,
                  mem_filt_alpha=fliter_theta, mem_agg_alpha=aggre_theta,
                  user_mask=None)
model.triplet_model.compile(loss=loss, optimizer='adam')
model.triplet_model.summary()
if pretrain_iter > 0:
    # Warm-start HERS embedding tables from the pretrained MF embeddings.
    model.user_embed.set_weights(pretrain_model.user_emb.get_weights())
    model.item_embed.set_weights(pretrain_model.item_emb.get_weights())
# Produces (user, pos item, neg item) triplets plus neighbour contexts
# drawn from the user-user, item-item and user-item relations.
batchGenerator = TripletGenerator(G_user, model, G_ui, G_item)
# model.user_embed.set_weights([user_embed])
# --- Stage 3: train HERS with the triplet max-margin loss ----------------
for i in range(max_iter):
    edges = shuffle(edges)  # fresh edge order each iteration
    train_loss = 0
    # print("Running on iteration %d/%d:"%(i, max_iter))
    # NOTE(review): both branches of this ternary are `samples`, so it is a
    # no-op; presumably a different sample count was once intended for the
    # attention phase — confirm against the paper/authors.
    spl = samples if i < iter_without_att else samples
    for s in range(spl):
        for j in range(batch_num):
            edge_batch = edges[j * batch_size:min(num_edges, (j + 1) * batch_size)]
            # Attention-guided neighbour sampling only after the warm-up phase.
            batch_node, positive_batch, negative_batch, \
            first_batch_data, second_batch_data, \
            positive_first_batch, \
            negative_first_batch = \
                batchGenerator.generate_triplet_batch(edge_batch=edge_batch, topK=topK,
                                                      attention_sampling=i >= iter_without_att)
            batch_node_array = np.asarray(batch_node)
            positive_batch_array = np.asarray(positive_batch)
            negative_batch_array = np.asarray(negative_batch)
            # Targets are the margin value: the contrastive score pair must
            # differ by at least `margin` under the max-margin loss.
            train_loss_temp = model.triplet_model.train_on_batch(
                x=[batch_node_array, first_batch_data, second_batch_data,
                   positive_batch_array, positive_first_batch,
                   negative_batch_array, negative_first_batch],
                y=margin * np.ones((len(batch_node),)))
            train_loss += train_loss_temp
            if (j + 1) % 100 == 0:
                print("Training on batch %d/%d sample %d and iter %d on dataset %s" % (j + 1, batch_num, s + 1, i + 1, data_name))
    print("Finish iteration %d/%d with loss: %f" % (i + 1, max_iter, train_loss / (batch_num * spl)))
    batchGenerator.clear_node_cache()
    # Persist representations every 5 iterations and on the final one.
    saveMem = (i + 1) % 5 == 0 or i == max_iter - 1
    item_rep= get_item_rep(model, G_item, embed_len, item_rep_path, batch_size=batch_size, save=saveMem)
    # user_rep = get_cold_start_user_rep(model, embed_len, test_user_list, batch_size=batch_size)
    # test_recommendation(user_rep, item_rep, test_path, neg_test_path)
    user_rep = get_user_rep(model, G_user, embed_len, user_rep_path, batch_size=batch_size, save=saveMem)
    # Evaluate ranking metrics (MAP / recall / NDCG) on the held-out split.
    test_recommendation(user_rep, item_rep, model.score_model, test_path, neg_test_path)
#test_recommendation(item_rep, user_rep, test_path, neg_test_path) # for cold start item
#
# from model.construct_RS_train import get_attention_graph_RS
# att_graph_path="./%s_att_graph.csv"%data_name
# edge=[41,2589]
# get_attention_graph_RS(model, G_user, G_item, edge, topK, att_graph_path, order=2)
# model_save_path="./%s_model.h5"%data_name
# model.triplet_model.save(model_save_path)
# print("save triplet model successfully")
Using TensorFlow backend.
WARNING:tensorflow:From /tensorflow-1.15.2/python3.7/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version. Instructions for updating: If using Keras pass *_constraint arguments to layers. Model: "contrastive_model" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== user_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ pos_item_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ neg_item_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ user_embedding (Embedding) (None, 1, 128) 242304 user_input[0][0] __________________________________________________________________________________________________ item_embedding (Embedding) (None, 1, 128) 1581952 pos_item_input[0][0] neg_item_input[0][0] __________________________________________________________________________________________________ lambda_1 (Lambda) (None, 128) 0 user_embedding[0][0] item_embedding[0][0] item_embedding[1][0] __________________________________________________________________________________________________ score_model (Model) (None, 1) 0 lambda_1[0][0] lambda_1[1][0] lambda_1[0][0] lambda_1[2][0] __________________________________________________________________________________________________ contrastive_score (Concatenate) (None, 2) 0 score_model[1][0] score_model[2][0] ================================================================================================== Total params: 1,824,256 Trainable params: 
1,824,256 Non-trainable params: 0 __________________________________________________________________________________________________ WARNING:tensorflow:From /tensorflow-1.15.2/python3.7/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where WARNING:tensorflow:From /tensorflow-1.15.2/python3.7/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead. Training on sample 1 and iter 1 Training on sample 2 and iter 1 Training on sample 3 and iter 1 Finish iteration 1/3 with loss: 18.241182 the MAP at [0.10525066 0.0872151 0.07567542 0.068401 0.06283624 0.05824598 0.05463924 0.05170926 0.0496745 0.04895933] the mean recall at [0.0182748 0.03368914 0.04729032 0.06166572 0.0743528 0.08638901 0.0984089 0.10993599 0.12167688 0.13167849] the mean ndcg at [0.15023066 0.14890209 0.14392312 0.14235024 0.13911982 0.13628249 0.13438462 0.13275857 0.132171 0.13357852] Training on sample 1 and iter 2 Training on sample 2 and iter 2 Training on sample 3 and iter 2 Finish iteration 2/3 with loss: 8.765345 the MAP at [0.08685136 0.07197921 0.0628175 0.0567766 0.0524198 0.04882929 0.04610379 0.043686 0.04215434 0.04212455] the mean recall at [0.0168401 0.03020062 0.0425496 0.05502262 0.06809384 0.07972486 0.09124271 0.10185474 0.1125887 0.12428723] the mean ndcg at [0.12904449 0.12753073 0.12488705 0.12343631 0.12321759 0.12169319 0.12068633 0.1194807 0.11922531 0.12245477] Training on sample 1 and iter 3 Training on sample 2 and iter 3 Training on sample 3 and iter 3 Finish iteration 3/3 with loss: 5.985732 the MAP at [0.08126649 0.0675802 0.0598328 0.0548443 0.05030018 0.04711405 0.04437393 0.04241969 0.04094197 0.04086823] the mean recall at [0.01551918 0.02886323 0.0416366 0.05463804 0.06596641 0.07809979 0.08893671 
0.10060455 0.11160765 0.12215893] the mean ndcg at [0.11808492 0.12025033 0.12024766 0.12048602 0.11854225 0.11812849 0.11669326 0.11661063 0.11676213 0.11934721] Model: "triplet_model" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== target_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ target_first_input (InputLayer) (None, 10) 0 __________________________________________________________________________________________________ target_second_input (InputLayer (None, 10, 10) 0 __________________________________________________________________________________________________ positive_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ positive_first_input (InputLaye (None, 10) 0 __________________________________________________________________________________________________ negative_input (InputLayer) (None, 1) 0 __________________________________________________________________________________________________ negative_first_input (InputLaye (None, 10) 0 __________________________________________________________________________________________________ user_model (Model) (None, 128) 291972 target_input[0][0] target_first_input[0][0] target_second_input[0][0] __________________________________________________________________________________________________ item_model (Model) (None, 128) 1606786 positive_input[0][0] positive_first_input[0][0] negative_input[0][0] negative_first_input[0][0] __________________________________________________________________________________________________ score_model (Model) (None, 1) 0 user_model[1][0] item_model[1][0] user_model[1][0] item_model[2][0] 
__________________________________________________________________________________________________ contrastive_score (Concatenate) (None, 2) 0 score_model[3][0] score_model[4][0] ================================================================================================== Total params: 1,898,758 Trainable params: 1,898,758 Non-trainable params: 0 __________________________________________________________________________________________________ Training on batch 100/172 sample 1 and iter 1 on dataset lastfm Training on batch 100/172 sample 2 and iter 1 on dataset lastfm Training on batch 100/172 sample 3 and iter 1 on dataset lastfm Finish iteration 1/30 with loss: 5.635064 the MAP at [0.22272647 0.18670666 0.16461287 0.15177528 0.1401979 0.12925662 0.1209762 0.11374372 0.10803256 0.10594346] the mean recall at [0.03084107 0.06147762 0.08677184 0.11261106 0.13533306 0.15546178 0.17478276 0.19213053 0.2077343 0.22380648] the mean ndcg at [0.297442 0.28639218 0.27565113 0.27016327 0.26246969 0.25434639 0.24773193 0.24141381 0.23614692 0.23696483] Training on batch 100/172 sample 1 and iter 2 on dataset lastfm Training on batch 100/172 sample 2 and iter 2 on dataset lastfm Training on batch 100/172 sample 3 and iter 2 on dataset lastfm Finish iteration 2/30 with loss: 4.760596 the MAP at [0.27255937 0.2214915 0.19454094 0.17665937 0.16201014 0.14929053 0.13920971 0.13047091 0.12350605 0.12078478] the mean recall at [0.03570137 0.06554486 0.09525043 0.1212082 0.14425527 0.16494801 0.18483454 0.2039537 0.2208235 0.23820965] the mean ndcg at [0.34373792 0.32286684 0.30855118 0.29816215 0.28769292 0.27742483 0.26905321 0.26214813 0.25619899 0.25651924] Training on batch 100/172 sample 1 and iter 3 on dataset lastfm Training on batch 100/172 sample 2 and iter 3 on dataset lastfm Training on batch 100/172 sample 3 and iter 3 on dataset lastfm Finish iteration 3/30 with loss: 4.170074 the MAP at [0.25850484 0.21837637 0.19161314 0.17420808 0.15983544 0.14886259 
0.13885896 0.13045848 0.12387013 0.12106817] the mean recall at [0.0347768 0.06725711 0.09320374 0.11939323 0.14213045 0.16360747 0.18313954 0.20073852 0.21866584 0.23455916] the mean ndcg at [0.32949986 0.31400003 0.29867472 0.28971258 0.27982352 0.27166466 0.26370649 0.25613718 0.25132485 0.25118102] Training on batch 100/172 sample 1 and iter 4 on dataset lastfm Training on batch 100/172 sample 2 and iter 4 on dataset lastfm Training on batch 100/172 sample 3 and iter 4 on dataset lastfm Finish iteration 4/30 with loss: 3.683889 the MAP at [0.27147757 0.22222578 0.19223141 0.17430309 0.16066627 0.14936223 0.14034241 0.1326905 0.12613214 0.12428094] the mean recall at [0.0387941 0.06790382 0.09499372 0.1208624 0.14601645 0.16823789 0.18969912 0.21026749 0.22825787 0.24726625] the mean ndcg at [0.34616364 0.32171579 0.30627825 0.2958496 0.28811596 0.27976426 0.2729372 0.26703786 0.26152012 0.26310554] Training on batch 100/172 sample 1 and iter 5 on dataset lastfm Training on batch 100/172 sample 2 and iter 5 on dataset lastfm Training on batch 100/172 sample 3 and iter 5 on dataset lastfm Finish iteration 5/30 with loss: 3.332913 save item representation successfully save memory successfully the MAP at [0.23755497 0.200826 0.17411452 0.15809933 0.14601762 0.13604498 0.12853867 0.12246192 0.1172684 0.11549145] the mean recall at [0.03456201 0.06227569 0.08672343 0.11114177 0.13400808 0.15473667 0.17558321 0.1956089 0.21396345 0.2317678 ] the mean ndcg at [0.3042486 0.28998448 0.27695444 0.2691055 0.26218003 0.25516614 0.25017115 0.24607019 0.24236405 0.24389987] Training on batch 100/172 sample 1 and iter 6 on dataset lastfm Training on batch 100/172 sample 2 and iter 6 on dataset lastfm Training on batch 100/172 sample 3 and iter 6 on dataset lastfm Finish iteration 6/30 with loss: 3.017033 the MAP at [0.30626209 0.25481352 0.22328453 0.20141359 0.18325239 0.16898384 0.15620385 0.14655656 0.13887434 0.13569403] the mean recall at [0.04220384 0.07383557 0.10216575 
0.12894536 0.15304636 0.17516488 0.19454146 0.21364018 0.23261608 0.25069024] the mean ndcg at [0.37789034 0.35147161 0.33302959 0.31987234 0.30788797 0.29728847 0.28683243 0.27880649 0.27319241 0.27346335] Training on batch 100/172 sample 1 and iter 7 on dataset lastfm Training on batch 100/172 sample 2 and iter 7 on dataset lastfm Training on batch 100/172 sample 3 and iter 7 on dataset lastfm Finish iteration 7/30 with loss: 2.761373 the MAP at [0.28737907 0.23977164 0.21232614 0.18720858 0.17027161 0.1560692 0.14477297 0.13582319 0.12833658 0.12551523] the mean recall at [0.0398102 0.07022699 0.09873408 0.12217624 0.14644166 0.16835766 0.18810265 0.2078854 0.2260577 0.24430567] the mean ndcg at [0.35974147 0.33515181 0.32088719 0.30417715 0.29399527 0.28445681 0.27549809 0.26888653 0.26327119 0.26377186] Training on batch 100/172 sample 1 and iter 8 on dataset lastfm Training on batch 100/172 sample 2 and iter 8 on dataset lastfm Training on batch 100/172 sample 3 and iter 8 on dataset lastfm Finish iteration 8/30 with loss: 2.519779 the MAP at [0.25593668 0.20735572 0.1817019 0.16258274 0.14947434 0.13703792 0.12728549 0.11946924 0.11303752 0.1107409 ] the mean recall at [0.03362044 0.06378493 0.08879778 0.11286946 0.13549632 0.15545845 0.17478091 0.19265355 0.20889323 0.22536967] the mean ndcg at [0.32451431 0.30247467 0.28776008 0.27698124 0.26857394 0.25985634 0.25287661 0.24679682 0.24161056 0.24225384] Training on batch 100/172 sample 1 and iter 9 on dataset lastfm Training on batch 100/172 sample 2 and iter 9 on dataset lastfm Training on batch 100/172 sample 3 and iter 9 on dataset lastfm Finish iteration 9/30 with loss: 2.351650 the MAP at [0.25324538 0.19760031 0.17231283 0.15344305 0.13937447 0.12881417 0.11924113 0.11143542 0.10548039 0.10282508] the mean recall at [0.03372707 0.05907113 0.08629609 0.10927638 0.12903035 0.14958907 0.16698803 0.18436671 0.20104376 0.21638212] the mean ndcg at [0.32711054 0.29632793 0.28280124 0.27154773 0.26036307 
0.25311439 0.24493126 0.23887481 0.23446399 0.23445968] Training on batch 100/172 sample 1 and iter 10 on dataset lastfm Training on batch 100/172 sample 2 and iter 10 on dataset lastfm Training on batch 100/172 sample 3 and iter 10 on dataset lastfm Finish iteration 10/30 with loss: 2.213068 save item representation successfully save memory successfully the MAP at [0.26776605 0.21782437 0.18787246 0.16778486 0.15312793 0.14203968 0.13228501 0.12306069 0.11654155 0.11367439] the mean recall at [0.03593744 0.06472916 0.0920411 0.11557171 0.13756059 0.15920119 0.17836887 0.19447762 0.21185864 0.22823893] the mean ndcg at [0.34035604 0.3154945 0.2978132 0.28517721 0.27472339 0.26721048 0.25948749 0.25112593 0.24619955 0.24607255] Training on batch 100/172 sample 1 and iter 11 on dataset lastfm Training on batch 100/172 sample 2 and iter 11 on dataset lastfm Training on batch 100/172 sample 3 and iter 11 on dataset lastfm Finish iteration 11/30 with loss: 2.049977 the MAP at [0.25583113 0.20507266 0.1802549 0.16081145 0.1462962 0.13534861 0.12682628 0.11920289 0.11318884 0.11090376] the mean recall at [0.03486257 0.06155943 0.09029582 0.11311628 0.13462271 0.15513749 0.17546979 0.19428764 0.21189049 0.22895547] the mean ndcg at [0.33006603 0.3034149 0.29126745 0.27869606 0.26859612 0.26048915 0.25427608 0.24863362 0.24408893 0.24462987] Training on batch 100/172 sample 1 and iter 12 on dataset lastfm Training on batch 100/172 sample 2 and iter 12 on dataset lastfm Training on batch 100/172 sample 3 and iter 12 on dataset lastfm Finish iteration 12/30 with loss: 1.940037 the MAP at [0.25922603 0.2100511 0.18532501 0.16448848 0.14976868 0.13851862 0.12853499 0.12086081 0.1148144 0.1131881 ] the mean recall at [0.03486234 0.06197616 0.09103478 0.11338199 0.13554359 0.15644586 0.17510304 0.19402452 0.21247467 0.23097315] the mean ndcg at [0.33120488 0.30498386 0.29256551 0.27894538 0.26975689 0.26189511 0.25389984 0.24839298 0.24465479 0.24644975] Training on batch 100/172 
sample 1 and iter 13 on dataset lastfm Training on batch 100/172 sample 2 and iter 13 on dataset lastfm Training on batch 100/172 sample 3 and iter 13 on dataset lastfm Finish iteration 13/30 with loss: 1.809631 the MAP at [0.25720317 0.21096955 0.18442228 0.16627673 0.15169154 0.1393995 0.12997507 0.12219363 0.11646076 0.1143801 ] the mean recall at [0.033904 0.0620167 0.09062036 0.11501597 0.13782259 0.1579188 0.17784078 0.19714789 0.21513915 0.23274233] the mean ndcg at [0.33137395 0.30835894 0.29439567 0.28380531 0.27473124 0.26517046 0.2581782 0.25260677 0.24813933 0.24912551] Training on batch 100/172 sample 1 and iter 14 on dataset lastfm Training on batch 100/172 sample 2 and iter 14 on dataset lastfm Training on batch 100/172 sample 3 and iter 14 on dataset lastfm Finish iteration 14/30 with loss: 1.701791 the MAP at [0.25397537 0.20204873 0.17975512 0.16304339 0.14795512 0.13680931 0.12693788 0.11940866 0.113457 0.1111581 ] the mean recall at [0.0334876 0.05986583 0.08949628 0.11357236 0.13411029 0.15488265 0.17356544 0.19187672 0.21028328 0.22693466] the mean ndcg at [0.32315626 0.29762914 0.28869415 0.27855411 0.26740752 0.25949753 0.25174072 0.24579463 0.24194455 0.24270333] Training on batch 100/172 sample 1 and iter 15 on dataset lastfm Training on batch 100/172 sample 2 and iter 15 on dataset lastfm Training on batch 100/172 sample 3 and iter 15 on dataset lastfm Finish iteration 15/30 with loss: 1.630384 save item representation successfully save memory successfully the MAP at [0.2182058 0.18113509 0.16019778 0.14506571 0.13280463 0.12137789 0.11388569 0.10751192 0.10217517 0.10111828] the mean recall at [0.02967468 0.05503142 0.08118111 0.10378638 0.12433406 0.1417488 0.16105635 0.17909831 0.19588391 0.21382027] the mean ndcg at [0.2855451 0.26991778 0.25986712 0.25209454 0.24425917 0.23550627 0.23049601 0.22604128 0.22252292 0.22492766] Training on batch 100/172 sample 1 and iter 16 on dataset lastfm Training on batch 100/172 sample 2 and iter 16 
on dataset lastfm Training on batch 100/172 sample 3 and iter 16 on dataset lastfm Finish iteration 16/30 with loss: 1.566339 the MAP at [0.20591909 0.17341092 0.15532148 0.14095635 0.12978597 0.1204427 0.11257745 0.10647815 0.10179339 0.1002338 ] the mean recall at [0.02819758 0.05358144 0.07893282 0.10020005 0.12179601 0.14099291 0.15892562 0.17652275 0.19388226 0.2104588 ] the mean ndcg at [0.26798143 0.25808928 0.2496284 0.24157542 0.23629948 0.23025325 0.22456234 0.22013443 0.21733429 0.21911362] Training on batch 100/172 sample 1 and iter 17 on dataset lastfm Training on batch 100/172 sample 2 and iter 17 on dataset lastfm Training on batch 100/172 sample 3 and iter 17 on dataset lastfm Finish iteration 17/30 with loss: 1.493484 the MAP at [0.16306069 0.13660008 0.11944043 0.10751592 0.09974598 0.09202424 0.08637625 0.081895 0.07796739 0.0766401 ] the mean recall at [0.0228597 0.04449086 0.0665177 0.08430162 0.10325818 0.11937424 0.13494307 0.15120494 0.16521616 0.17949766] the mean ndcg at [0.22580028 0.21885995 0.21154837 0.20463799 0.20129841 0.1957813 0.19131689 0.18867335 0.1856005 0.18696221] Training on batch 100/172 sample 1 and iter 18 on dataset lastfm Training on batch 100/172 sample 2 and iter 18 on dataset lastfm Training on batch 100/172 sample 3 and iter 18 on dataset lastfm Finish iteration 18/30 with loss: 1.423724 the MAP at [0.13813544 0.11872304 0.10353435 0.09447171 0.0876722 0.08154195 0.07724628 0.07282223 0.06946746 0.06828121] the mean recall at [0.0201383 0.0431395 0.05994607 0.07727967 0.09362966 0.10893769 0.12517866 0.13821757 0.15203621 0.16449533] the mean ndcg at [0.19702241 0.19635344 0.18825263 0.18405342 0.18001654 0.17621656 0.17420998 0.1704177 0.16864232 0.16962136] Training on batch 100/172 sample 1 and iter 19 on dataset lastfm Training on batch 100/172 sample 2 and iter 19 on dataset lastfm Training on batch 100/172 sample 3 and iter 19 on dataset lastfm Finish iteration 19/30 with loss: 1.366675 the MAP at [0.14432718 
0.11400804 0.09842052 0.08892525 0.08152314 0.07550994 0.07037349 0.06630479 0.06376948 0.0631296 ] the mean recall at [0.02436324 0.04249572 0.0595887 0.07587341 0.09087383 0.10546706 0.11948694 0.13284822 0.14776192 0.16194132] the mean ndcg at [0.20678944 0.19404984 0.18645651 0.18119955 0.17596929 0.17164038 0.16807869 0.16515024 0.16459408 0.16720269] Training on batch 100/172 sample 1 and iter 20 on dataset lastfm
HERS: Modeling Influential Contexts with Heterogeneous Relations for Sparse and Cold-Start Recommendation. Hu et al. 2019. AAAI. https://ojs.aaai.org//index.php/AAAI/article/view/4270