import argparse
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
import torch_geometric.transforms as T
from utils import Logger, EarlyStopping, seed_everything
from model import GNN
# structural edge feature generators from gtrick
from gtrick.pyg import CommonNeighbors, ResourceAllocation, AdamicAdar, AnchorDistance
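
# MLP that scores a node pair from the element-wise product of its endpoint
# embeddings, fused with an encoded structural edge feature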
class LinkPredictor(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, edim, num_layers,
dropout):
super(LinkPredictor, self).__init__()
self.edge_encoder = torch.nn.Linear(edim, in_channels)
self.lins = torch.nn.ModuleList()
self.lins.append(torch.nn.Linear(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.lins.append(torch.nn.Linear(hidden_channels, hidden_channels))
self.lins.append(torch.nn.Linear(hidden_channels, out_channels))
self.dropout = dropout
def reset_parameters(self):
for lin in self.lins:
lin.reset_parameters()
self.edge_encoder.reset_parameters()
def forward(self, x_i, x_j, ex):
        # fuse the node pair representation (element-wise product) with the
        # encoded structural edge feature
        x = x_i * x_j + self.edge_encoder(ex)
for lin in self.lins[:-1]:
x = lin(x)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.lins[-1](x)
return torch.sigmoid(x)
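
# one training epoch: refresh node embeddings per mini-batch and optimize a
# binary log-likelihood over positive and randomly sampled negative edges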
def train(model, predictor, data, ex, split_edge, optimizer, batch_size):
model.train()
predictor.train()
pos_train_edge = split_edge['train']['edge'].to(data.x.device)
neg_train_edge = split_edge['train']['edge_neg'].to(data.x.device)
total_loss = total_examples = 0
for perm in DataLoader(range(pos_train_edge.size(0)), batch_size,
shuffle=True):
optimizer.zero_grad()
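        # full-graph forward pass to refresh node embeddings for this batch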
h = model(data.x, data.adj_t)
edge = pos_train_edge[perm].t()
pos_out = predictor(h[edge[0]], h[edge[1]], ex['train']['edge'][perm])
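        # maximize the log-likelihood of the observed (positive) edges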
pos_loss = -torch.log(pos_out + 1e-15).mean()
        # sample negatives uniformly at random from the pre-generated pool
neg_idx = torch.randint(0, neg_train_edge.size(0), (edge.size(1), ), dtype=torch.long,
device=h.device)
edge = neg_train_edge[neg_idx].t()
neg_out = predictor(h[edge[0]], h[edge[1]], ex['train']['edge_neg'][neg_idx])
neg_loss = -torch.log(1 - neg_out + 1e-15).mean()
loss = pos_loss + neg_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
torch.nn.utils.clip_grad_norm_(predictor.parameters(), 1.0)
optimizer.step()
num_examples = pos_out.size(0)
total_loss += loss.item() * num_examples
total_examples += num_examples
return total_loss / total_examples
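
# evaluate the ranking metric on the train/valid/test splits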
@torch.no_grad()
def test(model, predictor, data, ex, split_edge, evaluator, batch_size, eval_metric):
model.eval()
predictor.eval()
h = model(data.x, data.adj_t)
pos_train_edge = split_edge['train']['edge'].to(h.device)
pos_valid_edge = split_edge['valid']['edge'].to(h.device)
neg_valid_edge = split_edge['valid']['edge_neg'].to(h.device)
pos_test_edge = split_edge['test']['edge'].to(h.device)
neg_test_edge = split_edge['test']['edge_neg'].to(h.device)
    def eval_split(edges, feats):
        # score one split in mini-batches and gather predictions on the CPU
        preds = []
        for perm in DataLoader(range(edges.size(0)), batch_size):
            edge = edges[perm].t()
            preds.append(
                predictor(h[edge[0]], h[edge[1]], feats[perm]).squeeze().cpu())
        return torch.cat(preds, dim=0)

    pos_train_pred = eval_split(pos_train_edge, ex['train']['edge'])
    pos_valid_pred = eval_split(pos_valid_edge, ex['valid']['edge'])
    neg_valid_pred = eval_split(neg_valid_edge, ex['valid']['edge_neg'])
    pos_test_pred = eval_split(pos_test_edge, ex['test']['edge'])
    neg_test_pred = eval_split(neg_test_edge, ex['test']['edge_neg'])
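    # OGB convention: the training metric is evaluated against the
    # validation negatives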
train_hits = evaluator.eval({
'y_pred_pos': pos_train_pred,
'y_pred_neg': neg_valid_pred,
})[eval_metric]
valid_hits = evaluator.eval({
'y_pred_pos': pos_valid_pred,
'y_pred_neg': neg_valid_pred,
})[eval_metric]
test_hits = evaluator.eval({
'y_pred_pos': pos_test_pred,
'y_pred_neg': neg_test_pred,
})[eval_metric]
return train_hits, valid_hits, test_hits
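
# end-to-end pipeline: build edge features, then train and evaluate the model
# over multiple runs with early stopping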
def run_link_pred(args, model, dataset):
device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
model.to(device)
evaluator = Evaluator(name=args.dataset)
data = dataset[0]
edge_index = data.edge_index
    # choose the structural edge feature generator
    edim = 1
    if args.cn:
        ef = CommonNeighbors(edge_index, batch_size=1024)
    elif args.ra:
        ef = ResourceAllocation(edge_index, batch_size=1024)
    elif args.aa:
        ef = AdamicAdar(edge_index, batch_size=1024)
    elif args.ad:
        ef = AnchorDistance(data, 3, 500, 200)
        edim = 3
    else:
        raise ValueError(
            'specify one of --cn, --ra, --aa or --ad to select an edge feature')
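    # ogbl-collab carries edge weights; flatten them, then convert the graph
    # to a SparseTensor adjacency for full-graph message passing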
data.edge_weight = data.edge_weight.view(-1).to(torch.float)
data = T.ToSparseTensor()(data)
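    # alias the sparse adjacency as edge_index for components that read it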
data.edge_index = data.adj_t
split_edge = dataset.get_edge_split()
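    # the train split ships without negatives; sample random node pairs so
    # that their edge features can be precomputed like the other splits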
split_edge['train']['edge_neg'] = torch.randint(0, data.num_nodes, split_edge['train']['edge'].size())
    # precompute structural edge features for every edge split
ex = {}
for key in split_edge:
if key not in ex:
ex[key] = {}
for e in split_edge[key]:
if not e.startswith('edge'):
continue
ex[key][e] = ef(edges=split_edge[key][e]).to(device)
data = data.to(device)
predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1, edim,
args.num_layers, args.dropout).to(device)
logger = Logger(args.runs)
for run in range(args.runs):
model.reset_parameters()
optimizer = torch.optim.Adam(
list(model.parameters()) + list(predictor.parameters()),
lr=args.lr)
early_stopping = EarlyStopping(
patience=args.patience, verbose=True, mode='max')
for epoch in range(1, 1 + args.epochs):
loss = train(model, predictor, data, ex, split_edge,
optimizer, args.batch_size)
result = test(model, predictor, data, ex, split_edge,
evaluator, args.batch_size, dataset.eval_metric)
logger.add_result(run, result)
train_hits, valid_hits, test_hits = result
if epoch % args.log_steps == 0:
print(f'Run: {run + 1:02d}, '
f'Epoch: {epoch:02d}, '
f'Loss: {loss:.4f}, '
f'Train: {100 * train_hits:.2f}%, '
f'Valid: {100 * valid_hits:.2f}%, '
f'Test: {100 * test_hits:.2f}%')
if early_stopping(valid_hits, model):
break
logger.print_statistics(run)
logger.print_statistics()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='train link property prediction')
    parser.add_argument('--dataset', type=str, default='ogbl-collab',
                        choices=['ogbl-collab'])
    parser.add_argument('--dataset_path', type=str, default='/dev/dataset',
                        help='path to dataset')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--model', type=str, default='gcn')
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--batch_size', type=int, default=64 * 1024)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--runs', type=int, default=3)
    parser.add_argument('--patience', type=int, default=30)
    # pick exactly one structural edge feature: common neighbors, resource
    # allocation, adamic-adar, or anchor distance
    parser.add_argument('--cn', action='store_true', default=False)
    parser.add_argument('--ra', action='store_true', default=False)
    parser.add_argument('--aa', action='store_true', default=False)
    parser.add_argument('--ad', action='store_true', default=False)
    args = parser.parse_args()
    print(args)

    seed_everything(3042)

    dataset = PygLinkPropPredDataset(name=args.dataset, root=args.dataset_path)
    num_features = dataset[0].num_features

    model = GNN(num_features, args.hidden_channels,
                args.hidden_channels, args.num_layers,
                args.dropout, args.model)

    run_link_pred(args, model, dataset)