-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathmain.py
More file actions
145 lines (131 loc) · 4.32 KB
/
main.py
File metadata and controls
145 lines (131 loc) · 4.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
"""
make dataset
model 선언
evaluate_data 생성
for epoch in range(epochs):
train loader에서 train_positive + train_negative 샘플링
모델 학습
evaluate(ndcg, hr 평가)
"""
import os
import pandas as pd
import torch
import torch.nn as nn
import argparse
import time
import random
from Data_Loader import Make_Dataset, SampleGenerator
from model import NeuralCF
from evaluate import Engine
from metrics import MetronAtK
import wandb
import warnings
warnings.filterwarnings("ignore")
def _parse_args():
    """Parse training hyperparameters from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--optim',
                        type=str,
                        default='adam',
                        help='optimizer')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        # BUG FIX: help text was a copy-paste of 'learning rate'.
                        help='number of training epochs')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1024,
                        help='train batch size')
    parser.add_argument('--latent_dim_mf',
                        type=int,
                        default=8,
                        help='latent_dim_mf')
    parser.add_argument('--num_layers',
                        type=int,
                        default=3,
                        help='num layers')
    parser.add_argument('--num_neg',
                        type=int,
                        default=10,
                        help='negative sample')
    parser.add_argument('--l2',
                        type=float,
                        default=0.0,
                        help='l2_regularization')
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='gpu number')
    return parser.parse_args()


def main():
    """Train a NeuralCF model on the melon feather dataset.

    Pipeline (mirrors the module docstring):
      1. Build the dataset and held-out evaluation data via Make_Dataset.
      2. Construct NeuralCF, wrap in DataParallel, train with Adam + BCE.
      3. Each epoch, re-sample negatives, train, then evaluate HR/NDCG.
      4. Early-stop once NDCG fails to improve for more than 10 epochs.

    All hyperparameters come from the CLI (see _parse_args); metrics and
    losses are streamed to Weights & Biases.
    """
    wandb.init(project="Capstone-Design")
    args = _parse_args()
    wandb.config.update(args)
    # Must be set before any CUDA context is created so the chosen GPU is used.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    data = pd.read_feather("melon_35919.ftr")
    print(data)
    MD = Make_Dataset(ratings = data)
    user, item, rating = MD.trainset
    evaluate_data = MD.evaluate_data
    # NCF model
    # NOTE(review): num_users = len(data) is the number of interaction rows,
    # not the number of distinct users — confirm against Make_Dataset/NeuralCF.
    # NOTE(review): args.optim is parsed but unused; the optimizer below is
    # hard-wired to Adam.
    model = NeuralCF(num_users= len(data),num_items = 35919,
                 embedding_size = args.latent_dim_mf,
                 num_layers = args.num_layers)
    model.cuda()
    model = nn.DataParallel(model)
    print(model)
    optimizer = torch.optim.Adam(model.parameters(),lr=args.lr,weight_decay=args.l2)
    criterion = nn.BCEWithLogitsLoss()
    wandb.watch(model)
    ndcg_history = []  # per-epoch NDCG, used for early stopping
    patience = 0
    for epoch in range(args.epochs):
        print('Epoch {} starts !'.format(epoch+1))
        print('-' * 80)
        t1 = time.time()
        model.train()
        total_loss = 0
        # Rebuilt every epoch so negative examples are freshly re-sampled.
        sample = SampleGenerator(user = user, item = item,
                                 rating = rating, ratings = data,
                                 positive_len = MD.positive_len, num_neg = args.num_neg)
        train_loader = sample.instance_a_train_loader(args.batch_size)
        print("Train Loader 생성 완료")
        for batch_id, batch in enumerate(train_loader):
            users, items, ratings = batch[0], batch[1], batch[2]
            ratings = ratings.float()
            users, items, ratings = users.cuda(), items.cuda(), ratings.cuda()
            optimizer.zero_grad()
            output = model(users, items)
            loss = criterion(output, ratings)
            loss.backward()
            optimizer.step()
            loss = loss.item()
            wandb.log({'Batch Loss': loss})
            total_loss += loss
        t2 = time.time()
        print("train : ", t2 - t1)
        engine = Engine()
        hit_ratio, ndcg = engine.evaluate(model, evaluate_data, epoch_id=epoch)
        wandb.log({"epoch" : epoch,
                   "HR" : hit_ratio,
                   "NDCG" : ndcg})
        ndcg_history.append(ndcg)
        # Early stopping: an epoch "improves" only when it sets a new best NDCG.
        if ndcg_history[-1] < max(ndcg_history):
            if patience > 10:
                print("Patience = 10 초과")
                print("ndcg = {:.4f}".format(max(ndcg_history)))
                break
            else:
                patience += 1
                print("Patience = {} ndcg = {:.4f}".format(patience, max(ndcg_history)))
        else:
            patience = 0
            print("Patience = {}".format(patience))
if __name__ == '__main__':
    # Launch training only when the expected feather dataset is on disk.
    dataset_path = "melon_35919.ftr"
    if not os.path.exists(dataset_path):
        print("데이터 없음")
    else:
        print("Data 존재")
        main()