# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
import torch.nn.functional as functional
import matplotlib.pyplot as plt
from tqdm import tqdm

from config import Config
from dataset import DualNovelDataSet

config = Config()


class LanguageModel(object):
    def __init__(self, direction):
        """Initialize the language model.

        Args:
            direction: whether the language model is forward or backward; the
                original paper's code has this setting, but it is unused here
        """
        # Set the model direction
        assert direction in config.lm_model_directions
        self.direction = direction
        self.model_path = config.lm_model_path.format(direction)
        # Set up the datasets
        self.train_set = DualNovelDataSet(test=False, max_len=config.max_sentence_length, classification=False)
        self.test_set = DualNovelDataSet(test=True, max_len=config.max_sentence_length, classification=False)
        # Set up the model
        self.num_tokens = self.train_set.vocabulary.vocab_size
        self.pad = self.train_set.pad
        self.go = self.train_set.go
        self.eos = self.train_set.eos
        self.rnn_model = LanguageModelRNN(
            num_tokens=self.num_tokens,
            embedding_dim=config.lm_embedding_dim,
            hidden_dim=config.lm_hidden_dim,
            num_layers=config.lm_num_layers
        )
        if config.gpu:
            self.rnn_model = self.rnn_model.cuda()
        self.criterion = nn.CrossEntropyLoss()
        # Set training hyperparameters and the optimizer
        self.batch_size = config.lm_batch_size
        self.epochs = config.lm_epochs
        self.num_workers = config.lm_num_workers
        self.learning_rate = config.lm_learning_rate
        self.beta1 = config.lm_beta1
        self.beta2 = config.lm_beta2
        self.grad_norm_bound = config.lm_grad_norm_bound
        self.parameters = self.rnn_model.parameters()
        self.optimizer = Adam(self.parameters, self.learning_rate, (self.beta1, self.beta2))

    def train(self, verbose=False, graph=False):
        """Train the model.

        Args:
            verbose: whether to print a summary after each epoch
            graph: whether to plot the loss and accuracy curves after training
        """
        loss_list = []
        acc_list = []
        for epoch in range(self.epochs):
            epoch_loss = self.run_epoch(test=False)
            loss_list += epoch_loss
            if verbose:
                print('\n[TRAIN] Epoch {}, mean loss {}'.format(epoch, np.mean(epoch_loss)))
            train_accuracy, _ = self.run_epoch(test=True)
            acc_list.append(train_accuracy)
        if graph:
            plt.figure()
            plt.subplot(1, 2, 1)
            plt.plot([x for x in range(len(loss_list))], loss_list)
            plt.xlabel('step')
            plt.ylabel('loss')
            plt.grid()
            plt.title('Training loss')
            plt.subplot(1, 2, 2)
            plt.plot([x for x in range(self.epochs)], acc_list)
            plt.xlabel('epoch')
            plt.ylabel('accuracy')
            plt.grid()
            plt.title('Training accuracy')
            plt.show()

    def set_training(self, train_mode):
        """Set training/evaluation mode.

        Args:
            train_mode: bool, whether to enable training mode
        """
        self.rnn_model.train(train_mode)

    def run_epoch(self, test=False):
        """Run one epoch.

        Args:
            test: bool, whether to run in test mode
        Returns:
            test == True:
                mean_acc, mean_loss: mean prediction accuracy and mean loss over the epoch
            test == False:
                loss_list: per-step loss values over the epoch
        """
        loss_list = []
        acc_list = []
        if not test:
            loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
            self.rnn_model.train(mode=True)
        else:
            loader = DataLoader(self.test_set, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
            self.rnn_model.train(mode=False)
        with tqdm(loader) as pbar:
            for data in pbar:
                # Read a batch
                inputs, targets = self.preprocess_data(data)
                # Build the initial hidden states for the RNN
                states = self.rnn_model.init_states(inputs.shape[0])
                if config.gpu:
                    states = [state.cuda() for state in states]
                states = tuple(self.detach(states))
                # Forward pass and loss computation
                outputs, states = self.rnn_model(inputs, states)
                loss = self.criterion(outputs, targets.reshape(-1))
                loss_list.append(loss.item())
                if not test:
                    # Training mode: update with the optimizer
                    self.rnn_model.zero_grad()
                    loss.backward()
                    clip_grad_norm_(self.rnn_model.parameters(), self.grad_norm_bound)
                    self.optimizer.step()
                else:
                    # Test mode: compute prediction accuracy, ignoring padding positions
                    prediction = torch.argmax(outputs, dim=1)
                    label = targets.reshape(-1)
                    correct = (prediction == label).float()
                    null_mask = label.eq(self.train_set.pad).float()
                    valid_mask = 1.0 - null_mask
                    total_num = torch.sum(valid_mask).item()
                    masked_correct = correct * valid_mask
                    correct_num = torch.sum(masked_correct).item()
                    accuracy = correct_num / total_num
                    acc_list.append(accuracy)
        return (np.mean(acc_list), np.mean(loss_list)) if test else loss_list
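
    # Illustrative aside, not in the original code: since the criterion is
    # token-level cross-entropy, the standard perplexity measure can be
    # estimated as exp(mean_loss). Note that CrossEntropyLoss above is not
    # configured to ignore the pad token, so padding positions are averaged
    # into the loss and this is only a rough estimate. The method name
    # `perplexity` is hypothetical.
    def perplexity(self):
        """Sketch: estimate test-set perplexity from the mean test loss."""
        _, mean_loss = self.run_epoch(test=True)
        return float(np.exp(mean_loss))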

    def inference(self, data_batch):
        """Inference: given sentences (as word ids), return each sentence's probability.

        Args:
            data_batch: sentence data of shape [batch_size, max_seq_len], holding word ids
        Returns:
            sentence_prob: np.ndarray of shape [batch_size], the probability of each sentence
        """
        with torch.no_grad():
            batch_size = data_batch.shape[0]
            states = self.rnn_model.init_states(batch_size)
            if config.gpu:
                states = [state.cuda() for state in states]
            states = tuple(state.detach() for state in states)
            # outputs shape: [batch_size * max_len, num_tokens]
            outputs, _ = self.rnn_model(data_batch, states)
            probability = functional.softmax(outputs, dim=1)
            probability = probability.view(batch_size, config.max_sentence_length, self.num_tokens)
            # null_mask shape: [batch_size, max_len]
            null_mask = data_batch.eq(self.train_set.pad).int()
            sentence_prob = np.zeros(batch_size)
            for i in range(batch_size):
                prob = 1.0
                for j in range(1, config.max_sentence_length):
                    if null_mask[i, j] == 1:
                        break
                    word_id = data_batch[i, j]
                    # Probability of the word at position j under the prediction at j - 1
                    prob *= probability[i, j - 1, word_id].item()
                sentence_prob[i] = prob
            return sentence_prob
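
    # A hedged alternative sketch, not part of the original pipeline: working
    # in log space avoids numerical underflow for long sentences, and a single
    # gather call replaces the Python loops above. The method name
    # `inference_log_prob` is illustrative.
    def inference_log_prob(self, data_batch):
        """Sketch: per-sentence log-probability, computed in log space.

        Args:
            data_batch: LongTensor of shape [batch_size, max_seq_len] holding word ids
        Returns:
            np.ndarray of shape [batch_size] with log P(sentence)
        """
        with torch.no_grad():
            batch_size = data_batch.shape[0]
            states = self.rnn_model.init_states(batch_size)
            outputs, _ = self.rnn_model(data_batch, states)
            # log_probs shape: [batch_size, max_len, num_tokens]
            log_probs = functional.log_softmax(outputs, dim=1).view(
                batch_size, config.max_sentence_length, self.num_tokens)
            # Score of the word at position j under the prediction made at j - 1
            targets = data_batch[:, 1:]
            scores = log_probs[:, :-1, :].gather(2, targets.unsqueeze(2)).squeeze(2)
            # Zero out padding positions, mirroring the mask in inference()
            valid = targets.ne(self.pad).float()
            sentence_log_prob = (scores * valid).sum(dim=1)
            return sentence_log_prob.cpu().numpy()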

    def detach(self, states):
        """Detach variables from the current computation graph so they require no gradient.

        Used to process the initial states fed into the RNN.

        Args:
            states: a list or tuple, each element an initial state
        Returns:
            states: the list of initial states after the detach operation
        """
        return [state.detach() for state in states]

    def preprocess_data(self, data):
        """Preprocess the data: merge the positive and negative samples into one batch.

        Args:
            data: the batch returned by the DataLoader; see the unpacking on
                the first line of this function for its format
        Returns:
            inputs: sentence data, a LongTensor of shape [batch_size * 2, max_seq_len]
            targets: label data, same shape as inputs
        """
        (input_0, target_0, len_0), (input_1, target_1, len_1) = data
        inputs = torch.cat([input_0, input_1], dim=0)
        targets = torch.cat([target_0, target_1], dim=0)
        if config.gpu:
            inputs = inputs.cuda()
            targets = targets.cuda()
        return inputs, targets
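
    # Illustrative note on the batch layout assumed by preprocess_data above:
    # the DataLoader is expected to yield nested tuples of the form
    #     ((input_0, target_0, len_0), (input_1, target_1, len_1)),
    # one triple per sample class, where each input_*/target_* is a LongTensor
    # of shape [batch_size, max_seq_len] and len_* holds the true sentence
    # lengths (unused here).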

    def save_model(self):
        """Save the model.

        The file path used is defined in config.
        """
        torch.save(self.rnn_model.state_dict(), self.model_path)

    def load_model(self):
        """Load the model.

        The file path used is defined in config.
        """
        self.rnn_model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage))


class LanguageModelRNN(nn.Module):
    def __init__(self, num_tokens, embedding_dim, hidden_dim, num_layers):
        """Initialize the language model network.

        Args:
            num_tokens: number of words in the vocabulary
            embedding_dim: dimension of the embedding layer
            hidden_dim: dimension of each LSTM hidden state
            num_layers: number of LSTM layers
        """
        super(LanguageModelRNN, self).__init__()
        self.num_tokens = num_tokens
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.embedding = nn.Embedding(self.num_tokens, self.embedding_dim)
        self.rnn_lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers, batch_first=True)
        self.dense = nn.Linear(hidden_dim, num_tokens)
        # self.dense = nn.Sequential(
        #     nn.Linear(hidden_dim, num_tokens),
        #     nn.Softmax()
        # )

    def forward(self, inputs, hidden):
        """Forward pass.

        The LSTM carries two hidden states.

        Args:
            inputs: input word ids, a LongTensor of shape [batch_size, max_seq_len]
            hidden: a tuple of two Tensors, each of shape [num_layers, batch_size, hidden_dim]
        Returns:
            output: the result, of shape [batch_size * max_seq_len, num_tokens]
            (hidden_h, hidden_c): the final states of the LSTM
        """
        embedded = self.embedding(inputs)
        output, (hidden_h, hidden_c) = self.rnn_lstm(embedded, hidden)
        # output shape: [batch_size * max_seq_len, hidden_dim]
        output = output.reshape(output.size(0) * output.size(1), output.size(2))
        # output shape: [batch_size * max_seq_len, num_tokens]
        output = self.dense(output)
        return output, (hidden_h, hidden_c)

    def init_states(self, batch_size):
        """Create all-zero initial hidden states for the LSTM.

        When the DataLoader reaches the end of the dataset, batch_size may be
        smaller than the configured value (the remaining data does not fill a
        whole batch), so the actual batch size is passed in here.

        Args:
            batch_size: number of samples in the batch
        """
        hidden_h = torch.zeros((self.num_layers, batch_size, self.hidden_dim))
        hidden_c = torch.zeros((self.num_layers, batch_size, self.hidden_dim))
        if config.gpu:
            hidden_h = hidden_h.cuda()
            hidden_c = hidden_c.cuda()
        return hidden_h, hidden_c
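

# A minimal shape-check sketch, illustrative only and not part of the original
# training flow: it runs a tiny LanguageModelRNN on random word ids to confirm
# the tensor shapes documented in forward(). All sizes below are made-up
# example values; the function name _demo_rnn_shapes is hypothetical.
def _demo_rnn_shapes():
    model = LanguageModelRNN(num_tokens=100, embedding_dim=8, hidden_dim=16, num_layers=2)
    inputs = torch.randint(0, 100, (4, 10))    # word ids, [batch_size=4, max_seq_len=10]
    states = model.init_states(batch_size=4)   # two zero tensors of [num_layers, 4, hidden_dim]
    if config.gpu:
        model = model.cuda()
        inputs = inputs.cuda()
    outputs, (hidden_h, hidden_c) = model(inputs, states)
    assert outputs.shape == (4 * 10, 100)      # [batch_size * max_seq_len, num_tokens]
    assert hidden_h.shape == (2, 4, 16)        # [num_layers, batch_size, hidden_dim]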


def train_language_model():
    print('Training language model...')
    model = LanguageModel(direction='forward')
    model.train(verbose=True, graph=True)
    model.save_model()
    print('Language model saved.')
    print('Loading language model...')
    model.load_model()
    print('Testing...')
    loader = DataLoader(model.train_set, batch_size=16)
    for data in loader:
        inputs, targets = model.preprocess_data(data)
        prob = model.inference(inputs)
        sentences = model.train_set.get_sentences(inputs)
        for i in range(len(sentences)):
            print('[{}], Prob: {}, Sentence: {}'.format(i, prob[i], ' '.join(sentences[i])))
        break


if __name__ == '__main__':
    train_language_model()