import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import random
import math
import time
def sgd(params, lr, batch_size):
    # To stay consistent with the original book we divide by batch_size here, but the division
    # is usually unnecessary: PyTorch loss functions already average over the batch dimension by default.
    for param in params:
        param.data -= lr * param.grad / batch_size  # note: param.data is used here to update param in place
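# Illustrative sketch, not part of the original script: one sgd step on a made-up parameter
# whose gradient is all ones, so each entry changes by -lr * 1 / batch_size = -0.05.
_w = torch.ones(2, requires_grad=True)
_w.grad = torch.ones_like(_w)
sgd([_w], lr=0.1, batch_size=2)
print('after one sgd step:', _w.data)  # expect tensor([0.95, 0.95])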
def load_data_jay_lyrics():
    with open('../data/jaychou_lyrics.txt') as f:
        corpus_chars = f.read()
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    corpus_chars = corpus_chars[0:10000]  # use only the first 10000 characters
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
    # Subtract 1 because the label index y is the corresponding input index x plus 1.
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    # Return the length-num_steps sequence starting at pos.
    def _data(pos):
        return corpus_indices[pos: pos + num_steps]
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    for i in range(epoch_size):
        # Read batch_size random examples each time.
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = [_data(j * num_steps) for j in batch_indices]
        Y = [_data(j * num_steps + 1) for j in batch_indices]
        yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device)
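# Illustrative sketch with a toy sequence (values chosen here only for demonstration):
# each Y is the corresponding X shifted one position to the right.
for _X, _Y in data_iter_random(list(range(30)), batch_size=2, num_steps=6):
    print('random sampling X:', _X, '\nY:', _Y)
    break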
def data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    corpus_indices = torch.tensor(corpus_indices, dtype=torch.float32, device=device)
    data_len = len(corpus_indices)
    batch_len = data_len // batch_size
    indices = corpus_indices[0: batch_size * batch_len].view(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    for i in range(epoch_size):
        i = i * num_steps
        X = indices[:, i: i + num_steps]
        Y = indices[:, i + 1: i + num_steps + 1]
        yield X, Y
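# Illustrative sketch on the same toy sequence (values assumed for demonstration): with
# consecutive sampling, each batch continues exactly where the previous one left off, which is
# why the hidden state can be carried across iterations during training.
for _X, _Y in data_iter_consecutive(list(range(30)), batch_size=2, num_steps=6):
    print('consecutive sampling X:', _X, '\nY:', _Y)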
# ###################################### 6.4 ######################################
def one_hot(x, n_class, dtype=torch.float32):
    # X shape: (batch), output shape: (batch, n_class)
    x = x.long()
    res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)
    res.scatter_(1, x.view(-1, 1), 1)
    return res

def to_onehot(X, n_class):
    # X shape: (batch, seq_len), output: seq_len elements of (batch, n_class)
    return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]
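# Illustrative sketch (toy shapes assumed): a (batch=2, seq_len=3) index tensor becomes a list
# of 3 tensors, one per time step, each of shape (2, n_class).
_steps = to_onehot(torch.arange(6).view(2, 3), n_class=10)
print('to_onehot:', len(_steps), 'steps, each of shape', _steps[0].shape)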
def grad_clipping(params, theta, device):
    norm = torch.tensor([0.0], device=device)
    for param in params:
        norm += (param.grad.data ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data *= (theta / norm)
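# Illustrative sketch, not part of the original script: a gradient of ones on a length-4 vector
# has L2 norm 2, so clipping with theta=1 rescales every gradient entry to 0.5.
_p = torch.zeros(4, requires_grad=True)
_p.grad = torch.ones_like(_p)
grad_clipping([_p], theta=1.0, device=_p.device)
print('clipped grad:', _p.grad)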
class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size):
        super(RNNModel, self).__init__()
        self.rnn = rnn_layer
        self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
        self.vocab_size = vocab_size
        self.dense = nn.Linear(self.hidden_size, vocab_size)
        self.state = None

    def forward(self, inputs, state):  # inputs: (batch, seq_len)
        # Get the one-hot vector representation.
        X = to_onehot(inputs, self.vocab_size)  # X is a list
        Y, self.state = self.rnn(torch.stack(X), state)
        # The fully connected layer first reshapes Y to (num_steps * batch_size, num_hiddens);
        # its output has shape (num_steps * batch_size, vocab_size).
        output = self.dense(Y.view(-1, Y.shape[-1]))
        return output, self.state
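# Illustrative sketch with made-up toy sizes (_toy_vocab, _toy_hidden and the zero input are
# assumptions for demonstration): the wrapped GRU maps a (batch=2, seq_len=5) index tensor to an
# output of shape (seq_len * batch, vocab_size) and a hidden state of shape (num_layers, batch, hidden).
_toy_vocab, _toy_hidden = 10, 16
_toy_model = RNNModel(nn.GRU(input_size=_toy_vocab, hidden_size=_toy_hidden), _toy_vocab)
_toy_out, _toy_state = _toy_model(torch.zeros(2, 5, dtype=torch.long), None)
print('output:', _toy_out.shape, 'state:', _toy_state.shape)  # (10, 10) and (1, 2, 16)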
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,
                        char_to_idx):
    state = None
    output = [char_to_idx[prefix[0]]]  # output records the prefix plus the generated characters
    for t in range(num_chars + len(prefix) - 1):
        X = torch.tensor([output[-1]], device=device).view(1, 1)
        if state is not None:
            if isinstance(state, tuple):  # LSTM, state: (h, c)
                state = (state[0].to(device), state[1].to(device))
            else:
                state = state.to(device)
        (Y, state) = model(X, state)  # forward computation does not need the model parameters passed in
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(dim=1).item()))
    return ''.join([idx_to_char[i] for i in output])
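# Illustrative sketch with an assumed toy vocabulary 'abcd' and an untrained toy model: the
# returned string is the prefix followed by num_chars generated characters (gibberish here,
# since nothing has been trained).
_toy_i2c = list('abcd')
_toy_c2i = {c: i for i, c in enumerate(_toy_i2c)}
_toy_pred_model = RNNModel(nn.GRU(input_size=4, hidden_size=8), 4)
print(predict_rnn_pytorch('ab', 3, _toy_pred_model, 4, torch.device('cpu'),
                          _toy_i2c, _toy_c2i))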
def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                                  corpus_indices, idx_to_char, char_to_idx,
                                  num_epochs, num_steps, lr, clipping_theta,
                                  batch_size, pred_period, pred_len, prefixes):
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    state = None
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
        for X, Y in data_iter:
            if state is not None:
                # Detach the hidden state from the computation graph so that the gradient
                # computation for the model parameters only depends on the mini-batch sequence
                # read in this iteration (keeps the gradient computation from becoming too expensive).
                if isinstance(state, tuple):  # LSTM, state: (h, c)
                    state = (state[0].detach(), state[1].detach())
                else:
                    state = state.detach()
            (output, state) = model(X, state)  # output: shape (num_steps * batch_size, vocab_size)
            # Y has shape (batch_size, num_steps); after transposing, it becomes a vector of length
            # batch_size * num_steps whose entries correspond one-to-one with the rows of output.
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            l = loss(output, y.long())

            optimizer.zero_grad()
            l.backward()
            # gradient clipping
            grad_clipping(model.parameters(), clipping_theta, device)
            optimizer.step()
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        try:
            perplexity = math.exp(l_sum / n)
        except OverflowError:
            perplexity = float('inf')
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, perplexity, time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn_pytorch(
                    prefix, pred_len, model, vocab_size, device, idx_to_char,
                    char_to_idx))
batch_size = 3
num_steps = 5
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
(corpus_indices, char_to_idx, idx_to_char, vocab_size) = load_data_jay_lyrics()
num_hiddens = 256
num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 5, 3, 1e2, 1e-2
pred_period, pred_len, prefixes = 40, 50, ['你', '你']
lr = 1e-2  # note: adjust the learning rate

gru_layer = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens)
lstm_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)
deep_lstm_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens, num_layers=2)
# Despite its name, this is a 9-layer bidirectional GRU, not an LSTM.
bi_deep_lstm = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens, bidirectional=True, num_layers=9)
model = RNNModel(bi_deep_lstm, vocab_size).to(device)
train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                              corpus_indices, idx_to_char, char_to_idx,
                              num_epochs, num_steps, lr, clipping_theta,
                              batch_size, pred_period, pred_len, prefixes)