The two fundamental deep-learning algorithms for sequence processing (understanding text as a sequence of words or a sequence of characters) are recurrent neural networks (RNNs) and 1D convnets (one-dimensional convolutional neural networks).
Working with text data
Deep-learning models don't take raw text as input; they only work with numeric tensors. Vectorizing text is the process of transforming text into numeric tensors.
Text can be vectorized in several ways:
Segment the text into words, and transform each word into a vector.
Segment the text into characters, and transform each character into a vector.
Extract n-grams of words or characters, and transform each n-gram into a vector. N-grams are groups of multiple consecutive words or characters (different n-grams may overlap); a small extraction sketch follows this list.
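As a minimal sketch (my own illustration, not from the original notes), here is how word 2-grams can be extracted from a sentence:
sample = 'The cat sat on the mat.'
words = sample.split()
bigrams = [' '.join(words[i:i + 2]) for i in range(len(words) - 1)]
print(bigrams)  # ['The cat', 'cat sat', 'sat on', 'on the', 'the mat.']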
One-hot encoding of words and characters
# Word-level one-hot encoding (toy example)
import numpy as np

samples = ['The cat sat on the mat.', 'The dog ate my homework.']

# Build an index of all tokens in the data; index 0 is reserved and not assigned to any word
token_index = {}
for sample in samples:
    for word in sample.split():
        if word not in token_index:
            token_index[word] = len(token_index) + 1

max_length = 10  # only consider the first max_length words of each sample
results = np.zeros(shape=(len(samples),
                          max_length,
                          max(token_index.values()) + 1))
for i, sample in enumerate(samples):
    for j, word in list(enumerate(sample.split()))[:max_length]:
        index = token_index.get(word)
        results[i, j, index] = 1.  # store the result
# Character-level one-hot encoding (toy example)
import string

samples = ['The cat sat on the mat.', 'The dog ate my homework.']
characters = string.printable  # all printable ASCII characters
# Map each character to an index starting at 1 (index 0 is reserved);
# note the mapping must go character -> index for the .get() lookup below to work
token_index = dict(zip(characters, range(1, len(characters) + 1)))

max_length = 50
results = np.zeros((len(samples),
                    max_length,
                    max(token_index.values()) + 1))
for i, sample in enumerate(samples):
    for j, character in enumerate(sample):
        index = token_index.get(character)
        results[i, j, index] = 1.
# Word-level one-hot encoding with Keras
from keras.preprocessing.text import Tokenizer

samples = ['The cat sat on the mat.', 'The dog ate my homework.']
tokenizer = Tokenizer(num_words=1000)  # create a tokenizer configured to only take into account the 1,000 most common words
tokenizer.fit_on_texts(samples)  # build the word index
sequences = tokenizer.texts_to_sequences(samples)  # turn strings into lists of integer indices
one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')
# You can also directly get the one-hot binary representations; this tokenizer supports vectorization modes other than one-hot encoding as well
word_index = tokenizer.word_index  # recover the word index that was computed
print('Found %s unique tokens.' % len(word_index))  # 9
A variant of one-hot encoding is the one-hot hashing trick: instead of maintaining an explicit word index, words are hashed into vectors of fixed size. If the dimensionality of the hashing space is much larger than the total number of unique tokens being hashed, hash collisions become unlikely.
# Word-level one-hot encoding with the hashing trick (toy example)
samples = ['The cat sat on the mat.', 'The dog ate my homework.']
dimensionality = 1000  # store words as vectors of size 1,000
max_length = 10
results = np.zeros((len(samples), max_length, dimensionality))
for i, sample in enumerate(samples):
    for j, word in list(enumerate(sample.split()))[:max_length]:
        # Hash the word into an integer index between 0 and 999; note that Python's
        # built-in hash() for strings is randomized per process, so use a stable
        # hash function if you need reproducible indices across runs
        index = abs(hash(word)) % dimensionality
        results[i, j, index] = 1.
Using word embeddings
Another popular and powerful way to associate a vector with a word is to use dense word vectors, also called word embeddings.
There are two ways to obtain word embeddings:
Learn word embeddings jointly with the main task you care about (such as document classification or sentiment prediction). In this setup, you start with random word vectors and then learn the word vectors the same way you learn the weights of a neural network.
Load into your model word embeddings that were precomputed on a different machine-learning task than the one you're trying to solve. These are called pretrained word embeddings.
Learning word embeddings with the Embedding layer
Each new task learns a new embedding space.
The Embedding layer is effectively a dictionary lookup that maps integer indices to dense vectors (see the sketch after the instantiation below):
# Instantiating an Embedding layer
from keras.layers import Embedding
embedding_layer = Embedding(1000, 64)  # the Embedding layer takes at least two arguments: the number of possible tokens (here 1,000, i.e. maximum word index + 1) and the dimensionality of the embeddings (here 64)
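To make the dictionary-lookup behavior concrete, here is a minimal sketch (my own illustration, not from the original notes): the layer's weights form a (num_tokens, dim) matrix, and each integer index simply selects a row of that matrix.
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

model = Sequential()
model.add(Embedding(1000, 64, input_length=4))
token_ids = np.array([[3, 17, 3, 42]])        # a batch containing one sequence of 4 token indices
vectors = model.predict(token_ids)            # shape (1, 4, 64)
weights = model.layers[0].get_weights()[0]    # the (1000, 64) embedding matrix
assert np.allclose(vectors[0, 0], weights[3]) # token index 3 maps to row 3 of the matrix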
# Loading the IMDB data for use with an Embedding layer
from keras.datasets import imdb
from keras import preprocessing

max_features = 10000  # number of words to consider as features
maxlen = 20  # cut the texts after this number of words
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
# Using an Embedding layer and a classifier on the IMDB data
from keras.models import Sequential
from keras.layers import Flatten, Dense, Embedding

model = Sequential()
model.add(Embedding(10000, 8, input_length=maxlen))
model.add(Flatten())  # flatten the 3D tensor of embeddings into a 2D tensor of shape (samples, maxlen * 8)
model.add(Dense(1, activation='sigmoid'))  # add the classifier on top
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
Using pretrained word embeddings
# Processing the labels of the raw IMDB data
import os

imdb_dir = '/Users/***/Desktop/learning_log/net_work/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
labels = []
texts = []
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
# Tokenizing the text of the raw IMDB data
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 100  # cut reviews after 100 words
training_samples = 200  # train on 200 samples
validation_samples = 10000  # validate on 10,000 samples
max_words = 10000  # only consider the 10,000 most common words in the dataset

tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

# Shuffle the data, since the samples were loaded in order (all negative first, then all positive)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
# GloVe word embeddings: embeddings precomputed on the 2014 English Wikipedia dump
# Preprocessing the embeddings: parsing the GloVe word-embeddings file
glove_dir = '/Users/fanhua/Desktop/learning_log/net_work/glove.6B'

embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# Preparing the GloVe word-embeddings matrix
embedding_dim = 100
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:  # words not found in the embedding index will be all zeros
            embedding_matrix[i] = embedding_vector
# Defining the model
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
# Loading the pretrained GloVe embeddings into the Embedding layer
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False  # freeze the Embedding layer so training doesn't destroy the pretrained representations
# Training and evaluation
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
model.save_weights('pre_trained_glove_model.h5')
# Plotting the results
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.show()
# Training the same model without pretrained word embeddings
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
# Tokenizing the data of the test set
test_dir = os.path.join(imdb_dir, 'test')
labels = []
texts = []
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(test_dir, label_type)
    for fname in sorted(os.listdir(dir_name)):
        if fname[-4:] == '.txt':
            f = open(os.path.join(dir_name, fname))
            texts.append(f.read())
            f.close()
            if label_type == 'neg':
                labels.append(0)
            else:
                labels.append(1)
sequences = tokenizer.texts_to_sequences(texts)
x_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(labels)

# Evaluating the pretrained-embeddings model on the test set
model.load_weights('pre_trained_glove_model.h5')
model.evaluate(x_test, y_test)
Recurrent neural networks
A recurrent neural network (RNN) processes a sequence by iterating through its elements while maintaining a state containing information about what it has seen so far. In effect, an RNN is a type of neural network that has an internal loop. The state of the RNN is reset between processing two different, independent sequences (such as two different IMDB reviews), so you still consider one sequence a single data point: a single input to the network. What changes is that this data point is no longer processed in a single step; rather, the network internally loops over the sequence elements.
# Numpy implementation of a simple RNN
import numpy as np

timesteps = 100  # number of timesteps in the input sequence
input_features = 32  # dimensionality of the input feature space
output_features = 64  # dimensionality of the output feature space

inputs = np.random.random((timesteps, input_features))  # input data: random noise for the sake of the example
state_t = np.zeros((output_features,))  # initial state: an all-zero vector

# Random weight matrices
W = np.random.random((output_features, input_features))
U = np.random.random((output_features, output_features))
b = np.random.random((output_features,))

successive_outputs = []
for input_t in inputs:
    # Combine the input with the current state (the previous output) to obtain the current output
    output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)
    successive_outputs.append(output_t)  # store this output
    state_t = output_t  # update the state of the network, for the next timestep

# The final output is a 2D tensor of shape (timesteps, output_features)
final_output_sequence = np.stack(successive_outputs, axis=0)
Recurrent layers in Keras
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN

model = Sequential()
model.add(Embedding(1000, 32))
model.add(SimpleRNN(32))  # returns only the output at the last timestep
model.summary()

# Returning the full sequence of successive outputs
model = Sequential()
model.add(Embedding(1000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.summary()

# When stacking recurrent layers, all intermediate layers must return their full sequence of outputs; only the last one returns the final output
model = Sequential()
model.add(Embedding(1000, 32))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32, return_sequences=True))
model.add(SimpleRNN(32))
model.summary()
Let's now apply such a model to the IMDB movie-review classification problem.
# Preparing the IMDB data
from keras.datasets import imdb
from keras.preprocessing import sequence

max_features = 10000  # number of words to consider as features
maxlen = 500  # cut texts after this number of words
batch_size = 32

print('Loading data...')
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')

print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)
# Training the model with Embedding and SimpleRNN layers
from keras.layers import Dense
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(input_train,
y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
# Plotting the results
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
Understanding the LSTM and GRU layers
The problem with SimpleRNN is the vanishing gradient problem: in theory, at time t the layer should be able to retain information about inputs seen many timesteps earlier, but in practice such long-term dependencies prove impossible to learn.
The LSTM (long short-term memory) layer is a variant of SimpleRNN that adds a way to carry information across many timesteps: it saves information for later, preventing older signals from gradually vanishing during processing.
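As a rough sketch of this mechanism (pseudocode for intuition, not runnable code; the actual layer differs in detail), the LSTM maintains a carry c_t that flows across timesteps alongside the output:
# Three transformations of input_t and state_t, each with its own weights
# (i: input gate, f: forget gate, k: candidate values):
# i_t = activation(dot(state_t, Ui) + dot(input_t, Wi) + bi)
# f_t = activation(dot(state_t, Uf) + dot(input_t, Wf) + bf)
# k_t = activation(dot(state_t, Uk) + dot(input_t, Wk) + bk)
# The next carry mixes old and new information, which is what lets signals survive many timesteps:
# c_t+1 = i_t * k_t + c_t * f_t
# output_t = activation(dot(state_t, Uo) + dot(input_t, Wo) + dot(c_t, Vo) + bo)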
# Using the LSTM layer in Keras
from keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(32))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(input_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
Advanced use of recurrent neural networks
Recurrent dropout: applying dropout inside recurrent layers to fight overfitting.
Stacking recurrent layers: increases the representational power of the network (at the cost of higher computational load).
Bidirectional recurrent layers: present the same information to a recurrent network in different ways, increasing accuracy and mitigating forgetting issues.
A temperature-forecasting problem
# Inspecting the data of the Jena weather dataset
import os
data_dir = '/users/fanhua/Downloads/jena_climate'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))
# Parsing the data (dropping the first column, 'Date Time')
import numpy as np

float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
    values = [float(x) for x in line.split(',')[1:]]
    float_data[i, :] = values
# Plotting the temperature timeseries
from matplotlib import pyplot as plt
temp = float_data[:, 1]  # temperature (in degrees Celsius)
plt.plot(range(len(temp)), temp)
plt.show()
# Plotting the first 10 days of the temperature timeseries (data is recorded every 10 minutes, so 10 days = 1,440 timesteps)
plt.plot(range(1440), temp[:1440])
# Preparing the data: normalizing each feature, using the mean and standard deviation of the first 200,000 timesteps (the training data)
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
In Python, a generator is an object that computes values lazily, producing them one at a time as a loop iterates over it.
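A minimal sketch (my own illustration) of the yield mechanism the data generator below relies on:
def count_up(start):
    while True:          # an infinite generator, like the data generator below
        yield start      # execution pauses here until the next value is requested
        start += 1

gen = count_up(10)
print(next(gen), next(gen))  # 10 11

# Generator yielding timeseries samples and their targets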
def generator(data, lookback, delay, min_index, max_index,
              shuffle=False, batch_size=128, step=6):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while 1:
        if shuffle:  # draw rows at random
            rows = np.random.randint(min_index + lookback, max_index, size=batch_size)
        else:  # draw rows in chronological order, wrapping around when the end is reached
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))  # data.shape[-1] is the number of features
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay][1]  # the target is the temperature (column 1)
        yield samples, targets  # yield makes this function a generator
#准备训练生成器、验证生成器和测试生成器
lookback = 1440 step = 6
delay = 144 batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size)
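As a sanity check (my own sketch, assuming the setup above), one batch drawn from train_gen looks like this: with lookback=1440 and step=6, each sample spans 1440 // 6 = 240 timesteps, and the Jena data has 14 features per timestep.
samples, targets = next(train_gen)
print(samples.shape, targets.shape)  # (128, 240, 14) (128,)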
val_steps = (300000 - 200001 - lookback) // batch_size  # how many steps to draw from val_gen in order to see the entire validation set
test_steps = (len(float_data) - 300001 - lookback) // batch_size  # how many steps to draw from test_gen in order to see the entire test set
Evaluation: a common-sense, non-machine-learning baseline is to always predict that the temperature 24 hours from now will be equal to the temperature right now, and to score it with the mean absolute error (MAE) metric.
# Computing the MAE of the common-sense baseline
def evaluate_naive_method():
    batch_maes = []
    for step in range(val_steps):
        samples, targets = next(val_gen)
        preds = samples[:, -1, 1]  # predict the last temperature seen in the input sequence
        mae = np.mean(np.abs(preds - targets))
        batch_maes.append(mae)
    print(np.mean(batch_maes))

evaluate_naive_method()
# → 0.2897359729905486 (MAE on the normalized data)
# Converting the MAE back to a Celsius error by multiplying by the standard deviation of the temperature column
celsius_mae = 0.29 * std[1]
# → 2.5672247338393395, i.e. an average absolute error of about 2.57°C — fairly large
A basic machine-learning approach
# Training and evaluating a densely connected model
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
Plotting the results
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
A first recurrent baseline
# Training and evaluating a GRU-based model (GRU: gated recurrent unit)
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
Using recurrent dropout to fight overfitting
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
                     dropout=0.2,  # dropout rate for the layer's input units
                     recurrent_dropout=0.2,  # dropout rate for the recurrent units
                     input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
Stacking recurrent layers
The usual ways to increase network capacity are to increase the number of units per layer or to add more layers. Recurrent layer stacking is a classic way to build more powerful recurrent networks.
# Training and evaluating a dropout-regularized, stacked GRU model
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,dropout=0.1,
recurrent_dropout=0.5,
return_sequences=True,  # intermediate layers must return their full sequence of outputs
input_shape=(None, float_data.shape[-1])))
model.add(layers.GRU(64, activation='relu', dropout=0.1,recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
Using bidirectional RNNs
RNNs are notably order-dependent, or time-dependent: they process the timesteps of their input sequences in order, and shuffling or reversing the timesteps can completely change the representations the RNN extracts from a sequence.
# Training and evaluating an LSTM using reversed sequences
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras import layers
from keras.models import Sequential
max_features = 10000
maxlen = 500
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = [x[::-1] for x in x_train]  # reverse the sequences
x_test = [x[::-1] for x in x_test]
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(layers.Embedding(max_features, 128))
model.add(layers.LSTM(32))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
# Training and evaluating a bidirectional LSTM
model = Sequential()
model.add(layers.Embedding(max_features, 32))
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
# Training a bidirectional GRU on the temperature-forecasting data
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Bidirectional(layers.GRU(32),
input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history =model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
Sequence processing with convnets
A 1D convolution extracts local 1D patches (subsequences) from a sequence, so a 1D convolution layer can recognize local patterns in a sequence.
Pooling also works in 1D: extract 1D patches (subsequences) from the input and output their maximum value (max pooling) or average value (average pooling). As with 2D convnets, this operation is used to reduce the length of the 1D input (subsampling).
Implementing a 1D convnet
In Keras, you use a 1D convnet via the Conv1D layer, whose interface is similar to Conv2D. It takes as input 3D tensors with shape (samples, time, features) and returns similarly shaped 3D tensors. The convolution window is a 1D window on the temporal axis (axis 1 of the input tensor), as the shape check below illustrates.
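A quick shape check (my own sketch, not from the original notes), assuming an input of 500 timesteps with 32 features:
from keras.models import Sequential
from keras import layers

model = Sequential()
model.add(layers.Conv1D(16, 7, activation='relu', input_shape=(500, 32)))  # (time, features)
model.add(layers.MaxPooling1D(5))
print(model.output_shape)  # (None, 98, 16): the size-7 window shortens time from 500 to 494, and pooling divides it by 5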
The IMDB sentiment-classification task
# Preparing the data
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000
max_len = 500
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Training and evaluating a simple 1D convnet on the IMDB data
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1, activation='sigmoid'))  # sigmoid output for binary classification (binary_crossentropy expects probabilities)
model.summary()
model.compile(optimizer=RMSprop(lr=1e-4),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
Combining CNNs and RNNs to process long sequences
# Training and evaluating a simple 1D convnet on the Jena data
# This performs poorly: the convnet looks for patterns anywhere in the input and has no sensitivity to the temporal order of the timesteps
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Conv1D(32, 5, activation='relu',input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
# Preparing higher-resolution data generators for the Jena dataset
step = 3  # previously 6 (one data point per hour); now one data point per 30 minutes
lookback = 720  # 720 raw timesteps (recorded every 10 minutes) = 5 days
delay = 144  # targets are still 24 hours in the future
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step)
val_steps = (300000 - 200001 - lookback) // 128  # how many steps to draw from val_gen in order to see the entire validation set
test_steps = (len(float_data) - 300001 - lookback) // 128  # how many steps to draw from test_gen in order to see the entire test set
# Model combining a 1D convolutional base and a GRU layer
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Conv1D(32, 5,
activation='relu',
input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)