Trying out TF + CNN on MNIST, instead of the KNN from Machine Learning in Action (《机器学习实战》).
First, import the packages needed for this experiment:
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
Then define some variables that will be used later; see the comments for their meanings:
LABELS = 10 # 10 classes of digit images
WIDTH = 28 # image width/height
CHANNELS = 1 # grayscale images, so a single channel
VALID = 10000 # validation set size
STEPS = 3500 # number of training steps
BATCH = 100 # stochastic gradient descent batch size
PATCH = 5 # convolution kernel size
DEPTH = 8 #32 # convolution output depth == number of kernels
HIDDEN = 100 #1024 # number of hidden units in the fully connected layer
LR = 0.001 # learning rate
Then read the data and do some simple preprocessing:
data = pd.read_csv('input/train.csv') # read the csv file into a DataFrame
labels = np.array(data.pop('label')) # remove the label column and return it as an array
labels = LabelEncoder().fit_transform(labels)[:, None]
labels = OneHotEncoder().fit_transform(labels).todense()
data = StandardScaler().fit_transform(np.float32(data.values)) # DataFrame to array
data = data.reshape(-1, WIDTH, WIDTH, CHANNELS) # reshape into 2D images x channel
train_data, valid_data = data[:-VALID], data[-VALID:]
train_labels, valid_labels = labels[:-VALID], labels[-VALID:]
sklearn.preprocessing.LabelEncoder(): normalizes labels by mapping the label values onto range(n_classes), i.e. the integers 0 to n_classes - 1. For example:
>> le = preprocessing.LabelEncoder()
>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>> list(le.classes_)
['amsterdam', 'paris', 'tokyo'] # the three classes map to 0, 1, 2
>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1]...)
>> list(le.inverse_transform([2, 2, 1])) # the inverse mapping
['tokyo', 'tokyo', 'paris']
One-hot encoding is used because most algorithms compute with distances in a vector space; for a categorical variable with no ordinal relationship, we want its values to carry no artificial ordering and to be equidistant from the origin. One-hot encoding embeds the values of a discrete feature into Euclidean space, with each value mapped to its own point, which makes distance computations between feature values more reasonable. After one-hot encoding, each dimension of the encoded feature can be treated as a continuous feature, so it can be normalized the same way continuous features are, e.g. scaled to [-1, 1] or standardized to zero mean and unit variance.
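A minimal sketch of the two-step LabelEncoder + OneHotEncoder pipeline used above, on hypothetical toy labels (using the imports from the top of this post):
y = np.array([3, 0, 3, 1]) # toy labels with classes {0, 1, 3}
y_int = LabelEncoder().fit_transform(y)[:, None] # column of class indices: [[2], [0], [2], [1]]
y_hot = OneHotEncoder().fit_transform(y_int).todense() # sparse one-hot -> dense matrix
print(y_hot)
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]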
Then print the shapes:
print('train data shape = ' + str(train_data.shape) + ' = (TRAIN, WIDTH, WIDTH, CHANNELS)')
print('labels shape = ' + str(labels.shape) + ' = (TRAIN, LABELS)')
Define the "formal parameters" (placeholders) for later use:
tf_data = tf.placeholder(tf.float32, shape=(None, WIDTH, WIDTH, CHANNELS))
tf_labels = tf.placeholder(tf.float32, shape=(None, LABELS))
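Placeholders hold no data themselves; values are supplied at run time through feed_dict. A minimal hypothetical sketch, separate from the model above:
x = tf.placeholder(tf.float32, shape=(None, 2)) # first dimension left flexible
y = tf.reduce_sum(x, axis=1)
with tf.Session() as s:
    print(s.run(y, feed_dict={x: np.ones((3, 2))})) # [2. 2. 2.]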
Create the weights:
w1 = tf.Variable(tf.truncated_normal([PATCH, PATCH, CHANNELS, DEPTH], stddev=0.1))
b1 = tf.Variable(tf.zeros([DEPTH]))
w2 = tf.Variable(tf.truncated_normal([PATCH, PATCH, DEPTH, 2*DEPTH], stddev=0.1))
b2 = tf.Variable(tf.constant(1.0, shape=[2*DEPTH]))
w3 = tf.Variable(tf.truncated_normal([WIDTH // 4 * WIDTH // 4 * 2*DEPTH, HIDDEN], stddev=0.1))
b3 = tf.Variable(tf.constant(1.0, shape=[HIDDEN]))
w4 = tf.Variable(tf.truncated_normal([HIDDEN, LABELS], stddev=0.1))
b4 = tf.Variable(tf.constant(1.0, shape=[LABELS]))
tf.truncated_normal(shape, mean, stddev): shape gives the dimensions of the generated tensor, mean is the mean, and stddev is the standard deviation. The function samples from a truncated normal distribution with the given mean and standard deviation; samples falling more than two standard deviations from the mean are dropped and re-drawn.
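A quick sanity check of the truncation (a sketch; with stddev=0.1, everything should stay within two standard deviations of the mean):
samples = tf.truncated_normal([10000], mean=0.0, stddev=0.1)
with tf.Session() as s:
    v = s.run(samples)
print(v.min(), v.max()) # both lie within about [-0.2, 0.2]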
Now define the convolutional layers:
def logits(data):
    # convolutional layer 1
    x = tf.nn.conv2d(data, w1, [1, 1, 1, 1], padding='SAME')
    x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    x = tf.nn.relu(x + b1)
    # convolutional layer 2
    x = tf.nn.conv2d(x, w2, [1, 1, 1, 1], padding='SAME')
    x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    x = tf.nn.relu(x + b2)
    # fully connected layer
    x = tf.reshape(x, (-1, WIDTH // 4 * WIDTH // 4 * 2*DEPTH))
    x = tf.nn.relu(tf.matmul(x, w3) + b3)
    return tf.matmul(x, w4) + b4
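The two 2x2 max-pools each halve the spatial size, 28 -> 14 -> 7, which is why the flattened width is WIDTH // 4 * WIDTH // 4 * 2*DEPTH. A shape trace (assuming DEPTH=8 as set above):
# input:         (N, 28, 28, 1)
# conv1 + pool1: (N, 14, 14, DEPTH)  = (N, 14, 14, 8)
# conv2 + pool2: (N, 7, 7, 2*DEPTH)  = (N, 7, 7, 16)
# flattened:     (N, 7*7*16)         = (N, 784)
# logits:        (N, LABELS)         = (N, 10)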
The prediction, loss, and accuracy ops:
tf_pred = tf.nn.softmax(logits(tf_data))
tf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits(tf_data),
labels=tf_labels))
tf_acc = 100*tf.reduce_mean(tf.to_float(tf.equal(tf.argmax(tf_pred, 1), tf.argmax(tf_labels, 1))))
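Note that logits(tf_data) is called twice above, so the graph contains two identical copies of the network (the variables are shared, so the results agree, but the computation is redundant). A minimal refactor sketch that builds the network once:
tf_logits = logits(tf_data)
tf_pred = tf.nn.softmax(tf_logits)
tf_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf_logits, labels=tf_labels))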
A few optimizers to choose from (the commented-out lines are alternatives):
#tf_opt = tf.train.GradientDescentOptimizer(LR)
#tf_opt = tf.train.AdamOptimizer(LR)
tf_opt = tf.train.RMSPropOptimizer(LR)
tf_step = tf_opt.minimize(tf_loss)
Initialize the session:
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
ShuffleSplit generates randomized train/test index splits for cross-validation; get_n_splits returns the number of splitting iterations (not the datasets themselves):
ss = ShuffleSplit(n_splits=STEPS, train_size=BATCH)
ss.get_n_splits(train_data, train_labels)
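Here ShuffleSplit serves as a minibatch sampler: each split yields BATCH random training indices, and the test half of each split is ignored (the `_` in the loop below). A toy sketch of what split() yields, on a hypothetical tiny array:
X = np.arange(10).reshape(5, 2)
for train_idx, _ in ShuffleSplit(n_splits=2, train_size=2).split(X):
    print(train_idx) # e.g. [3 0] -- a fresh random pair of row indices each split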
history = [(0, np.nan, 10)] # initialize the (step, loss, accuracy) history
for step, (idx, _) in enumerate(ss.split(train_data, train_labels), start=1):
    fd = {tf_data: train_data[idx], tf_labels: train_labels[idx]}
    session.run(tf_step, feed_dict=fd)
    if step % 500 == 0:
        fd = {tf_data: valid_data, tf_labels: valid_labels}
        valid_loss, valid_accuracy = session.run([tf_loss, tf_acc], feed_dict=fd)
        history.append((step, valid_loss, valid_accuracy))
        print('Step %i \t Valid. Acc. = %f'%(step, valid_accuracy), end='\n')
steps, loss, acc = zip(*history)
And this part draws the plot:
fig = plt.figure()
plt.title('Validation Loss / Accuracy')
ax_loss = fig.add_subplot(111)
ax_acc = ax_loss.twinx()
plt.xlabel('Training Steps')
plt.xlim(0, max(steps))
ax_loss.plot(steps, loss, '-o', color='C0')
ax_loss.set_ylabel('Log Loss', color='C0');
ax_loss.tick_params('y', colors='C0')
ax_loss.set_ylim(0.01, 0.5)
ax_acc.plot(steps, acc, '-o', color='C1')
ax_acc.set_ylabel('Accuracy [%]', color='C1');
ax_acc.tick_params('y', colors='C1')
ax_acc.set_ylim(1,100)
plt.show()
Finally, predict on the test set and write the submission:
test = pd.read_csv('input/test.csv')
test_data = StandardScaler().fit_transform(np.float32(test.values)) # convert the DataFrame to a numpy array (note: this fits a new scaler on the test set)
test_data = test_data.reshape(-1, WIDTH, WIDTH, CHANNELS) # reshape into 2D images x channel
test_pred = session.run(tf_pred, feed_dict={tf_data:test_data})
test_labels = np.argmax(test_pred, axis=1)
k = 0
print("Label Prediction: %i"%test_labels[k])
fig = plt.figure(figsize=(2,2)); plt.axis('off')
plt.imshow(test_data[k,:,:,0]); plt.show()
submission = pd.DataFrame(data={'ImageId':(np.arange(test_labels.shape[0])+1), 'Label':test_labels})
submission.to_csv('submission.csv', index=False)
submission.tail()
I've been a bit busy lately... to be continued.