# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 16:47:11 2018
构建一个卷积神经网络来实现手势识别
@author: ltx
具体的实现步骤:
在向前传播过程中,添加:
1.实现卷积层
2.实现池化层
反向传播实现参数优化的功能使用tensorflow模型框架来实现
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import h5py
from tensorflow.python.framework import ops
import cnn_utils
plt.rcParams['figure.figsize'] = (5.0, 4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
np.random.seed(1)
xarray=np.array([[[1, 1, 2, 2, 3, 4],
[1, 1, 2, 2, 3, 4],
[1, 1, 2, 2, 3, 4]],
[[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5],
[0, 1, 2, 3, 4, 5]],
[[1, 1, 2, 2, 3, 4],
[1, 1, 2, 2, 3, 4],
[1, 1, 2, 2, 3, 4]]])
print("xarray="+str(xarray))
print('constant:\n'+str(np.pad(xarray, ((0, 0), (1, 1), (2, 2)), 'constant')))
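# A minimal zero-padding helper (sketch, not part of the original script): it wraps
# np.pad the same way as the demo above, but pads only the height and width axes of
# a batch of images shaped (m, n_H, n_W, n_C). The name zero_pad_demo is made up here.
def zero_pad_demo(X, pad):
    # Pad height and width with `pad` zeros on each side; leave batch and channel axes alone.
    return np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)

print("zero_pad_demo shape: " + str(zero_pad_demo(np.random.randn(2, 3, 3, 3), 2).shape))  # (2, 7, 7, 3)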
# Create placeholders for the input images X and the labels Y
def createhot(nh,nw,nc,ny):
X=tf.placeholder(tf.float32,[None,nh,nw,nc],name="X")
Y=tf.placeholder(tf.float32,[None,ny],name="Y")
return X,Y
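
# Quick sanity check (sketch, added): the placeholders should accept any batch size.
# The 64x64x3 / 6-class shapes are assumptions matching the gesture images loaded later.
with tf.Graph().as_default():
    X_demo, Y_demo = createhot(64, 64, 3, 6)
    print("X_demo = " + str(X_demo))  # Tensor("X:0", shape=(?, 64, 64, 3), dtype=float32)
    print("Y_demo = " + str(Y_demo))  # Tensor("Y:0", shape=(?, 6), dtype=float32)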
# Initialize the model parameters: W1 for conv layer 1 and W2 for conv layer 2, using Xavier initialization
def initial_parameters():
W1=tf.get_variable("W1",[4,4,3,8],initializer=tf.contrib.layers.xavier_initializer(seed=0))
W2=tf.get_variable("W2",[2,2,8,16],initializer=tf.contrib.layers.xavier_initializer(seed=0))
parameters={"W1":W1,
"W2":W2
}
return parameters
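
# Sanity check (sketch, added): build the filters in a throwaway graph and print their static
# shapes; TensorFlow's filter convention is [filter_height, filter_width, in_channels, out_channels].
with tf.Graph().as_default():
    params_demo = initial_parameters()
    print("W1 shape = " + str(params_demo["W1"].shape))  # expected (4, 4, 3, 8)
    print("W2 shape = " + str(params_demo["W2"].shape))  # expected (2, 2, 8, 16)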
# Forward propagation
def forward(X,parameters):
W1=parameters["W1"]
    Z1=tf.nn.conv2d(X,W1,strides=[1,1,1,1],padding="SAME")  # strides[1] and strides[2] set the stride along height and width; strides[0] and strides[3] are fixed to 1
A1=tf.nn.relu(Z1)
P1=tf.nn.max_pool(A1,ksize=[1,8,8,1],strides=[1,8,8,1],padding="SAME")
W2=parameters["W2"]
    Z2=tf.nn.conv2d(P1,W2,strides=[1,1,1,1],padding="SAME")  # strides[1] and strides[2] set the stride along height and width; strides[0] and strides[3] are fixed to 1
A2=tf.nn.relu(Z2)
P2=tf.nn.max_pool(A2,ksize=[1,4,4,1],strides=[1,4,4,1],padding="SAME")
    # Flatten P2 into a vector
P=tf.contrib.layers.flatten(P2)
    # Fully connected layer
    Z3=tf.contrib.layers.fully_connected(P,6,activation_fn=None)  # 6 output units, one per gesture class (digits 0-5)
    # no activation here: softmax is applied inside the cost function
return Z3
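
# Shape check (sketch, added): with 64x64x3 inputs, SAME padding and the 8x8 / 4x4 pools
# above, the fully connected output Z3 should be (batch_size, 6). Built in a throwaway
# graph so it does not interfere with the model defined below.
with tf.Graph().as_default():
    X_demo, _ = createhot(64, 64, 3, 6)
    Z3_demo = forward(X_demo, initial_parameters())
    print("Z3_demo shape = " + str(Z3_demo.shape))  # expected (?, 6)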
# Compute the cost
def compute_cost(Z3,Y):
cost=tf.nn.softmax_cross_entropy_with_logits(logits=Z3,labels=Y)
cost=tf.reduce_mean(cost)
return cost
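
# Sanity check (sketch, added): evaluate the cost on random data; it should print a single
# finite scalar. For untrained weights it tends to sit near ln(6) ~ 1.79, though the exact
# value depends on the random inputs. Uses a throwaway graph and session.
with tf.Graph().as_default():
    np.random.seed(1)
    X_demo, Y_demo = createhot(64, 64, 3, 6)
    cost_demo = compute_cost(forward(X_demo, initial_parameters()), Y_demo)
    with tf.Session() as sess_demo:
        sess_demo.run(tf.global_variables_initializer())
        print("cost_demo = " + str(sess_demo.run(cost_demo,
              feed_dict={X_demo: np.random.randn(4, 64, 64, 3),
                         Y_demo: np.eye(6)[np.random.randint(0, 6, 4)]})))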
# Backward propagation (parameter optimization) is done directly by the TensorFlow framework.
# Build a simple convolutional neural network model.
def model(train_x,train_y,epoches=100,batch_size=64):
ops.reset_default_graph()
seed=3
costs=[]
(m,nh,nw,nc)=train_x.shape
ny=train_y.shape[1]
X,Y=createhot(nh,nw,nc,ny)
    # Initialize the model parameters
parameters=initial_parameters()
    # Forward propagation through the convolutional layers
Z3=forward(X,parameters)
    # Compute the cost
cost=compute_cost(Z3,Y)
    # Use the framework's optimizer for backward propagation / parameter updates
optimizer=tf.train.AdamOptimizer(learning_rate=0.009).minimize(cost)
    # Initialize all variables
    inits=tf.global_variables_initializer()
    # Create a session and run the computation graph
with tf.Session() as sess:
sess.run(inits)
for epoch in range(epoches):
seed=seed+1
epoch_cost=0
batches=cnn_utils.random_mini_batches(train_x,train_y,batch_size,seed)
batch_num=int(m/batch_size)
for batch in batches:
(batch_x,batch_y)=batch
_,batch_cost=sess.run([optimizer,cost],feed_dict={X:batch_x,Y:batch_y})
epoch_cost=epoch_cost+batch_cost/batch_num
if epoch % 1==0:
costs.append(epoch_cost)
if epoch % 5==0:
print("当前是第 " + str(epoch) + " 代,成本值为:" + str(epoch_cost))
return parameters
# Load the dataset
np.random.seed(1)
x_train,y_train,x_test,y_test,classes=cnn_utils.load_dataset()
# No need to flatten the data; just normalize it
train_x=x_train/255
test_x=x_test/255
print("train_x.shape="+str(train_x.shape))
# Convert the labels to one-hot matrices
train_y=cnn_utils.convert_to_one_hot(y_train,6).T  # C = 6 classes, labels 0-5
test_y=cnn_utils.convert_to_one_hot(y_test,6).T
# Run the convolutional neural network model
parameters=model(train_x=train_x,train_y=train_y)
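
# Possible follow-up (sketch, not in the original script): test_x / test_y are prepared
# above but never evaluated. Accuracy is easiest to compute inside model()'s session,
# while the graph and trained variables are still alive, e.g.:
#     predict_op = tf.argmax(Z3, 1)
#     correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
#     accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#     print("test accuracy = " + str(accuracy.eval({X: test_x, Y: test_y})))
# (X, Y and Z3 are the tensors built inside model(); test_x / test_y would need to be
# passed into model() as extra arguments.)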