TensorFlow Basics
2019-10-01
Recalcitrant
The TensorFlow Framework
TensorFlow official website: https://tensorflow.google.cn/
TensorFlow Chinese community: http://www.tensorfly.cn/
Contents
I. TensorFlow Basic Concepts
II. Basic Structure of a TensorFlow Program
III. MNIST Handwritten Digit Classification (Shallow Neural Network)
IV. MNIST Handwritten Digit Classification (Deep Neural Network)
V. MNIST Handwritten Digit Classification (Convolutional Neural Network)
I. TensorFlow Basic Concepts
1. Installing TensorFlow
- CPU version
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple/ tensorflow
- GPU-accelerated version
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple/ tensorflow-gpu
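After installation, a quick sanity check (a minimal sketch, TensorFlow 1.x) confirms that the library imports and reports its version; for the GPU build, tf.test.is_gpu_available() tells you whether a usable GPU was found.
import tensorflow as tf
print(tf.__version__)              # installed TensorFlow version
print(tf.test.is_gpu_available())  # True only for the GPU build with a usable GPU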
2. TensorFlow Data Types
(1) Constant
node = tf.constant(value, dtype)
(2) Variable
W = tf.Variable(initial_value, dtype=dtype)
(3) Placeholder
node = tf.placeholder(dtype)
Common data types include tf.float32, tf.float64, tf.int32, tf.int64, tf.string, and tf.bool.
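A minimal sketch (TensorFlow 1.x; the variable names are illustrative) showing the three node types side by side: constants hold fixed values, variables must be initialized before use, and placeholders are fed values at run time.
import tensorflow as tf

c = tf.constant(3.0, tf.float32)          # Constant: fixed value
v = tf.Variable([1.0], dtype=tf.float32)  # Variable: needs initialization
p = tf.placeholder(tf.float32)            # Placeholder: value supplied via feed_dict

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize all variables
    print(sess.run(c))                            # 3.0
    print(sess.run(v))                            # [1.]
    print(sess.run(p, feed_dict={p: 5.0}))        # 5.0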
3. Op (Operation) Nodes
node_C = tf.operation(node_A, node_B)
Example:
node_A = tf.constant(10,tf.float32)
node_B = tf.constant(11,tf.float32)
node_C = tf.add(node_A,node_B)
sess = tf.Session()
print(sess.run(node_C))
Output: 21.0
4. Sessions
- 1. Launch the computation graph
sess = tf.Session()
- 2. Close the session
sess.close()
- 3. Context manager
with tf.Session() as sess:
    sess.run(node_C)
- 4. Specify the execution device
with tf.Session() as sess:
    with tf.device("/gpu:1"):
        sess.run(node_C)
"/cpu:0": the machine's CPU
"/gpu:0": the machine's first GPU (if any)
"/gpu:1": the machine's second GPU (if any)
and so on
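Device strings can be combined with a session configuration. The sketch below (TensorFlow 1.x; the device choice is illustrative) logs where each op runs and falls back to the CPU if the requested GPU is unavailable.
import tensorflow as tf

a = tf.constant(10, tf.float32)
b = tf.constant(11, tf.float32)
with tf.device("/gpu:0"):        # request the first GPU (if any)
    c = tf.add(a, b)

config = tf.ConfigProto(
    allow_soft_placement=True,   # fall back to the CPU if the device is unavailable
    log_device_placement=True)   # print the device chosen for each op
with tf.Session(config=config) as sess:
    print(sess.run(c))           # 21.0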
II. Basic Structure of a TensorFlow Program
Using linear regression as an example:
1. Prepare the data
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-1,1,100)
y = 2 * x + np.random.randn(100) * 0.3
plt.plot(x,y)
plt.show()
x_test = np.linspace(-1,1,10)
y_test = 2*x_test
plt.plot(x_test,y_test)
plt.show()
2. Build the model
X = tf.placeholder(dtype=tf.float32, shape=None)
Y = tf.placeholder(dtype=tf.float32, shape=None)
# Forward pass
W = tf.Variable(tf.random_normal(shape=[1]), name='weight')
b = tf.Variable(tf.zeros(shape=[1]), name='bias')
z = tf.multiply(W, X) + b
# Backward pass: loss and optimizer
cost = tf.reduce_mean(tf.square(Y-z))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
train_epochs = 20
display_step = 2
init = tf.global_variables_initializer()
cost = tf.reduce_mean(tf.square(Y-z)): computes the mean squared error between the predicted value z and the true value Y.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
The line above uses gradient descent to adjust the weight W and the bias b under the hood so that the mean squared error cost (the loss) is minimized.
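To make "adjusting W and b under the hood" concrete, here is a minimal NumPy sketch (not part of the original code; the learning rate is chosen just for this sketch) of a single gradient-descent step on the same mean-squared-error loss. GradientDescentOptimizer.minimize(cost) repeats essentially this update, using gradients computed automatically from the graph.
import numpy as np

def gradient_descent_step(W, b, x, y, lr=0.1):
    z = W * x + b                     # forward pass: prediction
    error = z - y
    grad_W = np.mean(2 * error * x)   # d(MSE)/dW
    grad_b = np.mean(2 * error)       # d(MSE)/db
    return W - lr * grad_W, b - lr * grad_b

W, b = 0.0, 0.0
x = np.linspace(-1, 1, 100)
y = 2 * x
for _ in range(200):
    W, b = gradient_descent_step(W, b, x, y)
print(W, b)   # W approaches 2, b stays near 0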
3. Train the model
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(train_epochs):
        for (x_s, y_s) in zip(x, y):
            sess.run(optimizer, feed_dict={X: x_s, Y: y_s})
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: x_test, Y: y_test})
            print('epoch: ', epoch, ' loss:', loss)
    # Prediction
    print("x=0.2, z=", sess.run(z, feed_dict={X: 0.2}))
4. Complete example
Using quadratic regression as an example:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

X = np.linspace(-1, 1, 300)[:, np.newaxis].astype('float32')
noise = np.random.normal(0, 0.05, X.shape).astype('float32')
y = np.square(X) - 0.5 + noise

def addConnect(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if not activation_function:
        return Wx_plus_b
    else:
        return activation_function(Wx_plus_b)

connect_1 = addConnect(X, 1, 10, tf.nn.relu)
predict_y = addConnect(connect_1, 10, 1)
loss = tf.reduce_mean(tf.square(y - predict_y))
optimizer = tf.train.AdamOptimizer(0.1)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)

for step in range(201):
    session.run(train)
    if step % 20 == 0:
        print(step, 'loss:', session.run(loss))
    if step == 200:
        predict_value = session.run(predict_y)
        ax = plt.subplot(111)
        ax.scatter(X, y)
        plt.ylim(-0.65, 0.65)
        lines = ax.plot(X, predict_value, 'r-', lw=5)
        plt.title('step: %d loss: %.4f' % (step, session.run(loss)))
        plt.show()
III. MNIST Handwritten Digit Classification (Shallow Neural Network)
1. Prepare the data
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
2. Explore the data
- Methods and attributes of the mnist variable
dir(mnist)[:]
(1) Inspect dataset information
(2) Inspect the data shapes
images = mnist.train.images
type(images), images.shape
Output: (numpy.ndarray, (55000, 784))
The output above shows that mnist.train contains 55000 samples, each with 784 features. The original images are 28×28, and 28×28 = 784, so each image, once flattened, yields 784 features.
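A quick check of the shapes described above, using the mnist object already loaded:
print(mnist.train.images.shape)   # (55000, 784): 55000 samples, 784 features each
print(mnist.train.labels.shape)   # (55000, 10): one-hot encoded labels
print(mnist.train.images[0].reshape(28, 28).shape)   # (28, 28): one flattened row restored to an image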
(3) Plot the digit images
import matplotlib.pyplot as plt
image = mnist.train.images[1].reshape(-1, 28)
plt.subplot(131)
plt.imshow(image)
plt.axis('off')
plt.subplot(132)
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.subplot(133)
plt.imshow(image, cmap='gray_r')
plt.axis('off')
plt.show()
import matplotlib.pyplot as plt
import math
import numpy as np

def drawDigit(position, image, title):
    # Unpack the (rows, columns, index) tuple
    plt.subplot(*position)
    # reshape(-1, 28): fix the column count at 28, infer the row count
    plt.imshow(image.reshape(-1, 28), cmap='gray_r')
    plt.axis('off')
    plt.title(title)

def batchDraw(batch_size):
    # Features and labels
    images, labels = mnist.train.next_batch(batch_size)
    # Number of images
    image_number = images.shape[0]
    # Rows and columns of the subplot grid: square root of the image count
    row_number = math.ceil(image_number ** 0.5)
    column_number = row_number
    plt.figure(figsize=(row_number, column_number))
    for i in range(row_number):
        for j in range(column_number):
            index = i * column_number + j
            if index < image_number:
                position = (row_number, column_number, index+1)
                image = images[index]
                # Labels are one-hot encoded; np.argmax() returns the index of
                # the maximum value, i.e. the digit label
                title = 'actual:%d' % (np.argmax(labels[index]))
                drawDigit(position, image, title)

batchDraw(196)
plt.show()
3. Build the model
From the data exploration above, each sample has 784 input features, so the input layer has 784 neurons.
Weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([1,10]))
predict_y = tf.nn.softmax(tf.matmul(X_holder, Weights) + biases)
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
This neural network has only an input layer and an output layer, with no hidden layer (a single-layer network).
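For reference, the model above is plain softmax regression. The NumPy sketch below (illustrative, not part of the original code) shows what predict_y and the cross-entropy loss compute for a single example:
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())            # shift for numerical stability
    return e / e.sum()

x = np.random.rand(784)                # one flattened image
W = np.zeros((784, 10))
b = np.zeros(10)
predict_y = softmax(x.dot(W) + b)      # 10 class probabilities that sum to 1
y = np.eye(10)[3]                      # one-hot label for the digit 3
loss = -np.sum(y * np.log(predict_y))  # cross-entropy for this example
print(predict_y.sum(), loss)           # 1.0 and log(10) ≈ 2.30 while W and b are all zeros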
4. Train the model
for i in range(501):
    images, labels = mnist.train.next_batch(batch_size)
    session.run(train, feed_dict={X_holder:images, y_holder:labels})
    if i % 25 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_value = session.run(accuracy, feed_dict={X_holder:mnist.test.images, y_holder:mnist.test.labels})
        print('step:%d accuracy:%.4f' % (i, accuracy_value))
5. Complete example
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
Weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([1,10]))
predict_y = tf.nn.softmax(tf.matmul(X_holder, Weights) + biases)
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
for i in range(500):
    images, labels = mnist.train.next_batch(batch_size)
    session.run(train, feed_dict={X_holder:images, y_holder:labels})
    if i % 25 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_value = session.run(accuracy, feed_dict={X_holder:mnist.test.images, y_holder:mnist.test.labels})
        print('step:%d accuracy:%.4f' % (i, accuracy_value))
6. Test the model
import math
import matplotlib.pyplot as plt
import numpy as np
def drawDigit_test(position, image, title, isTrue):
    plt.subplot(*position)
    plt.imshow(image.reshape(-1, 28), cmap='gray_r')
    plt.axis('off')
    if not isTrue:
        plt.title(title, color='red')
    else:
        plt.title(title)

def batchDraw_test(batch_size):
    images, labels = mnist.test.next_batch(batch_size)
    predict_labels = session.run(predict_y, feed_dict={X_holder:images, y_holder:labels})
    image_number = images.shape[0]
    row_number = math.ceil(image_number ** 0.5)
    column_number = row_number
    plt.figure(figsize=(row_number+8, column_number+8))
    for i in range(row_number):
        for j in range(column_number):
            index = i * column_number + j
            if index < image_number:
                position = (row_number, column_number, index+1)
                image = images[index]
                actual = np.argmax(labels[index])
                predict = np.argmax(predict_labels[index])
                isTrue = actual==predict
                title = 'actual:%d\npredict:%d' % (actual, predict)
                drawDigit_test(position, image, title, isTrue)

batchDraw_test(100)
plt.show()
The results above show that the single-layer network's prediction accuracy is below 90%.
IV. MNIST Handwritten Digit Classification (Deep Neural Network)
Data preparation and exploration are the same as above and are not repeated here.
1. Build the model
def addConnect(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size], stddev=0.01))
    biases = tf.Variable(tf.zeros([1, out_size]))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        return Wx_plus_b
    else:
        return activation_function(Wx_plus_b)
layer_1 = addConnect(X_holder, 784, 300, tf.nn.relu)
layer_2 = addConnect(layer_1, 300, 300, tf.nn.relu)
predict_y = addConnect(layer_2, 300, 10, tf.nn.softmax)
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdagradOptimizer(0.3)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
2. Train the model
for i in range(1001):
    images, labels = mnist.train.next_batch(batch_size)
    session.run(train, feed_dict={X_holder:images, y_holder:labels})
    if i % 50 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_value = session.run(accuracy, feed_dict={X_holder:mnist.test.images, y_holder:mnist.test.labels})
        print('step:%d accuracy:%.4f' % (i, accuracy_value))
3. Complete example
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
def addConnect(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.01))
    biases = tf.Variable(tf.zeros([1, out_size]))
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        return Wx_plus_b
    else:
        return activation_function(Wx_plus_b)
layer_1 = addConnect(X_holder, 784, 300, tf.nn.relu)
layer_2 = addConnect(layer_1, 300, 300, tf.nn.relu)
predict_y = addConnect(layer_2, 300, 10, tf.nn.softmax)
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdagradOptimizer(0.3)
train = optimizer.minimize(loss)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
for i in range(1000):
    images, labels = mnist.train.next_batch(batch_size)
    session.run(train, feed_dict={X_holder:images, y_holder:labels})
    if i % 50 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_value = session.run(accuracy, feed_dict={X_holder:mnist.test.images, y_holder:mnist.test.labels})
        print('step:%d accuracy:%.4f' % (i, accuracy_value))
4. Test the model
import math
import matplotlib.pyplot as plt
import numpy as np
def drawDigit_test(position, image, title, isTrue):
    plt.subplot(*position)
    plt.imshow(image.reshape(-1, 28), cmap='gray_r')
    plt.axis('off')
    if not isTrue:
        plt.title(title, color='red')
    else:
        plt.title(title)

def batchDraw_test(batch_size):
    images, labels = mnist.test.next_batch(batch_size)
    predict_labels = session.run(predict_y, feed_dict={X_holder:images, y_holder:labels})
    image_number = images.shape[0]
    row_number = math.ceil(image_number ** 0.5)
    column_number = row_number
    plt.figure(figsize=(row_number+8, column_number+8))
    for i in range(row_number):
        for j in range(column_number):
            index = i * column_number + j
            if index < image_number:
                position = (row_number, column_number, index+1)
                image = images[index]
                actual = np.argmax(labels[index])
                predict = np.argmax(predict_labels[index])
                isTrue = actual==predict
                title = 'actual:%d\npredict:%d' % (actual, predict)
                drawDigit_test(position, image, title, isTrue)

batchDraw_test(100)
plt.show()
The results above show that only 3 of the 100 digits were misclassified, i.e. roughly 97% accuracy.
V. MNIST Handwritten Digit Classification (Convolutional Neural Network)
Data preparation and exploration are the same as above and are not repeated here.
1. Build the model
# Reshape the 784 features into a 28×28 matrix
X_images = tf.reshape(X_holder, [-1, 28, 28, 1])
# Convolutional layer 1
conv1_Weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name='conv1_Weights')
conv1_biases = tf.Variable(tf.constant(0.1, shape=[32]), name='conv1_biases')
conv1_conv2d = tf.nn.conv2d(X_images, conv1_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv1_biases
conv1_activated = tf.nn.relu(conv1_conv2d)
conv1_pooled = tf.nn.max_pool(conv1_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Convolutional layer 2
conv2_Weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name='conv2_Weights')
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]), name='conv2_biases')
conv2_conv2d = tf.nn.conv2d(conv1_pooled, conv2_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv2_biases
conv2_activated = tf.nn.relu(conv2_conv2d)
conv2_pooled = tf.nn.max_pool(conv2_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Fully connected layer 1
connect1_flat = tf.reshape(conv2_pooled, [-1, 7 * 7 * 64])
connect1_Weights = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name='connect1_Weights')
connect1_biases = tf.Variable(tf.constant(0.1, shape=[1024]), name='connect1_biases')
connect1_Wx_plus_b = tf.add(tf.matmul(connect1_flat, connect1_Weights), connect1_biases)
connect1_activated = tf.nn.relu(connect1_Wx_plus_b)
# Fully connected layer 2
connect2_Weights = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name='connect2_Weights')
connect2_biases = tf.Variable(tf.constant(0.1, shape=[10]), name='connect2_biases')
connect2_Wx_plus_b = tf.add(tf.matmul(connect1_activated, connect2_Weights), connect2_biases)
predict_y = tf.nn.softmax(connect2_Wx_plus_b)
# Loss function, optimizer, and training op
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdamOptimizer(0.0001)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
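To verify the 28 → 14 → 7 spatial shrinkage produced by the two 2×2 poolings (and hence the 7 * 7 * 64 flatten size), you can print the static shapes of the intermediate tensors defined above:
print(X_images.get_shape())       # (?, 28, 28, 1)
print(conv1_pooled.get_shape())   # (?, 14, 14, 32)
print(conv2_pooled.get_shape())   # (?, 7, 7, 64)
print(connect1_flat.get_shape())  # (?, 3136), where 3136 = 7 * 7 * 64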
2. Train the model
for i in range(1001):
    train_images, train_labels = mnist.train.next_batch(200)
    session.run(train, feed_dict={X_holder:train_images, y_holder:train_labels})
    if i % 100 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        test_images, test_labels = mnist.test.next_batch(2000)
        train_accuracy = session.run(accuracy, feed_dict={X_holder:train_images, y_holder:train_labels})
        test_accuracy = session.run(accuracy, feed_dict={X_holder:test_images, y_holder:test_labels})
        print('step:%d train accuracy:%.4f test accuracy:%.4f' % (i, train_accuracy, test_accuracy))
3. Complete example
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
# -1 lets the batch size be inferred; 28, 28 is the image height and width; 1 is the number of input channels
X_images = tf.reshape(X_holder, [-1, 28, 28, 1])
#convolutional layer 1
# 5, 5 is the 5×5 kernel size; 1 is the number of input feature maps; 32 is the number of output feature maps
conv1_Weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
conv1_biases = tf.Variable(tf.constant(0.1, shape=[32]))
conv1_conv2d = tf.nn.conv2d(X_images, conv1_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv1_biases
conv1_activated = tf.nn.relu(conv1_conv2d)
conv1_pooled = tf.nn.max_pool(conv1_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#convolutional layer 2
conv2_Weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
conv2_conv2d = tf.nn.conv2d(conv1_pooled, conv2_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv2_biases
conv2_activated = tf.nn.relu(conv2_conv2d)
conv2_pooled = tf.nn.max_pool(conv2_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#full connected layer 1
# -1 lets the batch size be inferred; 7*7 is the size of each feature map (the 28×28 input halved twice by 2×2 pooling); 64 is the number of input feature maps
connect1_flat = tf.reshape(conv2_pooled, [-1, 7 * 7 * 64])
connect1_Weights = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
connect1_biases = tf.Variable(tf.constant(0.1, shape=[1024]))
connect1_Wx_plus_b = tf.add(tf.matmul(connect1_flat, connect1_Weights), connect1_biases)
connect1_activated = tf.nn.relu(connect1_Wx_plus_b)
#full connected layer 2
connect2_Weights = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
connect2_biases = tf.Variable(tf.constant(0.1, shape=[10]))
connect2_Wx_plus_b = tf.add(tf.matmul(connect1_activated, connect2_Weights), connect2_biases)
predict_y = tf.nn.softmax(connect2_Wx_plus_b)
#loss and train
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdamOptimizer(0.0001)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
for i in range(1001):
    train_images, train_labels = mnist.train.next_batch(200)
    session.run(train, feed_dict={X_holder:train_images, y_holder:train_labels})
    if i % 100 == 0:
        correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        test_images, test_labels = mnist.test.next_batch(2000)
        train_accuracy = session.run(accuracy, feed_dict={X_holder:train_images, y_holder:train_labels})
        test_accuracy = session.run(accuracy, feed_dict={X_holder:test_images, y_holder:test_labels})
        print('step:%d train accuracy:%.4f test accuracy:%.4f' % (i, train_accuracy, test_accuracy))
4. Save the model
def save_model(session, model_name):
    saver = tf.train.Saver()
    save_path = saver.save(session, './models/{}.ckpt'.format(model_name))
    print('Save to path:', save_path)

save_model(session, "mnist_cnn_ver001")
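If you want intermediate checkpoints during training rather than a single final file, tf.train.Saver.save() also accepts a global_step argument; a small sketch (the saving interval here is illustrative):
saver = tf.train.Saver(max_to_keep=5)   # keep only the 5 most recent checkpoints
for i in range(1001):
    train_images, train_labels = mnist.train.next_batch(200)
    session.run(train, feed_dict={X_holder: train_images, y_holder: train_labels})
    if i % 500 == 0:
        # writes checkpoint files such as ./models/mnist_cnn_ver001-500.*
        saver.save(session, './models/mnist_cnn_ver001', global_step=i)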
5. Load the model
Note that tf.train.Saver.restore() only restores variable values; the graph must already have been built (as in the complete code below) before restoring.
session = tf.Session()
saver = tf.train.Saver()
saver.restore(session, './models/mnist_cnn_ver001.ckpt')
print('load model successful')
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.reset_default_graph()
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
batch_size = 100
X_holder = tf.placeholder(tf.float32)
y_holder = tf.placeholder(tf.float32)
X_images = tf.reshape(X_holder, [-1, 28, 28, 1])
#convolutional layer 1
conv1_Weights = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name='conv1_Weights')
conv1_biases = tf.Variable(tf.constant(0.1, shape=[32]), name='conv1_biases')
conv1_conv2d = tf.nn.conv2d(X_images, conv1_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv1_biases
conv1_activated = tf.nn.relu(conv1_conv2d)
conv1_pooled = tf.nn.max_pool(conv1_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#convolutional layer 2
conv2_Weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name='conv2_Weights')
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]), name='conv2_biases')
conv2_conv2d = tf.nn.conv2d(conv1_pooled, conv2_Weights, strides=[1, 1, 1, 1], padding='SAME') + conv2_biases
conv2_activated = tf.nn.relu(conv2_conv2d)
conv2_pooled = tf.nn.max_pool(conv2_activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#full connected layer 1
connect1_flat = tf.reshape(conv2_pooled, [-1, 7 * 7 * 64])
connect1_Weights = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1), name='connect1_Weights')
connect1_biases = tf.Variable(tf.constant(0.1, shape=[1024]), name='connect1_biases')
connect1_Wx_plus_b = tf.add(tf.matmul(connect1_flat, connect1_Weights), connect1_biases)
connect1_activated = tf.nn.relu(connect1_Wx_plus_b)
#full connected layer 2
connect2_Weights = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name='connect2_Weights')
connect2_biases = tf.Variable(tf.constant(0.1, shape=[10]), name='connect2_biases')
connect2_Wx_plus_b = tf.add(tf.matmul(connect1_activated, connect2_Weights), connect2_biases)
predict_y = tf.nn.softmax(connect2_Wx_plus_b)
#loss and train
loss = tf.reduce_mean(-tf.reduce_sum(y_holder * tf.log(predict_y), 1))
optimizer = tf.train.AdamOptimizer(0.0001)
train = optimizer.minimize(loss)
session = tf.Session()
saver = tf.train.Saver()
saver.restore(session, './models/mnist_cnn_ver001.ckpt')
correct_prediction = tf.equal(tf.argmax(predict_y, 1), tf.argmax(y_holder, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('load model successful')
train_images, train_labels = mnist.train.next_batch(5000)
test_images, test_labels = mnist.test.next_batch(5000)
train_accuracy = session.run(accuracy, feed_dict={X_holder:train_images, y_holder:train_labels})
test_accuracy = session.run(accuracy, feed_dict={X_holder:test_images, y_holder:test_labels})
print('train accuracy:%.4f test accuracy:%.4f' %(train_accuracy, test_accuracy))
6. Test the model
import math
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
def drawDigit_test(position, image, title, isTrue):
    plt.subplot(*position)
    plt.imshow(image.reshape(-1, 28), cmap='gray_r')
    plt.axis('off')
    if not isTrue:
        plt.title(title, color='red')
    else:
        plt.title(title)

def batchDraw_test(batch_size):
    images, labels = mnist.test.next_batch(batch_size)
    predict_labels = session.run(predict_y, feed_dict={X_holder:images, y_holder:labels})
    image_number = images.shape[0]
    row_number = math.ceil(image_number ** 0.5)
    column_number = row_number
    plt.figure(figsize=(row_number+8, column_number+8))
    for i in range(row_number):
        for j in range(column_number):
            index = i * column_number + j
            if index < image_number:
                position = (row_number, column_number, index+1)
                image = images[index]
                actual = np.argmax(labels[index])
                predict = np.argmax(predict_labels[index])
                isTrue = actual==predict
                title = 'actual:%d\npredict:%d' % (actual, predict)
                drawDigit_test(position, image, title, isTrue)

batchDraw_test(100)
plt.show()
The results above show that only 1 of the 100 digits was misclassified, i.e. roughly 99% accuracy.