35 - Recognizing Handwritten Digits with a Convolutional Neural Network

2019-10-05  jxvl假装

In this example, one "convolution layer" is a block of three operations: convolution, activation, and pooling.
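
A minimal sketch of one such block, assuming TensorFlow 1.x (conv_block and its arguments are illustrative names, not part of the example code below):

def conv_block(x, w, b):
    # convolution -> activation -> pooling, the three operations named above
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
    relu = tf.nn.relu(conv + b)
    return tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")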

Example

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def weight_variables(shape):
    """
    初始化权重的函数
    :param shape: 形状
    :return: 权重
    """
    w = tf.Variable(tf.random_normal(shape=shape, mean=0.0, stddev=1.0))
    return w


def bias_variables(shape):
    """
    初始化权重
    :param shape:形状
    :return: 偏置项
    """
    b = tf.Variable(tf.constant(value=0.0, shape=shape))
    return b


def model():
    """
    自定义的卷积模型
    :return:
    """
    # 准备数据的占位符 x[None, 784] y_true[None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        y_true = tf.placeholder(tf.int32, [None, 10])
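        # 784 = 28*28 flattened grayscale pixels; 10 = one-hot digit classes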
    # Convolution layer 1: convolution 5*5*1, 32 filters, strides=1; activation: tf.nn.relu; pooling
    with tf.variable_scope("conv1"):
        # Randomly initialize the weights: a 5*5 filter window, 1 input channel, 32 output channels; bias [32]
        w_conv1 = weight_variables([5, 5, 1, 32])
        b_conv1 = bias_variables([32])
        # Reshape x to 28*28 with 1 channel
        x_reshape = tf.reshape(x, [-1, 28, 28, 1])  # when reshaping, an unknown dimension must be -1, not None
        # [None, 28, 28, 1] ---> [None, 28, 28, 32], since there are 32 output channels; then apply relu to the convolution output
        x_relu1 = tf.nn.relu(tf.nn.conv2d(input=x_reshape, filter=w_conv1, strides=[1, 1, 1, 1],
                                          padding="SAME") + b_conv1)  # strides of 1 mean a step of 1 in every direction
        # Pooling 2*2, strides 2: [None, 28, 28, 32] ---> [None, 14, 14, 32], i.e. the 28*28 maps shrink to 14*14, still 32 of them
        x_pool1 = tf.nn.max_pool(value=x_relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
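        # With SAME padding, output size = ceil(input / stride), so stride 2 halves 28 to 14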
    # Convolution layer 2: 5*5*32, 64 filters, strides=1, then pooling
    # 5*5 is the window size; the input now has 32 channels, so each filter carries 32 sets of 5*5 weights
    with tf.variable_scope("conv2"):
        # Randomly initialize the weights [5, 5, 32, 64] and bias [64]
        w_conv2 = weight_variables([5, 5, 32, 64])
        b_conv2 = bias_variables([64])
        # Convolution, activation, pooling
        # [None, 14, 14, 32] ---> [None, 14, 14, 64]
        x_relu2 = tf.nn.relu(tf.nn.conv2d(x_pool1, w_conv2, strides=[1, 1, 1, 1], padding="SAME") + b_conv2)
        # Pooling 2*2, strides=2: [None, 14, 14, 64] ---> [None, 7, 7, 64]
        x_pool2 = tf.nn.max_pool(x_relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # Fully connected layer: [None, 7, 7, 64] ---> [None, 7*7*64] * [7*7*64, 10] + [10] = [None, 10]
    with tf.variable_scope("full_connection"):
        # Randomly initialize the weights and bias
        w_fc = weight_variables([7 * 7 * 64, 10])
        b_fc = bias_variables([10])
        # Reshape [None, 7, 7, 64] ---> [None, 7*7*64]
        x_fc_reshape = tf.reshape(x_pool2, [-1, 7 * 7 * 64])
        # Matrix multiplication yields 10 scores per sample
        y_predict = tf.matmul(x_fc_reshape, w_fc) + b_fc
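        # y_predict holds raw logits; softmax is applied later inside the loss op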

    return x, y_true, y_predict


def conv_fc():
    # Load the real data
    mnist = input_data.read_data_sets("./", one_hot=True)
    x, y_true, y_predict = model()
    # Compute the softmax cross-entropy loss
    with tf.variable_scope("soft_loss"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))
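        # softmax_cross_entropy_with_logits fuses softmax and cross-entropy, which is numerically stabler than applying them separately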
    # Minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)  # note: for neural networks the learning rate is usually very small
    # Compute the accuracy
    with tf.variable_scope("accuracy"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
    # Define an initialization op
    init_op = tf.global_variables_initializer()
    # Open a session and run the graph
    with tf.Session() as sess:
        sess.run(init_op)

        # Training loop
        for i in range(1000):
            # Fetch a batch of real features and labels
            mnist_x, mnist_y = mnist.train.next_batch(50)
            # Run train_op on this batch
            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
            print("Trained {} steps, accuracy: {}".format(i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))

if __name__ == "__main__":
    conv_fc()
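
The loop above reports accuracy only on the current training batch. A minimal sketch of a held-out evaluation, assuming it is placed inside the same Session right after the training loop (mnist.test is provided by input_data):

        # Evaluate on the MNIST test set after training (inside the Session block above)
        test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images, y_true: mnist.test.labels})
        print("Test accuracy: {}".format(test_accuracy))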
