# Convolutional neural network for MNIST handwritten-digit recognition, v2
# 2020-04-15  by small瓜瓜
import tensorflow as tf
from tensorflow.keras import datasets, Sequential, layers


def preprocess(pre_x, pre_y):
    """Normalize images to [0, 1] floats and one-hot encode integer labels.

    Args:
        pre_x: raw image tensor/array (uint8 pixel values 0-255).
        pre_y: integer class labels.

    Returns:
        Tuple of (float32 images scaled to [0, 1], one-hot labels of depth 10).
    """
    images = tf.cast(pre_x, dtype=tf.float32) / 255.
    labels = tf.one_hot(tf.cast(pre_y, dtype=tf.int32), depth=10)
    return images, labels


# Hyperparameters
epochs = 5
batch_size = 52
learning_rate = 2e-2

# Load MNIST and apply preprocessing (scale images, one-hot labels).
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = preprocess(x_train, y_train), preprocess(x_test, y_test)

# Fully-convolutional classifier: no Dense layers. The final Conv2D collapses
# the feature map to 1x1x10, which is reshaped into the 10-class output vector.
network = Sequential([
    layers.Reshape((28, 28, 1)),
    layers.Conv2D(3, 4, 2, activation=tf.nn.leaky_relu),
    layers.BatchNormalization(),
    layers.Conv2D(12, 3, 1, activation=tf.nn.leaky_relu),
    layers.BatchNormalization(),
    layers.Conv2D(28, 5, 2, activation=tf.nn.leaky_relu),
    layers.BatchNormalization(),
    layers.Conv2D(10, 4, 1, activation=tf.nn.sigmoid),
    layers.Reshape((10,))
])

network.build((None, 28, 28, 1))
network.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

network.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=optimizer,
                metrics=['accuracy'])

# Fix: batch_size was declared above but never passed to fit(), so Keras
# silently used its default of 32. Pass it explicitly.
network.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
            validation_data=(x_test, y_test))

network.evaluate(x_test, y_test)
# - 1s 89us/sample - loss: 0.0621 - accuracy: 0.9844
# 全部使用卷积
# Model: "sequential"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# reshape (Reshape)            multiple                  0
# _________________________________________________________________
# conv2d (Conv2D)              multiple                  51
# _________________________________________________________________
# batch_normalization (BatchNo multiple                  12
# _________________________________________________________________
# conv2d_1 (Conv2D)            multiple                  336
# _________________________________________________________________
# batch_normalization_1 (Batch multiple                  48
# _________________________________________________________________
# conv2d_2 (Conv2D)            multiple                  8428
# _________________________________________________________________
# batch_normalization_2 (Batch multiple                  112
# _________________________________________________________________
# conv2d_3 (Conv2D)            multiple                  4490
# _________________________________________________________________
# reshape_1 (Reshape)          multiple                  0
# =================================================================
# Total params: 13,477
# Trainable params: 13,391
# Non-trainable params: 86
# _________________________________________________________________
import tensorflow as tf
from tensorflow.keras import datasets, Sequential, layers


def preprocess(pre_x, pre_y):
    """Convert raw MNIST data into training-ready tensors.

    Pixel values are rescaled from [0, 255] to [0, 1] as float32, and the
    integer labels are expanded into depth-10 one-hot vectors.
    """
    scaled = tf.cast(pre_x, dtype=tf.float32) / 255.
    as_int = tf.cast(pre_y, dtype=tf.int32)
    return scaled, tf.one_hot(as_int, depth=10)


# Hyperparameters
epochs = 5
batch_size = 52
learning_rate = 2e-2

# Load MNIST and apply preprocessing (scale images, one-hot labels).
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = preprocess(x_train, y_train), preprocess(x_test, y_test)

# Same fully-convolutional architecture as the Sequential version, built with
# the functional API.
# Fix: renamed `input` -> `inputs` so the Python builtin input() is not shadowed.
inputs = layers.Input((28, 28))
x = layers.Reshape((28, 28, 1))(inputs)
x = layers.Conv2D(3, 4, 2, activation=tf.nn.leaky_relu)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(12, 3, 1, activation=tf.nn.leaky_relu)(x)
x = layers.BatchNormalization()(x)

x = layers.Conv2D(28, 5, 2, activation=tf.nn.leaky_relu)(x)
x = layers.BatchNormalization()(x)

# Final conv collapses the feature map to 1x1x10; reshape yields class scores.
x = layers.Conv2D(10, 4, 1, activation=tf.nn.sigmoid)(x)

x = layers.Reshape((10,))(x)

network = tf.keras.Model(inputs, x)

network.build((None, 28, 28, 1))
network.summary()

optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

network.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=optimizer,
                metrics=['accuracy'])

# Fix: batch_size was declared above but never passed to fit(), so Keras
# silently used its default of 32. Pass it explicitly.
network.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
            validation_data=(x_test, y_test))

network.evaluate(x_test, y_test)
# - 1s 89us/sample - loss: 0.0621 - accuracy: 0.9844
# 全部使用卷积
# Model: "sequential"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# reshape (Reshape)            multiple                  0
# _________________________________________________________________
# conv2d (Conv2D)              multiple                  51
# _________________________________________________________________
# batch_normalization (BatchNo multiple                  12
# _________________________________________________________________
# conv2d_1 (Conv2D)            multiple                  336
# _________________________________________________________________
# batch_normalization_1 (Batch multiple                  48
# _________________________________________________________________
# conv2d_2 (Conv2D)            multiple                  8428
# _________________________________________________________________
# batch_normalization_2 (Batch multiple                  112
# _________________________________________________________________
# conv2d_3 (Conv2D)            multiple                  4490
# _________________________________________________________________
# reshape_1 (Reshape)          multiple                  0
# =================================================================
# Total params: 13,477
# Trainable params: 13,391
# Non-trainable params: 86
# _________________________________________________________________

# Summary: the fully-connected layers were replaced with convolutions, and the
# same network was built in two different ways (Sequential and functional API).