tensorflow -- Classic Networks: LeNet, AlexNet, VGGNet, InceptionNet, ResNet

2020-06-13  tu7jako

1. LeNet

The LeNet convolutional neural network was proposed by LeCun in 1998 and is the pioneering work of convolutional neural networks. It cuts down the number of network parameters by sharing convolution kernels.
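As a rough back-of-the-envelope illustration of that saving, compare a shared 5×5 convolution kernel with a hypothetical fully connected layer that would produce the same output map (numbers chosen for a CIFAR-10-sized input):

# A 5x5 conv with 6 filters on a 32x32x3 input reuses the same kernel at every position
conv_params = 5 * 5 * 3 * 6 + 6                             # 456 weights + biases
# A fully connected layer mapping the flattened input to the same 28x28x6 output would need
dense_params = (32 * 32 * 3) * (28 * 28 * 6) + 28 * 28 * 6  # roughly 14.5 million parameters
print(conv_params, dense_params)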

LeNet.png
When counting the layers of a convolutional neural network, we generally count only the convolution layers and the fully connected layers; the remaining operations can be regarded as attachments to the convolution layers. LeNet therefore has 5 layers: the C1 convolution layer, the C3 convolution layer, and the three fully connected layers C5, F6 and Output.
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
import matplotlib.pyplot as plt

import os


# Load the dataset
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
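# CIFAR-10 shapes: x_train (50000, 32, 32, 3), y_train (50000, 1); pixel values are now scaled to [0, 1]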

class LeNet(Model):
    def __init__(self):
        super(LeNet, self).__init__()
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), activation="sigmoid")
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)

        self.c2 = Conv2D(filters=16, kernel_size=(5, 5), activation="sigmoid")
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)

        self.flatten = Flatten()
        self.f1 = Dense(120, activation="sigmoid")
        self.f2 = Dense(84, activation="sigmoid")
        self.f3 = Dense(10, activation="softmax")

    def call(self, x):
        x = self.c1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.p2(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.f2(x)
        y = self.f3(x)
        return y

        
# model = BaseLine()
model = LeNet()

# Configure the training method
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=["sparse_categorical_accuracy"]
)

# Resume training from a checkpoint: load the saved weights if they exist
# checkpoint_save_path = "cifar10/BaseLine.ckpt"
checkpoint_save_path = "cifar10/LeNet.ckpt"
if os.path.exists(checkpoint_save_path + ".index"):
    print("*******load the model******")
    model.load_weights(checkpoint_save_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_save_path,
    save_weights_only=True,
    save_best_only=True
)

# Train the model
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test),
                    validation_freq=1, callbacks=[cp_callback])

# Print the network structure and parameter counts
model.summary()

# Dump the trainable parameters to a text file
with open("cifar10_lenet_weights.txt", "w") as f:
    for v in model.trainable_variables:
        f.write(str(v.name) + "\n")
        f.write(str(v.shape) + "\n")
        f.write(str(v.numpy()) + "\n")


# Plot the training and validation accuracy/loss curves
acc = history.history["sparse_categorical_accuracy"]
val_acc = history.history["val_sparse_categorical_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
plt.subplot(1, 2, 1)
plt.plot(acc, label="train acc")
plt.plot(val_acc, label="validation acc")
plt.title("train & validation acc")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label="train loss")
plt.plot(val_loss, label="validation loss")
plt.title("train & validation loss")
plt.legend()
plt.show()

The resulting plots are shown below:


lenetplot.png

2. AlexNet

AlexNet appeared in 2012 and won that year's ImageNet competition with a top-5 error rate of 16.4%. AlexNet uses the ReLU activation function, which speeds up training, and uses dropout to alleviate overfitting.

AlexNet.png
AlexNet uses an eight-layer network structure:
class AlexNet(Model):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.c1 = Conv2D(filters=96, kernel_size=(3, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation("relu")
        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)

        self.c2 = Conv2D(filters=256, kernel_size=(3, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation("relu")
        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)

        self.c3 = Conv2D(filters=384, kernel_size=(3, 3), padding="same",
                         activation="relu")

        self.c4 = Conv2D(filters=384, kernel_size=(3, 3), padding="same",
                         activation="relu")

        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding="same",
                         activation="relu")
        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)

        self.flatten = Flatten()
        self.f1 = Dense(2048, activation="relu")
        self.d1 = Dropout(0.5)
        self.f2 = Dense(2048, activation="relu")
        self.d2 = Dropout(0.5)
        self.f3 = Dense(10, activation="softmax")

    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p2(x)

        x = self.c3(x)

        x = self.c4(x)

        x = self.c5(x)
        x = self.p3(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d1(x)

        x = self.f2(x)
        x = self.d2(x)

        y = self.f3(x)
        return y

ps: AlexNet is noticeably slower. LeNet finished in about the time it takes to drink a sip of water, while this AlexNet took around 50 minutes (the number of neurons has grown considerably, after all). The final model has over nine million parameters.
pps: I have now installed the GPU software. What took about 50 minutes on the 8 GB-RAM CPU machine now takes roughly 5 minutes on a 2 GB GPU. Long live the GPU!

3. VGGNet

VGGNet was the runner-up of the 2014 ImageNet competition, reducing the top-5 error rate to 7.3%. VGGNet uses small convolution kernels, which reduces the number of parameters while improving recognition accuracy. Its regular network structure also makes it well suited to hardware acceleration.
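For some intuition on why small kernels save parameters: two stacked 3×3 convolutions cover the same 5×5 receptive field as a single 5×5 convolution but need fewer weights. A quick comparison (channel count picked arbitrarily for illustration):

# Two stacked 3x3 convs vs. one 5x5 conv, both mapping C channels to C channels (biases ignored)
C = 64
stacked_3x3 = 2 * (3 * 3 * C * C)   # 73,728 weights
single_5x5 = 5 * 5 * C * C          # 102,400 weights
print(stacked_3x3, single_5x5)      # the stacked version uses about 28% fewer weights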
VGGNet has a 16-layer network structure:

VGGNet.png
The code is as follows (I did not have the nerve to actually train this model):
class VGGNet(Model):
    def __init__(self):
        super(VGGNet, self).__init__()
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')

        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d1 = Dropout(0.2)

        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')

        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d2 = Dropout(0.2)

        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')

        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')

        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d3 = Dropout(0.2)

        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')

        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')

        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d4 = Dropout(0.2)

        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')

        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')
        
        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d5 = Dropout(0.2)

        self.flatten = Flatten()
        self.f1 = Dense(512, activation='relu')
        self.d6 = Dropout(0.2)
        self.f2 = Dense(512, activation='relu')
        self.d7 = Dropout(0.2)
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p1(x)
        x = self.d1(x)

        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)
        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p2(x)
        x = self.d2(x)

        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)
        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)
        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p3(x)
        x = self.d3(x)

        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)
        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)
        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p4(x)
        x = self.d4(x)

        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)
        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)
        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p5(x)
        x = self.d5(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d6(x)
        x = self.f2(x)
        x = self.d7(x)
        y = self.f3(x)
        return y

4. InceptionNet

InceptionNet appeared in 2014 and won that year's ImageNet competition with a top-5 error rate of 6.67%. InceptionNet introduced the Inception block, which uses convolution kernels of different sizes within the same layer and improves the model's ability to perceive features at different scales; it also uses batch normalization, which alleviates vanishing gradients.
The core of InceptionNet is its basic unit, the Inception block. Whether it is GoogLeNet (that is, Inception v1) or the later versions such as v2, v3 and v4, they are all built from Inception blocks. An Inception block applies kernels of several sizes in the same layer, so it can extract features at different scales. The 1×1 convolution acts on every pixel of the input feature map; by setting the number of 1×1 kernels to be smaller than the depth of the input feature map, the output depth is reduced. This acts as dimensionality reduction and cuts down the number of parameters and the amount of computation.
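A quick back-of-the-envelope example of this bottleneck effect (channel counts chosen arbitrarily for illustration):

# 5x5 conv applied directly to a 256-channel feature map, producing 64 channels
direct = 5 * 5 * 256 * 64                        # 409,600 weights
# First squeeze the depth to 32 with a 1x1 conv, then apply the 5x5 conv
bottleneck = 1 * 1 * 256 * 32 + 5 * 5 * 32 * 64  # 8,192 + 51,200 = 59,392 weights
print(direct, bottleneck)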

InceptionNet.png
The Inception block contains four branches:
Branch 1: a 1×1 convolution;
Branch 2: a 1×1 convolution followed by a 3×3 convolution;
Branch 3: a 1×1 convolution followed by a 5×5 convolution;
Branch 4: 3×3 max pooling followed by a 1×1 convolution.

The four branches send feature maps of the same spatial size to the concatenation layer, which stacks them along the depth dimension to form the output of the Inception block.
The code is as follows:

# Every convolution in the Inception block uses the CBA pattern: convolution, then batch normalization,
# then the ReLU activation. To reuse code, we define a new class, ConvBNRelu
class ConvBNRelu(Model):
    def __init__(self, ch, kernelsz=3, strides=1, padding="same"):
        super(ConvBNRelu, self).__init__()
        self.model = tf.keras.models.Sequential([
            Conv2D(ch, kernel_size=kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation("relu")
        ])

    def call(self, x):
        x = self.model(x)
        return x


class InceptionBlk(Model):
    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        # Branch 1
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)

        # Branch 2
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)

        # Branch 3
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)

        # Branch 4
        self.p4_1 = MaxPool2D(3, strides=1, padding="same")
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)

    def call(self, x):
        """分别经历四个分支的传播"""
        x1 = self.c1(x)

        x2_1 = self.c2_1(x)
        x2_2 = self.c2_2(x2_1)

        x3_1 = self.c3_1(x)
        x3_2 = self.c3_2(x3_1)

        x4_1 = self.p4_1(x)
        x4_2 = self.c4_2(x4_1)

        # Stack the outputs of the four branches, concatenating along the depth dimension
        x = tf.concat([x1, x2_2, x3_2, x4_2], axis=3)
        return x

With the Inception block in place, we can build a slimmed-down version of InceptionNet.


Inception10.png

The code is as follows:


class Inception10(Model):
    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        """
        :param num_blocks: number of blocks
        :param num_classes: number of output classes
        :param init_ch: initial output depth, 16 by default
        :param kwargs:
        """
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        self.c1 = ConvBNRelu(init_ch)  # the remaining arguments keep their default values

        # Four Inception units are connected in sequence, and every two of them form one block.
        # In each block the first unit uses stride 2 and the second uses stride 1, so the first
        # unit halves the spatial size of the feature map. The output depth is therefore doubled,
        # to keep the amount of information carried through feature extraction roughly constant
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):
                if layer_id == 0:
                    block = InceptionBlk(self.out_channels, strides=2)
                else:
                    block = InceptionBlk(self.out_channels, strides=1)
                self.blocks.add(block)
            # block_0 uses 16 channels per branch; with 4 branches its output depth is 4*16=64.
            # After the *= 2 below, block_1 uses 32 channels per branch, so its output depth is 4*32=128.
            self.out_channels *= 2
        self.p1 = tf.keras.layers.GlobalAveragePooling2D()  # feed the 128-channel feature map into global average pooling
        self.f1 = Dense(num_classes, activation="softmax")  # fully connected layer for the final 10-way classification

    def call(self, x):
        x = self.c1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y


model = Inception10(num_blocks=2, num_classes=10)
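As a quick sanity check (assuming CIFAR-10-sized 32×32×3 inputs), a dummy batch can be pushed through the model to confirm the output shape:

# Forward a random dummy batch to verify the model builds and outputs 10-way probabilities
dummy = tf.random.normal([4, 32, 32, 3])
print(model(dummy).shape)  # expected: (4, 10)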

5. ResNet

ResNet appeared in 2015 and won that year's ImageNet competition with a top-5 error rate of 3.57%. ResNet proposed residual skip connections between layers, which carry earlier information forward, alleviate vanishing gradients, and make it feasible to keep increasing the number of layers.

net_comparision.png
As the figure above shows, on the road to better feature extraction with convolutions, deepening the network kept improving results. However, simply stacking more layers makes the model degrade, to the point where later features lose the original appearance of the earlier ones. The ResNet authors therefore added a skip connection that carries earlier features directly to a later layer, so that the block output H(x) = F(x) + x combines the non-linear output F(x) of the two stacked convolution layers with the identity mapping x brought over by the skip connection, added element-wise. This effectively alleviates the degradation caused by stacking layers and lets neural networks grow much deeper.
res块.png

A ResNet block has two cases:
If the dimensions before and after the two stacked convolutions are the same, the input is added to the output directly (residual_path=False).
If the dimensions differ (for example when the block downsamples with stride 2), the skip path first passes the input through a 1×1 convolution and batch normalization to match the dimensions (residual_path=True).

The code for the encapsulated ResNet block is as follows:

class ResnetBlock(Model):
    def __init__(self, filters, strides=1, residual_path=False):
        super(ResnetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path

        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding="same", use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation("relu")

        self.c2 = Conv2D(filters, (3, 3), strides=1, padding="same", use_bias=False)
        self.b2 = BatchNormalization()

        if residual_path:  # True when the dimensions before and after the stacked convolutions differ
            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding="same", use_bias=False)
            self.down_b1 = BatchNormalization()

        self.a2 = Activation("relu")

    def call(self, inputs):
        residual = inputs

        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)

        x = self.c2(x)
        y = self.b2(x)

        if self.residual_path:  # True when the dimensions before and after the stacked convolutions differ
            residual = self.down_c1(inputs)
            residual = self.down_b1(residual)

        out = self.a2(y + residual)  # add the stacked-convolution output and the skip-connection path
        return out

Using the ResNet block, we can build a ResNet18 neural network.


ResNet18.png

The code is as follows:


class ResNet18(Model):
    def __init__(self, block_list, initial_filters=64):
        """
        :param block_list: number of residual units in each block
        :param initial_filters: initial number of convolution filters
        """
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)  # total number of blocks
        self.block_list = block_list
        self.out_filters = initial_filters

        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding="same",
                         use_bias=False, kernel_initializer="he_normal")
        self.b1 = BatchNormalization()
        self.a1 = Activation("relu")

        self.blocks = tf.keras.models.Sequential()

        # Build the ResNet structure: each ResNet block contains two convolution layers
        for block_id in range(len(block_list)):
            for layer_id in range(block_list[block_id]):
                if block_id != 0 and layer_id == 0:  # downsample the input of every block except the first
                    block = ResnetBlock(self.out_filters, strides=2, residual_path=True)
                else:
                    block = ResnetBlock(self.out_filters, residual_path=False)
                self.blocks.add(block)
            self.out_filters *= 2  # the next block uses twice as many filters as the previous one
        self.p1 = tf.keras.layers.GlobalAveragePooling2D()
        self.f1 = Dense(10)

    def call(self, inputs):
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)

        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y

model = ResNet18([2, 2, 2, 2])
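One caveat: unlike the earlier models, the final Dense(10) layer here has no softmax, so if you reuse the LeNet-style training setup the loss should be built with from_logits=True. A minimal sketch of the compile call under that assumption:

# The last layer outputs raw logits, so let the loss apply the softmax internally
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["sparse_categorical_accuracy"]
)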