Python3入门机器学习实战大数据 爬虫Python AI Sql大数据,机器学习,人工智能

3.5 卷积神经网络进阶-Inception-MobileNet

2018-10-07  本文已影响4人  9c0ddf06559c

4.2.5 Inception-MobileNet 实战

def separable_conv_block(x,
                  output_channel_number,
                  name):
    """Depthwise-separable convolution block (MobileNet style).

    Depthwise step: each input channel is convolved independently with its
    own 3x3 kernel.  Pointwise step: a 1x1 convolution mixes the depthwise
    outputs into ``output_channel_number`` channels.

    Args:
        x: input 4-D tensor, channel-last [batch, H, W, C].
        output_channel_number: number of channels produced by the final
            1x1 pointwise convolution.
        name: variable-scope name for this block, so layer names inside
            become 'name/conv_0', 'name/conv1_1', etc.

    Returns:
        Output tensor of the 1x1 pointwise convolution (ReLU-activated),
        same spatial size as ``x``.
    """
    # variable_scope avoids name clashes: 'conv1' -> 'scope_name/conv1'
    with tf.variable_scope(name):
        input_channel = x.get_shape().as_list()[-1]
        # Split x along the channel axis (axis=3) into input_channel
        # single-channel tensors: channel_wise_x = [channel1, channel2, ...]
        channel_wise_x = tf.split(x, input_channel, axis = 3)
        output_channels = []
        for i in range(len(channel_wise_x)):
            # Depthwise step: one 3x3 conv (1 output channel) per input channel.
            output_channel = tf.layers.conv2d(channel_wise_x[i],
                                              1,
                                              (3,3),
                                              strides = (1,1),
                                              padding = 'same',
                                              activation = tf.nn.relu,
                                              name = 'conv_%d' % i)
            output_channels.append(output_channel)
        # Re-assemble the per-channel outputs along the channel axis.
        concat_layers = tf.concat(output_channels, axis = 3)
        # Pointwise step: 1x1 conv mixes information across channels.
        conv1_1 = tf.layers.conv2d(concat_layers,
                                   output_channel_number,
                                   (1,1),
                                   strides = (1,1),
                                   padding = 'same',
                                   activation = tf.nn.relu,
                                   name = 'conv1_1')
        return conv1_1
        
        
# Inputs: x is a flattened 32*32*3 image (3072 floats); y is the integer class label.
x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])

# Reshape the flat vector into a channel-first image [N, C=3, H=32, W=32].
x_image = tf.reshape(x, [-1,3,32,32])
# Transpose to channel-last [N, H, W, C] as expected by tf.layers; spatial size 32*32.
x_image = tf.transpose(x_image, perm = [0, 2, 3, 1])

# conv1: plain convolution producing 32 feature maps at 32*32.
conv1 = tf.layers.conv2d(x_image,
                           32, # output channel number
                           (3,3), # kernel size
                           padding = 'same', # 'same' keeps the spatial size; 'valid' means no padding
                           activation = tf.nn.relu,
                           name = 'conv1')
# 2x2 max-pooling with stride 2 halves the spatial size: 32*32 -> 16*16
pooling1 = tf.layers.max_pooling2d(conv1,
                                   (2, 2), # kernel size
                                   (2, 2), # stride
                                   name = 'pool1' # naming the layer makes the printed graph readable
                                  )

# Two separable-conv blocks at 16*16, 32 channels each.
separable_2a = separable_conv_block(pooling1, 
                                    32,
                                    name = 'separable_2a')

separable_2b = separable_conv_block(separable_2a, 
                                    32,
                                    name = 'separable_2b')

# 16*16 -> 8*8
pooling2 = tf.layers.max_pooling2d(separable_2b,
                                   (2, 2), 
                                   (2, 2), 
                                   name = 'pool2' 
                                  )

# Two more separable-conv blocks at 8*8, 32 channels each.
separable_3a = separable_conv_block(pooling2, 
                                    32,
                                    name = 'separable_3a')

separable_3b = separable_conv_block(separable_3a, 
                                    32,
                                    name = 'separable_3b')

# 8*8 -> 4*4
pooling3 = tf.layers.max_pooling2d(separable_3b,
                                   (2, 2), 
                                   (2, 2), 
                                   name = 'pool3')

# Flatten [None, 4, 4, 32] feature maps into a matrix [None, 4*4*32].
flatten = tf.layers.flatten(pooling3)
# Final fully-connected layer producing 10 class logits (no activation).
y_ = tf.layers.dense(flatten, 10)


# Cross-entropy loss: applies softmax to the logits y_ internally and
# treats y as sparse (integer) labels, i.e. loss = -sum(onehot(y) * log(softmax(y_))).
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# y_ -> softmax
# y  -> one_hot
# loss = ylogy_


# predict: index of the largest logit for each example.
predict = tf.argmax(y_, 1)
# correct_prediction: element-wise bool vector, e.g. [1,0,1,1,1,0,0,0] after cast.
correct_prediction = tf.equal(predict, y)
# accuracy: mean of the 0/1 correctness vector.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

with tf.name_scope('train_op'):
    # Adam optimizer with learning rate 1e-3 minimizing the cross-entropy loss.
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

训练 10000 步后准确率约为 60%。这是因为 MobileNet 通过深度可分离卷积减少了参数量和计算量,以牺牲部分准确率为代价换取效率。

上一篇下一篇

猜你喜欢

热点阅读