Convolutional Networks and Feature Visualization
2019-06-06
Byte猫
It is often said that neural networks are hard to interpret: a neural network model is a "black box", and the experience it learns is difficult to present in a way humans can understand (tree models, by contrast, are highly interpretable). This claim is not entirely accurate. The "experience" learned by a convolutional neural network lends itself very well to visualization, largely because it consists of representations of visual concepts.
Visualizing Intermediate Activations
Visualizing intermediate activations (a layer's output is commonly called that layer's activation, i.e. the output of its activation function) means showing, for a given input, the feature maps output by each convolutional and pooling layer of the network.
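Before turning to a real network, here is a tiny standalone sketch (a hypothetical toy example, not part of the workflow below) that makes the terminology concrete: a single Conv2D layer applied to a 224×224 RGB input produces one feature map per filter, so with 8 filters the activation has shape (1, 222, 222, 8).
from keras.layers import Conv2D, Input
from keras.models import Model
import numpy as np

# Toy example: one convolutional layer with 8 filters
inp = Input(shape=(224, 224, 3))
out = Conv2D(8, (3, 3), activation='relu')(inp)
toy_model = Model(inputs=inp, outputs=out)

# A random "image"; the layer's activation holds 8 feature maps of size 222x222
x = np.random.rand(1, 224, 224, 3).astype('float32')
print(toy_model.predict(x).shape)  # (1, 222, 222, 8)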
First, let's find a cute cat picture to lead things off...
Then read the image and convert it into tensor form:
from keras import backend as K
from keras.models import Model
from keras.applications import vgg16
from keras.preprocessing import image  # used to turn images into 4D tensors
import matplotlib.pyplot as plt
import numpy as np
import os

# Silence hardware-acceleration warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Directory of the current script
FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# Target image size (height, width)
target_size = (224, 224)

def path_to_tensor(img_path):
    '''Load an image and convert it into a 4D tensor'''
    img = image.load_img(img_path, target_size=target_size)
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0).astype('float32') / 255
    return img_tensor

if __name__ == '__main__':
    # Read the image and preprocess it
    img_path = os.path.join(FILE_DIR, 'cat.jpg')
    img_tensor = path_to_tensor(img_path)
For the convolutional network we use the VGG16 that ships with Keras to extract features:
# Initialize the model (ImageNet weights, convolutional base only)
model = vgg16.VGG16(weights='imagenet', include_top=False)
model.summary()
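model.summary() prints the layer stack. Because the snippets below address layers by numeric index (model.layers[1], model.layers[3], ...), it can also help to print the index next to each layer name; a minimal sketch, assuming the model object created above:
# Print each layer's index, name and output shape so that
# the model.layers[i] references below are easy to follow
for i, layer in enumerate(model.layers):
    print(i, layer.name, layer.output_shape)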
Next we extract the intermediate-layer outputs. There are two main ways to do this: build a K.function over the layer's output tensor, or build a new Model whose output is the intermediate layer:
# Approach 1: use K.function to fetch intermediate-layer outputs
layer_1 = K.function([model.layers[0].input], [model.layers[1].output])
layer_2 = K.function([model.layers[0].input], [model.get_layer('block1_conv2').output])
# Approach 2: build a new Model whose output is an intermediate layer
activation_model = Model(inputs=model.layers[0].input, outputs=model.layers[3].output)

feature_maps1 = layer_1([img_tensor])[0]
feature_maps2 = layer_2([img_tensor])[0]
feature_maps3 = activation_model.predict([img_tensor])[0]

# plt.imshow(feature_maps1[0, :, :, 3], cmap='viridis')
# plt.imshow(feature_maps2[0, :, :, 3], cmap='viridis')
plt.imshow(feature_maps3[:, :, 60], cmap='viridis')  # change the channel index to view other feature maps
plt.show()
Next, we visualize every channel of the intermediate-layer activations:
#-*- coding:utf-8 -*-
from keras import backend as K
from keras.models import Model
from keras.applications import vgg16
from keras.preprocessing import image  # used to turn images into 4D tensors
import matplotlib.pyplot as plt
import numpy as np
import os

# Silence hardware-acceleration warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Directory of the current script
FILE_DIR = os.path.dirname(os.path.abspath(__file__))

# Target image size (height, width)
target_size = (224, 224)

def path_to_tensor(img_path):
    '''Load an image and convert it into a 4D tensor'''
    img = image.load_img(img_path, target_size=target_size)
    img_tensor = image.img_to_array(img)
    img_tensor = np.expand_dims(img_tensor, axis=0).astype('float32') / 255
    return img_tensor

if __name__ == '__main__':
    # Read the image and preprocess it
    img_path = os.path.join(FILE_DIR, 'cat.jpg')
    img_tensor = path_to_tensor(img_path)

    # Initialize the model
    model = vgg16.VGG16(weights='imagenet', include_top=False)
    # model.summary()

    # Build a new model that outputs the first few intermediate layers
    layer_outputs = [layer.output for layer in model.layers[1:8]]
    activation_model = Model(inputs=[model.layers[0].input], outputs=layer_outputs)
    activations = activation_model.predict([img_tensor])

    layer_names = []
    for layer in model.layers[1:8]:
        layer_names.append(layer.name)

    # Number of feature maps to display per row
    images_per_row = 16

    # Plot the feature-map grid of each layer
    for layer_name, layer_activation in zip(layer_names, activations):
        print(layer_name)
        print(layer_activation.shape)
        # Number of channels in this layer's feature map
        n_features = layer_activation.shape[-1]
        # The feature map has shape (1, height, width, n_features)
        size = layer_activation.shape[1]
        # Tile the activation channels into a grid
        n_cols = n_features // images_per_row  # number of grid rows needed to fit all channels
        display_grid = np.zeros((n_cols * size, images_per_row * size))
        for col in range(n_cols):
            for row in range(images_per_row):
                # Pick out a single channel
                channel_image = layer_activation[0, :, :, col * images_per_row + row].copy()
                # Post-process the channel so it looks nicer
                channel_image -= channel_image.mean()
                channel_image *= 64
                channel_image += 128
                channel_image = np.clip(channel_image, 0, 255).astype('uint8')
                display_grid[col * size : (col + 1) * size, row * size : (row + 1) * size] = channel_image
        plt.title(layer_name)
        plt.imshow(display_grid)
        plt.show()
(Output: feature-map grids for block1_conv1, block1_conv2, block2_conv1 and block3_conv1.)
As the network gets deeper, the number of channels extracted grows and the features become increasingly abstract.
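To see this growth concretely, the minimal sketch below (the same Keras VGG16 as above, here instantiated with a fixed input_shape so the spatial sizes print as concrete numbers) lists the output shape of every convolution and pooling layer: the channel count climbs from 64 in block1 to 512 in block5, while the spatial resolution shrinks from 224×224 down to 7×7.
from keras.applications import vgg16

model = vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
for layer in model.layers:
    if 'conv' in layer.name or 'pool' in layer.name:
        # e.g. block1_conv1 (None, 224, 224, 64) ... block5_pool (None, 7, 7, 512)
        print(layer.name, layer.output_shape)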