example

2020-07-07  skullfang
import os
import tensorflow as tf
tf.executing_eagerly()  # eager execution is on by default in TF 2.x; this call just confirms it
from sklearn.utils import shuffle
import numpy as np
import matplotlib.pyplot as plt
AUTOTUNE = tf.data.experimental.AUTOTUNE
print(tf.__version__)
2.2.0

Preprocessing

def load_sample(sample_dir, shuffleflag=True):
    '''Recursively read files (one directory level of classes). Returns file names,
    numeric labels, and the label names the numbers map to.'''
    print('loading sample  dataset..')
    lfilenames = []
    labelsnames = []
    for (dirpath, dirnames, filenames) in os.walk(sample_dir):  # walk the directory tree
        for filename in filenames:                              # iterate over all file names
            filename_path = os.sep.join([dirpath, filename])
            lfilenames.append(filename_path)                    # collect the file path
            labelsnames.append(os.path.basename(dirpath))       # the parent folder name is the label

    lab = list(sorted(set(labelsnames)))             # unique, sorted label names
    labdict = dict(zip(lab, list(range(len(lab)))))  # map label name -> integer id

    labels = [labdict[i] for i in labelsnames]
    if shuffleflag:
        return shuffle(np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)
    else:
        return (np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)
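load_sample expects one sub-folder per class. A minimal sanity check, assuming the apple2orange layout used later in this post:

# Expected layout: ./apple2orange/train/apple/*.jpg, ./apple2orange/train/orange/*.jpg
(filenames, labels), labelnames = load_sample("./apple2orange/train", shuffleflag=False)
print(labelnames)                # e.g. ['apple' 'orange']
print(filenames[0], labels[0])   # first file path and its integer label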
def _distorted_image(image, size, ch=1, shuffleflag=False, cropflag=False,
                     brightnessflag=False, contrastflag=False):  # apply random distortions to an image
    distorted_image = tf.image.random_flip_left_right(image)

    if cropflag:                                                 # random crop
        s = tf.random.uniform((1, 2), int(size[0]*0.8), size[0], tf.int32)
        distorted_image = tf.image.random_crop(distorted_image, [s[0][0], s[0][0], ch])

    distorted_image = tf.image.random_flip_up_down(distorted_image)  # random vertical flip
    if brightnessflag:  # random brightness
        distorted_image = tf.image.random_brightness(distorted_image, max_delta=10)
    if contrastflag:    # random contrast
        distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    if shuffleflag:
        distorted_image = tf.random.shuffle(distorted_image)  # shuffle rows along axis 0
    return distorted_image
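Since eager execution is on, the augmentation can be previewed directly. A small sketch; the image path is a placeholder, substitute any file from the dataset:

img_path = "./apple2orange/train/apple/some_image.jpg"  # hypothetical path
raw = tf.io.read_file(img_path)
img = tf.image.decode_image(raw)
aug = _distorted_image(img, size=[224, 224])  # flips only, with the default flags
plt.imshow(aug.numpy().astype("uint8"))
plt.show()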

from skimage import transform
def _random_rotated30(image, label):  # randomly rotate an image by 30 degrees

    def _rotated(image):  # use skimage to rotate the image 30 degrees about its center
        shift_y, shift_x = np.array(image.shape[:2], np.float32) / 2.
        tf_rotate = transform.SimilarityTransform(rotation=np.deg2rad(30))
        tf_shift = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
        tf_shift_inv = transform.SimilarityTransform(translation=[shift_x, shift_y])
        # shift the center to the origin, rotate, then shift back
        image_rotated = transform.warp(image, (tf_shift + (tf_rotate + tf_shift_inv)).inverse)
        return image_rotated

    def _rotatedwrap():
        image_rotated = tf.py_function(_rotated, [image], [tf.float64])  # call the third-party function
        return tf.cast(image_rotated, tf.float32)[0]

    a = tf.random.uniform([1], 0, 2, tf.int32)  # coin flip: rotate or not
    image_decoded = tf.cond(pred=tf.equal(tf.constant(0), a[0]), true_fn=lambda: image, false_fn=_rotatedwrap)

    return image_decoded, label
def dataset(directory, size, batchsize, random_rotated=False, shuffleflag=True):  # build the dataset
    """Parse the dataset."""
    (filenames, labels), _ = load_sample(directory, shuffleflag=False)  # load file names and labels

    def _parseone(filename, label):  # parse one image file
        """Read and preprocess a single image."""
        image_string = tf.io.read_file(filename)          # read the whole file
        image_decoded = tf.image.decode_image(image_string)
        image_decoded.set_shape([None, None, None])  # required: without a known rank the ops below fail
        image_decoded = _distorted_image(image_decoded, size)  # apply random distortions
        image_decoded = tf.image.resize(image_decoded, size)   # resize
        image_decoded = _norm_image(image_decoded, size)       # normalize
        image_decoded = tf.cast(image_decoded, dtype=tf.float32)
        label = tf.cast(tf.reshape(label, [1, ]), tf.int32)    # turn the label into a tensor
        return image_decoded, label

    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))  # create the Dataset object

    if shuffleflag:  # shuffle
        dataset = dataset.shuffle(10000)

    dataset = dataset.map(_parseone, num_parallel_calls=AUTOTUNE)  # dataset of decoded images

    if random_rotated:  # random rotation
        dataset = dataset.map(_random_rotated30, num_parallel_calls=AUTOTUNE)

    dataset = dataset.batch(batchsize)  # split into batches
    dataset = dataset.prefetch(1)

    return dataset
def _norm_image(image, size, ch=1, flattenflag=False):  # normalize, optionally flatten
    image_decoded = image/127.5 - 1  # map [0, 255] to [-1, 1]
    if flattenflag:
        image_decoded = tf.reshape(image_decoded, [size[0]*size[1]*ch])
    return image_decoded
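The x/127.5 - 1 normalization maps pixel values from [0, 255] into [-1, 1]. Note this differs from tf.keras.applications.resnet50.preprocess_input, which subtracts per-channel means; this post sticks with the simpler [-1, 1] scaling. A quick eager check:

x = tf.constant([0.0, 127.5, 255.0])
print((x / 127.5 - 1).numpy())  # [-1.  0.  1.]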
from tensorflow.keras.applications.resnet50 import ResNet50

size = [224, 224]
batchsize = 10

sample_dir = r"./apple2orange/train"
testsample_dir = r"./apple2orange/test"

traindataset = dataset(sample_dir, size, batchsize)  # training set
#testdataset = dataset(testsample_dir, size, batchsize, shuffleflag=False)  # test set
loading sample  dataset..
ds=traindataset
ds
<PrefetchDataset shapes: ((None, 224, 224, None), (None, 1)), types: (tf.float32, tf.int32)>
image_batch, label_batch = next(iter(ds))
label_batch
<tf.Tensor: shape=(10, 1), dtype=int32, numpy=
array([[0],
       [1],
       [0],
       [1],
       [1],
       [0],
       [1],
       [0],
       [0],
       [1]], dtype=int32)>
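
matplotlib was imported at the top but not used yet; a small sketch to eyeball the batch just drawn (the images were normalized to [-1, 1], so rescale before display):

plt.figure(figsize=(10, 4))
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.imshow((image_batch[i].numpy() + 1) / 2)  # undo the [-1, 1] normalization
    plt.title(int(label_batch[i][0]))
    plt.axis('off')
plt.show()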

Build the model

# Build the model
def create_model():
    img_size = (224, 224, 3)
    inputs = tf.keras.Input(shape=img_size)
    conv_base = ResNet50(weights='resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                         input_tensor=inputs, input_shape=img_size,
                         include_top=False)  # ResNet50 base from local weights, without the classifier head

    model = tf.keras.models.Sequential()
    model.add(conv_base)
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(256, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    conv_base.trainable = False  # freeze the convolutional base; must be set before compile
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.RMSprop(learning_rate=2e-5),
                  metrics=['acc'])
    return model
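Only the two Dense layers train here; the ResNet50 base stays frozen, as the summary below confirms. If accuracy plateaus, the usual next step is to unfreeze the top of the base and recompile with a lower learning rate. A sketch, not run in this post; the layer name follows tf.keras's ResNet50 naming and is an assumption:

def fine_tune(model, from_layer='conv5_block1_out'):
    conv_base = model.layers[0]        # the ResNet50 base added first to the Sequential
    conv_base.trainable = True
    set_trainable = False
    for layer in conv_base.layers:
        if layer.name == from_layer:
            set_trainable = True
        layer.trainable = set_trainable  # unfreeze only layers from `from_layer` onwards
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-5),
                  metrics=['acc'])
    return model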

Save model checkpoints

checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)

Visualization

tbCallBack = tf.keras.callbacks.TensorBoard(log_dir='./logs',  # log directory
                 histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables them
                 write_graph=True,  # whether to write the graph
                 write_grads=True,  # gradient histograms (ignored in TF2, see warning below)
                 write_images=True, # whether to visualize weights as images
                 embeddings_freq=0,
                 embeddings_layer_names=None,
                 embeddings_metadata=None)
WARNING:tensorflow:`write_grads` will be ignored in TensorFlow 2.0 for the `TensorBoard` Callback.
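
With the callback in place, the training curves can be viewed by running tensorboard --logdir ./logs in a terminal and opening the printed URL.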

Build and train

model=create_model()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
resnet50 (Model)             (None, 7, 7, 2048)        23587712  
_________________________________________________________________
flatten (Flatten)            (None, 100352)            0         
_________________________________________________________________
dense (Dense)                (None, 256)               25690368  
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 257       
=================================================================
Total params: 49,278,337
Trainable params: 25,690,625
Non-trainable params: 23,587,712
_________________________________________________________________
model.fit(ds, 
          epochs=50,callbacks=[cp_callback,tbCallBack])
Epoch 1/50
Epoch 00001: saving model to training_1/cp.ckpt
202/202 [==============================] - 51s 254ms/step - loss: 0.7283 - acc: 0.5839
Epoch 2/50
Epoch 00002: saving model to training_1/cp.ckpt
202/202 [==============================] - 51s 253ms/step - loss: 0.6098 - acc: 0.6778
Epoch 3/50
Epoch 00003: saving model to training_1/cp.ckpt
202/202 [==============================] - 52s 255ms/step - loss: 0.5876 - acc: 0.7001
...
(epochs 4-49 omitted: loss falls steadily to 0.2184 and accuracy climbs to 0.9116)
...
Epoch 50/50
Epoch 00050: saving model to training_1/cp.ckpt
202/202 [==============================] - 52s 257ms/step - loss: 0.2115 - acc: 0.9181

<tensorflow.python.keras.callbacks.History at 0x7f11f8549e10>

Save as H5

model.save("weight.h5")

Prediction

def predict(image_batch):
    predict_probs = model.predict(image_batch)                  # sigmoid outputs in [0, 1]
    predict_label = [int(np.round(p)) for p in predict_probs]   # threshold at 0.5
    return predict_label
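
A quick spot check of predict against the batch drawn earlier (the labels depend on the shuffle, so exact outputs will differ):

print(predict(image_batch))                    # e.g. [0, 1, 0, 1, ...]
print(label_batch.numpy().flatten().tolist())  # ground-truth labels for comparison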

Evaluation

model.evaluate(image_batch,label_batch)
1/1 [==============================] - 0s 2ms/step - loss: 0.3004 - acc: 0.8000
[0.3003780245780945, 0.800000011920929]

Load the checkpoint

load_model=create_model()
load_model.load_weights(checkpoint_path)
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
resnet50 (Model)             (None, 7, 7, 2048)        23587712  
_________________________________________________________________
flatten_1 (Flatten)          (None, 100352)            0         
_________________________________________________________________
dense_2 (Dense)              (None, 256)               25690368  
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 257       
=================================================================
Total params: 49,278,337
Trainable params: 25,690,625
Non-trainable params: 23,587,712
_________________________________________________________________
<tensorflow.python.training.tracking.util.CheckpointLoadStatus at 0x7f11784fe910>

Re-evaluation

load_model.evaluate(image_batch,label_batch)
1/1 [==============================] - 0s 6ms/step - loss: 0.3004 - acc: 0.8000
[0.3003780245780945, 0.800000011920929]
