
Image Recognition with the Inception (v3) Model in TensorFlow

2018-06-07  MLGirl

Tutorials on this example abound online, but few walk through complete code together with its output. Here I go through all of the code from start to finish; if you want to learn it, read carefully.

Model background:

Inception (v3) is an image recognition model pre-trained by Google on the ImageNet dataset; we can use it as-is to classify our own images.

Download link:

https://storage.googleapis.com/download.tensorflow.org/models/inception_dec_2015.zip
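
If you prefer to fetch and unpack the archive from Python rather than by hand, here is a minimal sketch. It assumes you extract into ./inception-2015-12-05, the directory the code below expects, and that the archive's files sit at its top level.

import os
import urllib.request
import zipfile

MODEL_URL = "https://storage.googleapis.com/download.tensorflow.org/models/inception_dec_2015.zip"
MODEL_DIR = "./inception-2015-12-05"   # must match model_dir in the script below

os.makedirs(MODEL_DIR, exist_ok=True)
zip_path = os.path.join(MODEL_DIR, "inception_dec_2015.zip")
if not os.path.exists(zip_path):
    # Download the archive once
    urllib.request.urlretrieve(MODEL_URL, zip_path)
with zipfile.ZipFile(zip_path) as zf:
    # Unpack the frozen graph (.pb) and the two label-map files
    zf.extractall(MODEL_DIR)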

File description:

The archive contains the frozen inference graph classify_image_graph_def.pb plus the two label-map files used below: imagenet_2012_challenge_label_map_proto.pbtxt (class ID 1-1000 to synset string n********) and imagenet_synset_to_human_label_map.txt (synset string to human-readable name).

Language environment:

Python with TensorFlow 1.x; the script also uses NumPy, Matplotlib and Pillow (PIL) for array handling and image display.
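
If you are unsure which TensorFlow you have installed, a quick check (the tf.gfile / tf.Session / tf.GraphDef calls below were removed from the top-level namespace in TensorFlow 2.x):

import tensorflow as tf
print(tf.__version__)  # the code in this post assumes a 1.x release
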

Function description:

The script can recognize assorted pictures pulled from the web: cats, dogs, parrots, rabbits, tigers, airplanes and so on.

File structure:

The working directory contains the extracted model folder inception-2015-12-05/ and an image/ folder holding the test pictures.

Code implementation:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = ''
__author__ = "wenyali"
__mtime__ = "2018/6/7"

import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image

class NodeLookup(object):
    """Map a class ID (1-1000) from the model's softmax output to a human-readable label."""
    def __init__(self, label_lookup_path=None, uid_lookup_path=None):
        # Default to the two label-map files shipped in the downloaded model directory
        if not label_lookup_path:
            label_lookup_path = os.path.join(
                model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
        if not uid_lookup_path:
            uid_lookup_path = os.path.join(
                model_dir, 'imagenet_synset_to_human_label_map.txt')
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)


    def load(self, label_lookup_path, uid_lookup_path):
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)
        # Load the file that maps synset strings n******** to human-readable class names
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        # Read the file line by line
        for line in proto_as_ascii_lines:
            # Strip the trailing newline
            line = line.strip('\n')
            # Split on the tab character
            parsed_items = line.split('\t')
            # Synset string (class ID string)
            uid = parsed_items[0]
            # Human-readable class name
            human_string = parsed_items[1]
            # Store the synset string n******** -> class name mapping
            uid_to_human[uid] = human_string

        # Load the file that maps synset strings n******** to class IDs 1-1000
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        node_id_to_uid = {}
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                # Class ID 1-1000
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # Synset string n********
                target_class_string = line.split(': ')[1]
                # Store the class ID 1-1000 -> synset string mapping
                node_id_to_uid[target_class] = target_class_string[1:-2]  # strip the quotes and trailing newline

        # Build the mapping from class ID 1-1000 to human-readable class name
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            # Human-readable name for this synset
            name = uid_to_human[val]
            # Map class ID 1-1000 to class name
            node_id_to_name[key] = name
        return node_id_to_name

    # Given a class ID 1-1000, return the human-readable class name
    def id_to_string(self, node_id):
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]
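
For reference, the parsing above expects the two files to look roughly like the illustrative lines below (the synset ID, label and class number are examples chosen for clarity, not guaranteed to match the real files line for line). imagenet_synset_to_human_label_map.txt holds one tab-separated pair per line, and the .pbtxt file holds two-space-indented entry blocks:

    n01440764	tench, Tinca tinca

    entry {
      target_class: 449
      target_class_string: "n01440764"
    }

With lines like these, NodeLookup.load() would map class ID 449 to 'tench, Tinca tinca'.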

# Directory where the downloaded model archive was extracted
model_dir = "./inception-2015-12-05"

# Load the frozen GraphDef and import it into the default graph
with tf.gfile.FastGFile(os.path.join(model_dir,
                                     'classify_image_graph_def.pb'), 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
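
The tensor names used in the session below ('softmax:0' for the class probabilities and 'DecodeJpeg/contents:0' for the raw image bytes) come from this frozen graph. A quick sketch to confirm they are there, or to browse the other node names:

with tf.Session() as sess:
    op_names = [op.name for op in sess.graph.get_operations()]
    print(len(op_names), 'operations in the graph')
    # Both names fed/fetched below should appear in the list
    print('DecodeJpeg/contents' in op_names, 'softmax' in op_names)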


with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    # Build the class-ID-to-name lookup once, outside the loop
    node_lookup = NodeLookup()
    # Walk the image directory
    for root, dirs, files in os.walk('image/'):
        for file in files:
            # Read the image file as raw bytes
            image_data = tf.gfile.FastGFile(os.path.join(root, file), 'rb').read()
            # Feed the bytes to the graph's decoder input and run the classifier
            predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})  # for JPEG-format images
            # predictions = sess.run(softmax_tensor, {'DecodeGif/contents:0': image_data})  # for GIF-format images
            predictions = np.squeeze(predictions)  # flatten the result to a 1-D array

            # Print the image path and name
            image_path = os.path.join(root, file)
            print(image_path)
            # Display the image
            img = Image.open(image_path)
            plt.imshow(img)
            plt.axis('off')
            plt.show()

            # Take the top-5 predictions, highest score first
            top_k = predictions.argsort()[-5:][::-1]
            for node_id in top_k:
                # Human-readable class name
                human_string = node_lookup.id_to_string(node_id)
                # Confidence score for this class
                score = predictions[node_id]
                print('%s (score = %.5f)' % (human_string, score))
            print()
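
One caveat, not from the original post: the loop feeds every file through the graph's JPEG decoder input, yet the test images (and the results below) include .png files, and whether that input tolerates non-JPEG bytes depends on the installed TensorFlow build. A more robust sketch is to re-encode each image to JPEG with Pillow before feeding it; to_jpeg_bytes is a helper introduced here for illustration.

import io
import os

from PIL import Image


def to_jpeg_bytes(path):
    """Re-encode any image Pillow can open (PNG, GIF, BMP, ...) as JPEG bytes."""
    with Image.open(path) as img:
        buf = io.BytesIO()
        img.convert('RGB').save(buf, format='JPEG')  # drop alpha/palette, write JPEG into memory
        return buf.getvalue()

# Inside the loop above, this line would replace the tf.gfile read:
# image_data = to_jpeg_bytes(os.path.join(root, file))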

Project results:

image/cat.png
tiger cat (score = 0.31934)
Egyptian cat (score = 0.31811)
tabby, tabby cat (score = 0.22258)
lynx, catamount (score = 0.02150)
Persian cat (score = 0.00604)
image/cat_fish.png
tabby, tabby cat (score = 0.24471)
cup (score = 0.23318)
water jug (score = 0.09043)
tiger cat (score = 0.07697)
goblet (score = 0.06291)
image/dog1.jpg
golden retriever (score = 0.41099)
tennis ball (score = 0.06002)
Pembroke, Pembroke Welsh corgi (score = 0.05309)
Border collie (score = 0.04154)
Brittany spaniel (score = 0.02695)
image/dog2.jpg
Pomeranian (score = 0.88244)
Pekinese, Pekingese, Peke (score = 0.01942)
toy poodle (score = 0.00269)
Maltese dog, Maltese terrier, Maltese (score = 0.00233)
Samoyed, Samoyede (score = 0.00211)
image/girl.jpg
suit, suit of clothes (score = 0.45349)
jean, blue jean, denim (score = 0.18616)
miniskirt, mini (score = 0.10581)
trench coat (score = 0.03698)
fur coat (score = 0.01893)