
Andrew Ng's Deep Learning Course: Object Detection with the YOLO Algorithm

2018-01-15  koreyoshi_41f9

While working through the object-detection lesson and its programming exercise (implementing object detection with YOLO), I downloaded yolo.h5 from the Jupyter Notebook and set out to run my own images locally. But as soon as execution reached load_model('model_data/yolo.h5') I hit the following problem (I'm on Windows 10):

Error message

raw_code = codecs.decode(code.encode('ascii'), 'base64')
UnicodeEncodeError: 'ascii' codec can't encode character '\xe3' in position 0: ordinal not in range(128)

Searching Baidu got me nowhere. I then checked the Keras GitHub repository, where an issue said this happens in Keras 2.1.2 when loading older models and that you should install the latest master; I tried, and it still failed. Fine, I thought, I'll change the Keras version instead. I went through every Keras release and none of them worked: calling load_model('model_data/yolo.h5') simply made Python stop responding.
Uploading the code back to the Jupyter Notebook and running it there worked, which means the code itself was fine. After more experimenting it turned out the downloaded yolo.h5 file was the problem. Enough preamble; here is the fix.
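A quick way to check whether a downloaded .h5 file is at least a valid HDF5 container is sketched below (a hypothetical sanity check, not part of the original workflow; it assumes the h5py package is installed):

# sanity-check a downloaded Keras weights file (illustrative; requires h5py)
import h5py

path = 'model_data/yolo.h5'
if not h5py.is_hdf5(path):
    print('Not a valid HDF5 file - the download is probably corrupted or incomplete')
else:
    with h5py.File(path, 'r') as f:
        # a model saved by Keras normally exposes groups such as 'model_weights'
        print('Top-level groups:', list(f.keys()))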

The fix
Following the YAD2K Git repository, run the commands below (from inside a clone of YAD2K) to generate your own yolo.h5 file.

On Linux

wget http://pjreddie.com/media/files/yolo.weights
wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolo.cfg
./yad2k.py yolo.cfg yolo.weights model_data/yolo.h5
./test_yolo.py model_data/yolo.h5  # output in images/out/

On Windows

Download the weights by pasting this URL into your browser: http://pjreddie.com/media/files/yolo.weights
Download the config by pasting this URL into your browser: https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolo.cfg
Then run the following commands in CMD:
python yad2k.py yolo.cfg yolo.weights model_data/yolo.h5
python test_yolo.py model_data/yolo.h5  # output in images/out/

Now run your code against the yolo.h5 you just generated and it should work. If you still get the same error, try switching the Keras version; I'm on 2.1.0. Quite a few people have fixed the problem with this approach, so I hope it helps you too.
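To see which Keras version is installed before deciding whether to switch (a trivial check shown only for convenience; pinning can then be done with pip install keras==2.1.0):

# print the installed Keras version; this post uses 2.1.0
import keras
print(keras.__version__)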

My own code for running detection on the webcam

I'm still a beginner, so the code is ugly, but it does run; please go easy on me.
Frames are captured with OpenCV while the image processing is done with PIL, so there is a conversion step in between (see the short sketch below); when I find the time I'll replace the PIL part with OpenCV.
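For reference, this is the round trip the main loop below performs between an OpenCV frame and a PIL image (a minimal standalone sketch of the conversion, assuming a BGR frame is already available from somewhere such as cv.imread or cv.VideoCapture):

# OpenCV frames are BGR numpy arrays; PIL works in RGB, so convert in both directions
import cv2 as cv
import numpy as np
from PIL import Image

bgr_frame = cv.imread('test.jpg')                                       # any BGR frame
pil_image = Image.fromarray(cv.cvtColor(bgr_frame, cv.COLOR_BGR2RGB))   # OpenCV -> PIL
bgr_again = cv.cvtColor(np.asarray(pil_image), cv.COLOR_RGB2BGR)        # PIL -> OpenCV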

# coding: utf-8
import os
import colorsys
import random

import cv2 as cv
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners
from PIL import Image, ImageDraw, ImageFont

class YOLO():
    def __init__(self, model_path='model_data/yolo.h5', anchors_path='model_data/yolo_anchors.txt',
                 classes_path='model_data/coco_classes.txt', image_shape=(480., 640.),
                 max_boxes=10, score_threshold=0.3, iou_threshold=0.5):
        self.model_path = model_path
        self.anchors_path = anchors_path
        self.classes_path = classes_path
        self.image_shape = image_shape
        self.max_boxes = max_boxes
        self.score_threshold = score_threshold
        self.iou_threshold = iou_threshold

        self.class_names = self._read_classes(self.classes_path)
        self.anchors = self._read_anchors(self.anchors_path)
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()

    def _read_classes(self,classes_path):
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _read_anchors(self,anchors_path):
        with open(anchors_path) as f:
            anchors = f.readline()
            anchors = [float(x) for x in anchors.split(',')]
            anchors = np.array(anchors).reshape(-1, 2)
        return anchors

    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
        self.yolo_model = load_model(model_path)

        yolo_outputs = yolo_head(self.yolo_model.output, self.anchors, len(self.class_names))
        scores, boxes, classes = self.yolo_eval(yolo_outputs, self.image_shape,max_boxes=self.max_boxes,score_threshold=self.score_threshold, iou_threshold=self.iou_threshold)
        return boxes, scores, classes

    def yolo_filter_boxes(self,box_confidence, boxes, box_class_probs, threshold=.6):
        box_scores = box_confidence * box_class_probs
        box_classes = K.argmax(box_scores, axis=-1)
        box_class_scores = K.max(box_scores, axis=-1)

        filtering_mask = box_class_scores >= threshold

        scores = tf.boolean_mask(box_class_scores, filtering_mask)
        boxes = tf.boolean_mask(boxes, filtering_mask)
        classes = tf.boolean_mask(box_classes, filtering_mask)

        return scores, boxes, classes

    def yolo_non_max_suppression(self,scores, boxes, classes, max_boxes=10, iou_threshold=0.5):
        max_boxes_tensor = K.variable(max_boxes, dtype='int32')  # tensor to be used in tf.image.non_max_suppression()
        K.get_session().run(tf.variables_initializer([max_boxes_tensor]))  # initialize variable max_boxes_tensor

        nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)

        scores = K.gather(scores, nms_indices)
        boxes = K.gather(boxes, nms_indices)
        classes = K.gather(classes, nms_indices)

        return scores, boxes, classes

    def scale_boxes(self, boxes, image_shape):
        """ Scales the predicted boxes in order to be drawable on the image"""
        height = image_shape[0]
        width = image_shape[1]
        image_dims = K.stack([height, width, height, width])
        image_dims = K.reshape(image_dims, [1, 4])
        boxes = boxes * image_dims
        return boxes

    def yolo_eval(self,yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
        box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
        boxes = yolo_boxes_to_corners(box_xy, box_wh)

        scores, boxes, classes = self.yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)

        # Scale boxes back to original image shape.
        boxes = self.scale_boxes(boxes, image_shape)

        scores, boxes, classes = self.yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)

        return scores, boxes, classes

    def draw_boxes(self, image, out_scores, out_boxes, out_classes, class_names, colors):

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)

            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i], outline=colors[c])
            draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

    def generate_colors(self,class_names):
        hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
        colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
        random.seed(10101)  # Fixed seed for consistent colors across runs.
        random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
        random.seed(None)  # Reset seed to default.
        return colors


    def detect(self, image, model_image_size = (608, 608)):
        # Preprocess your image
        resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_scores, out_boxes, out_classes = self.sess.run([self.scores, self.boxes, self.classes],
                                                      feed_dict={self.yolo_model.input: image_data, K.learning_phase(): 0})
        # Generate colors for drawing bounding boxes.
        colors = self.generate_colors(self.class_names)
        # Draw bounding boxes on the image file
        self.draw_boxes(image, out_scores, out_boxes, out_classes, self.class_names, colors)

        return image

    def close_session(self):
        self.sess.close()



if __name__ == '__main__':
    yolo = YOLO()
    cv.namedWindow("camera", 1)

    capture = cv.VideoCapture(0)            # open the webcam
    while True:
        result, img = capture.read()        # grab and decode one BGR frame
        if not result:
            break
        image = Image.fromarray(cv.cvtColor(img, cv.COLOR_BGR2RGB))    # OpenCV BGR -> PIL RGB
        image = yolo.detect(image)
        image.save("test.jpg", quality=100)  # keep a copy of the latest annotated frame
        im = cv.cvtColor(np.asarray(image), cv.COLOR_RGB2BGR)          # PIL RGB -> OpenCV BGR
        cv.imshow("camera", im)
        key = cv.waitKey(100)
        if key == 27:                        # press Esc to quit
            break

    yolo.close_session()
    capture.release()
    cv.destroyWindow("camera")

My machine is too weak (a company-issued HP EliteBook 840 G2 with no GPU), so one frame takes roughly 3 to 4 seconds and the live view is far from smooth. With a decent GPU you should be able to get close to real-time detection.
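If you want to measure the per-frame cost on your own machine, a minimal timing sketch (not in the original post; it assumes the YOLO class above plus the model_data files and the font are in place) could look like this:

# rough timing of a single detection pass (illustrative)
import time
from PIL import Image

yolo = YOLO()
frame = Image.open('test.jpg')                       # any test image
start = time.perf_counter()
yolo.detect(frame)
print('detection took {:.2f} s'.format(time.perf_counter() - start))
yolo.close_session()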

Two frames from my own run: IMG_3786.JPG, IMG_3787.JPG

I'm standing on the shoulders of giants here: part of the code comes from Andrew Ng's deep learning course and from yad2k.
