tensorflow

tensorflow serving (三):request/gRPC

2019-05-09  本文已影响0人  夕一啊

保存pb模型的时候就定义好了输入输出节点的名字,这里是input和output

request

使用 docker 启动服务,默认的 8501 端口是 REST (HTTP request) 请求端口
传入的数据是图片,JSON 不接受 np.array,所以需要转成 list,但是这样很慢

import os
import requests
from time import time
import cv2
import json
import numpy as np

# TF Serving REST endpoint: http://<host>:<port>/v1/models/<model_name>:predict
# (presumably container port 8501 mapped to host 7475 — confirm docker run flags)
url = 'http://localhost:7475/v1/models/pse:predict'

def predict_one(img_path):
    """Send one image to the TF Serving REST endpoint and return the
    first prediction's kernel map (channel 0).

    Args:
        img_path: path to an image file readable by OpenCV.

    Returns:
        np.ndarray: 2-D array taken from ``predictions[0][:, :, 0]``.

    Raises:
        ValueError: if the image cannot be read.
        requests.HTTPError: if the server replies with an error status.
    """
    start_time = time()
    print(img_path)
    img = cv2.imread(img_path)
    if img is None:  # cv2.imread returns None instead of raising
        raise ValueError("could not read image: %s" % img_path)

    time1 = time()
    print("read pic", time1 - start_time, " s")
    # JSON cannot carry an np.ndarray directly, so convert to nested lists.
    # This conversion is slow for large images — prefer the gRPC API.
    predict_request_json = {"instances": [{"input": img.tolist()}]}
    time2 = time()
    print("to list", time2 - time1, " s")

    r = requests.post(url, json=predict_request_json)
    r.raise_for_status()  # fail loudly on HTTP errors instead of during parsing

    # predictions[0] is assumed H x W x C — keep channel 0 only.
    kernel = np.array(r.json()["predictions"][0])[:, :, 0]
    print(kernel.shape)

    end_time = time()
    print(end_time - start_time, " s")
    return kernel

gRPC

gRPC 的端口映射到宿主机的 8500 端口,不需要将图片转换为 list,速度会快很多

pip install tensorflow_serving_api
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

import grpc
import cv2
import numpy as np
from time import time
import os

# gRPC channel to the serving container (here mapped to host port 7475).
channel = grpc.insecure_channel("localhost:7475")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()

# model_name must match the --model_name configured when starting TF Serving;
# signature_name must match the signature used when the model was saved.
# NOTE(review): tf.saved_model.signature_constants is the TF1 API path —
# confirm the installed TensorFlow version still provides it.
request.model_spec.name = "pse"
request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY


def predct_one(pic_path):
    """Send one image to the TF Serving gRPC endpoint and return the
    model's "output" tensor as a NumPy array.

    NOTE(review): the name looks like a typo of ``predict_one``; kept
    unchanged so existing callers keep working.

    Args:
        pic_path: path to an image file readable by OpenCV.

    Returns:
        np.ndarray: the "output" tensor from the response.

    Raises:
        ValueError: if the image cannot be read.
    """
    print(pic_path)
    start_time = time()
    img = cv2.imread(pic_path)
    if img is None:  # cv2.imread returns None instead of raising
        raise ValueError("could not read image: %s" % pic_path)
    print("orig size:", img.shape)
    # img = cv2.resize(img,(1280,960))
    img = np.expand_dims(img, 0)  # add a batch dimension: (1, H, W, C)

    # Fill the request tensor in place. CopyFrom avoids the redundant
    # serialize/deserialize round trip of ParseFromString(SerializeToString()).
    # NOTE(review): tf.contrib exists only in TF1; in TF2 this would be
    # tf.make_tensor_proto / tf.make_ndarray — confirm the TF version.
    request.inputs["input"].CopyFrom(
        tf.contrib.util.make_tensor_proto(img, dtype=tf.float32))
    time1 = time()
    print("read pic", time1 - start_time, " s")
    response = stub.Predict(request)

    # Convert every returned tensor proto back into a NumPy array.
    results = {}
    for key in response.outputs:
        tensor_proto = response.outputs[key]
        results[key] = tf.contrib.util.make_ndarray(tensor_proto)

    # "output" is the tensor name chosen when the model was saved.
    kernel = results["output"]
    print(kernel.shape)
    print(time() - start_time, " s")
    return kernel
上一篇 下一篇

猜你喜欢

热点阅读