Playing with the Raspberry Pi

Deep Learning Model Inference with MNN on the Raspberry Pi 4B

2020-12-29  Daisy丶

MNN is a lightweight deep neural network inference engine that loads models and runs inference on device. MNN is currently used in more than 20 Alibaba apps, including Mobile Taobao, Mobile Tmall, and Youku, covering scenarios such as live streaming, short video, search and recommendation, product image search, interactive marketing, benefit distribution, and security risk control. It also has a number of IoT applications.

Documents: https://www.yuque.com/mnn/en/about
GitHub: https://github.com/alibaba/MNN

Environment

Installing the C++ dependencies:

Since the Raspberry Pi is an ARM platform, many libraries have to be built from source rather than installed as prebuilt binaries.

CMake

Download CMake from https://cmake.org/
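For example, fetch and unpack the sources on the Pi (the 3.18.4 release below is only an illustration; the build step further down assumes you are inside the unpacked source directory):

wget https://cmake.org/files/v3.18/cmake-3.18.4.tar.gz
tar -xzvf cmake-3.18.4.tar.gz
cd cmake-3.18.4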

Before installing CMake, make sure make, gcc, and g++ are already installed; you can check with make -v, gcc -v, and g++ -v. If any of them is missing, install it with apt-get:

sudo apt-get install gcc
sudo apt-get install g++
sudo apt-get install make

Install OpenSSL:

sudo apt-get install openssl
sudo apt-get install libssl-dev

Build and install:

./bootstrap
make -j4
sudo make install

Check the version to confirm the installation:

cmake --version

Protobuf

Install the dependencies:

sudo apt-get install autoconf automake libtool curl unzip
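The build steps below assume the Protobuf sources have already been checked out and the submodules initialized (the v3.14.0 tag is only an example; pick the release you need):

git clone https://github.com/protocolbuffers/protobuf.git
cd protobuf
git checkout v3.14.0
git submodule update --init --recursive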

Build and install:

./autogen.sh
./configure
make -j4
make check
sudo make install
sudo ldconfig # refresh shared library cache.

Check the version to confirm the installation:

protoc --version

OpenCV

Install the dependencies:

sudo apt-get install build-essential    
sudo apt-get install git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev 
sudo apt-get install libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev

If libjasper-dev fails to install, add the Ubuntu ports mirror and retry:

sudo add-apt-repository "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ xenial main multiverse restricted universe"
sudo apt update
sudo apt install libjasper1 libjasper-dev
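The CMake command below assumes the OpenCV 4.5.0 and opencv_contrib 4.5.0 sources are unpacked under /home/ubuntu, with the contrib modules placed inside the OpenCV directory; adjust the version and paths to your setup:

cd /home/ubuntu
wget -O opencv-4.5.0.zip https://github.com/opencv/opencv/archive/4.5.0.zip
wget -O opencv_contrib-4.5.0.zip https://github.com/opencv/opencv_contrib/archive/4.5.0.zip
unzip opencv-4.5.0.zip && unzip opencv_contrib-4.5.0.zip
mv opencv_contrib-4.5.0 opencv-4.5.0/
cd opencv-4.5.0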

Build and install:

mkdir build
cd build
sudo cmake -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local -D OPENCV_EXTRA_MODULES_PATH=/home/ubuntu/opencv-4.5.0/opencv_contrib-4.5.0/modules/ ..
sudo make install -j4

MNN

The Raspberry Pi 4B has a 500 MHz Broadcom VideoCore VI GPU, so MNN can try to use OpenCL for acceleration:

sudo apt-get install ocl-icd-opencl-dev
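To check whether an OpenCL platform is actually visible on the system, the clinfo utility can be used; if it reports no platforms, the OpenCL backend will fail at runtime:

sudo apt-get install clinfo
clinfo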

Edit CMakeLists.txt and turn on the OpenCL option:

option(MNN_OPENCL "Enable OpenCL" ON)
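Alternatively, the same switch can be passed on the command line when configuring the build below, without editing the file:

cmake .. -DMNN_OPENCL=ON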

Build MNN:

./schema/generate.sh
mkdir build
cd build
sudo cmake .. -DMNN_BUILD_CONVERTER=true && make -j4
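If the build succeeds, the build directory should contain, among other artifacts, the MNN runtime library and the model converter used in the next section:

ls libMNN.so MNNConvert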

Model inference

MNN supports mainstream model formats such as TensorFlow, Caffe, and ONNX, and common network types such as CNNs, RNNs, and GANs.
It supports 149 TensorFlow ops, 47 Caffe ops, and 74 ONNX ops; the number of MNN ops supported on each compute backend is: CPU 110, Metal 55, OpenCL 29, Vulkan 31.

Export the ONNX model

Since depth-wise convolution models have fewer parameters and are better suited to edge computing, the MobileNetV2 model for 1000-class ImageNet classification is used as the example here.

import torch
import torch.onnx as onnx
import torchvision.models as models


if __name__ == '__main__':
    net = models.mobilenet_v2(pretrained=True)  # load the ImageNet-pretrained weights
    net.eval()

    dummy_input = torch.zeros((1, 3, 224, 224))
    input_names = ["input"]
    output_names = ["output"]
    onnx.export(net,
                dummy_input,
                "mobilenet_v2.onnx",
                verbose=True,
                opset_version=11,
                input_names=input_names,
                output_names=output_names,
                dynamic_axes=None
    )

Convert to an MNN model

Convert the ONNX model to an MNN model:

./build/MNNConvert -f ONNX --modelFile mobilenet_v2.onnx --MNNModel mobilenet_v2.mnn --bizCode biz

Parameter description:

Usage:
  MNNConvert [OPTION...]

  -h, --help                    Convert Other Model Format To MNN Model

  -v, --version                 show the current converter version

  -f, --framework arg           source model format to convert from, ex: [TF,CAFFE,ONNX,TFLITE,MNN]

      --modelFile arg           model file to convert, ex: *.pb,*caffemodel

      --prototxt arg            Caffe network description file, ex: *.prototxt

      --MNNModel arg            output MNN model file name, ex: *.mnn

      --fp16                    store the float32 parameters of conv/matmul/LSTM as float16;
                                the model shrinks to about half its size with almost no loss of accuracy

      --benchmarkModel          do not save the parameters of conv/matmul/BN layers; only for benchmark tests

      --bizCode arg             MNN model flag, ex: MNN

      --debug                   use debug mode to print more conversion information

      --forTraining             keep training-related ops such as BN/Dropout, default: false

      --weightQuantBits arg     arg=2~8; only quantizes the float32 weights of conv/matmul/LSTM
                                to reduce model size; the weights are decoded back to float32 when
                                the model is loaded, so runtime speed matches the float32 model.
                                With 8 bits the accuracy is essentially unchanged and the model is about 4x smaller.
                                default: 0, i.e. no weight quantization

      --compressionParamsFile arg
                                model compression parameter file generated by the MNN model compression toolbox

      --saveStaticModel         fix the input shapes and save a static model, default: false

      --inputConfigFile arg     config file required for saving a static model, ex: ~/config.txt
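As an example of the options above, adding --fp16 stores the conv/matmul weights as float16 and roughly halves the model file (the output file name is just an illustration):

./build/MNNConvert -f ONNX --modelFile mobilenet_v2.onnx --MNNModel mobilenet_v2_fp16.mnn --fp16 --bizCode biz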

C++ API inference

Code:

#include <iostream>
#include <string>
#include <vector>
#include <cstring>
#include <chrono>
#include <cmath>

#include <opencv2/opencv.hpp>
#include "MNN/Interpreter.hpp"

using namespace std;
using namespace MNN;


int main() {
    string testImagePath = "/home/ubuntu/MNN/test.png";

    string modelFile = "/home/ubuntu/mobilenet_v2.mnn";
    string mode = "fp16";
    string deviceType = "gpu";
    int numThread = 1;

    // build network
    Interpreter* net = Interpreter::createFromFile(modelFile.c_str());

    // build config
    ScheduleConfig config;

    // set cpu thread used
    config.numThread = numThread;

    // set host device
    if (deviceType == "cpu")
        config.type = static_cast<MNNForwardType>(MNN_FORWARD_CPU);
    if (deviceType == "gpu")
        config.type = static_cast<MNNForwardType>(MNN_FORWARD_OPENCL);

    // set precision
    BackendConfig backendConfig;
    if (mode == "fp16")
        backendConfig.precision = static_cast<BackendConfig::PrecisionMode>(BackendConfig::Precision_Low);
    if(mode == "half")
        backendConfig.precision = static_cast<BackendConfig::PrecisionMode>(BackendConfig::Precision_Normal);
    if (mode == "fp32")
        backendConfig.precision = static_cast<BackendConfig::PrecisionMode>(BackendConfig::Precision_High);

    // set power use
    backendConfig.power = static_cast<BackendConfig::PowerMode>(BackendConfig::Power_Normal);
    // set memory use
    backendConfig.memory = static_cast<BackendConfig::MemoryMode>(BackendConfig::Memory_Normal);

    config.backendConfig = &backendConfig;

    // build session use config
    Session* session = net->createSession(config);

    // get input and output node of network
    Tensor* modelInputTensor = net->getSessionInput(session, NULL);
    Tensor* modelOutputTensor = net->getSessionOutput(session, NULL);

    // image preprocess: BGR -> RGB, resize, scale to [0, 1], then normalize per channel
    // with the ImageNet mean/std (cv::imread loads images as BGR)
    cv::Mat img = cv::imread(testImagePath);

    cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
    cv::resize(img, img, cv::Size(224, 224));
    img.convertTo(img, CV_32F, 1 / 255.0);

    const float meanVals[3] = {0.485f, 0.456f, 0.406f};
    const float stdVals[3] = {0.229f, 0.224f, 0.225f};
    vector<cv::Mat> channels(3);
    cv::split(img, channels);
    for (int c = 0; c < 3; ++c)
        channels[c] = (channels[c] - meanVals[c]) / stdVals[c];
    cv::merge(channels, img);

    Tensor* inputTensor = Tensor::create<float>({1, 224, 224, 3}, NULL, Tensor::TENSORFLOW);
    Tensor* outputTensor = Tensor::create<float>({1, 1000}, NULL, Tensor::CAFFE);

    memcpy(inputTensor->host<float>(), img.data, inputTensor->size());

    // inference
    auto start = chrono::high_resolution_clock::now();

    modelInputTensor->copyFromHostTensor(inputTensor);
    net->runSession(session);
    modelOutputTensor->copyToHostTensor(outputTensor);

    auto end = chrono::high_resolution_clock::now();
    double cost = chrono::duration<double, milli>(end - start).count();

    cout << "device: " << deviceType << ", mode: " << mode << endl;
    cout << "inference time: " << to_string(cost) << "ms" << endl;

    // post-process
    vector<float> confidence;
    confidence.resize(1000);
    memcpy(confidence.data(), outputTensor->host<float>(), outputTensor->size());

    // delete point
    delete inputTensor;
    delete outputTensor;
    delete net;

    return 0;
}
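The program above only copies the 1000 raw confidences out of the output tensor. Below is a minimal sketch of how the top-1 class id could be extracted from that confidence vector; mapping the id to a human-readable label requires the ImageNet class list, which is not shown here:

#include <algorithm>
#include <vector>

// Return the index of the largest confidence, i.e. the predicted class id.
static int argmaxTop1(const std::vector<float>& confidence) {
    auto it = std::max_element(confidence.begin(), confidence.end());
    return static_cast<int>(std::distance(confidence.begin(), it));
}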

CMakeLists.txt:

cmake_minimum_required(VERSION 3.17)
project(MNN)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

find_package(OpenCV REQUIRED)

if(OpenCV_FOUND)
    message(STATUS "OpenCV library: ${OpenCV_INSTALL_PATH}")
    message(STATUS "    version: ${OpenCV_VERSION}")
    message(STATUS "    libraries: ${OpenCV_LIBS}")
    message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")
else()
    message(WARNING "OpenCV not found via find_package, falling back to default install paths")

    set(OpenCV_INCLUDE_DIRS "/usr/local/include/opencv4")
    set(OpenCV_LIBS "/usr/local/lib")
endif()

set(MNN_INCLUDE_DIR "/home/ubuntu/MNN-1.1.0/include")
set(MNN_LIBRARIES "/home/ubuntu/MNN-1.1.0/build/libMNN.so")

message(STATUS "  MNN  libraries: ${MNN_LIBRARIES}")
message(STATUS "  MNN  include path: ${MNN_INCLUDE_DIR}")

add_executable(MNN main.cpp)

target_include_directories(MNN PUBLIC
        ${OpenCV_INCLUDE_DIRS}
        ${MNN_INCLUDE_DIR}
        )
target_link_libraries(MNN PUBLIC
        ${MNN_LIBRARIES}
        ${OpenCV_LIBS}
)

Build:

mkdir build
cd build
sudo cmake .. && make -j4
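The executable defined in the CMakeLists above is named MNN, so after the build it can be run directly from the build directory (the image and model paths in main.cpp are absolute, so no arguments are needed):

./MNN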

Performance comparison:

The ARMv8.2 instruction set introduced new fp16 arithmetic and int8 dot-product instructions, and a well-optimized framework can use them to speed up deep learning inference considerably. However, the Raspberry Pi 4B's CPU cores are Cortex-A72, which implement ARMv8-A, so neither fp16 nor int8 acceleration is available on the CPU. MNN's fp16 option only affects how the parameters are stored; at compute time they are converted back to fp32 before being fed to the CPU. The GPU path currently fails with ERROR CODE : -1001 when the OpenCL backend is invoked, so it could not be tested yet. As the table shows, mobilenet_v2 runs in roughly 95 ms on a single thread, and multi-threading can bring this down further.

Device    FP32        HALF        FP16
CPU       95.56 ms    96.74 ms    95.18 ms
GPU       -           -           -