Wrapping common OpenCV C++ operators

2021-07-13  by 1037号森林里一段干木头

Overview: this post wraps a number of common OpenCV C++ operators so they work out of the box. During algorithm prototyping you can get results quickly; the wrappers can serve as calling examples, or be used directly so you don't have to rewrite the same boilerplate every time.
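A minimal usage sketch of the intended workflow (the header below is saved as imageProcess.h, as the implementation's #include shows; the image path here is just a placeholder, and a fuller demo() appears at the end of the post):

#include "imageProcess.h"

int main()
{
    cv::Mat image = cv::imread("test.png");   //placeholder path, replace with your own image
    cv::Mat blurred, edges;
    cvbag::gaussBlur(image, blurred);         //defaults: ksize = 5, sigma = 1.0
    cvbag::canny(blurred, edges);             //defaults: low = 100, high = 200
    cvbag::showImage(edges, "edges");
    return 0;
}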



#pragma once
#include "opencv.hpp"

namespace cvbag
{
    bool isImageEmpty(const cv::Mat &image, const std::string functionName);

    int getAllImagePath( std::string folder, std::vector<cv::String> &imagePathList, bool flg=false);

    int showImage(const cv::Mat &image, const std::string winName = "img", const int waitKeyMode = 0, const int destroyMode = 0);

    int gaussBlur(const cv::Mat &image, cv::Mat &dst, const int ksize = 5, double sigma = 1.0);

    int sobel_x(const cv::Mat &image, cv::Mat &dst, const int ksize = 3);

    int sobel_y(const cv::Mat &image, cv::Mat &dst, const int ksize = 3);

    int sobel_xy(const cv::Mat &image, cv::Mat &dst, const int ksize = 3);

    int canny(const cv::Mat &image, cv::Mat &dst, const int low = 100, const int high = 200);

    int otsu(const cv::Mat &image, cv::Mat &dst);

    int threshold(const cv::Mat &image, cv::Mat &dst, const int th = 128, const int mode = 0, const int maxval = 255);

    int adaptiveThreshold(const cv::Mat &image, cv::Mat &dst, int blockSize = 11, double C = 15, double maxval = 255,
        int adaptiveMethod = cv::ADAPTIVE_THRESH_MEAN_C, int thresholdType = cv::THRESH_BINARY_INV);

    int findContours(const cv::Mat &binaryImage, std::vector<std::vector<cv::Point>> &contours, int topologyMode = 1, int contoursType = 1);

    int drawContours(cv::Mat &image, const std::vector<std::vector<cv::Point>> &contours, int contoursIdx = -1, int b = 0, int g = 0, int r = 255);

    int dilate(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize = 3, const int kernelMode = 0);

    int erode(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize = 3, const int kernelMode = 0);

    int open(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize = 3, const int kernelMode = 0);

    int close(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize = 3, const int kernelMode = 0);

    //use the table to transform the pixels 
    int gammaTransform(const cv::Mat &image, cv::Mat &dst, const int  table[]);
    //if the input image has 3 channels, use the method below to transform each channel
    int gammaTransform_threeChannels(const cv::Mat &image, cv::Mat &dst, const int table[]);
    //a[i] = int(pow(i / 255.0, gamma) *255.0);
    int getGammaTable(const double gamma, int *a, const int num = 256);
    //gamma API
    int gamma(const cv::Mat &image, cv::Mat &dst, const double gamma = 1.0);

    //gamma piecewise linear function transform
    int getGammaTable_piecewiseLinear(int *a, const int src1, const int dst1, const int src2, const int dst2);
    //gamma_piecewiseLinear
    /*
        f(x) = (dst1 / src1) * x,                                  if x < src1
             = (dst2 - dst1) / (src2 - src1) * (x - src1) + dst1,  if src1 <= x < src2
             = (255 - dst2) / (255 - src2) * (x - src2) + dst2,    if x >= src2
    */
    int gamma_piecewiseLinear(const cv::Mat &image, cv::Mat &dst,
        const int src1 = 80, const int dst1 = 60, const int src2 = 160, const int dst2 = 180);
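    // e.g. with the default arguments above (src1=80, dst1=60, src2=160, dst2=180):
    //   f(40) = 30,  f(120) = 120,  f(200) = 211  (intermediate results truncated to int)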
    
};
#include "imageProcess.h"

bool cvbag::isImageEmpty(const cv::Mat &image, const std::string functionName)
{
    if (image.empty())
    {
        std::cout << "ERROR \t in cvbag::"<<functionName<<" ,the input image is empty!\n";
        return true;
    }

    return false;
}

int cvbag::getAllImagePath(std::string folder, std::vector<cv::String> &imagePathList, bool flg)
{
    cv::glob(folder, imagePathList, flg);
    return 0;
}

int cvbag::showImage(const cv::Mat &image, const std::string winName, const int waitKeyMode, const int destroyMode)
{
    if (cvbag::isImageEmpty(image,"showImage"))
    {
        return -1;
    }

    cv::namedWindow(winName, cv::WINDOW_NORMAL);
    cv::imshow(winName, image);
    cv::waitKey(waitKeyMode);
    if (destroyMode == 1)
    {
        cv::destroyWindow(winName);
    }

    return 0;
}

int cvbag::gaussBlur(const cv::Mat &image, cv::Mat &dst, const int ksize, double sigma)
{
    if (cvbag::isImageEmpty(image, "gaussBlur"))
    {
        return -1;
    }
    //GaussianBlur requires a positive odd kernel size
    int k = (ksize % 2 == 0) ? ksize + 1 : ksize;
    cv::GaussianBlur(image, dst, cv::Size(k, k), sigma);
    return 0;
}

int cvbag::sobel_x(const cv::Mat &image, cv::Mat &dst, const int ksize)
{
    if (cvbag::isImageEmpty(image, "sobel_x"))
    {
        return -1;
    }

    cv::Sobel(image, dst, CV_64F, 1, 0, ksize);
    return 0;
}

int cvbag::sobel_y(const cv::Mat &image, cv::Mat &dst, const int ksize)
{
    if (cvbag::isImageEmpty(image, "sobel_y"))
    {
        return -1;
    }

    cv::Sobel(image, dst, CV_64F, 0, 1, ksize);
    return 0;
}

int cvbag::sobel_xy(const cv::Mat &image, cv::Mat &dst, const int ksize)
{
    if (cvbag::isImageEmpty(image, "sobel_xy"))
    {
        return -1;
    }
    cv::Mat sobel_x, sobel_y;
    cvbag::sobel_x(image, sobel_x, ksize);
    cvbag::sobel_y(image, sobel_y, ksize);
    cv::addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0, dst);
    return 0;
}

int cvbag::canny(const cv::Mat &image, cv::Mat &dst, const int low, const int high)
{
    if (cvbag::isImageEmpty(image, "canny")) return -1;

    cv::Canny(image, dst, low, high);
    return 0;
}

int cvbag::otsu(const cv::Mat &image, cv::Mat &dst)
{
    if (cvbag::isImageEmpty(image, "adaptiveThreshold")) return -1;
    if (image.channels() == 3) cv::cvtColor(image, dst, cv::COLOR_BGR2GRAY);
    cv::threshold(dst, dst, 0, 255, 8);
    return 0;
}

int cvbag::threshold(const cv::Mat &image, cv::Mat &dst, const int th , const int mode, const int maxval)
{
    /* ThresholdTypes:
    THRESH_BINARY     = 0   dst(x,y) = (src(x,y) > thresh) ? maxval : 0
    THRESH_BINARY_INV = 1   dst(x,y) = (src(x,y) > thresh) ? 0 : maxval
    THRESH_TRUNC      = 2   dst(x,y) = (src(x,y) > thresh) ? thresh : src(x,y)
    THRESH_TOZERO     = 3   dst(x,y) = (src(x,y) > thresh) ? src(x,y) : 0
    THRESH_TOZERO_INV = 4   dst(x,y) = (src(x,y) > thresh) ? 0 : src(x,y)
    THRESH_MASK       = 7
    THRESH_OTSU       = 8   flag: use Otsu's algorithm to choose the optimal threshold value
    THRESH_TRIANGLE   = 16  flag: use the Triangle algorithm to choose the optimal threshold value
    */

    if (cvbag::isImageEmpty(image, "threshold")) return -1;
    if (image.channels() == 3) cv::cvtColor(image, dst, cv::COLOR_BGR2GRAY);
    else image.copyTo(dst);
    cv::threshold(dst, dst, th, maxval, mode);

    return 0;
}

int cvbag::adaptiveThreshold(const cv::Mat &image, cv::Mat &dst, int blockSize, double C, double maxval,
    int adaptiveMethod, int thresholdType)
{
    if (cvbag::isImageEmpty(image, "adaptiveThreshold")) return -1;
    //blockSize must be an odd number, like 3, 5, 7...
    if (blockSize % 2 == 0) blockSize += 1;
    if (image.channels() == 3) {
        cv::cvtColor(image, dst, cv::COLOR_BGR2GRAY);
        cv::adaptiveThreshold(dst, dst, maxval, adaptiveMethod, thresholdType, blockSize, C);
    } 
    else {
        cv::adaptiveThreshold(image, dst, maxval, adaptiveMethod, thresholdType, blockSize, C);
    }

    
    return 0;
}

int cvbag::findContours(const cv::Mat &binaryImage, std::vector<std::vector<cv::Point>> &contours, int topologyMode, int contoursType)
{
    /** Contour retrieval modes */
    /*enum
    {
        CV_RETR_EXTERNAL = 0,
        CV_RETR_LIST = 1,
        CV_RETR_CCOMP = 2,
        CV_RETR_TREE = 3,
        CV_RETR_FLOODFILL = 4
    };*/

    /** Contour approximation methods */
    /*enum
    {
        CV_CHAIN_CODE = 0,
        CV_CHAIN_APPROX_NONE = 1,
        CV_CHAIN_APPROX_SIMPLE = 2,
        CV_CHAIN_APPROX_TC89_L1 = 3,
        CV_CHAIN_APPROX_TC89_KCOS = 4,
        CV_LINK_RUNS = 5
    };*/

    if (cvbag::isImageEmpty(binaryImage, "findContours")) return -1;

    if (binaryImage.channels() == 3) return -1;
    cv::findContours(binaryImage, contours, topologyMode, contoursType);

    return 0;
}

int cvbag::drawContours(cv::Mat &image, const std::vector<std::vector<cv::Point>> &contours, int contoursIdx, int b, int g, int r)
{
    if (cvbag::isImageEmpty(image, "drawContours")) return -1;
    cv::drawContours(image, contours, contoursIdx, cv::Scalar(b, g, r));
    return 0;
}

int cvbag::dilate(const cv::Mat &binaryImage, cv::Mat &dst, const  int ksize , const int kernelMode )
{
    /*  MORPH_ERODE
        MORPH_DILATE
        MORPH_OPEN      dst = open(src, element) = dilate(erode(src, element))
        MORPH_CLOSE     dst = close(src, element) = erode(dilate(src, element))
        MORPH_GRADIENT  dst = morph_grad(src, element) = dilate(src, element) - erode(src, element)
        MORPH_TOPHAT    dst = tophat(src, element) = src - open(src, element)
        MORPH_BLACKHAT  dst = blackhat(src, element) = close(src, element) - src
        MORPH_HITMISS   "hit or miss" - only supported for CV_8UC1 binary images;
                        a tutorial can be found in the OpenCV documentation
    */

    /*
    enum MorphTypes{
    MORPH_ERODE    = 0,
    MORPH_DILATE   = 1,
    MORPH_OPEN     = 2,
    MORPH_CLOSE    = 3,
    MORPH_GRADIENT = 4,
    MORPH_TOPHAT   = 5,
    MORPH_BLACKHAT = 6,
    MORPH_HITMISS  = 7
    };
    */

    /*
    enum MorphShapes {
    MORPH_RECT    = 0, // rectangular structuring element
    MORPH_CROSS   = 1, // cross-shaped structuring element (1 on the anchor row/column, 0 elsewhere)
    MORPH_ELLIPSE = 2  // elliptic structuring element: a filled ellipse inscribed in the ksize rectangle
    };
    */
    if (cvbag::isImageEmpty(binaryImage, "dilate")) return -1;
    if (binaryImage.channels() == 3) return -1;
    cv::Mat element = cv::getStructuringElement(kernelMode, cv::Size(ksize, ksize));
    cv::morphologyEx(binaryImage, dst, cv::MORPH_DILATE, element);
    return 0;
}

int cvbag::erode(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize , const int kernelMode )
{
    // see the MorphTypes / MorphShapes notes in cvbag::dilate above
    if (cvbag::isImageEmpty(binaryImage, "erode")) return -1;
    if ( binaryImage.channels() == 3) return -1;
    cv::Mat element = cv::getStructuringElement(kernelMode, cv::Size(ksize, ksize));
    cv::morphologyEx(binaryImage, dst, cv::MORPH_ERODE, element);
    return 0;
}

int cvbag::open(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize, const int kernelMode)
{
    
    if (cvbag::isImageEmpty(binaryImage, "open")) return -1;
    if (binaryImage.channels() == 3) return -1;
    cv::Mat element = cv::getStructuringElement(kernelMode, cv::Size(ksize, ksize));
    cv::morphologyEx(binaryImage, dst, cv::MORPH_OPEN, element);
    return 0;
}

int cvbag::close(const cv::Mat &binaryImage, cv::Mat &dst, const int ksize, const int kernelMode)
{

    if (cvbag::isImageEmpty(binaryImage, "close")) return -1;
    if (binaryImage.channels() == 3) return -1;
    cv::Mat element = cv::getStructuringElement(kernelMode, cv::Size(ksize, ksize));
    cv::morphologyEx(binaryImage, dst, cv::MORPH_CLOSE, element);
    return 0;
}

int cvbag::gammaTransform_threeChannels(const cv::Mat &image, cv::Mat &dst, const int table[])
{
    std::vector<cv::Mat> channelsImage;
    std::vector<cv::Mat> channelsImage_dst;
    cv::split(image, channelsImage);
    for (int i = 0; i < channelsImage.size(); i++)
    {
        channelsImage_dst.push_back(cv::Mat::zeros(cv::Size(image.cols, image.rows), CV_8UC1));

    }

    for (int i = 0; i < channelsImage.size(); i++)
    {
        gammaTransform(channelsImage[i], channelsImage_dst[i], table);
    }
    cv::merge(channelsImage_dst, dst);

    return 0;
}

int cvbag::getGammaTable(const double gamma, int *a, const int num )
{
    for (int i = 0; i < num; i++)
    {
        a[i] = int(pow(i / 255.0, gamma) *255.0);
    }

    return 0;
}

int cvbag::gamma(const cv::Mat &image, cv::Mat &dst, const double gamma )
{
    if (image.empty()) return -1;

    int table[256] = { 0 };
    getGammaTable(gamma, table);

    gammaTransform(image, dst, table);


    return 0;
}

int cvbag::gammaTransform(const cv::Mat &image, cv::Mat &dst, const int  table[])
{
    if (image.empty()) return -1;

    if (image.channels() == 3)
    {
        return gammaTransform_threeChannels(image, dst, table);
    }

    if (dst.empty()) dst = cv::Mat::zeros(cv::Size(image.cols, image.rows), CV_8UC1);

    const uchar * ps = NULL;
    uchar * pd = NULL;

    for (int i = 0; i < image.rows; i++)
    {
        ps = image.ptr<uchar>(i);
        pd = dst.ptr<uchar>(i);
        for (int j = 0; j < image.cols; j++)
        {
            *(pd + j) = table[int(*(ps + j))];
        }
    }
    return 0;
}

int cvbag::gamma_piecewiseLinear(const cv::Mat &image, cv::Mat &dst, const int src1, const int dst1, const int src2, const int dst2)
{
    if (image.empty()) return -1;

    int table[256] = { 0 };
    if (src1 <= 0 || src1 > 255 || src1 >= src2 || src2 > 255 ||
        dst1 < 0 || dst1 > 255 || dst2 < 0 || dst2 > 255) return -1;

    getGammaTable_piecewiseLinear(table,src1,dst1,src2,dst2);

    gammaTransform(image, dst, table);

    return 0;
}

int cvbag::getGammaTable_piecewiseLinear(int *a, const int src1 , const int dst1 , const int src2 , const int dst2 )
{
    for (int i = 0; i < src1; i++)
    {
        a[i] = int(float(dst1) / float(src1)*i);
    }

    for (int i = src1; i < src2; i++)
    {
        a[i] = int(float(dst2 - dst1) / float(src2 - src1) * (i - src1) + dst1);
    }

    for (int i = src2; i < 256; i++)
    {
        a[i] = int(float(255 - dst2) / float(255 - src2) * (i - src2) + dst2);
    }

    return 0;
}


int demo()
{
    //const std::string fileName = "K:\\imageData\\golden_pad\\204.bmp";
    const std::string fileName = "K:\\imageData\\lena\\Lena.png";

    //cvbag tool;

    //showImage
    cv::Mat image = cv::imread(fileName);
    cvbag::showImage(image, "image",0,0);

    //gauss smooth
    cv::Mat gauss;
    cvbag::gaussBlur(image, gauss);
    cvbag::showImage(gauss, "gauss");

    //sobel edge detect
    cv::Mat sobel_xy;
    cvbag::sobel_xy(gauss, sobel_xy);
    //convert CV_64F to CV_8U so that the result can be displayed with cv::imshow
    cv::convertScaleAbs(sobel_xy, sobel_xy);
    cvbag::showImage(sobel_xy, "sobel_xy");

    //otsu
    cv::Mat otsu;
    cvbag::otsu(gauss, otsu);
    cvbag::showImage(otsu, "otsu");

    //threshold
    cv::Mat threshold;
    cvbag::threshold(gauss, threshold, 120, cv::THRESH_BINARY_INV | cv::THRESH_OTSU);
    cvbag::showImage(threshold, "threshold");

    //adaptiveThreshold
    cv::Mat adaptiveThreshold;
    cvbag::adaptiveThreshold(gauss, adaptiveThreshold, 11, 8);
    cvbag::showImage(adaptiveThreshold, "adaptiveThreshold");

    //canny
    cv::Mat canny;
    cvbag::canny(image, canny, 120, 180);
    cvbag::showImage(canny, "canny");

    //contours
    std::vector<std::vector<cv::Point>> contours;
    cvbag::findContours(adaptiveThreshold, contours);
    cv::Mat conImage = gauss.clone();
    cvbag::drawContours(conImage, contours, -1, 0, 255, 0);
    cvbag::showImage(conImage, "contours");

    //dilate
    cv::Mat dilate;
    cvbag::dilate(otsu, dilate, 3, 0);
    cvbag::showImage(dilate, "dilate");

    //erode
    cv::Mat erode;
    cvbag::erode(otsu, erode, 3, 0);
    cvbag::showImage(erode, "erode");

    //open
    cv::Mat open;
    cvbag::open(otsu, open);
    cvbag::showImage(open, "open");

    //close
    cv::Mat close;
    cvbag::close(otsu, close);
    cvbag::showImage(close, "close");

    //gamma
    cv::Mat gamma;
    cvbag::gamma(image, gamma, 2);
    cvbag::showImage(gamma, "gamma");

    //gamma_piecewiseLinear
    cv::Mat gamma_piece;
    cvbag::gamma_piecewiseLinear(image, gamma_piece, 120, 60, 160, 200);
    cvbag::showImage(gamma_piece, "gamma_piece");

    return 0;
}



int main()
{
    demo();
    return 0;
}
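To build and run the demo, one possible command line is sketched below, assuming the implementation above is saved as imageProcess.cpp next to imageProcess.h (the .cpp name is just an illustration) and that OpenCV's pkg-config file is installed:

g++ -std=c++11 imageProcess.cpp -o demo $(pkg-config --cflags --libs opencv4)

For OpenCV 3.x the pkg-config package name is usually opencv instead of opencv4; adjust for your own environment.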