OpenGL系列之十六:实现大眼特效

2022-12-26  本文已影响0人  itfitness

目录

效果展示

相关文章

OpenGL系列之一:OpenGL第一个程序
OpenGL系列之二:绘制三角形
OpenGL系列之三:三角形顶点增加颜色
OpenGL系列之四:绘制四边形
OpenGL系列之五:绘制点和线
OpenGL系列之六:绘制立方体
OpenGL系列之七:纹理贴图
OpenGL系列之八:立方体纹理贴图
OpenGL系列之九:glsl着色器语言
OpenGL系列之十:VAO、VBO、EBO的应用
OpenGL系列之十一:Shader图片转场切换动画
OpenGL系列之十二:Shader燃烧动画
OpenGL系列之十三:实现Shader绚丽动画
OpenGL系列之十四:实现相机抖音特效
OpenGL系列之十五:实现美颜相机

实现步骤

1.实现相机预览

这里我们以《OpenGL系列之十四:实现相机抖音特效》这篇文章的代码为基础

2.获取相机texture数据

增加GLHelper,用于获取到相机texture数据,这里使用的是FBO,基本都是固定的,代码如下:

/**
 * GL helper that resolves the camera's external OES texture into an FBO-backed
 * RGBA texture, reads the pixels back to the CPU, and runs SCRFD face detection
 * on every frame. The target texture id, raw pixel bytes and detection result
 * are bundled into a [GLData] for the native renderer.
 *
 * All GL methods must be called on the GL thread that owns the EGL context.
 */
class GLHelper {
    // External OES texture id produced by the camera (attached by the renderer).
    var camTextureId: IntArray = IntArray(1)
    // Framebuffer used to resolve the OES texture into a readable 2D texture.
    protected var fbo = IntArray(1)
    // Color attachment of the FBO; receives the camera frame in RGBA.
    protected var fboTexureId = IntArray(1)
    // Texture the native code uploads the (possibly annotated) pixels into.
    protected var drawTexureId = IntArray(1)

    // Pass-through vertex shader: forwards position and texture coordinate.
    private val vss = """attribute vec2 vPosition;
attribute vec2 vTexCoord;
varying vec2 texCoord;
void main() {
  texCoord = vTexCoord;
  gl_Position = vec4 (vPosition.x, vPosition.y, 0.0, 1.0);
}"""

    // Fragment shader sampling the external (camera) OES texture.
    private val fssCam2FBO = """#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES sTexture;
varying vec2 texCoord;
void main() {
  gl_FragColor = texture2D(sTexture,texCoord);
}"""

    // Full-screen quad (triangle strip) and its default texture coordinates.
    private val vertexCoords = floatArrayOf(-1f, -1f, -1f, 1f, 1f, -1f, 1f, 1f)
    private val textureCoords = floatArrayOf(0f, 1f, 0f, 0f, 1f, 1f, 1f, 0f)

    private var vertexCoordsBuffer: FloatBuffer? = null
    private var textureCoordsBuffer: FloatBuffer? = null

    // Program copying camTextureId into the FBO, plus its attribute locations.
    private var progCam2FBO = -1
    private var vcCam2FBO = 0
    private var tcCam2FBO = 0

    var textureWidth = 0
    var textureHeight = 0

    // SCRFD face detector (FastDeploy); initialized in the constructor.
    var predictor: SCRFD = SCRFD()

    /** Releases the native face-detection predictor. */
    fun release() {
        predictor.release()
    }

    private var context: Context

    constructor(context: Context) {
        this.context = context
        // Model files are expected in the app-private Downloads directory.
        val modelFile = File(context.getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS), "model.pdmodel")
        val paramsFile = File(context.getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS), "model.pdiparams")
        val option = RuntimeOption()
        option.setCpuThreadNum(2)
        option.setLitePowerMode(LitePowerMode.LITE_POWER_HIGH)
        // FP16 inference was always enabled: the original guarded this call with
        // the constant expression Boolean.parseBoolean("true").
        option.enableLiteFp16()
        predictor.init(modelFile.absolutePath, paramsFile.absolutePath, option)
    }

    /**
     * (Re)creates the FBO and its two textures for the given surface size.
     * Safe to call repeatedly (e.g. from onSurfaceChanged): any previously
     * created GL objects are deleted first.
     */
    fun processFboTexture(textureWidth: Int, textureHeight: Int) {
        this.textureWidth = textureWidth
        this.textureHeight = textureHeight
        // Destroy the previous FBO and draw textures, if any.
        GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0)
        GLES20.glDeleteFramebuffers(1, fbo, 0)
        GLES20.glDeleteTextures(1, drawTexureId, 0)
        GLES20.glDeleteTextures(1, fboTexureId, 0)
        // Normal texture for storing modified camera preview data (RGBA).
        GLES20.glGenTextures(1, drawTexureId, 0)
        allocateRgbaTexture(drawTexureId[0], textureWidth, textureHeight)
        // FBO texture for storing raw camera preview data (RGBA).
        GLES20.glGenTextures(1, fboTexureId, 0)
        allocateRgbaTexture(fboTexureId[0], textureWidth, textureHeight)
        // Generate the FBO and attach the FBO texture as its color buffer.
        GLES20.glGenFramebuffers(1, fbo, 0)
        GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0])
        GLES20.glFramebufferTexture2D(
            GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0, GLES20.GL_TEXTURE_2D,
            fboTexureId[0], 0
        )
    }

    /** Allocates an empty RGBA texture with clamp-to-edge wrap and nearest filtering. */
    private fun allocateRgbaTexture(textureId: Int, width: Int, height: Int) {
        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureId)
        GLES20.glTexImage2D(
            GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, width, height, 0,
            GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, null
        )
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE)
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE)
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST)
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST)
    }

    /** Compiles the cam->FBO shader program and allocates the quad geometry buffers. */
    fun initFboProgram() {
        val bytes = vertexCoords.size * Float.SIZE_BYTES
        vertexCoordsBuffer =
            ByteBuffer.allocateDirect(bytes).order(ByteOrder.nativeOrder()).asFloatBuffer()
        textureCoordsBuffer =
            ByteBuffer.allocateDirect(bytes).order(ByteOrder.nativeOrder()).asFloatBuffer()
        vertexCoordsBuffer?.put(vertexCoords)?.position(0)
        textureCoordsBuffer?.put(textureCoords)?.position(0)

        // Create vertex and fragment shaders: camTextureId -> fboTexureId.
        progCam2FBO = Utils.createShaderProgram(vss, fssCam2FBO)
        vcCam2FBO = GLES20.glGetAttribLocation(progCam2FBO, "vPosition")
        tcCam2FBO = GLES20.glGetAttribLocation(progCam2FBO, "vTexCoord")
        GLES20.glEnableVertexAttribArray(vcCam2FBO)
        GLES20.glEnableVertexAttribArray(tcCam2FBO)
    }

    /**
     * Renders the camera OES texture into the FBO, reads the RGBA pixels back,
     * and runs face detection on them.
     *
     * @param matrix 4x4 transform from SurfaceTexture.getTransformMatrix
     * @return target texture id (drawTexureId), the raw RGBA pixels, and the
     *         detection result (result.mInitialized == false when detection did not run)
     */
    fun getTextureData(matrix: FloatArray): GLData {
        // camTextureId -> fboTexureId
        GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0])
        GLES20.glViewport(0, 0, textureWidth, textureHeight)
        GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT)
        GLES20.glUseProgram(progCam2FBO)
        GLES20.glVertexAttribPointer(vcCam2FBO, 2, GLES20.GL_FLOAT, false, 4 * 2, vertexCoordsBuffer)
        // Re-upload texture coordinates transformed by the SurfaceTexture matrix.
        textureCoordsBuffer!!.clear()
        textureCoordsBuffer?.put(transformTextureCoordinates(textureCoords, matrix))
        textureCoordsBuffer!!.position(0)
        GLES20.glVertexAttribPointer(tcCam2FBO, 2, GLES20.GL_FLOAT, false, 4 * 2, textureCoordsBuffer)
        GLES20.glActiveTexture(GLES20.GL_TEXTURE0)
        GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, camTextureId[0])
        GLES20.glUniform1i(GLES20.glGetUniformLocation(progCam2FBO, "sTexture"), 0)
        GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4)
        GLES20.glFlush()

        // Read the resolved frame back to the CPU (RGBA, 4 bytes per pixel).
        val pixelBuffer = ByteBuffer.allocate(textureWidth * textureHeight * 4)
        GLES20.glReadPixels(
            0, 0, textureWidth, textureHeight,
            GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, pixelBuffer
        )

        // Run face detection on the frame; mInitialized = false marks "no result".
        var result: FaceDetectionResult = FaceDetectionResult().apply {
            mInitialized = false
        }
        if (predictor.initialized()) {
            val frameBitmap =
                Bitmap.createBitmap(textureWidth, textureHeight, Bitmap.Config.ARGB_8888)
            frameBitmap.copyPixelsFromBuffer(pixelBuffer)
            result = predictor.predict(frameBitmap, 0.5f, 0.4f)
            LogUtils.eTag("人脸", result.mLandmarks)
            frameBitmap.recycle()
        }

        // The native side uploads the final pixels into drawTexureId and draws it.
        return GLData(drawTexureId[0], pixelBuffer.array(), result)
    }

    /**
     * Applies the 4x4 SurfaceTexture transform to each (s, t) coordinate pair.
     * Always returns a new array (the original signature was needlessly nullable).
     */
    private fun transformTextureCoordinates(coords: FloatArray, matrix: FloatArray): FloatArray {
        val result = FloatArray(coords.size)
        val vt = FloatArray(4)
        var i = 0
        while (i < coords.size) {
            val v = floatArrayOf(coords[i], coords[i + 1], 0f, 1f)
            Matrix.multiplyMV(vt, 0, matrix, 0, v, 0)
            result[i] = vt[0]
            result[i + 1] = vt[1]
            i += 2
        }
        return result
    }
}

GLHelper,主要是用于获取texture数据和进行人脸识别的,在getTextureData方法中将图像数据和人脸识别的数据进行返回,关于人脸识别部分稍后在下面说明

3.GLRender中加入GLHelper

在GLRender中的onSurfaceCreated、onSurfaceChanged、onDrawFrame加入GLHelper的处理,首先是onSurfaceCreated中初始化渲染FBO的Program,然后在onSurfaceChanged中初始化相关的texture,最后在onDrawFrame中根据相机textureID获取到需要的数据

override fun onSurfaceCreated(p0: GL10?, p1: EGLConfig?) {
        // Initialize the native OpenGL renderer (shaders, buffers, etc.).
        ndkInitGL(cameraView.context.assets)
        // Use the SurfaceTexture's camera frames as an OpenGL (OES) texture.
        surfaceTexture.attachToGLContext(glHelper.camTextureId[0])
        // Callback invoked whenever a new camera frame becomes available.
        surfaceTexture.setOnFrameAvailableListener(this)

        // Compile the cam->FBO program used by GLHelper.getTextureData().
        glHelper.initFboProgram()
    }

    override fun onSurfaceChanged(p0: GL10?, p1: Int, p2: Int) {
        // p1/p2 are the new surface width/height in pixels.
        ndkResizeGL(p1,p2)
        // Recreate the FBO and its textures at the new size.
        glHelper.processFboTexture(p1,p2)
    }

    override fun onDrawFrame(p0: GL10?) {
        GLES30.glClearColor(0.0f, 0.0f, 0.0f, 1.0f)
        GLES30.glClear(GLES30.GL_COLOR_BUFFER_BIT or GLES30.GL_DEPTH_BUFFER_BIT)
        // Latch the most recent camera frame into the OES texture.
        surfaceTexture.updateTexImage()

        // Texture-coordinate transform supplied by the camera stream.
        val matrix = FloatArray(16)
        surfaceTexture.getTransformMatrix(matrix)

        // Resolve the OES texture, read the pixels back and run face detection.
        val glData = glHelper.getTextureData(matrix)

        // Hand everything to native code, which applies the big-eye shader.
        ndkPaintGLWithBuffer(glData.textureId,glData.pixelBuffer!!,glHelper.textureWidth,glHelper.textureHeight,glData.faceData,eyeScale)
    }
4.增加人脸识别库

这里我们使用的是FastDeploy,可以点击这个链接下载Android相关的案例代码,获取到最新的SDK

5.GLRender中加入JNI函数

我们增加如下JNI函数,其中textureId为我们要渲染的纹理,pixelBuffer为相机的像素数据,textureWidth为纹理的宽,textureHeight为纹理的高,faceData为人脸数据,eyeScale为眼睛放大的值,另外这里也使用了OpenCV,具体的引入方式可以参考这篇文章:Android NDK开发:Opencv实现人脸识别

/**
 * Native entry point that uploads [pixelBuffer] into the texture identified by
 * [textureId], annotates detected faces and renders the big-eye effect.
 *
 * @param textureId     GL texture the native side uploads the frame into and samples from
 * @param pixelBuffer   RGBA camera pixels read back from the FBO
 * @param textureWidth  frame width in pixels
 * @param textureHeight frame height in pixels
 * @param faceData      face-detection result (boxes + 5-point landmarks per face)
 * @param eyeScale      magnification factor for the eye warp (0 disables the effect)
 */
private external fun ndkPaintGLWithBuffer(
        textureId: Int,
        pixelBuffer: ByteArray,
        textureWidth: Int,
        textureHeight: Int,
        faceData: FaceDetectionResult,
        eyeScale:Float
    )
extern "C"
JNIEXPORT void JNICALL
Java_com_itfitness_openglcamera_render_GLRender_ndkPaintGLWithBuffer(JNIEnv *env, jobject thiz,
                                                           jint texture_id, jbyteArray pixel_buffer,
                                                           jint texture_width,
                                                           jint texture_height, jobject faceData, jfloat eyeScale) {
    // Wrap the RGBA camera pixels in a Mat without copying. The second argument
    // of GetByteArrayElements is a jboolean* out-parameter (the original passed
    // JNI_FALSE, which only worked because it happens to equal NULL).
    jbyte *dataBuffer = env->GetByteArrayElements(pixel_buffer, nullptr);

    cv::Mat imageSrc(texture_height, texture_width, CV_8UC4, dataBuffer);

    // cvtColor to a 3-channel format reallocates the destination, so imageSrc
    // no longer aliases dataBuffer after this call.
    cv::cvtColor(imageSrc, imageSrc, cv::COLOR_RGBA2BGR);

    // Pixels were only read; release without copying back to avoid leaking the
    // pinned/copied array (the original never released it).
    env->ReleaseByteArrayElements(pixel_buffer, dataBuffer, JNI_ABORT);

    // Reflect on the FaceDetectionResult object passed from Kotlin.
    jclass clazz = env->GetObjectClass(faceData);
    jfieldID mInitializedFieldId = env->GetFieldID(clazz, "mInitialized", "Z");
    jboolean mInitialized = env->GetBooleanField(faceData, mInitializedFieldId);

    glm::vec2 u_LeftEyeCenterPos(0, 0);   // left-eye center (image pixels)
    glm::vec2 u_RightEyeCenterPos(0, 0);  // right-eye center (image pixels)
    float u_ScaleRatio = eyeScale;        // magnification factor
    float u_Radius = 0.0;                 // warp influence radius
    glm::vec2 u_ImgSize(texture_width, texture_height); // image resolution

    // Detection succeeded: draw face boxes and pick the first face's eyes.
    if (mInitialized) {
        jfieldID fieldmBoxes = env->GetFieldID(clazz, "mBoxes", "[[F");
        jfieldID fieldmLandmarks = env->GetFieldID(clazz, "mLandmarks", "[[F");
        // float[][] fields: cast the jobject directly (no pointer reinterpretation needed).
        jobjectArray faceRectArr = static_cast<jobjectArray>(env->GetObjectField(faceData, fieldmBoxes));
        jobjectArray landmarksArr = static_cast<jobjectArray>(env->GetObjectField(faceData, fieldmLandmarks));

        jsize faceNum = env->GetArrayLength(faceRectArr);

        for (jsize i = 0; i < faceNum; i++) {
            jfloatArray faceRect = static_cast<jfloatArray>(env->GetObjectArrayElement(faceRectArr, i));
            jfloat *faces = env->GetFloatArrayElements(faceRect, nullptr);

            // Box is (x1, y1, x2, y2) in image pixels.
            float x1 = faces[0];
            float y1 = faces[1];
            float x2 = faces[2];
            float y2 = faces[3];

            cv::rectangle(imageSrc, cv::Point(x1, y1), cv::Point(x2, y2), Scalar(0, 0, 255), 5);

            env->ReleaseFloatArrayElements(faceRect, faces, JNI_ABORT);
            env->DeleteLocalRef(faceRect);

            // Five landmarks per face; j is the absolute landmark index.
            for (jsize j = i * 5; j < i * 5 + 5; j++) {
                jfloatArray landmarkPoints = static_cast<jfloatArray>(env->GetObjectArrayElement(landmarksArr, j));
                jfloat *landmarkPoint = env->GetFloatArrayElements(landmarkPoints, nullptr);

                float p1x = landmarkPoint[0];
                float p1y = landmarkPoint[1];
                // Only the first face's eye positions are used (landmarks 0 and 1
                // of face 0 — absolute indices 0 and 1, matching the original).
                if (j == 0) {
                    u_LeftEyeCenterPos.x = p1x;
                    u_LeftEyeCenterPos.y = p1y;
                } else if (j == 1) {
                    u_RightEyeCenterPos.x = p1x;
                    u_RightEyeCenterPos.y = p1y;
                }

                env->ReleaseFloatArrayElements(landmarkPoints, landmarkPoint, JNI_ABORT);
                env->DeleteLocalRef(landmarkPoints);
            }
        }
        if (faceNum > 0) {
            // Half the inter-eye distance serves as the warp radius.
            u_Radius = (u_RightEyeCenterPos.x - u_LeftEyeCenterPos.x) / 2.0;
        }
        env->DeleteLocalRef(faceRectArr);
        env->DeleteLocalRef(landmarksArr);
    }

    cv::cvtColor(imageSrc, imageSrc, cv::COLOR_BGR2RGBA);
    // NOTE: the original called glActiveTexture(texture_id), which is invalid —
    // glActiveTexture expects a GL_TEXTUREi unit enum, not a texture name.
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, texture_id);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageSrc.cols, imageSrc.rows, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageSrc.data);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    ccRender->paintGL(texture_id, u_Radius, u_ScaleRatio, u_LeftEyeCenterPos, u_RightEyeCenterPos, u_ImgSize);
}

与之前的文章OpenGL系列之十四:实现相机抖音特效中代码不同的是,在CCRender中增加了一些用于实现大眼效果的变量,如下所示

#ifndef OPENGLDEMO_CCRENDER_H
#define OPENGLDEMO_CCRENDER_H

#include "CCOpenGLShader.h"
#include "CCOpenGLVAO.h"
#include "CCOpenGLBuffer.h"

// Renders a textured quad and feeds the big-eye warp parameters to the shader.
class CCRender{
public:
    CCRender();
    ~CCRender();

    glm::vec2 u_LeftEyeCenterPos;// left-eye center (image pixels)
    glm::vec2 u_RightEyeCenterPos;// right-eye center (image pixels)
    float u_ScaleRatio = 0.5;// eye magnification factor
    float u_Radius = 0.0;// warp influence radius (0 disables the effect)
    glm::vec2 u_ImgSize;// image resolution (width, height)

    // rotation angle
    float m_angle =0.0f;
    int textureId;
    // time-varying animation value forwarded to the shader each frame
    float u_value = 0.0f;
//    CCTexture ccTexture;
    CCOpenGLShader ccOpenGlShader;
    CCOpenGLVAO          ccVAO;
    CCOpenGLBuffer*       ccVBO;
    CCOpenGLBuffer*       ccEBO;
    void initGL();
    void paintGL(int textureId,float radios,float scale,glm::vec2 u_LeftEyeCenterPos,glm::vec2 u_RightEyeCenterPos,glm::vec2 u_ImgSize);
    void resizeGL(int width,int height);
    void drawShaderAnim();
    void setupRenderingObject();
    void release();
};


#endif //OPENGLDEMO_CCRENDER_H

绘制的时候也要进行相应的赋值

/**
 * Per-frame entry point: latches this frame's warp parameters into the member
 * uniforms, clears the buffers and renders the quad with the big-eye shader.
 */
void CCRender::paintGL(int textId, float radios, float scale,
                       glm::vec2 LeftEyeCenterPos, glm::vec2 RightEyeCenterPos,
                       glm::vec2 ImgSize)
{
    // Store the inputs in the members drawShaderAnim() reads from.
    textureId = textId;
    u_Radius = radios;
    u_ScaleRatio = scale;
    u_LeftEyeCenterPos = LeftEyeCenterPos;
    u_RightEyeCenterPos = RightEyeCenterPos;
    u_ImgSize = ImgSize;

    // Clear color and depth buffers, then draw.
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    drawShaderAnim();
}

/**
 * Draws the textured quad with the big-eye shader, feeding it the current warp
 * uniforms and a monotonically increasing animation value.
 */
void CCRender::drawShaderAnim() {

    u_value += 0.015f;

    // Model transform: translate back along -Z, no rotation, scale to 60%.
    glm::mat4x4  objectTransMat = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -1));
    glm::mat4x4  objectRotMat = glm::rotate(glm::mat4(1.0f), 0.0f, glm::vec3(0.0f, 0.0f, 1.0));
    glm::mat4x4  objectScaleMat = glm::scale(glm::mat4(1.0f), glm::vec3(0.6f, 0.6f, 0.0));

    // FIX: the near plane must be strictly positive; the original passed 0.0f,
    // which produces a degenerate depth mapping in glm::perspective.
    glm::mat4 projMat = glm::perspective(glm::radians(60.0f), 1.0f, 0.1f, 1000.0f);

    glm::mat4x4 objectMat = projMat * objectRotMat * objectTransMat * objectScaleMat;

    ccOpenGlShader.Bind();

    ccOpenGlShader.SetUniformValue("u_mat", objectMat);
    // Time-varying value (kept available for shader animations).
    ccOpenGlShader.SetUniformValue("uValue", u_value);

    // Big-eye warp parameters.
    ccOpenGlShader.SetUniformValue("u_ImgSize", u_ImgSize);
    ccOpenGlShader.SetUniformValue("u_RightEyeCenterPos", u_RightEyeCenterPos);
    ccOpenGlShader.SetUniformValue("u_LeftEyeCenterPos", u_LeftEyeCenterPos);
    ccOpenGlShader.SetUniformValue("u_Radius", u_Radius);
    ccOpenGlShader.SetUniformValue("u_ScaleRatio", u_ScaleRatio);

    ccVAO.Bind();

    glActiveTexture(GL_TEXTURE0);
    // Sampler "utexture0" reads from texture unit 0.
    glBindTexture(GL_TEXTURE_2D, textureId);
    ccOpenGlShader.SetUniformValue("utexture0", 0);

    glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_SHORT, (void *) 0);

    glBindTexture(GL_TEXTURE_2D, 0);
    ccOpenGlShader.Release();
    ccVAO.Release();
}
6.顶点着色器和片元着色器

这里的大眼效果,参考了这篇文章:https://blog.csdn.net/Kennethdroid/article/details/104907763
顶点着色器如下

#version 300 es

// Pass-through vertex shader: transforms the quad position by u_mat and
// forwards the texture coordinate to the fragment stage unchanged.

layout(location = 0) in vec3 a_position;
layout(location = 1) in vec2 a_uv;

// Combined projection * model transform.
uniform mat4   u_mat;
// Texture coordinate interpolated to the fragment shader.
out vec2 o_uv;

void main(void)
{
    o_uv = a_uv;
    gl_Position = u_mat * vec4(a_position,1.0);

}

这里的大眼效果主要在片元着色器中,如下

#version 300 es
// NOTE: the original required GL_OES_EGL_image_external_essl3, but this shader
// only samples a regular sampler2D, so the extension is unnecessary and would
// fail compilation on devices that do not expose it.

// High-precision floats are required here, otherwise the warped sampling is blurry.
precision highp float;

uniform sampler2D utexture0;
uniform float uValue;     // time-varying animation value (currently unused here)
uniform vec2 iResolution; // viewport resolution (currently unused here)
in vec2 o_uv;
out vec4 fragColor;

uniform highp vec2 u_LeftEyeCenterPos;  // left-eye center, in image pixels
uniform highp vec2 u_RightEyeCenterPos; // right-eye center, in image pixels
uniform highp float u_ScaleRatio;       // magnification factor
uniform highp float u_Radius;           // influence radius, in image pixels
uniform vec2 u_ImgSize;                 // image resolution (width, height)

// Warps the texture coordinate toward centerPos to magnify the region within
// `radius` pixels of it; coordinates outside the radius pass through unchanged.
vec2 warpEyes(vec2 centerPos, vec2 curPos, float radius, float scaleRatio)
{
    vec2 result = curPos;
    vec2 imgCurPos = curPos * u_ImgSize; // UV -> pixel coordinates
    float d = distance(imgCurPos, centerPos);

    if (d < radius)
    {
        // gamma < 1 pulls samples toward the center, making the eye look larger.
        float gamma = 1.0 - scaleRatio * pow(smoothstep(0.0, 1.0, d / radius) - 1.0, 2.0);
        result = centerPos + gamma * (imgCurPos - centerPos);
        result = result / u_ImgSize; // back to UV space

    }
    return result;

}

void main()
{
    if(u_Radius > 0.0){
        // Apply the warp around each eye in turn.
        vec2 newTexCoord = warpEyes(u_LeftEyeCenterPos, o_uv, u_Radius, u_ScaleRatio);
        newTexCoord = warpEyes(u_RightEyeCenterPos, newTexCoord, u_Radius, u_ScaleRatio);
        fragColor = texture(utexture0, newTexCoord);
    }else{
        // No face detected: sample unmodified.
        fragColor = texture(utexture0,o_uv);
    }
}
7.大眼控制

这里的缩放大小的控制,我是使用了seekbar进行了控制

<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    tools:context=".MainActivity">

    <!-- Full-screen OpenGL camera preview. -->
    <com.itfitness.openglcamera.widget.CameraView
        android:id="@+id/gl_surface"
        android:layout_width="match_parent"
        android:layout_height="match_parent"/>


    <!-- Controls the eye-magnification strength: 0..100 maps to 0x..2x. -->
    <SeekBar
        android:id="@+id/seekbar"
        android:layout_marginBottom="20dp"
        android:layout_alignParentBottom="true"
        android:max="100"
        android:layout_width="match_parent"
        android:layout_height="wrap_content"/>

</RelativeLayout>

最大缩放两倍

/**
 * Hosts the camera preview and a SeekBar that controls the eye-magnification
 * strength (progress 0..100 mapped linearly onto a 0x..2x scale factor).
 */
class MainActivity : AppCompatActivity() {

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        val cameraPreview = findViewById<CameraView>(R.id.gl_surface)
        findViewById<SeekBar>(R.id.seekbar).setOnSeekBarChangeListener(object : SeekBar.OnSeekBarChangeListener {
            override fun onProgressChanged(seekBar: SeekBar?, progress: Int, fromUser: Boolean) {
                // Max magnification is 2x at full progress.
                cameraPreview.setEyeScale(progress / 100.0f * 2f)
            }

            override fun onStartTrackingTouch(seekBar: SeekBar?) = Unit

            override fun onStopTrackingTouch(seekBar: SeekBar?) = Unit
        })
    }
}

案例源码

https://gitee.com/itfitness/opengl-camera-process-oes

上一篇下一篇

猜你喜欢

热点阅读