Canvas Face Recognition
2020-04-03
即将牛逼的蛋蛋
A while ago I learned the basics of canvas and built a clock and some filters. While writing the filters I started wondering whether video could be played through a canvas so that viewers cannot get at the video source, so I built a canvas video player. Later I went a step further: capturing the camera, drawing its frames onto the canvas, and applying a filter. Since a filter already reads every single pixel, why not add face recognition on top?
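To recap the filter idea, here is a minimal sketch (not code from this article) of reading every pixel of a canvas frame and applying a grayscale filter; the ctx, width and height parameter names are placeholders of my own:

// A minimal grayscale-filter sketch, assuming ctx is a 2D canvas context
// that already contains the current video frame (placeholder names).
function applyGrayscale(ctx, width, height) {
    const frame = ctx.getImageData(0, 0, width, height);
    const data = frame.data; // RGBA, 4 bytes per pixel
    for (let i = 0; i < data.length; i += 4) {
        // Simple channel average; real filters may weight R/G/B differently
        const gray = (data[i] + data[i + 1] + data[i + 2]) / 3;
        data[i] = data[i + 1] = data[i + 2] = gray;
    }
    ctx.putImageData(frame, 0, 0);
}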
Now for the point of this article: face recognition. I used face-api.js and tracking.js, with the focus on face-api.js.
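tracking.js is only included in the page below and never actually called; for completeness, a minimal face-detection setup with tracking.js might look roughly like the following (the selector and tuning values are assumptions, not code from this article):

// Rough tracking.js sketch (assumed setup; see the tracking.js docs).
const tracker = new tracking.ObjectTracker('face');
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
// Track the <video id="video"> element; { camera: true } lets tracking.js open the camera.
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function (event) {
    event.data.forEach(function (rect) {
        // rect.x / rect.y / rect.width / rect.height describe one detected face
        // and could be used to position an absolutely positioned box such as #rect.
        console.log(rect.x, rect.y, rect.width, rect.height);
    });
});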
On to the code.
All of the important parts of the code are commented. How to render the camera onto a canvas is not the focus here; a follow-up article will cover real-time detection on the camera video.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Accurate recognition (slower)</title>
<style>
video {
width: 500px;
margin: auto;
background-color: aquamarine;
display: block;
}
.imgcontainer {
display: inline-block;
position: relative;
}
#rect {}
.rectkuangkuang {
position: absolute;
border: 1px solid lightblue;
width: 0;
height: 0;
}
</style>
<script src="https://cdn.bootcss.com/jquery/3.4.1/jquery.min.js"></script>
<script src="https://cdn.bootcss.com/tracking.js/1.1.3/tracking-min.js"></script>
<script src="./face-pi.js"></script>
</head>
<body>
<video id="video"></video>
<canvas width="500" height="500" id="canvas"></canvas>
<canvas width="500" height="500" id="canvas1"></canvas>
<div class="imgcontainer" style="display: none;">
<img style="width:500px;height:500px" id="image" src="" />
<div id="rect" class="rectkuangkuang"></div>
</div>
<button class="photo">拍照</button>
<button class="identify">识别</button>
<img src="./timg111.jpg" alt="" id="testimg" style="width: 500px;display: none;">
<script>
$(function () {
var video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const canvas1 = document.getElementById('canvas1');
const img = document.getElementById('image');
const rectKuang = document.getElementById('rect');
const testimg = document.getElementById('testimg')
const context = canvas.getContext('2d');
const context1 = canvas1.getContext('2d');
if (navigator.mediaDevices.getUserMedia) {
// Modern standard API
navigator.mediaDevices.getUserMedia({ video: { width: 1000, height: 1000 } }).then(success).catch(error);
} else if (navigator.webkitGetUserMedia) {
// WebKit-based browsers
navigator.webkitGetUserMedia({ video: { width: 1000, height: 1000 } }, success, error)
} else if (navigator.mozGetUserMedia) {
// Firefox
navigator.mozGetUserMedia({ video: { width: 1000, height: 1000 } }, success, error);
} else if (navigator.getUserMedia) {
// Legacy API
navigator.getUserMedia({ video: { width: 1000, height: 1000 } }, success, error);
}
function success(stream) {
// Compatibility for WebKit-based browsers
// let CompatibleURL = window.URL || window.webkitURL;
// Set the video stream as the source of the video element
// console.log(stream);
// video.src = CompatibleURL.createObjectURL(stream);
video.srcObject = stream;
video.play();
let timer = setInterval(function () {
context.drawImage(video, 0, (400 - 288) / 2, video.offsetWidth, video.offsetHeight); // draw the current video frame
}, 0);
}
function error(error) {
console.log(`Failed to access user media devices: ${error.name}, ${error.message}`);
}
// Take a photo
$('.photo').click(function () {
// context1.drawImage(testimg, 0, 0, testimg.offsetWidth, testimg.offsetHeight)
context1.drawImage(video, 0, (400 - 288) / 2, video.offsetWidth, video.offsetHeight)
var dataUrl = canvas1.toDataURL('image/png');
img.src = dataUrl;
})
async function run() {
// face-api model loading is asynchronous, so await each call until everything is loaded
// Use MTCNN to detect faces, which is relatively fast
await faceapi.loadMtcnnModel('./weights')
console.log(11111)
// Load the face landmark model
await faceapi.loadFaceLandmarkModel('./weights')
console.log(22222)
// Load the face recognition model
await faceapi.loadFaceRecognitionModel('./weights')
console.log(33333)
// Load the tiny face detector, i.e. detection of small / distant faces
// await faceapi.loadTinyFaceDetectorModel('./weights')
// console.log(4444)
// Load the SSD MobileNet v1 face detection model
await faceapi.loadSsdMobilenetv1Model('./weights')
console.log(5555)
// Load the facial expression model
// await faceapi.loadFaceExpressionModel('./weights')
// console.log(6666)
}
run();
// Display size used to scale the detection results
// const displaySize = { width: testimg.width, height: testimg.height }
const displaySize = { width: 500, height: 500 }
async function recognize() {
// Get the face detections and (optionally) facial expressions
const detectionsWithExpressions = await faceapi
// Detect all faces in the photo
.detectAllFaces(img)
// Get the face landmarks
.withFaceLandmarks()
// Detect facial expressions automatically
// .withFaceExpressions()
// Resize the results to the display size
const resizedResults = faceapi.resizeResults(detectionsWithExpressions, displaySize)
// Draw the detection boxes
faceapi.draw.drawDetections(canvas1, resizedResults)
// Draw the 68 face landmark points
faceapi.draw.drawFaceLandmarks(canvas1, resizedResults)
// const minProbability = 0.05
// Draw the expressions
// faceapi.draw.drawFaceExpressions(canvas1, resizedResults, minProbability)
}
$('.identify').click(function () {
recognize();
})
})
</script>
</body>
</html>
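The code above loads the face recognition model (loadFaceRecognitionModel) but only draws detection boxes and landmarks. As a rough sketch of what that model is for, face-api.js can also compute a descriptor for each face and compare two faces by Euclidean distance; the function below, its image arguments and the 0.6 threshold are my own illustration, not part of this article's code:

// Sketch: comparing two faces via face-api.js descriptors (assumed usage).
async function compareFaces(imgA, imgB) {
    // detectSingleFace defaults to the SSD MobileNet v1 detector loaded in run()
    const resultA = await faceapi.detectSingleFace(imgA).withFaceLandmarks().withFaceDescriptor();
    const resultB = await faceapi.detectSingleFace(imgB).withFaceLandmarks().withFaceDescriptor();
    if (!resultA || !resultB) return false;
    // Smaller distance means more similar; around 0.6 is a commonly used threshold
    const distance = faceapi.euclideanDistance(resultA.descriptor, resultB.descriptor);
    return distance < 0.6;
}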