Face recognition H5 page (opens the specified camera via the navigator API and draws a tracking frame around the face using tracking.js)

Reference code 1

Import two js files

Front-end face recognition framework: https://trackingjs.com
Download the zip package from the site; after decompression you will find face-min.js and tracking-min.js inside.

<script src="./js/tracking-min.js"></script>
<script src="./js/face-min.js"></script>

 <style>
#video_bind,
#canvas_bind {
width: 375rem;
height: 486rem;
object-fit: cover;
}


#canvas_bind {
position: absolute;
top: 0;
z-index: 2;
/* z-index: -1; */
}

.tip-box {
margin-top: 32rem;
font-size: 24rem;
}
</style>
<div>
<video width="375" height="486" id="video_bind" autoplay playsinline webkit-playsinline="true">

</video>
<div class="tip-box text-center"></div>
<canvas id="canvas_bind"></canvas>
</div>

<script src="./js/tracking-min.js"></script>
<script src="./js/face-min.js"></script>
<script type="text/javascript">
// --- State flags and DOM handles ---------------------------------------
var tipFlag = false;  // true once a face is locked in and the photo countdown started
var faceflag = false; // true once we have committed to taking a photo
var informationTitle = document.querySelector(".tip-box"); // on-screen face hints

// Video and canvas instances used for preview and capture.
var facevideo = document.getElementById('video_bind');
var facecanvas = document.getElementById('canvas_bind');
// Match the canvas bitmap to its CSS size so drawn rectangles are not scaled.
facecanvas.width = facecanvas.clientWidth;
facecanvas.height = facecanvas.clientHeight;

// Intrinsic dimensions of the camera stream; only known once the video can play.
// (Declared separately: chaining `a = b = 0` would leak an implicit global.)
var videoWidth = 0;
var videoHeight = 0;
facevideo.addEventListener('canplay', function () {
  videoWidth = this.videoWidth;
  videoHeight = this.videoHeight;
});

var facecontext = facecanvas.getContext('2d');
var tracker = new tracking.ObjectTracker('face');
// Each time the page/dialog is opened, clear any leftover drawing on the canvas.
facecontext.clearRect(0, 0, facecanvas.width, facecanvas.height);

// Polyfill navigator.mediaDevices.getUserMedia for older WebKit/Gecko browsers.
if (navigator.mediaDevices === undefined) {
  navigator.mediaDevices = {};
}
if (navigator.mediaDevices.getUserMedia === undefined) {
  navigator.mediaDevices.getUserMedia = function (constraints) {
    var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
    if (!getUserMedia) {
      return Promise.reject(new Error("getUserMedia is not implemented in this browser"));
    }
    // Adapt the legacy callback-style API to a Promise.
    return new Promise(function (resolve, reject) {
      getUserMedia.call(navigator, constraints, resolve, reject);
    });
  };
}

navigator.mediaDevices.getUserMedia({
  audio: false,
  // Camera option 1: let the browser choose (rear camera on mobile).
  video: {
    facingMode: "environment"
  }
  // Camera option 2: pin a specific device (see reference code 2 for how to
  // obtain deviceId values):
  // video: { deviceId: "77604ab12bb8b7cf337c4069da4e701d3786adb8a4115436d0aef1792175c289" }
}).then(function (stream) {
  // Configure the tracking.js face tracker.
  tracker.setInitialScale(4);
  tracker.setStepSize(2);
  tracker.setEdgesDensity(0.1);

  // Camera method 1: tracking.js opens the camera itself.
  // Caveat: on non-mobile browsers this always opens the default camera.
  var tra = tracking.track('#video_bind', tracker, {
    camera: true
  });

  // Camera method 2: feed the stream we already opened into the video tag.
  // facevideo.srcObject = stream;
  // facevideo.play();
  // var tra = tracking.track('#video_bind', tracker);

  var timer = null;
  // Fired for every analysed frame; event.data holds one rect per detected face.
  tracker.on('track', function (event) {
    if (tipFlag) {
      return; // already committed to a photo; ignore further frames
    }
    facecontext.clearRect(0, 0, facecanvas.width, facecanvas.height);
    if (event.data.length === 0) {
      // No face detected: debounce the hint so it does not flicker.
      if (!faceflag && !timer) {
        timer = setTimeout(() => {
          informationTitle.innerHTML = 'No face detected';
        }, 500);
      }
    } else if (event.data.length === 1) { // exactly one face detected
      window.clearTimeout(timer);
      timer = null;
      informationTitle.innerHTML = 'Please put your face in the center of the screen';
      // Draw a rectangle around the detected face.
      event.data.forEach(function (rect) {
        facecontext.strokeStyle = '#a64ceb';
        facecontext.strokeRect(rect.x, rect.y, rect.width, rect.height);
      });
      var rect = event.data[0];
      // Only shoot when the face is roughly centered horizontally.
      if (!faceflag && rect.x > facevideo.clientWidth * 0.3 && rect.x < facevideo.clientWidth * 0.7) {
        informationTitle.innerHTML = 'Recognition, please do not move~';
        faceflag = true;
        tipFlag = true;
        // Small delay so the user sees the "do not move" hint before capture.
        setTimeout(() => {
          tackPhoto();
        }, 500);
      }
    } else {
      // More than one face in frame.
      if (!faceflag) {
        informationTitle.innerHTML = 'Only one person can perform face recognition! ';
      }
    }
  });

  function tackPhoto() {
    // getObjectFitSize maps the video's intrinsic frame onto the canvas the
    // same way CSS `object-fit: cover` displays it, so the saved photo matches
    // what the user saw on screen. (Fixed: the destructuring previously bound
    // `width` twice — a SyntaxError — and never bound `swidth`.)
    const {
      sx,
      sy,
      swidth,
      sheight,
      x,
      y,
      width,
      height
    } = getObjectFitSize('cover', facevideo.clientWidth, facevideo.clientHeight, videoWidth, videoHeight);
    facecontext.drawImage(facevideo, sx, sy, swidth, sheight, x, y, width, height);
    // Simple alternative (no centering/cropping):
    // facecontext.drawImage(facevideo, 0, 0, facevideo.clientWidth, facevideo.clientHeight);

    // toDataURL already returns a complete "data:image/png;base64,..." URL;
    // prepending another "data:image/png;" would produce an invalid URL.
    var imgSrc = facecanvas.toDataURL('image/png');
    sessionStorage.setItem("faceImage", imgSrc);

    // Release the camera and stop tracking BEFORE navigating away, so the
    // cleanup is guaranteed to run.
    facevideo.srcObject.getTracks().forEach(track => track.stop());
    tra.stop();
    history.back();
  }
}).catch(function (err) {
  informationTitle.innerHTML = 'Failed to open camera';
});

/**
* Calculate image cropping or placement
* @param {*} type contain, cover is only compatible with these two modes for the time being
* @param {*} containerWidth container width
* @param {*} containerHeight container height
* @param {*} imgWidth image width
* @param {*} imgHeight image height
* @return {*} All input parameters of canvas drawImage
*/
/**
 * Compute the drawImage() parameters that reproduce CSS object-fit
 * ("cover" or "contain") when copying an image/video frame onto a canvas.
 *
 * Fixed vs. the original: the "cover" branches assigned the source-crop size
 * to `width`/`height` (the destination rect) instead of `swidth`/`sheight`,
 * and the return object bound `width` twice while never returning `swidth`.
 *
 * @param {string} type "cover" (crop the source) or "contain" (letterbox)
 * @param {number} containerWidth  container (canvas) width
 * @param {number} containerHeight container (canvas) height
 * @param {number} imgWidth  intrinsic image width
 * @param {number} imgHeight intrinsic image height
 * @return {{sx:number, sy:number, swidth:number, sheight:number,
 *           x:number, y:number, width:number, height:number}}
 *   The full argument list for
 *   context.drawImage(img, sx, sy, swidth, sheight, x, y, width, height).
 */
function getObjectFitSize(
  type = "cover",
  containerWidth,
  containerHeight,
  imgWidth,
  imgHeight
) {
  let radio = 1;                // container-to-image scale factor
  let sx = 0;                   // source crop: x of the region to copy
  let sy = 0;                   // source crop: y of the region to copy
  let swidth = imgWidth;        // source crop: width of the region to copy
  let sheight = imgHeight;      // source crop: height of the region to copy
  let x = 0;                    // destination x on the canvas
  let y = 0;                    // destination y on the canvas
  let width = containerWidth;   // destination width on the canvas
  let height = containerHeight; // destination height on the canvas
  const cWHRatio = containerWidth / containerHeight;
  const iWHRatio = imgWidth / imgHeight;
  if (type === "cover") {
    // cover mode: fill the container, cropping the overflowing dimension.
    if (iWHRatio >= cWHRatio) {
      // Image is wider than the container: match heights, crop the sides.
      radio = containerHeight / imgHeight;
      sx = (imgWidth - containerWidth / radio) / 2;
      swidth = containerWidth / radio;
      sheight = imgHeight;
    } else {
      // Image is taller: match widths, crop top/bottom.
      radio = containerWidth / imgWidth;
      sy = (imgHeight - containerHeight / radio) / 2;
      swidth = imgWidth;
      sheight = containerHeight / radio;
    }
  } else if (type === "contain") {
    // contain mode: fit the whole image, letterboxing the short dimension.
    if (iWHRatio >= cWHRatio) {
      // Image is wider: match widths, center vertically.
      radio = containerWidth / imgWidth;
      y = (containerHeight - imgHeight * radio) / 2;
      height = imgHeight * radio;
    } else {
      // Image is taller: match heights, center horizontally.
      radio = containerHeight / imgHeight;
      x = (containerWidth - imgWidth * radio) / 2;
      width = imgWidth * radio;
    }
  }
  return {
    sx,
    sy,
    swidth,
    sheight,
    x,
    y,
    width,
    height,
  };
}
</script>

Reference code 2:

<html>
<head>
<meta charset="utf-8">
<title>Web camera capture: setting and switching cameras</title>
<script src="//i2.wp.com/cdn.staticfile.org/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div id="container-video">
<video id="video" width="500" height="300" autoplay></video>
\t\t\t<br>
<select id="video-all"></select>
\t\t\t<br>
<button id="snap">Take photo</button>
\t\t\t<br>
<canvas id="canvas" width="500" height="300"></canvas>
</div>
<script type="text/javascript">
// Bootstrap: wire up the camera UI once the page has finished loading.
window.onload = function () {
  cameraSwitchApply();
};
/**
* [Camera default use] or [Camera switching use]
*/
/**
 * Start the default camera, populate the <select> with every available
 * video input device, and wire up the camera-switch and snapshot controls.
 */
function cameraSwitchApply() {
  var canvas = document.getElementById("canvas");
  var context = canvas.getContext("2d");
  var Mvideo = document.getElementById("video");
  var videoAll = document.getElementById("video-all");

  // Enumerate media devices and list every camera in the select box.
  // A "videoinput" entry looks like:
  // {
  //   "deviceId": "d31e8fcdf03bd590df66d4cdef290e71431e3c8acc5e617948be13b6de35844a",
  //   "kind": "videoinput",
  //   "label": "Logitech HD Webcam C270 (046d:0825)",
  //   "groupId": "7fb1407dbd9602f5e481567380a9fa9c360898f2de42be1abb0d201c75dea18f"
  // }
  // Other kinds: "audioinput" (microphone), "audiooutput" (speakers/headphones).
  let VideoAllInfo = [];
  navigator.mediaDevices.enumerateDevices().then((devices) => {
    if (!devices) {
      return;
    }
    console.info(devices);
    devices.forEach((value) => {
      if (value.kind === "videoinput") {
        VideoAllInfo.push(value);
      }
    });
    if (VideoAllInfo.length > 0) {
      // NOTE(review): labels are inserted as raw HTML; they come from the
      // browser (not user input) so the risk is low, but an <option> built
      // via document.createElement + textContent would be safer.
      videoAll.innerHTML = VideoAllInfo
        .map((value) => `<option value="${value.deviceId}">${value.label}</option>`)
        .join("");
    }
  }).catch((err) => {
    // Surface enumeration failures instead of silently ignoring them.
    console.log("enumerateDevices error: ", err);
  });

  // Switch cameras whenever the selection changes (use the cached element
  // instead of re-querying the DOM on every access).
  videoAll.onchange = () => {
    if (videoAll.children.length > 0) {
      let selIndex = videoAll.selectedIndex;                    // index of the chosen option
      let selectedValue = videoAll.options[selIndex].value;     // chosen deviceId
      let selectedText = videoAll.options[selIndex].innerText;  // chosen label
      console.log(selectedValue, selectedText);
      setCatchVideo(Mvideo, selectedValue);
    }
  };

  // Start with the default camera.
  setCatchVideo(Mvideo, "");

  // Snapshot: draw the current video frame onto the canvas.
  document.getElementById("snap").onclick = () => {
    context.drawImage(Mvideo, 0, 0, 500, 300);
  };
}
\t\t\t
/**
* Set camera and switch camera
* @param {Object} videoDom camera object
* @param {Object} videoID camera deviceId
*/
/**
 * Open (or switch to) a camera and pipe its stream into a <video> element.
 * Prefers the standard Promise-based navigator.mediaDevices.getUserMedia;
 * falls back to the deprecated callback/prefixed APIs for old browsers.
 * @param {HTMLVideoElement} videoDom target video element
 * @param {string} videoID camera deviceId; "" selects the default camera
 */
function setCatchVideo(videoDom, videoID) {
  // Empty id -> default camera; otherwise pin the requested device.
  let videoObj = videoID === ""
    ? { "video": true }
    : { "video": { deviceId: videoID } };
  let errBack = function (error) {
    console.log("Video capture error: ", error.code, error);
  };
  let attach = function (stream) {
    videoDom.srcObject = stream;
    videoDom.play();
  };
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // Standard API (all modern browsers).
    navigator.mediaDevices.getUserMedia(videoObj).then(attach).catch(errBack);
  } else if (navigator.getUserMedia) { // deprecated callback API
    navigator.getUserMedia(videoObj, attach, errBack);
  } else if (navigator.webkitGetUserMedia) { // legacy WebKit
    navigator.webkitGetUserMedia(videoObj, (stream) => {
      videoDom.src = window.webkitURL.createObjectURL(stream);
      videoDom.play();
    }, errBack);
  } else if (navigator.mozGetUserMedia) { // legacy Firefox
    navigator.mozGetUserMedia(videoObj, (stream) => {
      videoDom.src = window.URL.createObjectURL(stream);
      videoDom.play();
    }, errBack);
  }
}
\t\t\t
</script>


</body>
</html>