1. Configuration file
#-------------------------------------------------
# Project created by QtCreator 2023-09-22T10:34:23
#-------------------------------------------------

# NOTE(review): every "+ =" in the pasted original was garbled; the
# qmake append operator is "+=" with no interior space.
QT += core gui

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

TARGET   = project
TEMPLATE = app

SOURCES += main.cpp \
           widget.cpp

HEADERS += widget.h

FORMS   += widget.ui

# OpenCV 3.4 (MinGW build) headers
INCLUDEPATH += D:/opencv/opencv3.4-qt-intall/install/include
INCLUDEPATH += D:/opencv/opencv3.4-qt-intall/install/include/opencv
INCLUDEPATH += D:/opencv/opencv3.4-qt-intall/install/include/opencv2

# Link every prebuilt OpenCV static import library via wildcard
LIBS += D:/opencv/opencv3.4-qt-intall/install/x86/mingw/lib/libopencv_*.a
2. Header file
#ifndef WIDGET_H #define WIDGET_H #include <QWidget> #include <opencv2/opencv.hpp> #include <iostream> #include <math.h> #include<opencv2/face.hpp> #include <vector> #include <map> #include <QMessageBox> #include <QDebug> #include <QFile> #include <QTextStream> #include <QDateTime> #include <QTimerEvent> using namespace cv; using namespace cv::face; using namespace std; namespace Ui { classWidget; } class Widget : public QWidget { Q_OBJECT public: explicit Widget(QWidget *parent = 0); ~Widget(); private slots: void on_openCameraBtn_clicked(); void on_closeCameraBtn_clicked(); void on_studyBtn_clicked(); private: Ui::Widget *ui; /******************Function module 1: Camera acquisition and display****************/ VideoCapture v; //Video stream object Mat src; //Get the original camera image Mat rgb; //Storage rgb image Mat gray; //Grayscale image Mat dst; //Equalization graph CascadeClassifier c; //cascade classifier vector<Rect> faces; //Face rectangular frame container int camera_id; //Camera timer void timerEvent(QTimerEvent *e); //Rewrite the timer event processing function /******************Function module 2: Face input operation************************/ int study_id; //Timer for face entry Ptr<FaceRecognizer> recognizer; //Pointer of face recognizer vector<Mat> studyFaces; //array for face learning vector<int> studyLabs; //Face label array int flag; //Mark whether faces are being recorded int count; //Record the number of learning times /******************Function module 3: Face detection**********************/ int check_id; //Timer for face detection }; #endif // WIDGET_H
3. Source files
#include "widget.h"
#include "ui_widget.h"

// NOTE(review): every Windows path below originally used single backslashes
// ("D:\opencv\resources\..."): \o and \h are invalid escapes and \r is a
// carriage return, so the strings passed to OpenCV/Qt were corrupted.
// Forward slashes work on Windows and need no escaping.

Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);
    ui->loginBtn->setEnabled(false);        // login unlocks only after successful recognition
    ui->closeCameraBtn->setEnabled(false);  // nothing to close until the camera is opened

    // Initialize all state so a stray QTimerEvent can never match an
    // uninitialized timer id (camera_id/study_id were previously never set here).
    camera_id = -1;
    study_id  = -1;
    check_id  = -1;
    flag      = 0;   // 0 = recognition mode, 1 = enrollment mode
    count     = 0;

    // Load the Haar cascade used for face detection
    if (!c.load("D:/opencv/resources/haarcascade_frontalface_alt2.xml")) {
        QMessageBox::information(this, "failed", "face classification model download failed");
        return;
    }

    // Create the face recognizer, reloading a previously trained model if present
    QFile file("D:/opencv/resources/myface.xml");
    if (file.exists()) {
        // A trained model exists on disk — load it directly
        recognizer = LBPHFaceRecognizer::load<LBPHFaceRecognizer>("D:/opencv/resources/myface.xml");
    } else {
        // No trained model yet — start with an empty recognizer
        recognizer = LBPHFaceRecognizer::create();
    }

    // Recognition runs from startup: probe for a known face every 2 seconds
    check_id = this->startTimer(2000);
    // Predictions whose confidence distance exceeds 70 are rejected
    // (predict() then reports label -1). The original comment said 100,
    // which contradicted the value actually set.
    recognizer->setThreshold(70);
}

Widget::~Widget()
{
    delete ui;
}

// Slot: open the camera and start streaming frames to the UI label
void Widget::on_openCameraBtn_clicked()
{
    if (!v.open(0)) {
        QMessageBox::information(this, "Failed", "Camera opening failed");
        return;
    }
    // Refresh the preview every 20 ms (~50 fps)
    camera_id = this->startTimer(20);
    ui->openCameraBtn->setEnabled(false);
    ui->closeCameraBtn->setEnabled(true);
}

// Slot: stop the preview timer and release the camera
void Widget::on_closeCameraBtn_clicked()
{
    this->killTimer(camera_id);
    camera_id = -1;   // invalidate so timerEvent cannot match a dead timer
    ui->openCameraBtn->setEnabled(true);
    ui->closeCameraBtn->setEnabled(false);
    ui->faceLab->clear();
    v.release();
}

// Timer dispatcher: handles the preview, enrollment and recognition timers
void Widget::timerEvent(QTimerEvent *e)
{
    /* ---------- camera preview timer ---------- */
    if (e->timerId() == camera_id) {
        // 1. Grab a frame; skip this tick on a dropped frame instead of
        //    processing an empty Mat (the original did not check).
        if (!v.read(src) || src.empty()) return;
        // 2. Mirror horizontally so the preview behaves like a mirror
        flip(src, src, 1);
        // 3. Normalize the working size
        cv::resize(src, src, Size(300, 300));
        // 4. BGR -> RGB for Qt display
        cvtColor(src, rgb, CV_BGR2RGB);
        // 5. Grayscale for the detector. BUGFIX: the source here is the RGB
        //    image, so the conversion code must be CV_RGB2GRAY — the original
        //    used CV_BGR2GRAY, swapping the red/blue luminance weights.
        cvtColor(rgb, gray, CV_RGB2GRAY);
        // 6. Histogram equalization improves detection robustness
        equalizeHist(gray, dst);
        // 7. Find face rectangles with the cascade classifier
        c.detectMultiScale(dst, faces);
        // 8. Draw each detected face onto the RGB frame
        for (size_t i = 0; i < faces.size(); i++) {
            rectangle(rgb, faces[i], Scalar(255, 0, 0), 2);
        }
        // 9. Wrap the Mat's pixel buffer in a QImage (no copy) ...
        QImage img(rgb.data, rgb.cols, rgb.rows, rgb.cols * rgb.channels(),
                   QImage::Format_RGB888);
        // 10. ... and display it on the UI label
        ui->faceLab->setPixmap(QPixmap::fromImage(img));
    }

    /* ---------- face enrollment timer ---------- */
    if (e->timerId() == study_id) {
        // BUGFIX: the original indexed faces[0] unconditionally and crashed
        // whenever no face was visible during enrollment.
        if (faces.empty()) return;
        qDebug() << "Entering, please wait...";
        // Crop the first detected face and normalize it for training
        Mat face = src(faces[0]);
        cv::resize(face, face, Size(100, 100));
        cvtColor(face, face, CV_BGR2GRAY);   // src is BGR here, so BGR2GRAY is correct
        equalizeHist(face, face);
        // Accumulate the sample; single-user system, so every label is 1
        studyFaces.push_back(face);
        studyLabs.push_back(1);
        count++;
        if (count == 60) {   // enough samples collected — train and persist
            // update(): incrementally (re)train the model with the new
            // image array (param 1) and label array (param 2)
            recognizer->update(studyFaces, studyLabs);
            // Persist the trained model to disk
            recognizer->save("D:/opencv/resources/myface.xml");
            // Tear down enrollment state and return to recognition mode
            this->killTimer(study_id);
            study_id = -1;
            ui->studyBtn->setEnabled(true);
            studyFaces.clear();
            studyLabs.clear();
            flag = 0;
            count = 0;
            QMessageBox::information(this, "successful", "entry successful");
        }
    }

    /* ---------- face recognition timer ---------- */
    if (e->timerId() == check_id) {
        if (flag == 0) {   // recognize only while not enrolling
            if (faces.empty() || recognizer.empty()) return;
            qDebug() << "Looking for faces";
            QFile file("D:/opencv/resources/myface.xml");
            if (file.exists()) {   // predict only when a trained model exists
                // 1-4. Crop and preprocess exactly as during training
                Mat face = src(faces[0]);
                cv::resize(face, face, Size(100, 100));
                cvtColor(face, face, CV_BGR2GRAY);
                equalizeHist(face, face);
                // 5. Variables receiving the prediction. BUGFIX: the original
                //    pasted source declared "doubleconf = 0.0;".
                int lab = -1;
                double conf = 0.0;
                // 6. Predict; with a threshold set, lab stays -1 on rejection
                recognizer->predict(face, lab, conf);
                qDebug() << "lab: " << lab << " conf: " << conf;
                // 7. Recognized — grant access to the login button
                if (lab != -1) {
                    ui->loginBtn->setEnabled(true);
                }
            }
        }
    }
}

// Slot: begin collecting face samples for training
void Widget::on_studyBtn_clicked()
{
    qDebug() << "Start typing....";
    study_id = this->startTimer(50);   // sample one face every 50 ms
    count = 0;                         // restart the sample counter
    ui->studyBtn->setEnabled(false);   // no re-entry while enrolling
    flag = 1;                          // suspend recognition during enrollment
}
4. Main function
#include "widget.h"
#include <QApplication>

// Application entry point: construct the Qt application object,
// show the main widget and hand control to the Qt event loop.
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    Widget mainWindow;
    mainWindow.show();

    return app.exec();
}
The knowledge points of this article match the official knowledge archive, and you can use it to study the related topics further: Python introduction skill tree — Artificial intelligence — Machine-learning toolkit (scikit-learn); 357,849 people are learning this system.