1. OpenCV (EigenFace, FisherFace, LBPH)
https://docs.opencv.org/master/db/d3a/facedetect_8cpp-example.html
/* This program demonstrates usage of the Cascade classifier class */
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

static void help()
{
    cout << "\nThis program demonstrates the use of cv::CascadeClassifier class to detect objects (Face + eyes). You can use Haar or LBP features.\n"
            "This classifier can recognize many kinds of rigid objects, once the appropriate classifier is trained.\n"
            "It's most known use is for faces.\n"
            "Usage:\n"
            "./facedetect [--cascade=<cascade_path> this is the primary trained classifier such as frontal face]\n"
            "   [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n"
            "   [--scale=<image scale greater or equal to 1>]\n"
            "   [--try-flip]\n"
            "   [filename|camera_index]\n\n"
            "see facedetect.cmd for one call:\n"
            "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
            "During execution:\n\tHit any key to quit.\n"
            "\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}

void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip );

string cascadeName;
string nestedCascadeName;

int main( int argc, const char** argv )
{
    VideoCapture capture;
    Mat frame, image;
    string inputName;
    bool tryflip;
    CascadeClassifier cascade, nestedCascade;
    double scale;

    cv::CommandLineParser parser(argc, argv,
        "{help h||}"
        "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
        "{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
        "{scale|1|}{try-flip||}{@filename||}"
    );
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    cascadeName = parser.get<string>("cascade");
    nestedCascadeName = parser.get<string>("nested-cascade");
    scale = parser.get<double>("scale");
    if (scale < 1)
        scale = 1;
    tryflip = parser.has("try-flip");
    inputName = parser.get<string>("@filename");
    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }
    if ( !nestedCascade.load( nestedCascadeName ) )
        cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        help();
        return -1;
    }
    if( inputName.empty() || (isdigit(inputName[0]) && inputName.size() == 1) )
    {
        int camera = inputName.empty() ? 0 : inputName[0] - '0';
        if(!capture.open(camera))
            cout << "Capture from camera #" << camera << " didn't work" << endl;
    }
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
        {
            if(!capture.open( inputName ))
                cout << "Could not read " << inputName << endl;
        }
    }
    else
    {
        image = imread( "../data/lena.jpg", 1 );
        if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl;
    }

    if( capture.isOpened() )
    {
        cout << "Video capturing has been started ..." << endl;
        for(;;)
        {
            capture >> frame;
            if( frame.empty() )
                break;
            Mat frame1 = frame.clone();
            detectAndDraw( frame1, cascade, nestedCascade, scale, tryflip );
            char c = (char)waitKey(10);
            if( c == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }
    else
    {
        cout << "Detecting face(s) in " << inputName << endl;
        if( !image.empty() )
        {
            detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        detectAndDraw( image, cascade, nestedCascade, scale, tryflip );
                        char c = (char)waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                    else
                    {
                        cerr << "Aw snap, couldn't read image " << buf << endl;
                    }
                }
                fclose(f);
            }
        }
    }
    return 0;
}

void detectAndDraw( Mat& img, CascadeClassifier& cascade,
                    CascadeClassifier& nestedCascade,
                    double scale, bool tryflip )
{
    double t = 0;
    vector<Rect> faces, faces2;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;

    cvtColor( img, gray, COLOR_BGR2GRAY );
    double fx = 1 / scale;
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
    equalizeHist( smallImg, smallImg );

    t = (double)getTickCount();
    cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        //|CASCADE_FIND_BIGGEST_OBJECT
        //|CASCADE_DO_ROUGH_SEARCH
        |CASCADE_SCALE_IMAGE,
        Size(30, 30) );
    if( tryflip )
    {
        flip(smallImg, smallImg, 1);
        cascade.detectMultiScale( smallImg, faces2,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            |CASCADE_SCALE_IMAGE,
            Size(30, 30) );
        for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); ++r )
        {
            faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
        }
    }
    t = (double)getTickCount() - t;
    printf( "detection time = %g ms\n", t*1000/getTickFrequency());
    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;
        Scalar color = colors[i%8];
        int radius;

        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
            radius = cvRound((r.width + r.height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( img, Point(cvRound(r.x*scale), cvRound(r.y*scale)),
                       Point(cvRound((r.x + r.width-1)*scale), cvRound((r.y + r.height-1)*scale)),
                       color, 3, 8, 0);
        if( nestedCascade.empty() )
            continue;
        smallImgROI = smallImg( r );
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
            1.1, 2, 0
            //|CASCADE_FIND_BIGGEST_OBJECT
            //|CASCADE_DO_ROUGH_SEARCH
            //|CASCADE_DO_CANNY_PRUNING
            |CASCADE_SCALE_IMAGE,
            Size(30, 30) );
        for ( size_t j = 0; j < nestedObjects.size(); j++ )
        {
            Rect nr = nestedObjects[j];
            center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
            center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);
            radius = cvRound((nr.width + nr.height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
        }
    }
    imshow( "result", img );
}
https://docs.opencv.org/master/db/d7c/group__face.html
/* Eigenfaces in OpenCV */
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the organization nor the names of its contributors
 *     may be used to endorse or promote products derived from this software
 *     without specific prior written permission.
 *
 * See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace cv::face;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::BasicFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      EigenFaceRecognizer::create(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0), call it with:
    //
    //      EigenFaceRecognizer::create(10, 123.0);
    //
    // If you want to use _all_ Eigenfaces and have a threshold,
    // then call the method like this:
    //
    //      EigenFaceRecognizer::create(0, 123.0);
    //
    Ptr<EigenFaceRecognizer> model = EigenFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getEigenValues();
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getEigenVectors();
    // Get the sample mean from the training data:
    Mat mean = model->getMean();
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
        // slice the eigenvectors from the model
        Mat evs = Mat(W, Range::all(), Range(0, num_components));
        Mat projection = LDA::subspaceProject(evs, mean, images[0].reshape(1,1));
        Mat reconstruction = LDA::subspaceReconstruct(evs, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
        } else {
            imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
/* Fisherfaces in OpenCV */
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license
 * (full notice reproduced in the Eigenfaces listing above).
 * See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace cv::face;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder = ".";
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::BasicFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create a Fisherfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // If you just want to keep 10 Fisherfaces, then call
    // the factory method like this:
    //
    //      FisherFaceRecognizer::create(10);
    //
    // However it is not useful to discard Fisherfaces! Please
    // always try to use _all_ available Fisherfaces for
    // classification.
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0) and use _all_
    // Fisherfaces, then call it with:
    //
    //      FisherFaceRecognizer::create(0, 123.0);
    //
    Ptr<FisherFaceRecognizer> model = FisherFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Fisherfaces model:
    Mat eigenvalues = model->getEigenValues();
    // And we can do the same to display the Eigenvectors (read Fisherfaces):
    Mat W = model->getEigenVectors();
    // Get the sample mean from the training data:
    Mat mean = model->getMean();
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the first, at most 16 Fisherfaces:
    for (int i = 0; i < min(16, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Bone colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_BONE);
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/fisherface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }
    // Display or save the image reconstruction at some predefined steps:
    for(int num_component = 0; num_component < min(16, W.cols); num_component++) {
        // Slice the Fisherface from the model:
        Mat ev = W.col(num_component);
        Mat projection = LDA::subspaceProject(ev, mean, images[0].reshape(1,1));
        Mat reconstruction = LDA::subspaceReconstruct(ev, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("fisherface_reconstruction_%d", num_component), reconstruction);
        } else {
            imwrite(format("%s/fisherface_reconstruction_%d.png", output_folder.c_str(), num_component), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}
/* Local Binary Patterns Histograms in OpenCV */
/*
 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
 * Released to public domain under terms of the BSD Simplified license
 * (full notice reproduced in the Eigenfaces listing above).
 * See <http://www.opensource.org/licenses/bsd-license>
 */
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace cv::face;
using namespace std;

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(Error::StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc != 2) {
        cout << "usage: " << argv[0] << " <csv.ext> " << endl;
        exit(1);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(Error::StsError, error_message);
    }
    // The following lines simply get the last image from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::LBPHFaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an LBPH model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    //
    // The LBPHFaceRecognizer uses Extended Local Binary Patterns
    // (it's probably configurable with other operators at a later
    // point), and has the following default values:
    //
    //      radius = 1
    //      neighbors = 8
    //      grid_x = 8
    //      grid_y = 8
    //
    // So if you want a LBPH FaceRecognizer using a radius of
    // 2 and 16 neighbors, call the factory method with:
    //
    //      cv::face::LBPHFaceRecognizer::create(2, 16);
    //
    // And if you want a threshold (e.g. 123.0) with the
    // remaining parameters at their default values, call it with:
    //
    //      cv::face::LBPHFaceRecognizer::create(1, 8, 8, 8, 123.0);
    //
    Ptr<LBPHFaceRecognizer> model = LBPHFaceRecognizer::create();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // First we'll use it to set the threshold of the LBPHFaceRecognizer
    // to 0.0 without retraining the model. This can be useful if
    // you are evaluating the model:
    model->setThreshold(0.0);
    // Now the threshold of this model is set to 0.0. A prediction
    // now returns -1, as it's impossible to have a distance below
    // the threshold:
    predictedLabel = model->predict(testSample);
    cout << "Predicted class = " << predictedLabel << endl;
    // Show some information about the model, as there's no cool
    // model data to display as in Eigenfaces/Fisherfaces.
    // Due to efficiency reasons the LBP images are not stored
    // within the model:
    cout << "Model Information:" << endl;
    string model_info = format("\tLBPH(radius=%i, neighbors=%i, grid_x=%i, grid_y=%i, threshold=%.2f)",
        model->getRadius(), model->getNeighbors(), model->getGridX(), model->getGridY(), model->getThreshold());
    cout << model_info << endl;
    // We could get the histograms for example:
    vector<Mat> histograms = model->getHistograms();
    // But should I really visualize it? Probably the length is interesting:
    cout << "Size of the histograms: " << histograms[0].total() << endl;
    return 0;
}
https://blog.csdn.net/zuidao3105/article/details/79346591
OpenCV mainly uses two kinds of features (that is, two methods) for face detection: Haar features and LBP features. Detection is done with classifiers that have already been trained and are shipped in XML format. In the OpenCV installation directory, inside the data folder under sources, you will find the trained cascades (opencv\sources\data\haarcascades):
The folder names "haarcascades", "hogcascades", and "lbpcascades" hold classifiers trained with three different features: Haar, HOG, and LBP. Haar features are mainly used for face detection, HOG features mainly for pedestrian detection, and LBP features mainly for face recognition; the cascades with "eye" in their names are for detecting eyes. Face detection relies mainly on the detectMultiScale() function. Here is a brief explanation of its parameters, starting with the prototype:
CV_WRAP virtual void detectMultiScale( const Mat& image,
                                       CV_OUT vector<Rect>& objects,
                                       double scaleFactor = 1.1,
                                       int minNeighbors = 3,
                                       int flags = 0,
                                       Size minSize = Size(),
                                       Size maxSize = Size() );
Parameter meanings:
const Mat& image: the image to scan (grayscale)
vector<Rect>& objects: output sequence of rectangles for the detected face positions
double scaleFactor: how much the image is shrunk at each scale step
int minNeighbors: how many times a candidate must be detected before it counts as a real face
int flags: whether to scale the classifier or the image during detection
Size(): the minimum and maximum face sizes to consider
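The same parameters are exposed through OpenCV's Python bindings. Here is a minimal Python sketch; the cascade path and "test.jpg" are placeholders for your own files:

# Minimal sketch: Haar-cascade face detection via OpenCV's Python bindings.
# Assumes haarcascade_frontalface_alt.xml and test.jpg exist locally.
import cv2

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
img = cv2.imread("test.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)                       # boost contrast first
faces = face_cascade.detectMultiScale(
    gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30))
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow("faces", img)
cv2.waitKey(0)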
/* Detect faces in a still image */
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;

int main()
{
    Mat image, image_gray;      // two Mats: the source image and its grayscale copy
    image = imread("F://1.png");
    imshow("source", image);

    cvtColor(image, image_gray, CV_BGR2GRAY);   // convert to grayscale
    equalizeHist(image_gray, image_gray);       // histogram equalization to raise contrast

    // Load the pre-trained XML cascades shipped with OpenCV;
    // they can be used directly (path: opencv\sources\data\haarcascades).
    CascadeClassifier eye_Classifier;
    CascadeClassifier face_cascade;
    if (!eye_Classifier.load("F:\\haarcascade_eye.xml"))    // put the XML files at a path of your choosing
    {
        cout << "Load haarcascade_eye.xml failed!" << endl;
        return 0;
    }
    if (!face_cascade.load("F:\\haarcascade_frontalface_alt.xml"))
    {
        cout << "Load haarcascade_frontalface_alt failed!" << endl;
        return 0;
    }

    // vector is a class template; vector<Rect> is a concrete instantiation
    vector<Rect> eyeRect;
    vector<Rect> faceRect;

    // Detect eye positions
    eye_Classifier.detectMultiScale(image_gray, eyeRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t eyeIdx = 0; eyeIdx < eyeRect.size(); eyeIdx++)
    {
        rectangle(image, eyeRect[eyeIdx], Scalar(0, 0, 255));   // draw a rectangle at each detection
    }

    // Detect face positions
    face_cascade.detectMultiScale(image_gray, faceRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t i = 0; i < faceRect.size(); i++)
    {
        rectangle(image, faceRect[i], Scalar(0, 0, 255));
    }

    imshow("face detection", image);
    waitKey(0);
    return 0;
}
/* Detect faces from the camera */
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;

int main()
{
    Mat image, image_gray;          // two Mats: the current frame and its grayscale copy
    VideoCapture capture(0);        // read video from the default camera

    // Load the classifiers once, before the frame loop
    // (path: opencv\sources\data\haarcascades)
    CascadeClassifier eye_Classifier;
    CascadeClassifier face_cascade;
    if (!eye_Classifier.load("F:\\haarcascade_eye.xml"))
    {
        cout << "Load haarcascade_eye.xml failed!" << endl;
        return 0;
    }
    if (!face_cascade.load("F:\\haarcascade_frontalface_alt.xml"))
    {
        cout << "Load haarcascade_frontalface_alt failed!" << endl;
        return 0;
    }

    while (1)                       // process and display every frame
    {
        capture >> image;           // grab the current frame
        cvtColor(image, image_gray, CV_BGR2GRAY);   // convert to grayscale
        equalizeHist(image_gray, image_gray);       // histogram equalization to raise contrast

        vector<Rect> eyeRect;
        vector<Rect> faceRect;

        // Detect eye positions
        eye_Classifier.detectMultiScale(image_gray, eyeRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t eyeIdx = 0; eyeIdx < eyeRect.size(); eyeIdx++)
            rectangle(image, eyeRect[eyeIdx], Scalar(0, 0, 255));   // draw a rectangle at each detection

        // Detect face positions
        face_cascade.detectMultiScale(image_gray, faceRect, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t i = 0; i < faceRect.size(); i++)
            rectangle(image, faceRect[i], Scalar(0, 0, 255));

        imshow("face detection", image);    // show the current frame
        char c = waitKey(30);               // wait 30 ms, roughly 33 frames per second
        if (c == 27)
            break;
    }
    return 0;
}
https://blog.csdn.net/sinat_26917383/article/details/69831495
Fundamentals of the Haar cascade detection algorithm
https://blog.csdn.net/shadow_guo/article/details/44114421
Tuning the parameters of the Haar-feature AdaBoost cascade classifier for object detection
2. Cloud API
Baidu AI: https://ai.baidu.com/docs#/Face-Detect/top (face qualities/landmark72)
Face++: https://console.faceplusplus.com.cn/documents/4888373 (HumanBody/Gesture)
MS: https://docs.microsoft.com/en-us/azure/cognitive-services/face/overview (Face detection/Face landmarks/Head pose/Face attributes)
3. Open Library
https://github.com/TadasBaltrusaitis/OpenFace
https://github.com/ageitgey/face_recognition (http://dlib.net)
import face_recognition

known_image = face_recognition.load_image_file("biden.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")

biden_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

results = face_recognition.compare_faces([biden_encoding], unknown_encoding)
https://github.com/seetaface/SeetaFaceEngine
http://www.arcsoft.com.cn/ai/arcface.html
https://github.com/accord-net/framework
http://www.aforgenet.com/framework/
http://www.emgu.com/wiki/index.php/Main_Page
4. Face Recognition with DL
https://github.com/mbhybird/facenet
https://github.com/mbhybird/openface
Multi-task Cascaded Convolutional Networks (MTCNN)
http://www.javashuo.com/article/p-wtdquuzn-gm.html
The first step in our pipeline is face detection. Obviously, we need to locate the faces in a photograph before we can try to tell them apart.
If you've used any camera in the last decade, you've probably seen face detection in action:
Face detection is a great feature for cameras. When the camera can automatically pick out faces, it can make sure every face is in focus the moment the picture is taken. But we're using it for a different purpose: finding the regions of the image we want to pass on to the next step.
Face detection went mainstream in the early 2000s, when Paul Viola and Michael Jones invented a technique fast enough to run on cheap cameras. However, much more reliable solutions exist now. We're going to use a method invented in 2005 called the Histogram of Oriented Gradients, or HOG for short.
To find faces in an image, we first convert the image to black and white, because we don't need color data to find faces.
Then we look at every single pixel in the image in turn. For each pixel, we also look at the pixels directly surrounding it:
Our goal is to compare how dark the current pixel is relative to its neighbors. Then we draw an arrow showing the direction in which the image is getting darker:
If you repeat this process for every pixel in the image, each pixel ends up replaced by an arrow. These arrows are called gradients, and they show the flow from light to dark across the whole image:
This might seem to serve no clear purpose, but there's a really good reason for it. If we analyze pixels directly, two photos of the same person under different lighting will have totally different pixel values. But if we only consider the direction in which brightness changes, both the dark and the bright image end up with the same representation. That makes the problem much easier to solve!
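The per-pixel gradients described above can be computed directly. Here is a minimal numpy sketch; the image array img is just a placeholder:

# Minimal sketch: per-pixel gradient direction and magnitude with numpy.
import numpy as np

img = np.random.rand(64, 64)             # placeholder grayscale image in [0, 1]
gy, gx = np.gradient(img.astype(float))  # brightness change along y and x
magnitude = np.hypot(gx, gy)             # how strong the change is at each pixel
direction = np.arctan2(gy, gx)           # which way the image gets darker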
But saving the gradient for every single pixel is far too detailed; we'd most likely miss the forest for the trees. It would be better to see the basic flow of lightness and darkness at a higher level, so we can capture the basic pattern of the image.
To do this, we break the image up into small squares of 16×16 pixels each. In each square, we count how many gradients point in each major direction (how many point up, up-right, right, and so on). Then we replace that square with an arrow in the direction that was strongest.
The end result is that we turn the original image into a very simple representation that captures the basic structure of a face in a simple way:
The original image is turned into a HOG representation that captures the major features of the image, regardless of image brightness.
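A quick way to experiment with this representation is scikit-image's hog function. A minimal sketch follows; the orientation count and cell size here are illustrative settings, not necessarily the exact ones a production face detector uses:

# Minimal sketch: turning an image into its HOG representation with scikit-image.
from skimage import data, color
from skimage.feature import hog

gray = color.rgb2gray(data.astronaut())   # any grayscale image works
features, hog_image = hog(
    gray,
    orientations=8,                       # number of major gradient directions to count
    pixels_per_cell=(16, 16),             # the 16x16 squares described above
    cells_per_block=(1, 1),
    visualize=True)                       # also return a drawable HOG image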
To find faces in this HOG image, all we have to do is find the part of our image that looks most similar to known HOG patterns that were extracted from lots of other training faces:
Using this technique, we can now easily find faces in any image:
Tada, we've isolated the faces in our image. But now we have a problem: to a computer, the same face turned in different directions looks like two different people:
Humans can easily recognize that both images show the same person, but a computer would see them as two completely different people.
To account for this, we'll try to warp each picture so that the eyes and lips are always in the same sample place in the image. This will make it a lot easier to compare faces in the next steps.
To do this, we're going to use an algorithm called face landmark estimation. There are lots of ways to do this, but we're going to use the approach invented in 2014 by Vahid Kazemi and Josephine Sullivan.
The basic idea is to find 68 specific points (called landmarks) that exist on every face — the top of the chin, the outside edge of each eye, the inner edge of each eyebrow, and so on. Then we train a machine learning algorithm to be able to find these 68 specific points on any face:
The 68 landmarks we will locate on every face. This image was created by Brandon Amos, a Ph.D. student at Carnegie Mellon University who works on OpenFace.
Here's the result of locating the 68 face landmarks on our test image:
You can also use this same technique to implement your own real-time 3D face filters, like Snapchat's!
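dlib ships an implementation of the Kazemi–Sullivan landmark estimator. A minimal sketch, assuming you've downloaded the shape_predictor_68_face_landmarks.dat model from dlib.net and have a local test.jpg:

# Minimal sketch: 68-point face landmark detection with dlib.
import dlib

detector = dlib.get_frontal_face_detector()   # HOG-based face detector
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = dlib.load_rgb_image("test.jpg")
for rect in detector(img, 1):                 # upsample once to catch smaller faces
    shape = predictor(img, rect)
    points = [(shape.part(i).x, shape.part(i).y) for i in range(68)]
    print(points[:5])                         # the first few landmark coordinates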
Now that we know where the eyes and mouth are, we'll rotate, scale, and shear the image so that the eyes and mouth are centered as well as possible. We won't do any fancy 3D warps, because those would distort the image. We'll only use basic image transformations that preserve parallel lines, such as rotation and scaling (called affine transformations):
Now, no matter how the face is turned, we can center the eyes and mouth in roughly the same position in the image. This will make our next step a lot more accurate.
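One simple way to realize this alignment is to map three stable landmarks (say, the two eye centers and the mouth) onto fixed template coordinates with cv2.getAffineTransform. In this sketch the source points would come from the landmark step, and the template coordinates are purely illustrative:

# Minimal sketch: affine face alignment with OpenCV.
import cv2
import numpy as np

img = cv2.imread("face.jpg")                                # placeholder input
src_pts = np.float32([[110, 130], [190, 128], [150, 210]])  # left eye, right eye, mouth (from landmarks)
dst_pts = np.float32([[70, 80], [130, 80], [100, 150]])     # where we want them in a 200x200 crop

M = cv2.getAffineTransform(src_pts, dst_pts)  # rotation + scale + shear; keeps lines parallel
aligned = cv2.warpAffine(img, M, (200, 200))  # the centered face crop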
Now we get to the heart of the problem: actually telling different faces apart. This is where things get really interesting!
The simplest approach to face recognition is to directly compare the unknown face we found in Step 2 against every face picture we've already labeled. When we find a previously labeled face that looks extremely similar to our unknown face, it must be the same person. Sounds like a pretty good idea, right?
There's actually a huge problem with that approach. A site like Facebook, with billions of users and trillions of photos, can't possibly loop through every previously tagged face to compare it with each new one — that would take far too long. They need to recognize faces in milliseconds, not hours.
What we need is a way to extract a few basic measurements from each face. Then we can measure an unknown face the same way and find the known face with the closest measurements. For example, we might measure the size of each ear, the spacing between the eyes, the length of the nose, and so on. If you've ever watched a show like CSI, you know what I'm talking about.
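"Closest measurements" just means the smallest distance between measurement vectors. A minimal numpy sketch with made-up numbers:

# Minimal sketch: nearest known face by Euclidean distance over measurement vectors.
import numpy as np

known = np.array([[0.2, 1.1, 0.7],    # person A's measurements (made-up values)
                  [0.9, 0.3, 1.4]])   # person B's measurements
names = ["person A", "person B"]

unknown = np.array([0.25, 1.0, 0.75])
distances = np.linalg.norm(known - unknown, axis=1)
print("best match:", names[int(np.argmin(distances))])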
OK, so which measurements should we collect from each face to build our known-face database? Ear size? Nose length? Eye color? Something else?
It turns out that measurements that seem obvious to us humans (like eye color) don't really make sense to a computer. Researchers have discovered that the most accurate approach is to let the computer figure out which measurements to collect by itself. Deep learning is better than humans at figuring out which parts of a face are important to measure.
So the solution is to train a deep convolutional neural network. But instead of training it to recognize objects in pictures, this time we train it to generate 128 measurements for each face.
Each training pass looks at three different face images:
1. Load a training face image of a known person
2. Load another picture of the same known person
3. Load a picture of a totally different person
Then the algorithm looks at the measurements it currently generates for each of those three images. It then tweaks the neural network slightly so that the measurements it generates for #1 and #2 come out a little closer together, while the measurements for #2 and #3 come out slightly further apart.
After repeating this step millions of times for millions of images of thousands of different people, the neural network learns to reliably generate 128 measurements for each person. Any ten different pictures of the same person should give roughly the same measurements.
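This "pull the same person together, push different people apart" objective is commonly written as a triplet loss. A minimal numpy sketch of the idea (the margin value is illustrative):

# Minimal sketch: the triplet loss behind embedding training, in plain numpy.
import numpy as np

def triplet_loss(anchor, positive, negative, margin=0.2):
    """Penalize when the anchor is not closer to the positive (same person)
    than to the negative (different person) by at least `margin`."""
    pos_dist = np.sum((anchor - positive) ** 2)
    neg_dist = np.sum((anchor - negative) ** 2)
    return max(pos_dist - neg_dist + margin, 0.0)

# Three 128-d embeddings the network produced for one training triplet:
a, p, n = (np.random.rand(128) for _ in range(3))
print(triplet_loss(a, p, n))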
Machine learning people call the 128 measurements of each face an embedding. The idea of reducing complicated raw data like a picture into a list of computer-generated numbers comes up a lot in machine learning (especially in language translation). The exact approach to faces we're using was invented by researchers at Google in 2015, but many similar approaches exist.
This process of training a convolutional neural network to output face embeddings requires a lot of data and computing power. Even with an expensive Nvidia Tesla video card, you need about 24 hours of continuous training to get good accuracy.
But once the network has been trained, it can generate measurements for any face, even ones it has never seen before! So this training only needs to be done once. Luckily, the fine folks at OpenFace have already done this, and they published several trained networks we can use directly. Thanks, Brandon Amos and team!
So all we need to do is run our face images through their pre-trained network to get the 128 measurements. Here are the measurements for our test image:
So which parts of the face are these 128 numbers measuring, exactly? We have no idea, and it doesn't really matter to us. All we care about is that the network generates nearly the same numbers when looking at two different pictures of the same person.
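With the face_recognition library (which wraps dlib's pretrained face-embedding model), getting those 128 numbers is one call; "test.jpg" is a placeholder:

# Minimal sketch: extracting a 128-d face embedding with the face_recognition library.
import face_recognition

image = face_recognition.load_image_file("test.jpg")
encodings = face_recognition.face_encodings(image)  # one 128-d vector per detected face
print(len(encodings[0]))                            # -> 128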
This last step is actually the easiest step in the whole process. All we have to do is find the person in our database of known people who has the closest measurements to our test image.
You can do that with any basic machine learning classification algorithm. No fancy deep learning tricks are needed. We'll use a simple linear SVM classifier, but lots of other classification algorithms would work.
All we need to do is train a classifier that takes the measurements from a new test image and tells us which known person is the closest match. Running the classifier takes milliseconds, and its output is the person's name!
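A minimal scikit-learn sketch of that classifier, assuming `embeddings` is an array of 128-d vectors from the previous step and `names` holds the matching labels (random placeholders are used here so the snippet runs standalone):

# Minimal sketch: classifying 128-d face embeddings with a linear SVM.
import numpy as np
from sklearn.svm import SVC

embeddings = np.random.rand(60, 128)                   # placeholder: 20 images x 3 people
names = ["Will Ferrell", "Chad Smith", "Jimmy Fallon"] * 20

clf = SVC(kernel="linear", probability=True)
clf.fit(embeddings, names)

new_embedding = np.random.rand(1, 128)                 # embedding of an unknown face
print(clf.predict(new_embedding)[0])                   # -> the best-matching name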
So let's test our system. First, I trained the classifier with the embeddings of about 20 pictures each of Will Ferrell, Chad Smith, and Jimmy Fallon:
Mmm, that sweet training data!
Then I ran the classifier on every frame of the video of Will Ferrell and Chad Smith imitating each other on Jimmy Fallon's show:
https://cdn-images-1.medium.com/max/800/1*_GNyjR3JlPoS9grtIVmKFQ.gif
Summary:
1. Encode a picture using the HOG algorithm to create a simplified version of the image. Using this simplified image, find the part of the image that most resembles a generic HOG encoding of a face.
2. Figure out the pose of the face by finding its main landmarks. Once we have those landmarks, use them to warp the image so that the eyes and mouth are centered.
3. Pass the centered face image through a neural network that knows how to derive 128 feature measurements from a face. Save those 128 measurements.
4. Looking at all the faces we've measured in the past, find the person whose measurements are closest to our face's. That's your match!
5. Object Detection
https://pjreddie.com/darknet/yolo/
https://github.com/allanzelener/YAD2K
# Installation
git clone https://github.com/allanzelener/yad2k.git
cd yad2k

# [Option 1] To replicate the conda environment:
conda env create -f environment.yml
source activate yad2k

# [Option 2] Install everything globally.
pip install numpy h5py pillow
pip install tensorflow-gpu   # CPU-only: conda install -c conda-forge tensorflow
pip install keras            # Possibly older release: conda install keras

# Quick Start
wget http://pjreddie.com/media/files/yolo.weights
wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolo.cfg
./yad2k.py yolo.cfg yolo.weights model_data/yolo.h5
./test_yolo.py model_data/yolo.h5   # output in images/out/
https://github.com/mbhybird/keras-yolo3
# Quick Start
wget https://pjreddie.com/media/files/yolov3.weights
python convert.py yolov3.cfg yolov3.weights model_data/yolo.h5
python yolo.py
# or
python yolo_video.py