Learning OpenFace (2): Face Matching with FaceNet + SVM
Preface
In the previous post (Learning OpenFace (1): Installation, Configuration, and Face Comparison), I went through installing and configuring OpenFace and a small face-matching demo. That demo only matched the faces in a few images, comparing each face's feature vector rather crudely with Euclidean distance, and it still worked reasonably well. In this post an SVM is used instead to classify each face's feature vector and perform the matching.
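For reference, the earlier distance-based check boils down to something like the sketch below. It is only a rough illustration, not code from the previous post: rep1 and rep2 stand for two 128-dimensional embeddings produced by net.forward (see featrure_extract.py below), and the 0.99 threshold on the squared distance is a placeholder value, not a tuned one.

```python
import numpy as np

def is_same_person(rep1, rep2, threshold=0.99):
    # Squared Euclidean distance between two 128-d face embeddings;
    # a small distance means the faces are probably the same person.
    d = np.asarray(rep1) - np.asarray(rep2)
    return np.dot(d, d) < threshold
```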
Demo
There are four code files:
- featrure_extract.py: extracts face information; each face is encoded as a 128-dimensional feature vector;
- feature_train.py: trains the SVM classifier;
- feature_classify.py: classifies face feature vectors with the SVM;
- logs.py: configures console logging.
Enough preamble; the code follows below.
logs.py
```python
# *_* coding:utf-8 *_*
# author: 許鴻斌
# email: 2775751197@qq.com

import logging
import sys

# Get a logger instance (an empty name would return the root logger)
logger = logging.getLogger('Test')
# Output format for the logger
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')

# File logging (disabled here)
# file_handler = logging.FileHandler("test.log")
# file_handler.setFormatter(formatter)  # the format can be set via setFormatter

# Console logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter  # the formatter attribute can also be assigned directly

# Attach the handlers to the logger
# logger.addHandler(file_handler)
logger.addHandler(console_handler)

# Minimum output level; the default would be WARNING
logger.setLevel(logging.INFO)
```

featrure_extract.py
```python
# *_* coding:utf-8 *_*
# author: 許鴻斌
# email: 2775751197@qq.com

from logs import logger
import os
import cv2
import time
import openface
import cPickle

# dlib and OpenFace model configuration
imgDim = 96
dlib_model_dir = '/home/xhb/文檔/Packages/openface/models/dlib'
openface_model_dir = '/home/xhb/文檔/Packages/openface/models/openface'
# Load the dlib face landmark detector
align = openface.AlignDlib(os.path.join(dlib_model_dir, "shape_predictor_68_face_landmarks.dat"))
# Load FaceNet
net = openface.TorchNeuralNet(os.path.join(openface_model_dir, 'nn4.small2.v1.t7'), imgDim)


def getRep(imgPath, verbose=False):
    logger.info("Processing {}.".format(imgPath))
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    if verbose:
        logger.info("Original size: {}".format(rgbImg.shape))

    # Detect the largest face in the image
    start = time.time()
    faceBoundingBox = align.getLargestFaceBoundingBox(rgbImg)
    if faceBoundingBox is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
    if verbose:
        logger.info("Face detection took {} seconds.".format(time.time() - start))

    # Align the face to the canonical pose expected by the network
    start = time.time()
    alignedFace = align.align(imgDim, rgbImg, faceBoundingBox,
                              landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if alignedFace is None:
        raise Exception("Unable to align image: {}".format(imgPath))
    if verbose:
        logger.info("Face alignment took {} seconds.".format(time.time() - start))

    # Forward pass through FaceNet: one 128-d embedding per face
    start = time.time()
    rep = net.forward(alignedFace)
    if verbose:
        logger.info("OpenFace forward pass took {} seconds.".format(time.time() - start))
        logger.info("Representation:")
        logger.info(rep)
    return rep


pwd = os.getcwd()
img_dir = os.path.join(pwd, 'training_images')
people_name_list = os.listdir(img_dir)
people_rep_dict = {}
for people_name in people_name_list:
    face_dir = os.path.join(img_dir, people_name)
    img_name_list = os.listdir(face_dir)
    people_rep_list = []
    for img_name in img_name_list:
        img_path = os.path.join(face_dir, img_name)
        rep = getRep(img_path, verbose=False)
        people_rep_list.append(rep)
    people_rep_dict[people_name] = people_rep_list
logger.info('Face vectors have been generated!')

# Save the generated feature vectors
rep_dir = os.path.join(pwd, 'representation_vectors')
cPickle.dump(people_rep_dict, open(os.path.join(rep_dir, 'rep.pkl'), 'wb'))
logger.info('Vectors saved in {}'.format(os.path.join(rep_dir, 'rep.pkl')))
```

feature_train.py
```python
# *_* coding:utf-8 *_*
# author: 許鴻斌
# email: 2775751197@qq.com

from logs import logger
import os
import cPickle
from sklearn.svm import SVC

pwd = os.getcwd()
rep_dir = os.path.join(pwd, 'representation_vectors')
people_rep_dict = cPickle.load(open(os.path.join(rep_dir, 'rep.pkl'), 'rb'))

people_names = people_rep_dict.keys()
logger.info(people_names)
nClasses = len(people_names)

# Flatten the per-person vectors into a training set with integer labels
label = 0
vector_list = []
label_list = []
for people_name in people_names:
    vectors = people_rep_dict[people_name]
    label += 1
    for vector in vectors:
        vector_list.append(vector)
        label_list.append(label)
logger.info('vector_list: {}'.format(len(vector_list)))
logger.info('label_list: {}'.format(len(label_list)))

# Linear SVM
logger.info('Start training linear svm classifier.')
clf = SVC(C=1, kernel='linear', probability=True)
clf.fit(vector_list, label_list)

# Save the model and the labels
clf_name = 'classifier_{}.pkl'.format('linear_svm')
logger.info('classifier saved as: {}'.format(os.path.join(pwd, clf_name)))
cPickle.dump(clf, open(os.path.join(pwd, clf_name), 'wb'))
labels_name = 'labels.pkl'
logger.info('labels saved as: {}'.format(os.path.join(pwd, labels_name)))
cPickle.dump(people_names, open(os.path.join(pwd, labels_name), 'wb'))
```

feature_classify.py
```python
# *_* coding:utf-8 *_*
# author: 許鴻斌
# email: 2775751197@qq.com

from logs import logger
import os
import cPickle
from sklearn.svm import SVC
import cv2
import time
import openface
import numpy as np

# dlib and OpenFace model configuration
imgDim = 96
dlib_model_dir = '/home/xhb/文檔/Packages/openface/models/dlib'
openface_model_dir = '/home/xhb/文檔/Packages/openface/models/openface'
# Load the dlib face landmark detector
align = openface.AlignDlib(os.path.join(dlib_model_dir, "shape_predictor_68_face_landmarks.dat"))
# Load FaceNet
net = openface.TorchNeuralNet(os.path.join(openface_model_dir, 'nn4.small2.v1.t7'), imgDim)


def getRep(imgPath, verbose=False, multiple=False):
    logger.info("Processing {}.".format(imgPath))
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    if verbose:
        logger.info("Original size: {}".format(rgbImg.shape))

    # Detect either all faces or only the largest one
    start = time.time()
    if multiple:
        faceBoundingBoxes = align.getAllFaceBoundingBoxes(rgbImg)
    else:
        faceBoundingBoxes = [align.getLargestFaceBoundingBox(rgbImg)]
    if len(faceBoundingBoxes) == 0 or faceBoundingBoxes[0] is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
    if verbose:
        logger.info("Face detection took {} seconds.".format(time.time() - start))

    reps = []
    for bb in faceBoundingBoxes:
        start = time.time()
        alignedFace = align.align(imgDim, rgbImg, bb,
                                  landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        if alignedFace is None:
            raise Exception("Unable to align image: {}".format(imgPath))
        if verbose:
            logger.info("Face alignment took {} seconds.".format(time.time() - start))
            logger.info("This bbox is centered at {}, {}".format(bb.center().x, bb.center().y))

        start = time.time()
        rep = net.forward(alignedFace)
        if verbose:
            logger.info("OpenFace forward pass took {} seconds.".format(time.time() - start))
        reps.append((bb.center().x, rep))

    # Sort faces from left to right by the x coordinate of their bounding boxes
    sreps = sorted(reps, key=lambda x: x[0])
    return sreps


pwd = os.getcwd()
clf_name = 'classifier_{}.pkl'.format('linear_svm')
clf = cPickle.load(open(os.path.join(pwd, clf_name), 'rb'))
label_name = 'labels.pkl'
labels = cPickle.load(open(os.path.join(pwd, label_name), 'rb'))

test_image_path = os.path.join(pwd, 'test_images', 'inesta1.jpg')
reps = getRep(test_image_path, multiple=True)
if len(reps) > 1:
    logger.info("List of faces in image from left to right")
for r in reps:
    rep = r[1].reshape(1, -1)
    bbx = r[0]
    start = time.time()
    pred = clf.predict_proba(rep).ravel()
    for name, prob in zip(labels, pred):
        logger.info('Probability of {} is: {}'.format(name, prob))
    max_prob_index = np.argmax(pred)
    person_name = labels[max_prob_index]
    confidence = pred[max_prob_index]
    logger.info("Prediction took {} seconds.".format(time.time() - start))
    logger.info("Predict {} @ x={} with {:.2f} confidence.".format(
        person_name.decode('utf-8'), bbx, confidence))
```

Additional notes
The code itself is straightforward and should mostly be self-explanatory, so I won't walk through it line by line. A few points are worth noting:
1. Collecting face images
Create a training_images folder in the same directory as the code files, with one subfolder per person. In my case there are three subfolders, inesta, obama, and trump, and each holds a number of face images of that person.
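A sketch of the expected layout is shown below; the image file names are just placeholders, any names will do:

```
training_images/
├── inesta/
│   ├── inesta1.jpg
│   ├── inesta2.jpg
│   └── ...
├── obama/
│   └── ...
└── trump/
    └── ...
```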
Note that each training image must contain only a single face: the SVM is trained to classify these faces, and images with multiple faces would confuse the labels.
2. Model paths
As in the previous post, you need to adjust the model paths to match your own setup. The download links for the models were given in the previous post, Learning OpenFace (1): Installation, Configuration, and Face Comparison.
imgDim can stay at its default value here. dlib_model_dir is the directory holding dlib's face landmark detector model (shape_predictor_68_face_landmarks.dat), and openface_model_dir is the directory holding the trained FaceNet model released by OpenFace (nn4.small2.v1.t7).
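These two directories are the only settings that normally need editing. For example, assuming OpenFace is cloned under ~/openface (a hypothetical location; substitute your own path), the configuration could be written as:

```python
import os

# Hypothetical example: OpenFace cloned at ~/openface -- adjust to your machine.
openface_root = os.path.expanduser('~/openface')
dlib_model_dir = os.path.join(openface_root, 'models', 'dlib')          # shape_predictor_68_face_landmarks.dat lives here
openface_model_dir = os.path.join(openface_root, 'models', 'openface')  # nn4.small2.v1.t7 lives here
imgDim = 96  # input size expected by nn4.small2.v1.t7
```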
Results
Run featrure_extract.py: it walks through the images in the training_images folder and saves the generated feature vectors, keyed by person name, to rep.pkl under the representation_vectors directory (which must already exist).
Run feature_train.py: it loads the feature vectors from rep.pkl and trains a linear SVM classifier. The classifier is saved as classifier_linear_svm.pkl and the label list as labels.pkl.
Run feature_classify.py: it classifies the faces in the test image with the SVM and prints the prediction.
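As an optional sanity check, the three saved artifacts can be tied together in a few lines. The sketch below is my own addition, not part of the three scripts: it reloads the classifier, the label list, and one stored training vector, and checks that the SVM assigns it to the right person.

```python
import os
import cPickle
import numpy as np

pwd = os.getcwd()
clf = cPickle.load(open(os.path.join(pwd, 'classifier_linear_svm.pkl'), 'rb'))
labels = cPickle.load(open(os.path.join(pwd, 'labels.pkl'), 'rb'))
people_rep_dict = cPickle.load(
    open(os.path.join(pwd, 'representation_vectors', 'rep.pkl'), 'rb'))

# Classify the first stored vector of the first person and compare.
name = labels[0]
vec = np.asarray(people_rep_dict[name][0]).reshape(1, -1)
probs = clf.predict_proba(vec).ravel()
print('True identity: {}, predicted: {} ({:.2f})'.format(
    name, labels[np.argmax(probs)], probs[np.argmax(probs)]))
```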