
replace face_recognition with dlib+numpy; add video_capture.release(); minor code cleanup

dmig 2018-12-09 14:27:21 +07:00
parent 8671be425d
commit 2c48350b23
No known key found for this signature in database
GPG key ID: A4A245B3AD37C4FC


@@ -8,16 +8,17 @@ import json
 import configparser
 import builtins
 import cv2
+import numpy as np

-# Try to import face_recognition and give a nice error if we can't
+# Try to import dlib and give a nice error if we can't
 # Add should be the first point where import issues show up
 try:
-    import face_recognition
+    import dlib
 except ImportError as err:
     print(err)

-    print("\nCan't import the face_recognition module, check the output of")
-    print("pip3 show face_recognition")
+    print("\nCan't import the dlib module, check the output of")
+    print("pip3 show dlib")
     sys.exit(1)

 # Get the absolute path to the current directory
@@ -27,6 +28,21 @@ path = os.path.abspath(__file__ + '/..')
 config = configparser.ConfigParser()
 config.read(path + "/../config.ini")

+use_cnn = config.getboolean('core', 'use_cnn', fallback=False)
+if use_cnn:
+    face_detector = dlib.cnn_face_detection_model_v1(
+        path + '/../dlib-data/mmod_human_face_detector.dat'
+    )
+else:
+    face_detector = dlib.get_frontal_face_detector()
+
+pose_predictor = dlib.shape_predictor(
+    path + '/../dlib-data/shape_predictor_5_face_landmarks.dat'
+)
+face_encoder = dlib.face_recognition_model_v1(
+    path + '/../dlib-data/dlib_face_recognition_resnet_model_v1.dat'
+)
+
 user = builtins.howdy_user
 # The permanent file to store the encoded model in
 enc_file = path + "/../models/" + user + ".dat"
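
A note on the two detector paths added above: dlib.get_frontal_face_detector() returns plain dlib.rectangle objects, while the CNN model returns mmod_rectangle objects that wrap the rectangle (plus a confidence score) in a .rect attribute, which is why the next hunk unwraps face_location.rect when use_cnn is set. A minimal sketch of that normalization, assuming the same mmod_human_face_detector.dat file as above (illustration only, not part of the commit):

import dlib

def detect_face_rects(image, use_cnn, cnn_model_path=None):
    # Pick a detector the same way the code above does
    if use_cnn:
        detector = dlib.cnn_face_detection_model_v1(cnn_model_path)
    else:
        detector = dlib.get_frontal_face_detector()
    detections = detector(image, 1)  # upsample the image once
    # CNN detections are mmod_rectangles; unwrap them into plain rectangles
    return [d.rect for d in detections] if use_cnn else list(detections)
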
@@ -127,29 +143,34 @@ while frames < 60:
     frames += 1

-    # Get the encodings in the frame
-    enc = face_recognition.face_encodings(frame)
+    # Get all faces from that frame as encodings
+    face_locations = face_detector(gsframe, 1)  # upsample 1 time

     # If we've found at least one, we can continue
-    if enc:
+    if face_locations:
         break
+else:
+    video_capture.release()

-# If more than 1 faces are detected we can't know which one belongs to the user
-if len(enc) > 1:
-    print("Multiple faces detected, aborting")
-    sys.exit(1)
+# If more than 1 faces are detected we can't know which one belongs to the user
+if len(face_locations) > 1:
+    print("Multiple faces detected, aborting")
+    sys.exit(1)
+elif not face_locations:
+    print("No face detected, aborting")
+    sys.exit(1)

-# Totally clean array that can be exported as JSON
-clean_enc = []
+face_location = face_locations[0]
+if use_cnn:
+    face_location = face_location.rect

-# Copy the values into a clean array so we can export it as JSON later on
-for point in enc[0]:
-    clean_enc.append(point)
+# Get the encodings in the frame
+face_landmark = pose_predictor(frame, face_location)
+face_encoding = np.array(
+    face_encoder.compute_face_descriptor(frame, face_landmark, 1)  # num_jitters=1
+)

-insert_model["data"].append(clean_enc)
+insert_model["data"].append(face_encoding.tolist())

 # Insert full object into the list
 encodings.append(insert_model)
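
Taken together, the new code path reproduces what face_recognition.face_encodings() does internally: detect a face, locate five landmarks, and compute a 128-dimensional descriptor with the ResNet model. A self-contained sketch of that pipeline, assuming local copies of the three dlib-data model files named in the diff and an OpenCV BGR frame (paths and the helper name are illustrative, not Howdy's):

import cv2
import dlib
import numpy as np

DLIB_DATA = "./dlib-data"  # assumed location of the model files
face_detector = dlib.get_frontal_face_detector()
pose_predictor = dlib.shape_predictor(DLIB_DATA + "/shape_predictor_5_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1(DLIB_DATA + "/dlib_face_recognition_resnet_model_v1.dat")

def encode_single_face(bgr_frame):
    """Return the 128-value encoding of the one face in the frame, or None."""
    rgb = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
    locations = face_detector(rgb, 1)  # upsample once, as in the diff
    if len(locations) != 1:
        return None  # no face, or more than one
    landmarks = pose_predictor(rgb, locations[0])
    descriptor = face_encoder.compute_face_descriptor(rgb, landmarks, 1)  # num_jitters=1
    return np.array(descriptor).tolist()  # plain list, ready for JSON like insert_model["data"]

Two such encodings can then be compared by their Euclidean distance (np.linalg.norm(np.array(a) - np.array(b))); face_recognition's compare step does the same, with a default cutoff of 0.6.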