mirror of https://github.com/boltgolt/howdy.git synced 2024-09-19 09:51:19 +02:00

replace face_recognition with dlib+numpy; add video_capture.release(); minor code cleanup

dmig 2018-12-09 14:27:21 +07:00
parent 8671be425d
commit 2c48350b23
No known key found for this signature in database
GPG key ID: A4A245B3AD37C4FC


@@ -8,16 +8,17 @@ import json
 import configparser
 import builtins
 import cv2
+import numpy as np
 
-# Try to import face_recognition and give a nice error if we can't
+# Try to import dlib and give a nice error if we can't
 # Add should be the first point where import issues show up
 try:
-	import face_recognition
+	import dlib
 except ImportError as err:
 	print(err)
-	print("\nCan't import the face_recognition module, check the output of")
-	print("pip3 show face_recognition")
+	print("\nCan't import the dlib module, check the output of")
+	print("pip3 show dlib")
 	sys.exit(1)
 
 # Get the absolute path to the current directory
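
Since the error path now points at dlib instead of face_recognition, a quick sanity check of the installed build can save a debugging round trip. The snippet below is an illustrative sketch, not part of the commit; dlib.__version__ and dlib.DLIB_USE_CUDA are standard attributes of the Python bindings.

# Sketch: confirm the dlib/numpy installs that the new imports rely on.
import dlib
import numpy as np

print("dlib", dlib.__version__)            # version of the installed binding
print("CUDA build:", dlib.DLIB_USE_CUDA)   # True only if dlib was compiled with CUDA,
                                           # which is what makes the CNN detector usable
print("numpy", np.__version__)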
@@ -27,6 +28,21 @@ path = os.path.abspath(__file__ + '/..')
 config = configparser.ConfigParser()
 config.read(path + "/../config.ini")
 
+use_cnn = config.getboolean('core', 'use_cnn', fallback=False)
+if use_cnn:
+	face_detector = dlib.cnn_face_detection_model_v1(
+		path + '/../dlib-data/mmod_human_face_detector.dat'
+	)
+else:
+	face_detector = dlib.get_frontal_face_detector()
+
+pose_predictor = dlib.shape_predictor(
+	path + '/../dlib-data/shape_predictor_5_face_landmarks.dat'
+)
+face_encoder = dlib.face_recognition_model_v1(
+	path + '/../dlib-data/dlib_face_recognition_resnet_model_v1.dat'
+)
+
 user = builtins.howdy_user
 # The permanent file to store the encoded model in
 enc_file = path + "/../models/" + user + ".dat"
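
The two detectors loaded above return different object types, which is why the code further down unwraps .rect when use_cnn is set: get_frontal_face_detector() (HOG based) yields plain dlib.rectangle objects, while cnn_face_detection_model_v1 yields mmod_rectangle objects that carry a confidence score plus the rectangle. A rough sketch of the difference, with illustrative image and model paths:

# Sketch: HOG detector vs CNN detector output (paths are illustrative).
import dlib

img = dlib.load_rgb_image("some_face.jpg")

hog_detector = dlib.get_frontal_face_detector()
for det in hog_detector(img, 1):                 # upsample once, as in the commit
	print(det.left(), det.top(), det.right(), det.bottom())   # dlib.rectangle

cnn_detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
for det in cnn_detector(img, 1):
	print(det.confidence, det.rect)              # mmod_rectangle: score + .rect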
@@ -127,29 +143,34 @@ while frames < 60:
 	frames += 1
 
-	# Get the encodings in the frame
-	enc = face_recognition.face_encodings(frame)
+	# Get all faces from that frame as encodings
+	face_locations = face_detector(gsframe, 1)  # upsample 1 time
 
 	# If we've found at least one, we can continue
-	if enc:
+	if face_locations:
 		break
+else:
+	video_capture.release()
+
+# If more than 1 faces are detected we can't know wich one belongs to the user
+if len(face_locations) > 1:
+	print("Multiple faces detected, aborting")
+	sys.exit(1)
+elif not face_locations:
 	print("No face detected, aborting")
 	sys.exit(1)
 
-# If more than 1 faces are detected we can't know wich one belongs to the user
-if len(enc) > 1:
-	print("Multiple faces detected, aborting")
-	sys.exit(1)
-
-# Totally clean array that can be exported as JSON
-clean_enc = []
-
-# Copy the values into a clean array so we can export it as JSON later on
-for point in enc[0]:
-	clean_enc.append(point)
-
-insert_model["data"].append(clean_enc)
+face_location = face_locations[0]
+if use_cnn:
+	face_location = face_location.rect
+
+# Get the encodings in the frame
+face_landmark = pose_predictor(frame, face_location)
+face_encoding = np.array(
+	face_encoder.compute_face_descriptor(frame, face_landmark, 1)  # num_jitters=1
+)
+
+insert_model["data"].append(face_encoding.tolist())
 
 # Insert full object into the list
 encodings.append(insert_model)
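
For context on what ends up in the model file: compute_face_descriptor() returns a 128-dimensional vector, and with this dlib model two faces are usually treated as the same person when the Euclidean distance between their vectors falls below roughly 0.6. The matching itself happens elsewhere in howdy, not in this diff; the sketch below, with a hypothetical best_match helper, only illustrates the idea.

# Sketch: matching a freshly computed encoding against stored ones (hypothetical helper).
import numpy as np

def best_match(known_encodings, candidate, threshold=0.6):
	known = np.asarray(known_encodings)              # shape (n, 128), e.g. a model's "data" list
	distances = np.linalg.norm(known - candidate, axis=1)
	best = int(np.argmin(distances))
	if distances[best] < threshold:
		return best, float(distances[best])
	return None, float(distances[best])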