
Merge branch 'dmig-master' into dev

boltgolt committed on 2018-12-21 16:52:13 +01:00
commit 54998e9a97
11 changed files with 350 additions and 174 deletions


@ -28,6 +28,14 @@ Install the `howdy` package from the AUR. For AUR installation instructions, tak
You will need to do some additional configuration steps. Please read the [ArchWiki entry](https://wiki.archlinux.org/index.php/Howdy) for more information.
### Fedora
The `howdy` package is now available in a [Fedora COPR repository](https://copr.fedorainfracloud.org/coprs/luya/howdy/); enable it and install by executing the following commands in a terminal:
```
sudo dnf copr enable luya/howdy
sudo dnf install howdy
```
## Setup
After installation, you need to let Howdy learn your face. Run `sudo howdy add` to add a face model.

debian/control

@ -9,7 +9,9 @@ Vcs-Git: https://github.com/boltgolt/howdy
Package: howdy
Homepage: https://github.com/boltgolt/howdy
Architecture: all
Depends: ${misc:Depends}, git, python3, python3-pip, python3-dev, python3-setuptools, libpam-python, fswebcam, libopencv-dev, python-opencv, cmake, streamer
Depends: ${misc:Depends}, curl|wget, python3-pip, python3-dev, python3-setuptools, libpam-python, fswebcam, libopencv-dev, python3-opencv, cmake, streamer
Recommends: libatlas-base-dev | libopenblas-dev | liblapack-dev
Suggests: nvidia-cuda-dev (>= 7.5)
Description: Howdy: Windows Hello style authentication for Linux.
Use your built-in IR emitters and camera in combination with face recognition
to prove who you are.

debian/postinst

@ -2,6 +2,7 @@
# Installation script to install howdy
# Executed after primary apt install
def col(id):
"""Add color escape sequences"""
if id == 1: return "\033[32m"
@ -9,15 +10,15 @@ def col(id):
if id == 3: return "\033[31m"
return "\033[0m"
# Import required modules
import fileinput
import subprocess
import time
import sys
import os
import re
import signal
import fileinput
import urllib.parse
import tarfile
from shutil import rmtree, which
# Don't run unless we need to configure the install
# Will also happen on upgrade but we will catch that later on
@ -29,6 +30,7 @@ def log(text):
"""Print a nicely formatted line to stdout"""
print("\n>>> " + col(1) + text + col(0) + "\n")
def handleStatus(status):
"""Abort if a command fails"""
if (status != 0):
@ -36,6 +38,8 @@ def handleStatus(status):
sys.exit(1)
sc = subprocess.call
# We're not in fresh configuration mode so don't continue the setup
if not os.path.exists("/tmp/howdy_picked_device"):
# Check if we have an older config we can restore
@ -85,64 +89,108 @@ picked = in_file.read()
in_file.close()
# Remove the temporary file
subprocess.call(["rm /tmp/howdy_picked_device"], shell=True)
os.unlink("/tmp/howdy_picked_device")
log("Upgrading pip to the latest version")
# Update pip
handleStatus(subprocess.call(["pip3 install --upgrade pip"], shell=True))
handleStatus(sc(["pip3", "install", "--upgrade", "pip"]))
log("Cloning dlib")
dlib_archive = '/tmp/dlib_latest.tar.gz'
# Clone the dlib git to /tmp, but only the last commit
handleStatus(subprocess.call(["git", "clone", "--depth", "1", "https://github.com/davisking/dlib.git", "/tmp/dlib_clone"]))
log('Downloading dlib')
loader = which('curl')
LOADER_CMD = None
if loader:
LOADER_CMD = [loader, '--silent', '--retry', '5', '--location', '--output']
else:
loader = which('wget')
LOADER_CMD = [loader, '--quiet', '--tries', '5', '--output-document']
cmd = LOADER_CMD + [dlib_archive, 'https://api.github.com/repos/davisking/dlib/tarball/latest']
handleStatus(sc(cmd))
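The curl-or-wget fallback above lends itself to a small reusable helper; a minimal sketch, assuming a hypothetical `download` name and that raising on a missing downloader is acceptable:

```python
from shutil import which
import subprocess

def download(url, dest):
    """Fetch url to dest with curl if available, else wget."""
    if which("curl"):
        cmd = ["curl", "--silent", "--retry", "5", "--location", "--output", dest, url]
    elif which("wget"):
        cmd = ["wget", "--quiet", "--tries", "5", "--output-document", dest, url]
    else:
        # The package's Depends line (curl|wget) should make this unreachable
        raise RuntimeError("neither curl nor wget is installed")
    return subprocess.call(cmd)
```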
DLIB_DIR = None
excludes = re.compile(
r'davisking-dlib-\w+/(dlib/(http_client|java|matlab|test/)|'
r'(docs|examples|python_examples)|'
r'tools/(archive|convert_dlib_nets_to_caffe|htmlify|imglab|python/test|visual_studio_natvis))'
)
with tarfile.open(dlib_archive) as tf:
for item in tf:
# tarball contains directory davisking-dlib-<commit id>, so peek into archive for the name
if not DLIB_DIR:
DLIB_DIR = '/tmp/' + item.name
# extract only files sufficient for building
if not excludes.match(item.name):
tf.extract(item, '/tmp')
os.unlink(dlib_archive)
log("Building dlib")
# Start the build without GPU
handleStatus(subprocess.call(["cd /tmp/dlib_clone/; python3 setup.py install --yes USE_AVX_INSTRUCTIONS --no DLIB_USE_CUDA"], shell=True))
# Start the build
cmd = ["python3", "setup.py", "install"]
flags = ''
with open('/proc/cpuinfo') as info:
for line in info:
if 'flags' in line:
flags = line
break
if 'avx' in flags:
cmd += ["--yes", "USE_AVX_INSTRUCTIONS"]
elif 'sse4' in flags:
cmd += ["--yes", "USE_SSE4_INSTRUCTIONS"]
elif 'sse3' in flags:
cmd += ["--yes", "USE_SSE3_INSTRUCTIONS"]
elif 'sse2' in flags:
cmd += ["--yes", "USE_SSE2_INSTRUCTIONS"]
sp = subprocess.run(cmd, cwd=DLIB_DIR, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
handleStatus(sp.returncode)
# simple check for CUDA in the captured build output
cuda_used = 'DLIB WILL USE CUDA' in sp.stdout
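The flag-to-option chain above could be isolated for testing; a sketch under the assumption that the first `flags` line in /proc/cpuinfo covers all cores (`simd_build_args` is an invented name):

```python
def simd_build_args(cpuinfo_path="/proc/cpuinfo"):
    """Pick the best dlib SIMD build option the CPU supports."""
    flags = ""
    with open(cpuinfo_path) as info:
        for line in info:
            if line.startswith("flags"):
                flags = line
                break
    # Ordered newest to oldest; return the first supported instruction set
    for feature, option in (("avx", "USE_AVX_INSTRUCTIONS"),
                            ("sse4", "USE_SSE4_INSTRUCTIONS"),
                            ("sse3", "USE_SSE3_INSTRUCTIONS"),
                            ("sse2", "USE_SSE2_INSTRUCTIONS")):
        if feature in flags:
            return ["--yes", option]
    return []
```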
log("Cleaning up dlib")
# Remove the no longer needed git clone
handleStatus(subprocess.call(["rm", "-rf", "/tmp/dlib_clone"]))
print("Temporary dlib files removed")
del sp
rmtree(DLIB_DIR)
log("Installing python dependencies")
log("Temporary dlib files removed")
# Install direct dependencies so pip does not freak out with the manual dlib install
handleStatus(subprocess.call(["pip3", "install", "--cache-dir", "/tmp/pip_howdy", "face_recognition_models==0.3.0", "Click>=6.0", "numpy", "Pillow"]))
log("Installing face_recognition")
# Install face_recognition through pip
handleStatus(subprocess.call(["pip3", "install", "--cache-dir", "/tmp/pip_howdy", "--no-deps", "face_recognition==1.2.2"]))
try:
import cv2
except Exception as e:
log("Reinstalling opencv2")
handleStatus(subprocess.call(["pip3", "install", "opencv-python"]))
log("Configuring howdy")
# Manually change the camera id to the one picked
for line in fileinput.input(["/lib/security/howdy/config.ini"], inplace = 1):
print(line.replace("device_path = none", "device_path = " + picked), end="")
for line in fileinput.input(["/lib/security/howdy/config.ini"], inplace=1):
print(
line
.replace("device_path = none", "device_path = " + picked)
.replace("use_cnn = false", "use_cnn = " + str(cuda_used).lower()),
end=""
)
print("Camera ID saved")
# Secure the howdy folder
handleStatus(subprocess.call(["chmod 744 -R /lib/security/howdy/"], shell=True))
handleStatus(sc(["chmod 744 -R /lib/security/howdy/"], shell=True))
# Allow anyone to execute the python CLI
handleStatus(subprocess.call(["chmod 755 /lib/security/howdy"], shell=True))
handleStatus(subprocess.call(["chmod 755 /lib/security/howdy/cli.py"], shell=True))
handleStatus(subprocess.call(["chmod 755 -R /lib/security/howdy/cli"], shell=True))
os.chmod('/lib/security/howdy', 0o755)
os.chmod('/lib/security/howdy/cli.py', 0o755)
handleStatus(sc(["chmod 755 -R /lib/security/howdy/cli"], shell=True))
print("Permissions set")
# Make the CLI executable as howdy
handleStatus(subprocess.call(["ln -s /lib/security/howdy/cli.py /usr/local/bin/howdy"], shell=True))
handleStatus(subprocess.call(["chmod +x /usr/local/bin/howdy"], shell=True))
os.symlink("/lib/security/howdy/cli.py", "/usr/local/bin/howdy")
os.chmod("/usr/local/bin/howdy", 0o755)
print("Howdy command installed")
log("Adding howdy as PAM module")

debian/prerm

@ -6,6 +6,7 @@
import subprocess
import sys
import os
from shutil import rmtree
# Only run when we actually want to remove
if "remove" not in sys.argv and "purge" not in sys.argv:
@ -17,12 +18,12 @@ if not os.path.exists("/lib/security/howdy/cli"):
# Remove files and symlinks
try:
subprocess.call(["rm /usr/local/bin/howdy"], shell=True)
except e:
os.unlink('/usr/local/bin/howdy')
except Exception:
print("Can't remove executable")
try:
subprocess.call(["rm /usr/share/bash-completion/completions/howdy"], shell=True)
except e:
os.unlink('/usr/share/bash-completion/completions/howdy')
except Exception:
print("Can't remove autocompletion script")
# Refresh and remove howdy from pam-config
@ -30,15 +31,15 @@ try:
subprocess.call(["pam-auth-update --package"], shell=True)
subprocess.call(["rm /usr/share/pam-configs/howdy"], shell=True)
subprocess.call(["pam-auth-update --package"], shell=True)
except e:
except Exception:
print("Can't remove pam module")
# Remove full installation folder, just to be sure
try:
subprocess.call(["rm -rf /lib/security/howdy"], shell=True)
except e:
rmtree('/lib/security/howdy')
except Exception:
# This error is normal
pass
# Remove face_recognition and dlib
subprocess.call(["pip3 uninstall face_recognition face_recognition_models dlib -y --no-cache-dir"], shell=True)
# Remove dlib
subprocess.call(['pip3', 'uninstall', 'dlib', '-y', '--no-cache-dir'])
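The remove-and-warn pattern repeats throughout prerm; a hedged sketch of how it could be factored, with `remove_quietly` as an invented helper name:

```python
import os
from shutil import rmtree

def remove_quietly(path, message):
    """Remove a file, symlink or directory, printing a message on failure."""
    try:
        if os.path.isdir(path) and not os.path.islink(path):
            rmtree(path)
        else:
            os.unlink(path)
    except Exception:
        print(message)

remove_quietly("/usr/local/bin/howdy", "Can't remove executable")
remove_quietly("/usr/share/bash-completion/completions/howdy", "Can't remove autocompletion script")
```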


@ -8,25 +8,41 @@ import json
import configparser
import builtins
import cv2
import numpy as np
# Try to import face_recognition and give a nice error if we can't
# Try to import dlib and give a nice error if we can't
# Add should be the first point where import issues show up
try:
import face_recognition
import dlib
except ImportError as err:
print(err)
print("\nCan't import the face_recognition module, check the output of")
print("pip3 show face_recognition")
print("\nCan't import the dlib module, check the output of")
print("pip3 show dlib")
sys.exit(1)
# Get the absolute path to the current file
path = os.path.dirname(os.path.abspath(__file__))
# Get the absolute path to the current directory
path = os.path.abspath(__file__ + '/..')
# Read config from disk
config = configparser.ConfigParser()
config.read(path + "/../config.ini")
use_cnn = config.getboolean('core', 'use_cnn', fallback=False)
if use_cnn:
face_detector = dlib.cnn_face_detection_model_v1(
path + '/../dlib-data/mmod_human_face_detector.dat'
)
else:
face_detector = dlib.get_frontal_face_detector()
pose_predictor = dlib.shape_predictor(
path + '/../dlib-data/shape_predictor_5_face_landmarks.dat'
)
face_encoder = dlib.face_recognition_model_v1(
path + '/../dlib-data/dlib_face_recognition_resnet_model_v1.dat'
)
user = builtins.howdy_user
# The permanent file to store the encoded model in
enc_file = path + "/../models/" + user + ".dat"
@ -46,8 +62,8 @@ except FileNotFoundError:
# Print a warning if too many encodings are being added
if len(encodings) > 3:
print("WARNING: Every additional model slows down the face recognition engine")
print("Press ctrl+C to cancel\n")
print("NOTICE: Each additional model slows down the face recognition engine slightly")
print("Press Ctrl+C to cancel\n")
print("Adding face model for the user " + user)
@ -63,7 +79,7 @@ if builtins.howdy_args.y:
print('Using default label "%s" because of -y flag' % (label, ))
else:
# Ask the user for a custom label
label_in = input("Enter a label for this new model [" + label + "]: ")
label_in = input("Enter a label for this new model [" + label + "] (max 24 characters): ")
# Set the custom label (if any) and limit it to 24 characters
if label_in != "":
@ -89,13 +105,13 @@ else:
video_capture = cv2.VideoCapture(config.get("video", "device_path"))
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg"):
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
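The magic number is simply the FOURCC code for MJPG; a short check showing where it comes from (plain OpenCV, no assumptions beyond `cv2` being installed):

```python
import cv2

# 'MJPG' packed into a 32-bit little-endian integer
assert cv2.VideoWriter_fourcc(*"MJPG") == 1196444237
code = ord("M") | (ord("J") << 8) | (ord("P") << 16) | (ord("G") << 24)
assert code == 1196444237
```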
# Set the frame width and height if requested
fw = config.getint("video", "frame_width")
fh = config.getint("video", "frame_height")
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
@ -103,7 +119,7 @@ if fh != -1:
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, fh)
# Request a frame to wake the camera up
video_capture.read()
video_capture.grab()
print("\nPlease look straight into the camera")
@ -114,39 +130,55 @@ time.sleep(2)
enc = []
# Count the amount of read frames
frames = 0
dark_threshold = config.getfloat("video", "dark_threshold")
# Loop through frames till we hit a timeout
while frames < 60:
frames += 1
# Grab a single frame of video
# Don't remove ret, it doesn't work without it
ret, frame = video_capture.read()
gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Get the encodings in the frame
enc = face_recognition.face_encodings(frame)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
# All values combined for percentage calculation
hist_total = np.sum(hist)
# If the image is fully black or the frame exceeds threshold,
# skip to the next frame
if hist_total == 0 or (hist[0] / hist_total * 100 > dark_threshold):
continue
frames += 1
# Get all faces from that frame as encodings
face_locations = face_detector(gsframe, 1) # upsample 1 time
# If we've found at least one, we can continue
if enc:
if face_locations:
break
if not enc:
video_capture.release()
# If more than one face is detected we can't know which one belongs to the user
if len(face_locations) > 1:
print("Multiple faces detected, aborting")
sys.exit(1)
elif not face_locations:
print("No face detected, aborting")
sys.exit(1)
# If more than one face is detected we can't know which one belongs to the user
if len(enc) > 1:
print("Multiple faces detected, aborting")
sys.exit(1)
face_location = face_locations[0]
if use_cnn:
face_location = face_location.rect
# Totally clean array that can be exported as JSON
clean_enc = []
# Get the encodings in the frame
face_landmark = pose_predictor(frame, face_location)
face_encoding = np.array(
face_encoder.compute_face_descriptor(frame, face_landmark, 1) # num_jitters=1
)
# Copy the values into a clean array so we can export it as JSON later on
for point in enc[0]:
clean_enc.append(point)
insert_model["data"].append(clean_enc)
insert_model["data"].append(face_encoding.tolist())
# Insert full object into the list
encodings.append(insert_model)
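The add flow above reduces to a three-stage dlib pipeline (detect, find landmarks, compute the descriptor); a condensed sketch with an invented `encode_face` name and relative model paths assumed:

```python
import dlib
import numpy as np

face_detector = dlib.get_frontal_face_detector()
pose_predictor = dlib.shape_predictor("dlib-data/shape_predictor_5_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1("dlib-data/dlib_face_recognition_resnet_model_v1.dat")

def encode_face(frame, gsframe):
    """Return the 128-d encoding of the single face in the frame, or None."""
    locations = face_detector(gsframe, 1)  # upsample once to catch small faces
    if len(locations) != 1:
        return None  # zero or multiple faces: let the caller abort
    landmarks = pose_predictor(frame, locations[0])
    return np.array(face_encoder.compute_face_descriptor(frame, landmarks, 1))
```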


@ -1,3 +1,4 @@
#! /usr/bin/python3
# Show a window with the video stream and testing information
# Import required modules
@ -6,7 +7,7 @@ import os
import sys
import time
import cv2
import face_recognition
import dlib
# Get the absolute path to the current file
path = os.path.dirname(os.path.abspath(__file__))
@ -24,13 +25,13 @@ if config.get("video", "recording_plugin") == "ffmpeg":
video_capture = cv2.VideoCapture(config.get("video", "device_path"))
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg"):
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
# Set the frame width and height if requested
fw = config.getint("video", "frame_width")
fh = config.getint("video", "frame_height")
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
@ -59,6 +60,15 @@ def print_text(line_number, text):
"""Print the status text by line number"""
cv2.putText(overlay, text, (10, height - 10 - (10 * line_number)), cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 255, 0), 0, cv2.LINE_AA)
use_cnn = config.getboolean('core', 'use_cnn', fallback=False)
if use_cnn:
face_detector = dlib.cnn_face_detection_model_v1(
path + '/../dlib-data/mmod_human_face_detector.dat'
)
else:
face_detector = dlib.get_frontal_face_detector()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Open the window and attach a mouse listener
cv2.namedWindow("Howdy Test")
@ -80,21 +90,25 @@ rec_tm = 0
# Wrap everything in a keyboard interrupt handler
try:
while True:
frame_tm = time.time()
# Increment the frames
total_frames += 1
sec_frames += 1
# If we've entered a new second
if sec != int(time.time()):
if sec != int(frame_tm):
# Set the last seconds FPS
fps = sec_frames
# Set the new second and reset the counter
sec = int(time.time())
sec = int(frame_tm)
sec_frames = 0
# Grab a single frame of video
ret, frame = (video_capture.read())
ret, frame = video_capture.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = clahe.apply(frame)
# Make a frame to put overlays in
overlay = frame.copy()
@ -108,7 +122,7 @@ try:
# Fill with the percentage each bin contributes to the total
hist_perc = []
# Loop though all values to calculate a pensentage and add it to the overlay
# Loop through all values to calculate a percentage and add it to the overlay
for index, value in enumerate(hist):
value_perc = float(value[0]) / hist_total * 100
hist_perc.append(value_perc)
@ -143,17 +157,20 @@ try:
rec_tm = time.time()
# Get the locations of all faces
face_locations = face_recognition.face_locations(frame)
face_locations = face_detector(frame, 1) # upsample 1 time
rec_tm = time.time() - rec_tm
# Loop through all faces and paint a circle around them
for loc in face_locations:
if use_cnn:
loc = loc.rect
# Get the center X and Y from the rectangular points
x = int((loc[1] - loc[3]) / 2) + loc[3]
y = int((loc[2] - loc[0]) / 2) + loc[0]
x = int((loc.right() - loc.left()) / 2) + loc.left()
y = int((loc.bottom() - loc.top()) / 2) + loc.top()
# Get the radius from the width of the square
r = (loc[1] - loc[3]) / 2
r = (loc.right() - loc.left()) / 2
# Add 20% padding
r = int(r + (r * 0.2))
@ -171,9 +188,11 @@ try:
if cv2.waitKey(1) != -1:
raise KeyboardInterrupt()
frame_time = time.time() - frame_tm
# Delay the frame if slowmode is on
if slow_mode:
time.sleep(.55)
time.sleep(max(0, .5 - frame_time))
# On ctrl+C
except KeyboardInterrupt:

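test.py renders this 8-bin histogram as an overlay, while add.py and compare.py use bin 0 of the same histogram to skip overly dark frames; a worked sketch of that check, with the threshold default assumed from config.ini:

```python
import cv2
import numpy as np

def frame_too_dark(gsframe, dark_threshold=50.0):
    """True if the darkest bin dominates the grayscale frame's histogram."""
    # 8 bins over 0-255; bin 0 collects the darkest pixels
    hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
    total = np.sum(hist)
    if total == 0:
        return True  # fully black frame
    return float(hist[0]) / total * 100 > dark_threshold
```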

@ -5,18 +5,40 @@
import time
# Start timing
timings = [time.time()]
timings = {
'st': time.time()
}
# Import required modules
import cv2
import sys
import os
import json
import configparser
import cv2
import dlib
import numpy as np
import _thread as thread
# Read config from disk
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.abspath(__file__)) + "/config.ini")
def init_detector(lock):
global face_detector, pose_predictor, face_encoder
if use_cnn:
face_detector = dlib.cnn_face_detection_model_v1(
PATH + '/dlib-data/mmod_human_face_detector.dat'
)
else:
face_detector = dlib.get_frontal_face_detector()
pose_predictor = dlib.shape_predictor(
PATH + '/dlib-data/shape_predictor_5_face_landmarks.dat'
)
face_encoder = dlib.face_recognition_model_v1(
PATH + '/dlib-data/dlib_face_recognition_resnet_model_v1.dat'
)
# Note the time it took to initialize detectors
timings['ll'] = time.time() - timings['ll']
lock.release()
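compare.py overlaps the slow dlib model loading with camera warm-up by running `init_detector` on a second thread and using a lock as a crude join; a minimal sketch of the same idea with the higher-level `threading` module (an assumed equivalent, not the commit's code):

```python
import threading
import time

def init_detector():
    # Stand-in for loading the dlib models, which takes noticeable time
    time.sleep(1)

loader = threading.Thread(target=init_detector)
loader.start()

# ... open the camera and grab a warm-up frame while the models load ...

loader.join()  # same effect as the acquire/release dance on the _thread lock
```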
def stop(status):
@ -26,13 +48,13 @@ def stop(status):
# Make sure we were given a username to test against
try:
if not isinstance(sys.argv[1], str):
sys.exit(1)
except IndexError:
if len(sys.argv) < 2:
sys.exit(12)
# The username of the authenticating user
# Get the absolute path to the current directory
PATH = os.path.abspath(__file__ + '/..')
# The username of the user being authenticated
user = sys.argv[1]
# The model file contents
models = []
@ -40,10 +62,19 @@ models = []
encodings = []
# Amount of ignored dark frames
dark_tries = 0
# Total amount of frames captured
frames = 0
# face recognition/detection instances
face_detector = None
pose_predictor = None
face_encoder = None
# Try to load the face model from the models folder
try:
models = json.load(open(os.path.dirname(os.path.abspath(__file__)) + "/models/" + user + ".dat"))
models = json.load(open(PATH + "/models/" + user + ".dat"))
for model in models:
encodings += model["data"]
except FileNotFoundError:
sys.exit(10)
@ -51,12 +82,29 @@ except FileNotFoundError:
if len(models) < 1:
sys.exit(10)
# Put all models together into 1 array
for model in models:
encodings += model["data"]
# Read config from disk
config = configparser.ConfigParser()
config.read(PATH + "/config.ini")
# Add the time needed to start the script
timings.append(time.time())
# CNN usage flag
use_cnn = config.getboolean('core', 'use_cnn', fallback=False)
timeout = config.getint("video", "timout", fallback=5)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=50.0)
video_certainty = config.getfloat("video", "certainty", fallback=3.5) / 10
end_report = config.getboolean("debug", "end_report", fallback=False)
# Save the time needed to start the script
timings['in'] = time.time() - timings['st']
# Import face recognition, takes some time
timings['ll'] = time.time()
lock = thread.allocate_lock()
lock.acquire()
thread.start_new_thread(init_detector, (lock, ))
# Start video capture on the IR camera
timings['ic'] = time.time()
# Check if the user explicitly set ffmpeg as recorder
if config.get("video", "recording_plugin") == "ffmpeg":
@ -70,15 +118,13 @@ else:
video_capture = cv2.VideoCapture(config.get("video", "device_path"))
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg"):
# Set a magic number, will enable MJPEG but is badly documentated
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
# Get the height and width config values
fw = config.getint("video", "frame_width")
fh = config.getint("video", "frame_height")
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)  # 1196444237 is the FOURCC code 'MJPG' packed little-endian
# Set the frame width and height if requested
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
if fh != -1:
@ -86,17 +132,24 @@ if fh != -1:
# Capture a single frame so the camera becomes active
# This will let the camera adjust its light levels while we're importing for faster scanning
video_capture.read()
video_capture.grab()
# Note the time it took to open the camera
timings.append(time.time())
timings['ic'] = time.time() - timings['ic']
# wait for thread to finish
lock.acquire()
lock.release()
del lock
# Import face recognition, takes some time
import face_recognition
timings.append(time.time())
# Fetch the max frame height
max_height = int(config.get("video", "max_height"))
max_height = config.getfloat("video", "max_height", fallback=0.0)
# Get the height of the image
height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT) or 1
# Calculate the amount the image has to shrink
scaling_factor = (max_height / height) or 1
# Fetch config settings out of the loop
timeout = config.getint("video", "timeout")
@ -105,89 +158,88 @@ end_report = config.getboolean("debug", "end_report")
# Start the read loop
frames = 0
timings['fr'] = time.time()
while True:
# Increment the frame count every loop
frames += 1
# Stop if we've exceeded the time limit
if time.time() - timings[3] > timeout:
if time.time() - timings['fr'] > timeout:
stop(11)
# Grab a single frame of video
# Don't remove ret, it doesn't work without it
ret, frame = video_capture.read()
_, frame = video_capture.read()
gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([frame], [0], None, [8], [0, 256])
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
# All values combined for percentage calculation
hist_total = int(sum(hist)[0])
hist_total = np.sum(hist)
# If the image is fully black, skip to the next frame
if hist_total == 0:
# If the image is fully black or the frame exceeds threshold,
# skip to the next frame
if hist_total == 0 or (hist[0] / hist_total * 100 > dark_threshold):
dark_tries += 1
continue
# Skip the frame if it exceeds the threshold
if float(hist[0]) / hist_total * 100 > dark_threshold:
dark_tries += 1
continue
# Get the height and width of the image
height, width = frame.shape[:2]
# If the height is too high
if max_height < height:
# Calculate the amount the image has to shrink
scaling_factor = max_height / float(height)
if scaling_factor != 1:
# Apply that factor to the frame
frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
# Save the new size for diagnostics
scale_height, scale_width = frame.shape[:2]
gsframe = cv2.resize(gsframe, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
# Get all faces from that frame as encodings
face_encodings = face_recognition.face_encodings(frame)
face_locations = face_detector(gsframe, 1) # upsample 1 time
# Loop through each face
for face_encoding in face_encodings:
for fl in face_locations:
if use_cnn:
fl = fl.rect
face_landmark = pose_predictor(frame, fl)
face_encoding = np.array(
face_encoder.compute_face_descriptor(frame, face_landmark, 1) # num_jitters=1
)
# Match this found face against a known face
matches = face_recognition.face_distance(encodings, face_encoding)
matches = np.linalg.norm(encodings - face_encoding, axis=1)
# Check if any match is certain enough to be the user we're looking for
match_index = 0
for match in matches:
match_index += 1
# Get best match
match_index = np.argmin(matches)
match = matches[match_index]
# Try to find a match that's confident enough
if match * 10 < config.getfloat("video", "certainty") and match > 0:
timings.append(time.time())
# Check if a match that's confident enough
if 0 < match < video_certainty:
timings['tt'] = time.time() - timings['st']
timings['fr'] = time.time() - timings['fr']
# If set to true in the config, print debug text
if end_report:
def print_timing(label, offset):
"""Helper function to print a timing from the list"""
print(" %s: %dms" % (label, round((timings[1 + offset] - timings[offset]) * 1000)))
# If set to true in the config, print debug text
if end_report:
def print_timing(label, k):
"""Helper function to print a timing from the list"""
print(" %s: %dms" % (label, round(timings[k] * 1000)))
print("Time spent")
print_timing("Starting up", 0)
print_timing("Opening the camera", 1)
print_timing("Importing face_recognition", 2)
print_timing("Searching for known face", 3)
print("Time spent")
print_timing("Starting up", 'in')
print(" Open cam + load libs: %dms" % (round(max(timings['ll'], timings['ic']) * 1000, )))
print_timing(" Opening the camera", 'ic')
print_timing(" Importing recognition libs", 'll')
print_timing("Searching for known face", 'fr')
print_timing("Total time", 'tt')
print("\nResolution")
print(" Native: %dx%d" % (height, width))
print(" Used: %dx%d" % (scale_height, scale_width))
print("\nResolution")
width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH) or 1
print(" Native: %dx%d" % (height, width))
# Save the new size for diagnostics
scale_height, scale_width = frame.shape[:2]
print(" Used: %dx%d" % (scale_height, scale_width))
# Show the total number of frames and calculate the FPS by dividing it by the total scan time
print("\nFrames searched: %d (%.2f fps)" % (frames, frames / (timings[4] - timings[3])))
print("Dark frames ignored: %d " % (dark_tries, ))
print("Certainty of winning frame: %.3f" % (match * 10, ))
# Show the total number of frames and calculate the FPS by dividing it by the total scan time
print("\nFrames searched: %d (%.2f fps)" % (frames, frames / timings['fr']))
print("Dark frames ignored: %d " % (dark_tries, ))
print("Certainty of winning frame: %.3f" % (match * 10, ))
# Catch older 3-encoding models
if match_index >= len(models):
match_index = 0
print("Winning model: %d (\"%s\")" % (match_index, models[match_index]["label"]))
print("Winning model: %d (\"%s\")" % (match_index, models[match_index]["label"]))
# End peacefully
stop(0)
# End peacefully
stop(0)
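The matching step is a vectorised Euclidean distance against every stored encoding; a sketch of that comparison, with `best_match` as an invented name and the 1-10 certainty scale divided by 10 as in the commit:

```python
import numpy as np

def best_match(encodings, face_encoding, certainty=3.5):
    """Return (index, distance) of the closest encoding, or None if too far."""
    matches = np.linalg.norm(np.asarray(encodings) - face_encoding, axis=1)
    match_index = int(np.argmin(matches))
    match = float(matches[match_index])
    # Smaller distance means more alike; certainty comes from config on a 1-10 scale
    if 0 < match < certainty / 10:
        return match_index, match
    return None
```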


@ -21,6 +21,11 @@ dismiss_lockscreen = false
# The howdy command will still function
disabled = false
# Use CNN instead of HOG
# The CNN model is much more accurate than the HOG-based model, but takes much more
# computational power to run, and is meant to be executed on a GPU to attain reasonable speed.
use_cnn = false
[video]
# The certainty of the detected face belonging to the user of the account
# On a scale from 1 to 10, values above 5 are not recommended

src/dlib-data/.gitignore (new file)

@ -0,0 +1,2 @@
*.dat
*.dat.bz2

src/dlib-data/Readme.md (new file)

@ -0,0 +1,7 @@
Download and unpack the `dlib` data files from the https://github.com/davisking/dlib-models repository:
```shell
wget https://github.com/davisking/dlib-models/raw/master/dlib_face_recognition_resnet_model_v1.dat.bz2
wget https://github.com/davisking/dlib-models/raw/master/mmod_human_face_detector.dat.bz2
wget https://github.com/davisking/dlib-models/raw/master/shape_predictor_5_face_landmarks.dat.bz2
bunzip2 *.bz2
```
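If Python is preferred over wget, the same three files can be fetched and unpacked with the standard library alone; a hedged sketch mirroring the commands above:

```python
import bz2
import urllib.request

BASE = "https://github.com/davisking/dlib-models/raw/master/"
FILES = [
    "dlib_face_recognition_resnet_model_v1.dat.bz2",
    "mmod_human_face_detector.dat.bz2",
    "shape_predictor_5_face_landmarks.dat.bz2",
]

for name in FILES:
    # Download each compressed model and write the decompressed .dat beside it
    with urllib.request.urlopen(BASE + name) as response:
        data = bz2.decompress(response.read())
    with open(name[:-4], "wb") as out:
        out.write(data)
```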


@ -5,5 +5,5 @@ set -e
# Confirm the cv2 module has been installed correctly
sudo /usr/bin/env python3 -c "import cv2; print(cv2.__version__);"
# Confirm the face_recognition module has been installed correctly
sudo /usr/bin/env python3 -c "import face_recognition; print(face_recognition.__version__);"
# Confirm the dlib module has been installed correctly
sudo /usr/bin/env python3 -c "import dlib; print(dlib.__version__);"