
Merge branch 'dev' into patch-1

boltgolt 2020-06-21 17:14:59 +02:00 committed by GitHub
commit 9988ee416f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 165 additions and 134 deletions


@@ -1,6 +1,11 @@
sudo: required
dist: xenial
language: python
python: "3.6"
python:
- "3.4"
- "3.6"
- "3.7"
- "3.8-dev"
script:
# Build the binary (.deb)

debian/control

@@ -9,7 +9,7 @@ Vcs-Git: https://github.com/boltgolt/howdy
Package: howdy
Homepage: https://github.com/boltgolt/howdy
Architecture: all
Depends: ${misc:Depends}, curl|wget, python3, python3-pip, python3-dev, python3-setuptools, libpam-python, fswebcam, libopencv-dev, cmake, streamer
Depends: ${misc:Depends}, curl|wget, python3, python3-pip, python3-dev, python3-setuptools, libpam-python, fswebcam, libopencv-dev, cmake, fswebcam
Recommends: libatlas-base-dev | libopenblas-dev | liblapack-dev
Suggests: nvidia-cuda-dev (>= 7.5)
Description: Howdy: Windows Hello style authentication for Linux.

debian/postinst

@@ -109,6 +109,12 @@ log("Upgrading pip to the latest version")
# Update pip
handleStatus(sc(["pip3", "install", "--upgrade", "pip"]))
log("Upgrading numpy to the lateset version")
# Update numpy
handleStatus(subprocess.call(["pip3", "install", "--upgrade", "numpy"]))
log("Downloading and unpacking data files")
# Run the bash script to download and unpack the .dat files needed

debian/preinst

@@ -73,7 +73,12 @@ for dev in devices:
print("Trying " + device_name)
# Let fswebcam keep the camera open in the background
sub = subprocess.Popen(["streamer -t 1:0:0 -c /dev/v4l/by-path/" + dev + " -b 16 -f rgb24 -o /dev/null 1>/dev/null 2>/dev/null"], shell=True, preexec_fn=os.setsid)
sub = subprocess.Popen(
	"fswebcam -l 1 -d /dev/v4l/by-path/%s" % dev,
	shell=True,
	preexec_fn=os.setsid,
	stdout=subprocess.PIPE,
	stdin=subprocess.PIPE)
try:
# Ask the user if this is the right one
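
For context, the preinst probing loop around this change reads roughly as the self-contained sketch below. Only the fswebcam Popen invocation is taken from the diff; the device listing, the prompt text and the process-group cleanup are assumptions added to make the sketch runnable.

# Assumed reconstruction of the preinst camera probe; see caveats above
import os
import signal
import subprocess

for dev in os.listdir("/dev/v4l/by-path"):
	print("Trying " + dev)
	# Let fswebcam keep the camera open in the background
	sub = subprocess.Popen(
		"fswebcam -l 1 -d /dev/v4l/by-path/%s" % dev,
		shell=True,
		preexec_fn=os.setsid,
		stdout=subprocess.PIPE,
		stdin=subprocess.PIPE)
	try:
		# Ask the user if this is the right one
		answer = input("Is this the IR camera? [y/N]: ")
	finally:
		# preexec_fn=os.setsid started fswebcam in its own session,
		# so terminate the whole process group to close the camera
		os.killpg(os.getpgid(sub.pid), signal.SIGTERM)
	if answer.strip().lower().startswith("y"):
		print("Selected " + dev)
		break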


@@ -9,7 +9,7 @@ Version: 2.5.1
%if %{with_snapshot}
Release: 0.1.git.%{date}%{shortcommit}%{?dist}
%else
Release: 3%{?dist}
Release: 4%{?dist}
%endif
Summary: Windows Hello™ style authentication for Linux
@@ -27,12 +27,9 @@ BuildRequires: polkit-devel
%if 0%{?fedora}
# We need python3-devel for pathfix.py
BuildRequires: python3-devel
Requires: python3dist(dlib) >= 6.0
Requires: python3dist(v4l2)
Requires: python3-face_recognition
Supplements: python3-face_recognition_models
Requires: python3dist(dlib) >= 6.0
Requires: python3-opencv
Requires: python3-pam
Requires: pam_python
%endif


@@ -9,6 +9,7 @@ import configparser
import builtins
import cv2
import numpy as np
from recorders.video_capture import VideoCapture
# Try to import dlib and give a nice error if we can't
# Add should be the first point where import issues show up
@@ -35,10 +36,6 @@ if not os.path.isfile(path + "/../dlib-data/shape_predictor_5_face_landmarks.dat"):
config = configparser.ConfigParser()
config.read(path + "/../config.ini")
if not os.path.exists(config.get("video", "device_path")):
print("Camera path is not configured correctly, please edit the 'device_path' config value.")
sys.exit(1)
use_cnn = config.getboolean("core", "use_cnn", fallback=False)
if use_cnn:
face_detector = dlib.cnn_face_detection_model_v1(path + "/../dlib-data/mmod_human_face_detector.dat")
@@ -98,35 +95,8 @@ insert_model = {
"data": []
}
# Check if the user explicitly set ffmpeg as recorder
if config.get("video", "recording_plugin") == "ffmpeg":
# Set the capture source for ffmpeg
from recorders.ffmpeg_reader import ffmpeg_reader
video_capture = ffmpeg_reader(config.get("video", "device_path"), config.get("video", "device_format"))
elif config.get("video", "recording_plugin") == "pyv4l2":
# Set the capture source for pyv4l2
from recorders.pyv4l2_reader import pyv4l2_reader
video_capture = pyv4l2_reader(config.get("video", "device_path"), config.get("video", "device_format"))
else:
# Start video capture on the IR camera through OpenCV
video_capture = cv2.VideoCapture(config.get("video", "device_path"), cv2.CAP_V4L)
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
# Set the frame width and height if requested
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
if fh != -1:
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, fh)
# Request a frame to wake the camera up
video_capture.grab()
# Set up video_capture
video_capture = VideoCapture(config)
print("\nPlease look straight into the camera")
@@ -139,12 +109,14 @@ enc = []
frames = 0
dark_threshold = config.getfloat("video", "dark_threshold")
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# Loop through frames till we hit a timeout
while frames < 60:
# Grab a single frame of video
# Don't remove ret, it doesn't work without it
ret, frame = video_capture.read()
frame, gsframe = video_capture.read_frame()
gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gsframe = clahe.apply(gsframe)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
@@ -165,8 +137,6 @@ while frames < 60:
if face_locations:
break
video_capture.release()
# If more than one face is detected we can't know which one belongs to the user
if len(face_locations) > 1:
print("Multiple faces detected, aborting")


@@ -7,6 +7,7 @@ import sys
import time
import cv2
import dlib
from recorders.video_capture import VideoCapture
# Get the absolute path to the current file
path = os.path.dirname(os.path.abspath(__file__))
@@ -20,22 +21,7 @@ if config.get("video", "recording_plugin") != "opencv":
print("Aborting")
sys.exit(12)
# Start capturing from the configured webcam
video_capture = cv2.VideoCapture(config.get("video", "device_path"), cv2.CAP_V4L)
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
# Set the frame width and height if requested
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
if fh != -1:
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, fh)
video_capture = VideoCapture(config)
# Read exposure and dark_thresholds from config to use in the main loop
exposure = config.getint("video", "exposure", fallback=-1)
@@ -109,21 +95,12 @@ try:
sec_frames = 0
# Grab a single frame of video
ret, frame = video_capture.read()
try:
# Convert from color to grayscale
# First processing of frame, so frame errors show up here
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
except RuntimeError:
pass
except cv2.error:
print("\nUnknown camera, please check your 'device_path' config value.\n")
raise
_, frame = video_capture.read_frame()
frame = clahe.apply(frame)
# Make a frame to put overlays in
overlay = frame.copy()
overlay = cv2.cvtColor(overlay, cv2.COLOR_GRAY2BGR)
# Fetch the frame height and width
height, width = frame.shape[:2]
@@ -190,6 +167,7 @@ try:
# Add the overlay to the frame with some transparency
alpha = 0.65
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
# Show the image in a window
@@ -211,8 +189,8 @@ try:
# are captured and even after a delay it does not
# always work. Setting exposure at every frame is
# reliable though.
video_capture.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0) # 1 = Manual
video_capture.set(cv2.CAP_PROP_EXPOSURE, float(exposure))
video_capture.internal.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0) # 1 = Manual
video_capture.internal.set(cv2.CAP_PROP_EXPOSURE, float(exposure))
# On ctrl+C
except KeyboardInterrupt:
@@ -220,5 +198,4 @@ except KeyboardInterrupt:
print("\nClosing window")
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()


@@ -18,6 +18,7 @@ import cv2
import dlib
import numpy as np
import _thread as thread
from recorders.video_capture import VideoCapture
def init_detector(lock):
@@ -46,13 +47,6 @@ def init_detector(lock):
timings["ll"] = time.time() - timings["ll"]
lock.release()
def stop(status):
"""Stop the execution and close video stream"""
video_capture.release()
sys.exit(status)
# Make sure we were given a username to test against
if len(sys.argv) < 2:
sys.exit(12)
@@ -94,7 +88,7 @@ config.read(PATH + "/config.ini")
# Get all config values needed
use_cnn = config.getboolean("core", "use_cnn", fallback=False)
timeout = config.getint("video", "timout", fallback=5)
timeout = config.getint("video", "timeout", fallback=5)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=50.0)
video_certainty = config.getfloat("video", "certainty", fallback=3.5) / 10
end_report = config.getboolean("debug", "end_report", fallback=False)
@@ -113,36 +107,7 @@ thread.start_new_thread(init_detector, (lock, ))
# Start video capture on the IR camera
timings["ic"] = time.time()
# Check if the user explicitly set ffmpeg as recorder
if config.get("video", "recording_plugin") == "ffmpeg":
# Set the capture source for ffmpeg
from recorders.ffmpeg_reader import ffmpeg_reader
video_capture = ffmpeg_reader(config.get("video", "device_path"), config.get("video", "device_format"))
elif config.get("video", "recording_plugin") == "pyv4l2":
# Set the capture source for pyv4l2
from recorders.pyv4l2_reader import pyv4l2_reader
video_capture = pyv4l2_reader(config.get("video", "device_path"), config.get("video", "device_format"))
else:
# Start video capture on the IR camera through OpenCV
video_capture = cv2.VideoCapture(config.get("video", "device_path"), cv2.CAP_V4L)
# Force MJPEG decoding if true
if config.getboolean("video", "force_mjpeg", fallback=False):
# Set a magic number, will enable MJPEG but is badly documented
# 1196444237 is "GPJM" in ASCII
video_capture.set(cv2.CAP_PROP_FOURCC, 1196444237)
# Set the frame width and height if requested
fw = config.getint("video", "frame_width", fallback=-1)
fh = config.getint("video", "frame_height", fallback=-1)
if fw != -1:
video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, fw)
if fh != -1:
video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, fh)
# Capture a single frame so the camera becomes active
# This will let the camera adjust its light levels while we're importing for faster scanning
video_capture.grab()
video_capture = VideoCapture(config)
# Read exposure from config to use in the main loop
exposure = config.getint("video", "exposure", fallback=-1)
@@ -158,7 +123,7 @@ del lock
# Fetch the max frame height
max_height = config.getfloat("video", "max_height", fallback=0.0)
# Get the height of the image
height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT) or 1
height = video_capture.internal.get(cv2.CAP_PROP_FRAME_HEIGHT) or 1
# Calculate the amount the image has to shrink
scaling_factor = (max_height / height) or 1
@@ -172,30 +137,20 @@ end_report = config.getboolean("debug", "end_report")
frames = 0
timings["fr"] = time.time()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
while True:
# Increment the frame count every loop
frames += 1
# Stop if we've exceeded the time limit
if time.time() - timings["fr"] > timeout:
stop(11)
sys.exit(11)
# Grab a single frame of video
ret, frame = video_capture.read()
frame, gsframe = video_capture.read_frame()
if frames == 1 and ret is False:
print("Could not read from camera")
exit(12)
try:
# Convert from color to grayscale
# First processing of frame, so frame errors show up here
gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
except RuntimeError:
gsframe = frame
except cv2.error:
print("\nUnknown camera, please check your 'device_path' config value.\n")
raise
gsframe = clahe.apply(gsframe)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
@@ -255,7 +210,7 @@ while True:
print_timing("Total time", "tt")
print("\nResolution")
width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH) or 1
width = video_capture.fw or 1
print(" Native: %dx%d" % (height, width))
# Save the new size for diagnostics
scale_height, scale_width = frame.shape[:2]
@@ -269,7 +224,7 @@ while True:
print("Winning model: %d (\"%s\")" % (match_index, models[match_index]["label"]))
# End peacefully
stop(0)
sys.exit(0)
if exposure != -1:
# For a strange reason on some cameras (e.g. Lenoxo X1E)
@@ -277,5 +232,5 @@ while True:
# are captured and even after a delay it does not
# always work. Setting exposure at every frame is
# reliable though.
video_capture.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0) # 1 = Manual
video_capture.set(cv2.CAP_PROP_EXPOSURE, float(exposure))
video_capture.internal.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0) # 1 = Manual
video_capture.internal.set(cv2.CAP_PROP_EXPOSURE, float(exposure))


@@ -0,0 +1,116 @@
# Top level class for a video capture providing simplified APIs for common
# functions

# Import required modules
import configparser
import cv2
import os
import sys

"""
Class to provide boilerplate code to build a video recorder with the
correct settings from the config file.

The internal recorder can be accessed with 'video_capture.internal'
"""
class VideoCapture:

	"""
	Creates a new VideoCapture instance depending on the settings in the
	provided config file.

	Config can either be a string to the path, or a pre-setup configparser.
	"""
	def __init__(self, config):
		if isinstance(config, str):
			self.config = configparser.ConfigParser()
			self.config.read(config)
		else:
			self.config = config

		# Check device path
		if not os.path.exists(self.config.get("video", "device_path")):
			print("Camera path is not configured correctly, " +
				"please edit the 'device_path' config value.")
			sys.exit(1)

		# Create reader
		self.internal = None  # The internal video recorder
		self.fw = None  # The frame width
		self.fh = None  # The frame height
		self._create_reader()

		# Request a frame to wake the camera up
		self.internal.grab()

	"""
	Frees resources when destroyed
	"""
	def __del__(self):
		if self.internal is not None:
			self.internal.release()

	"""
	Reads a frame, returns the frame and an attempted grayscale conversion of
	the frame in a tuple:

	(frame, grayscale_frame)

	If the grayscale conversion fails, both items in the tuple are identical.
	"""
	def read_frame(self):
		# Grab a single frame of video
		# Don't remove ret, it doesn't work without it
		ret, frame = self.internal.read()
		if not ret:
			print("Failed to read camera specified in your 'device_path', " +
				"aborting")
			sys.exit(1)

		try:
			# Convert from color to grayscale
			# First processing of frame, so frame errors show up here
			gsframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		except RuntimeError:
			gsframe = frame
		except cv2.error:
			print("\nAn error occurred in OpenCV\n")
			raise

		return frame, gsframe

	"""
	Sets up the video reader instance
	"""
	def _create_reader(self):
		if self.config.get("video", "recording_plugin") == "ffmpeg":
			# Set the capture source for ffmpeg
			from recorders.ffmpeg_reader import ffmpeg_reader
			self.internal = ffmpeg_reader(
				self.config.get("video", "device_path"),
				self.config.get("video", "device_format")
			)
		elif self.config.get("video", "recording_plugin") == "pyv4l2":
			# Set the capture source for pyv4l2
			from recorders.pyv4l2_reader import pyv4l2_reader
			self.internal = pyv4l2_reader(
				self.config.get("video", "device_path"),
				self.config.get("video", "device_format")
			)
		else:
			# Start video capture on the IR camera through OpenCV
			self.internal = cv2.VideoCapture(
				self.config.get("video", "device_path")
			)

		# Force MJPEG decoding if true
		if self.config.getboolean("video", "force_mjpeg", fallback=False):
			# Set a magic number, will enable MJPEG but is badly documented
			self.internal.set(cv2.CAP_PROP_FOURCC, 1196444237)

		# Set the frame width and height if requested
		self.fw = self.config.getint("video", "frame_width", fallback=-1)
		self.fh = self.config.getint("video", "frame_height", fallback=-1)
		if self.fw != -1:
			self.internal.set(cv2.CAP_PROP_FRAME_WIDTH, self.fw)
		if self.fh != -1:
			self.internal.set(cv2.CAP_PROP_FRAME_HEIGHT, self.fh)
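
Taken together with the script changes above, every caller now goes through this class instead of setting up its own capture. A minimal usage sketch follows; the config path, exposure value and frame handling are illustrative and not taken from this commit, and the scripts themselves pass an already-parsed configparser rather than a file path.

# Illustrative usage of the new VideoCapture wrapper; values are assumptions
import configparser
import cv2
from recorders.video_capture import VideoCapture

config = configparser.ConfigParser()
config.read("config.ini")  # assumed path; a ConfigParser object also works

video_capture = VideoCapture(config)

# read_frame() returns the raw frame and a best-effort grayscale copy
frame, gsframe = video_capture.read_frame()

# The wrapped recorder stays reachable for backend-specific calls,
# as the scripts above do for exposure and frame height
video_capture.internal.set(cv2.CAP_PROP_EXPOSURE, 100.0)
height = video_capture.internal.get(cv2.CAP_PROP_FRAME_HEIGHT) or 1

# The camera is released when the instance is destroyed (__del__)
del video_capture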