
Added the option to save snapshots

Author: boltgolt, 2020-06-21 21:40:47 +02:00
Parent: 6e9169e87c
Commit: 51e17420d7
5 changed files with 115 additions and 7 deletions

.gitignore (3 additions)

@@ -103,6 +103,9 @@ ENV/
# generated models
/src/models
# snapshots
/src/snapshots
# build files
debian/howdy.substvars
debian/files

src/compare.py

@@ -16,6 +16,8 @@ import json
import configparser
import cv2
import dlib
import datetime
import snapshot
import numpy as np
import _thread as thread
from recorders.video_capture import VideoCapture
@@ -48,6 +50,18 @@ def init_detector(lock):
lock.release()
def make_snapshot(type):
"""Generate snapshot after detection"""
snapshot.generate(snapframes, [
type + " LOGIN",
"Date: " + datetime.datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S UTC"),
"Scan time: " + str(round(time.time() - timings["fr"], 2)) + "s",
"Frames: " + str(frames) + " (" + str(round(frames / (time.time() - timings["fr"]), 2)) + "FPS)",
"Hostname: " + os.uname().nodename,
"Best certainty value: " + str(round(lowest_certainty * 10, 1))
])
# Make sure we were given a username to test against
if len(sys.argv) < 2:
sys.exit(12)
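For a sense of what ends up stamped on the image, here is a rough sketch of the call make_snapshot("FAILED") builds; every value below (date, scan time, frame count, hostname, certainty) is purely illustrative:

# Illustrative values only, not real output
snapshot.generate(snapframes, [
    "FAILED LOGIN",
    "Date: 2020/06/21 19:40:47 UTC",
    "Scan time: 4.02s",
    "Frames: 36 (8.95FPS)",
    "Hostname: example-host",
    "Best certainty value: 4.8"
])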
@@ -67,7 +81,11 @@ black_tries = 0
dark_tries = 0
# Total amount of frames captured
frames = 0
# face recognition/detection instances
# Captured frames for snapshot capture
snapframes = []
# Tracks the lowest certainty value in the loop
lowest_certainty = 10
# Face recognition/detection instances
face_detector = None
pose_predictor = None
face_encoder = None
@@ -95,6 +113,8 @@ timeout = config.getint("video", "timeout", fallback=5)
dark_threshold = config.getfloat("video", "dark_threshold", fallback=50.0)
video_certainty = config.getfloat("video", "certainty", fallback=3.5) / 10
end_report = config.getboolean("debug", "end_report", fallback=False)
capture_failed = config.getboolean("snapshots", "capture_failed", fallback=False)
capture_successful = config.getboolean("snapshots", "capture_successful", fallback=False)
# Save the time needed to start the script
timings["in"] = time.time() - timings["st"]
@@ -150,7 +170,11 @@ while True:
# Stop if we've exceeded the time limit
if time.time() - timings["fr"] > timeout:
if (dark_tries == valid_frames):
# Create a timeout snapshot if enabled
if capture_failed:
make_snapshot("FAILED")
if dark_tries == valid_frames:
print("All frames were too dark, please check dark_threshold in config")
print("Average darkness: " + str(dark_running_total / valid_frames) + ", Threshold: " + str(dark_threshold))
sys.exit(13)
@@ -159,9 +183,14 @@ while True:
# Grab a single frame of video
frame, gsframe = video_capture.read_frame()
gsframe = clahe.apply(gsframe)
# If snapshots have been turned on
if capture_failed or capture_successful:
# Start capturing frames for the snapshot
if len(snapframes) < 3:
snapframes.append(frame)
# Create a histogram of the image with 8 values
hist = cv2.calcHist([gsframe], [0], None, [8], [0, 256])
# All values combined for percentage calculation
@@ -210,10 +239,14 @@ while True:
match_index = np.argmin(matches)
match = matches[match_index]
# Update certainty if we have a new low
if lowest_certainty > match:
lowest_certainty = match
# Check if we have a match that's confident enough
if 0 < match < video_certainty:
timings["tt"] = time.time() - timings["st"]
timings["fr"] = time.time() - timings["fr"]
timings["fl"] = time.time() - timings["fr"]
# If set to true in the config, print debug text
if end_report:
@@ -227,7 +260,7 @@ while True:
print(" Open cam + load libs: %dms" % (round(max(timings["ll"], timings["ic"]) * 1000, )))
print_timing(" Opening the camera", "ic")
print_timing(" Importing recognition libs", "ll")
print_timing("Searching for known face", "fr")
print_timing("Searching for known face", "fl")
print_timing("Total time", "tt")
print("\nResolution")
@@ -238,13 +271,17 @@ while True:
print(" Used: %dx%d" % (scale_height, scale_width))
# Show the total number of frames and calculate the FPS by dividing it by the total scan time
print("\nFrames searched: %d (%.2f fps)" % (frames, frames / timings["fr"]))
print("\nFrames searched: %d (%.2f fps)" % (frames, frames / timings["fl"]))
print("Black frames ignored: %d " % (black_tries, ))
print("Dark frames ignored: %d " % (dark_tries, ))
print("Certainty of winning frame: %.3f" % (match * 10, ))
print("Winning model: %d (\"%s\")" % (match_index, models[match_index]["label"]))
# Make snapshot if enabled
if capture_successful:
make_snapshot("SUCCESSFUL")
# End peacefully
sys.exit(0)

src/config.ini

@@ -30,6 +30,7 @@ use_cnn = false
[video]
# The certainty of the detected face belonging to the user of the account
# On a scale from 1 to 10, values above 5 are not recommended
# Lower is better
certainty = 3.5
# The number of seconds to search before timing out
@@ -37,7 +38,7 @@ timeout = 4
# The path of the device to capture frames from
# Should be set automatically by an installer if your distro has one
device_path = "/dev/video1"
device_path = none
# Scale down the video feed to this maximum height
# Speeds up face recognition but can make it less precise
@@ -73,6 +74,14 @@ force_mjpeg = false
# OPENCV only.
exposure = -1
[snapshots]
# Capture snapshots of failed login attempts and save them to disk with metadata
# Snapshots are saved to the "snapshots" folder
capture_failed = true
# Do the same as the option above but for successful attempts
capture_successful = true
[debug]
# Show a short but detailed diagnostic report in console
# Enabling this can cause some UI apps to fail, only enable it to debug
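As a usage note, a minimal example of how the new section could be tuned to keep evidence of failed attempts only (illustrative values, not the shipped defaults):

[snapshots]
# Only keep snapshots of failed attempts
capture_failed = true
capture_successful = false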

src/logo.png (new binary file, 3 KiB)

src/snapshot.py (new file, 59 lines)

@@ -0,0 +1,59 @@
# Create and save snapshots of auth attempts
# Import modules
import cv2
import os
import datetime
import numpy as np
def generate(frames, text_lines):
"""Generate a shapshot from given frames"""
# Don't execute if no frames were given
if len(frames) == 0:
return
# Get the path to the containing folder
abpath = os.path.dirname(os.path.abspath(__file__))
# Get frame dimensions
frame_height, frame_width, cc = frames[0].shape
# Spread the given frames out horizontally
snap = np.concatenate(frames, axis=1)
# Create colors
pad_color = [44, 44, 44]
text_color = [255, 255, 255]
# Add a gray square at the bottom of the image
snap = cv2.copyMakeBorder(snap, 0, len(text_lines) * 20 + 40, 0, 0, cv2.BORDER_CONSTANT, value=pad_color)
# Add the Howdy logo if there's space to do so
if len(frames) > 1:
# Load the logo from file
logo = cv2.imread(abpath + "/logo.png")
# Calculate the position of the logo
logo_y = frame_height + 20
logo_x = frame_width * len(frames) - 210
# Overlay the logo on top of the image (the slice below assumes a 180x57 px logo)
snap[logo_y:logo_y+57, logo_x:logo_x+180] = logo
# Go through each line
line_number = 0
for line in text_lines:
# Calculate how far the line should be from the top
padding_top = frame_height + 30 + (line_number * 20)
# Print the line onto the image
cv2.putText(snap, line, (30, padding_top), cv2.FONT_HERSHEY_SIMPLEX, .4, text_color, 0, cv2.LINE_AA)
line_number += 1
# Make sure a snapshots folder exists
if not os.path.exists(abpath + "/snapshots"):
os.makedirs(abpath + "/snapshots")
# Generate a filename based on the current time
filename = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%S.jpg")
# Write the image to that file
cv2.imwrite(abpath + "/snapshots/" + filename, snap)
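To try the new module without going through an actual login, a minimal standalone sketch like the one below can be run from the src directory (so that import snapshot resolves; logo.png and the snapshots folder live next to snapshot.py). The frames and text lines are dummy data:

# Standalone sketch with dummy data, assuming it runs from the src directory
import numpy as np
import snapshot

# Three black 640x480 frames stand in for captured camera frames
dummy_frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(3)]

snapshot.generate(dummy_frames, [
    "TEST LOGIN",
    "Hostname: example-host"
])

# A timestamped JPEG (e.g. 20200621T194047.jpg) should now be in src/snapshots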