diff --git a/src/compare.py b/src/compare.py
index d0ea5ea..6c1c31b 100644
--- a/src/compare.py
+++ b/src/compare.py
@@ -282,6 +282,14 @@ while True:
 		if capture_successful:
 			make_snapshot("SUCCESSFUL")
 
+		import rubberstamps
+		rubberstamps.execute(config, {
+			"video_capture": video_capture,
+			"face_detector": face_detector,
+			"pose_predictor": pose_predictor,
+			"clahe": clahe
+		})
+
 		# End peacefully
 		sys.exit(0)
 
diff --git a/src/pam.py b/src/pam.py
index e569210..95efc8d 100644
--- a/src/pam.py
+++ b/src/pam.py
@@ -73,20 +73,17 @@ def doAuth(pamh):
 
 	# Status 13 means the image was too dark
 	elif status == 13:
+		pamh.conversation(pamh.Message(pamh.PAM_ERROR_MSG, "Face detection image too dark"))
 		syslog.syslog(syslog.LOG_INFO, "Failure, image too dark")
 		syslog.closelog()
-		pamh.conversation(pamh.Message(pamh.PAM_ERROR_MSG, "Face detection image too dark"))
 		return pamh.PAM_AUTH_ERR
 
 	# Status 1 is probably a python crash
 	elif status == 1:
-		# Show the success message if it isn't suppressed
-		if not config.getboolean("core", "no_confirmation"):
-			pamh.conversation(pamh.Message(pamh.PAM_TEXT_INFO, "Identified face as " + pamh.get_user()))
-
-		syslog.syslog(syslog.LOG_INFO, "Login approved")
+		pamh.conversation(pamh.Message(pamh.PAM_ERROR_MSG, "Howdy encountered error, check stderr"))
+		syslog.syslog(syslog.LOG_INFO, "Failure, process crashed while authenticating")
 		syslog.closelog()
-		return pamh.PAM_SUCCESS
+		return pamh.PAM_SYSTEM_ERR
 
 	# Status 0 is a successful exit
 	elif status == 0:
diff --git a/src/rubberstamps/__init__.py b/src/rubberstamps/__init__.py
new file mode 100644
index 0000000..933c524
--- /dev/null
+++ b/src/rubberstamps/__init__.py
@@ -0,0 +1,35 @@
+import os
+
+from importlib.machinery import SourceFileLoader
+
+
+class RubberStamp:
+	def create_shorthands(self):
+		self.video_capture = self.opencv["video_capture"]
+		self.face_detector = self.opencv["face_detector"]
+		self.pose_predictor = self.opencv["pose_predictor"]
+		self.clahe = self.opencv["clahe"]
+
+
+def execute(config, opencv):
+	dir_path = os.path.dirname(os.path.realpath(__file__))
+
+	for filename in os.listdir(dir_path):
+		if not os.path.isfile(dir_path + "/" + filename):
+			continue
+
+		if filename in ["__init__.py", ".gitignore"]:
+			continue
+
+		class_name = filename.split(".")[0]
+		module = SourceFileLoader(class_name, dir_path + "/" + filename).load_module()
+		constructor = getattr(module, class_name)
+
+		instance = constructor()
+		instance.config = config
+		instance.opencv = opencv
+
+		instance.create_shorthands()
+		result = instance.run()
+
+		print(result)
diff --git a/src/rubberstamps/nod.py b/src/rubberstamps/nod.py
new file mode 100644
index 0000000..2784ec5
--- /dev/null
+++ b/src/rubberstamps/nod.py
@@ -0,0 +1,64 @@
+import cv2
+import time
+
+from rubberstamps import RubberStamp
+
+min_distance = 10
+min_directions = 3
+failsafe = True
+timeout = 5
+
+
+class nod(RubberStamp):
+	def run(self):
+		last_reldist = -1
+		last_nosepoint = {"x": -1, "y": -1}
+		recorded_nods = {"x": [], "y": []}
+
+		starttime = time.time()
+
+		while True:
+			if time.time() > starttime + timeout:
+				return not failsafe
+
+			ret, frame = self.video_capture.read_frame()
+
+			frame = self.clahe.apply(frame)
+
+			face_locations = self.face_detector(frame, 1)
+
+			if len(face_locations) != 1:
+				continue
+
+			face_landmarks = self.pose_predictor(frame, face_locations[0])
+
+			reldist = face_landmarks.part(0).x - face_landmarks.part(2).x
+			avg_reldist = (last_reldist + reldist) / 2
+
+			for axis in ["x", "y"]:
+				nosepoint = getattr(face_landmarks.part(4), axis)
+
+				if last_nosepoint[axis] == -1:
+					last_nosepoint[axis] = nosepoint
+					last_reldist = reldist
+
+				movement = (nosepoint - last_nosepoint[axis]) * 100 / avg_reldist
+
+				if movement < -min_distance or movement > min_distance:
+					if len(recorded_nods[axis]) == 0:
+						recorded_nods[axis].append(movement < 0)
+
+					elif recorded_nods[axis][-1] != (movement < 0):
+						recorded_nods[axis].append(movement < 0)
+
+				if len(recorded_nods[axis]) >= min_directions:
+					return axis == "y"
+
+				last_reldist = reldist
+				last_nosepoint[axis] = nosepoint
+
+			frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
+
+			cv2.imshow("Howdy Test", frame)
+			if cv2.waitKey(1) != -1:
+				raise KeyboardInterrupt()
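
For reference, below is a minimal sketch of what another rubberstamp file could look like under the loader added in src/rubberstamps/__init__.py: execute() imports every file in the rubberstamps directory (except __init__.py and .gitignore), expects a class named after the file that subclasses RubberStamp, injects config and the OpenCV handles, then calls run() and prints the boolean it returns. Only nod.py is part of this diff; the hold_still name, its tunables, and its frame-counting body are hypothetical and exist purely to illustrate that contract.

# src/rubberstamps/hold_still.py (hypothetical example, not included in this diff)
import time

from rubberstamps import RubberStamp

# Module-level tunables, mirroring the style used by nod.py
required_frames = 30
timeout = 5


# The class name must match the filename so the loader in __init__.py
# can resolve it with getattr(module, class_name)
class hold_still(RubberStamp):
	def run(self):
		"""Pass if a single face stays in frame for most of the timeout window."""
		frames_with_face = 0
		starttime = time.time()

		while time.time() < starttime + timeout:
			# self.video_capture, self.clahe and self.face_detector are the
			# shorthands set up by RubberStamp.create_shorthands() before run()
			ret, frame = self.video_capture.read_frame()
			frame = self.clahe.apply(frame)

			# Count frames that contain exactly one detectable face
			if len(self.face_detector(frame, 1)) == 1:
				frames_with_face += 1

		# The boolean returned here is what execute() currently prints
		return frames_with_face >= required_frames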