
Merge pull request #758 from kianmeng/fix-typos

Fix typos
Authored by boltgolt on 2023-02-28 12:26:04 +01:00, committed by GitHub
commit e881cc2593
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
16 changed files with 27 additions and 27 deletions


@@ -105,7 +105,7 @@ class StickyWindow(gtk.Window):
ctx.set_source_surface(self.logo_surface)
ctx.paint()
- # Calculate main message positioning, as the text is heigher if there's a subtext
+ # Calculate main message positioning, as the text is higher if there's a subtext
if self.subtext:
ctx.move_to(380, 145)
else:


@@ -179,7 +179,7 @@ class OnboardingWindow(gtk.Window):
self.treeview = gtk.TreeView()
self.treeview.set_vexpand(True)
- # Set the coloums
+ # Set the columns
for i, column in enumerate([_("Camera identifier or path"), _("Recommended")]):
cell = gtk.CellRendererText()
cell.set_property("ellipsize", pango.EllipsizeMode.END)


@@ -41,7 +41,7 @@ class MainWindow(gtk.Window):
self.treeview = gtk.TreeView()
self.treeview.set_vexpand(True)
- # Set the coloums
+ # Set the columns
for i, column in enumerate([_("ID"), _("Created"), _("Label")]):
col = gtk.TreeViewColumn(column, gtk.CellRendererText(), text=i)
self.treeview.append_column(col)
@@ -75,7 +75,7 @@
user = 'none'
if self.active_user: user = self.active_user
- # Execute the list commond to get the models
+ # Execute the list command to get the models
status, output = subprocess.getstatusoutput(["howdy list --plain -U " + user])
# Create a datamodel
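
For context, the pattern this hunk documents, shelling out to the howdy CLI and reading back the model list, can be sketched like this. A minimal sketch only: `subprocess.getstatusoutput` takes a single shell string, and the one-model-per-line parsing is an assumption, since the `--plain` output format is not shown in this diff.

```python
import subprocess

def list_models(user):
    # Run the howdy CLI for the given user and capture exit status + output
    status, output = subprocess.getstatusoutput("howdy list --plain -U " + user)
    if status != 0:
        return []  # non-zero exit: no models, or howdy reported an error
    # Assumption: one model per non-empty line of plain output
    return [line for line in output.splitlines() if line.strip()]
```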


@@ -1,5 +1,5 @@
#!/usr/bin/python3
- # Used to check cameras before commiting to install
+ # Used to check cameras before committing to install
# Executed before primary apt install of files
import subprocess


@@ -25,7 +25,7 @@ _howdy() {
[ "$status" == "false" ] && COMPREPLY="true" || COMPREPLY="false"
return 0
;;
- # List the users availible
+ # List the users available
"-U")
COMPREPLY=( $(compgen -u -- ${cur}) )
return 0


@@ -43,7 +43,7 @@ parser.add_argument(
metavar="command",
choices=["add", "clear", "config", "disable", "list", "remove", "set", "snapshot", "test", "version"])
- # Add an argument for the extra arguments of diable and remove
+ # Add an argument for the extra arguments of disable and remove
parser.add_argument(
_("arguments"),
help=_("Optional arguments for the add, disable, remove and set commands."),
@@ -94,7 +94,7 @@ if os.geteuid() != 0:
print("\tsudo howdy " + " ".join(sys.argv[1:]))
sys.exit(1)
- # Beond this point the user can't change anymore, if we still have root as user we need to abort
+ # Beyond this point the user can't change anymore, if we still have root as user we need to abort
if args.user == "root":
print(_("Can't run howdy commands as root, please run this command with the --user flag"))
sys.exit(1)


@@ -186,7 +186,7 @@ if not face_locations:
print(_("No face detected, aborting"))
sys.exit(1)
- # If more than 1 faces are detected we can't know wich one belongs to the user
+ # If more than 1 faces are detected we can't know which one belongs to the user
elif len(face_locations) > 1:
print(_("Multiple faces detected, aborting"))
sys.exit(1)


@@ -98,7 +98,7 @@ sec = int(time.time())
# recognition time
rec_tm = 0
- # Wrap everything in an keyboard interupt handler
+ # Wrap everything in an keyboard interrupt handler
try:
while True:
frame_tm = time.time()
@@ -131,7 +131,7 @@ try:
hist = cv2.calcHist([frame], [0], None, [8], [0, 256])
# All values combined for percentage calculation
hist_total = int(sum(hist)[0])
- # Fill with the overal containing percentage
+ # Fill with the overall containing percentage
hist_perc = []
# Loop though all values to calculate a percentage and add it to the overlay
@@ -139,7 +139,7 @@
value_perc = float(value[0]) / hist_total * 100
hist_perc.append(value_perc)
- # Top left pont, 10px margins
+ # Top left point, 10px margins
p1 = (20 + (10 * index), 10)
# Bottom right point makes the bar 10px thick, with an height of half the percentage
p2 = (10 + (10 * index), int(value_perc / 2 + 10))
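
Pulled out of the capture loop, the histogram overlay logic these two hunks comment on looks roughly like this. A sketch assuming an 8-bit grayscale frame; variable names mirror the diff, the stand-in frame and the final rectangle call are illustrative.

```python
import cv2
import numpy as np

frame = np.random.randint(0, 256, (480, 640), dtype=np.uint8)  # stand-in frame

# 8-bin histogram over the full 0-255 grayscale range
hist = cv2.calcHist([frame], [0], None, [8], [0, 256])
# All bin counts combined, for percentage calculation
hist_total = int(sum(hist)[0])

hist_perc = []
for index, value in enumerate(hist):
    # Percentage of all pixels that fall into this bin
    value_perc = float(value[0]) / hist_total * 100
    hist_perc.append(value_perc)
    # Overlay bar: 10px wide, half the percentage tall, 10px top margin
    p1 = (20 + (10 * index), 10)
    p2 = (10 + (10 * index), int(value_perc / 2 + 10))
    cv2.rectangle(frame, p1, p2, 255, thickness=-1)
```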


@@ -86,7 +86,7 @@ def send_to_ui(type, message):
"""Send message to the auth ui"""
global gtk_proc
- # Only execute of the proccess started
+ # Only execute of the process started
if "gtk_proc" in globals():
# Format message so the ui can parse it
message = type + "=" + message + " \n"
@@ -155,7 +155,7 @@ save_successful = config.getboolean("snapshots", "save_successful", fallback=Fal
gtk_stdout = config.getboolean("debug", "gtk_stdout", fallback=False)
rotate = config.getint("video", "rotate", fallback=0)
- # Send the gtk outupt to the terminal if enabled in the config
+ # Send the gtk output to the terminal if enabled in the config
gtk_pipe = sys.stdout if gtk_stdout else subprocess.DEVNULL
# Start the auth ui, register it to be always be closed on exit
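
Taken together, these two hunks describe a small IPC setup: the auth ui runs as a child process, its stdout is either shown or discarded depending on the config, and status messages are written to its stdin as "key=value" lines. A hedged sketch of that wiring; the ui.py child name and the hard-coded config value are illustrative, not howdy's actual names.

```python
import atexit
import subprocess
import sys

gtk_stdout = False  # would come from the config in the real code

# Send the gtk output to the terminal if enabled, otherwise discard it
gtk_pipe = sys.stdout if gtk_stdout else subprocess.DEVNULL

# Start the (illustrative) ui child and make sure it is closed on exit
gtk_proc = subprocess.Popen(["python3", "ui.py"],
                            stdin=subprocess.PIPE,
                            stdout=gtk_pipe, stderr=gtk_pipe)
atexit.register(gtk_proc.terminate)

def send_to_ui(type, message):
    # One "key=value" line per message so the ui can parse it line by line
    gtk_proc.stdin.write((type + "=" + message + " \n").encode())
    gtk_proc.stdin.flush()
```
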
@@ -353,7 +353,7 @@ while True:
scale_height, scale_width = frame.shape[:2]
print(_(" Used: %dx%d") % (scale_height, scale_width))
- # Show the total number of frames and calculate the FPS by deviding it by the total scan time
+ # Show the total number of frames and calculate the FPS by dividing it by the total scan time
print(_("\nFrames searched: %d (%.2f fps)") % (frames, frames / timings["fl"]))
print(_("Black frames ignored: %d ") % (black_tries, ))
print(_("Dark frames ignored: %d ") % (dark_tries, ))


@@ -209,7 +209,7 @@ auto identify(pam_handle_t *pamh, int flags, int argc, const char **argv,
// Will contain the responses from PAM functions
int pam_res = PAM_IGNORE;
- // Check if we shoud continue
+ // Check if we should continue
if ((pam_res = check_enabled(config)) != PAM_SUCCESS) {
return pam_res;
}


@@ -44,7 +44,7 @@ auto optional_task<T>::wait(std::chrono::duration<R, P> dur)
}
// Get the value.
- // WARNING: The function hould be run only if the task has successfully been
+ // WARNING: The function should be run only if the task has successfully been
// stopped.
template <typename T> auto optional_task<T>::get() -> T {
assert(!is_active && spawned);


@@ -97,7 +97,7 @@ class ffmpeg_reader:
)
def read(self):
- """ Read a sigle frame from the self.video array. Will record a video if array is empty. """
+ """ Read a single frame from the self.video array. Will record a video if array is empty. """
# First time we are called, we want to initialize the camera by probing it, to ensure we have height/width
# and then take numframes of video to fill the buffer for faster recognition.
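
The buffering strategy this docstring describes, record a batch of frames up front and then serve reads from the array, can be illustrated separately from ffmpeg. A simplified stand-in, not the actual ffmpeg_reader; the class and parameter names here are made up for the sketch.

```python
class buffered_reader:
    """Serve frames from a buffer; record a new batch when it runs empty."""

    def __init__(self, capture_frame, numframes=15):
        self.capture_frame = capture_frame  # callable returning one frame
        self.numframes = numframes
        self.video = []  # frame buffer, empty until the first read

    def read(self):
        # Record a batch of frames if the buffer is empty, so subsequent
        # reads are fast during recognition
        if not self.video:
            self.video = [self.capture_frame() for _ in range(self.numframes)]
        # Mimic OpenCV's (ret, frame) return convention
        return True, self.video.pop(0)
```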


@@ -72,11 +72,11 @@ class pyv4l2_reader:
self.frame = Frame(self.device_name)
def grab(self):
- """ Read a sigle frame from the IR camera. """
+ """ Read a single frame from the IR camera. """
self.read()
def read(self):
- """ Read a sigle frame from the IR camera. """
+ """ Read a single frame from the IR camera. """
if not self.frame:
self.record()


@@ -24,7 +24,7 @@ class VideoCapture:
Config can either be a string to the path, or a pre-setup configparser.
"""
- # Parse config from string if nedded
+ # Parse config from string if needed
if isinstance(config, str):
self.config = configparser.ConfigParser()
self.config.read(config)


@@ -136,7 +136,7 @@ def execute(config, gtk_proc, opencv):
# Error out if a key has been set that was not declared by the module before
if key not in instance.options:
- print("Unknow config option for rubberstamp " + type + ": " + key)
+ print("Unknown config option for rubberstamp " + type + ": " + key)
continue
# Convert the argument string to an int or float if the declared option has that type


@@ -22,7 +22,7 @@ class nod(RubberStamp):
last_reldist = -1
# Last point the nose was at
last_nosepoint = {"x": -1, "y": -1}
- # Contans booleans recording successful nods and their directions
+ # Contains booleans recording successful nods and their directions
recorded_nods = {"x": [], "y": []}
starttime = time.time()
@@ -38,7 +38,7 @@ class nod(RubberStamp):
# Detect all faces in the frame
face_locations = self.face_detector(frame, 1)
- # Only continue if exacty 1 face is visible in the frame
+ # Only continue if exactly 1 face is visible in the frame
if len(face_locations) != 1:
continue
@@ -47,10 +47,10 @@
# Calculate the relative distance between the 2 eyes
reldist = face_landmarks.part(0).x - face_landmarks.part(2).x
- # Avarage this out with the distance found in the last frame to smooth it out
+ # Average this out with the distance found in the last frame to smooth it out
avg_reldist = (last_reldist + reldist) / 2
- # Calulate horizontal movement (shaking head) and vertical movement (nodding)
+ # Calculate horizontal movement (shaking head) and vertical movement (nodding)
for axis in ["x", "y"]:
# Get the location of the nose on the active axis
nosepoint = getattr(face_landmarks.part(4), axis)
@@ -61,7 +61,7 @@
last_reldist = reldist
mindist = self.options["min_distance"]
- # Get the relative movement by taking the distance traveled and deviding it by eye distance
+ # Get the relative movement by taking the distance traveled and dividing it by eye distance
movement = (nosepoint - last_nosepoint[axis]) * 100 / max(avg_reldist, 1)
# If the movement is over the minimal distance threshold
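
For reference, the movement calculation in these last hunks normalizes how far the nose traveled by the (smoothed) distance between the eyes, so the nod threshold does not depend on how close the face is to the camera. In isolation it looks like this; only the formula itself comes from the diff, the function wrapper is a sketch.

```python
def relative_movement(nosepoint, last_nosepoint, reldist, last_reldist):
    # Average the eye distance with the previous frame to smooth it out
    avg_reldist = (last_reldist + reldist) / 2
    # Distance traveled by the nose as a percentage of eye distance;
    # max(..., 1) guards against division by zero on the first frame
    return (nosepoint - last_nosepoint) * 100 / max(avg_reldist, 1)

# Example: nose moved 12px while the eyes are ~60px apart -> 20% movement
print(relative_movement(112, 100, 60, 60))  # 20.0
```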