Initial commit
69
hand tracking/main.py
Normal file
@@ -0,0 +1,69 @@
import cv2
import mediapipe as mp
import math
import pyautogui

cap = cv2.VideoCapture(0)
pyautogui.PAUSE = 0.01
mpHands = mp.solutions.hands
hands = mpHands.Hands(max_num_hands=1, min_tracking_confidence=0.80, min_detection_confidence=0.90)
mpDraw = mp.solutions.drawing_utils

# Gesture thresholds; both are scaled by the palm size (landmarks 0-9)
# each frame so gestures register the same at any distance from the camera.
testing = 23    # thumb tip (4) to index tip (8): cursor move
click_dis = 35  # thumb tip (4) to middle tip (12): click
sensitivity = 3.5


def get_distance(first, second, height, width):
    """Pixel distance between two hand landmarks (reads the global `results`)."""
    landmarks = results.multi_hand_landmarks[0].landmark
    dist_x = (landmarks[first].x - landmarks[second].x) * width
    dist_y = (landmarks[first].y - landmarks[second].y) * height
    return math.hypot(dist_x, dist_y)


x, y = None, None
click = False  # debounce flag: one pinch fires a single click

while True:
    success, img = cap.read()
    if not success:
        continue
    h, w, c = img.shape
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(imgRGB)
    if results.multi_hand_landmarks:
        dist_palm = round(get_distance(0, 9, h, w) / 100, 3)
        if x is None and y is None:
            x, y = results.multi_hand_landmarks[0].landmark[4].x * w, results.multi_hand_landmarks[0].landmark[4].y * h
        dis_1 = get_distance(4, 8, h, w)
        cv2.putText(img, f"dist 4-8: {round(dis_1)}/{round(testing * dist_palm, 2)}", (0, 15),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"MOVE: {dis_1 < testing * dist_palm}", (0, 40),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"dist 4-12: {round(get_distance(12, 4, h, w))}/{round(click_dis * dist_palm, 2)}",
                    (0, 65), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"CLICK: {get_distance(12, 4, h, w) < click_dis * dist_palm}", (0, 90),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"dist 0-9: {dist_palm}", (0, 115),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        for handLms in results.multi_hand_landmarks:
            mpDraw.draw_landmarks(img, handLms)
        if dis_1 < testing * dist_palm and get_distance(12, 4, h, w) < click_dis * dist_palm:
            # Thumb touches index and middle fingers at once: drag.
            pyautogui.dragRel(-(results.multi_hand_landmarks[0].landmark[3].x * w - x) * sensitivity,
                              (results.multi_hand_landmarks[0].landmark[3].y * h - y) * sensitivity, duration=0.001)
        elif dis_1 < testing * dist_palm:
            # Thumb touches only the index finger: move the cursor.
            pyautogui.moveRel(-(results.multi_hand_landmarks[0].landmark[3].x * w - x) * sensitivity,
                              (results.multi_hand_landmarks[0].landmark[3].y * h - y) * sensitivity, duration=0.001)
        elif get_distance(12, 4, h, w) < click_dis * dist_palm and not click:
            # Thumb touches only the middle finger: single click.
            pyautogui.click()
            click = True
            # mouse.click("left")
            print("clicked")
        else:
            click = False
        x, y = results.multi_hand_landmarks[0].landmark[3].x * w, results.multi_hand_landmarks[0].landmark[3].y * h
    else:
        x, y = None, None
    cv2.imshow("Image", img)
    cv2.waitKey(1)
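All three gestures above reduce to the same palm-normalized pinch test: a landmark pair counts as "touching" when its pixel distance falls below a fixed ratio times the current palm size. A minimal standalone sketch of that test (the is_pinching helper is hypothetical, not part of this commit):

import math

def is_pinching(landmarks, a, b, threshold, palm_scale, width, height):
    # True when landmarks a and b are closer than threshold * palm_scale pixels.
    dx = (landmarks[a].x - landmarks[b].x) * width
    dy = (landmarks[a].y - landmarks[b].y) * height
    return math.hypot(dx, dy) < threshold * palm_scale

With the constants in main.py, a thumb-index gap under 23 * dist_palm pixels triggers MOVE; at dist_palm = 1.2 that is 27.6 px.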
84
hand tracking/new_demo.py
Normal file
@@ -0,0 +1,84 @@
import cv2
import mediapipe as mp
import math
import pyautogui

cap = cv2.VideoCapture(0)
pyautogui.PAUSE = 0.01
mpHands = mp.solutions.hands
hands = mpHands.Hands(max_num_hands=1, min_tracking_confidence=0.95, min_detection_confidence=0.90)
mpDraw = mp.solutions.drawing_utils

# Thresholds as fractions of the palm width (landmarks 0-5), so gestures
# register the same at any distance from the camera.
mov_dis = 0.2379    # index tip (8) to middle tip (12): cursor move
click_dis = 0.2823  # thumb IP (3) to index base (5): click
sensitivity = 3.5

# Running samples logged each frame to calibrate the thresholds above.
testing8_12 = []
testing0_5 = []
testing3_5 = []


def get_distance(first, second, height, width):
    """Pixel distance between two hand landmarks (reads the global `results`)."""
    landmarks = results.multi_hand_landmarks[0].landmark
    dist_x = (landmarks[first].x - landmarks[second].x) * width
    dist_y = (landmarks[first].y - landmarks[second].y) * height
    return math.hypot(dist_x, dist_y)


def dist(point1, point2, pointa, pointb, pointc, pointd):
    """Print running averages of three landmark distances for calibration."""
    testing8_12.append(get_distance(point1, point2, h, w))
    testing0_5.append(get_distance(pointa, pointb, h, w))
    testing3_5.append(get_distance(pointc, pointd, h, w))
    print(f"8-12: {round(sum(testing8_12) / len(testing8_12), 2)} | "
          f"0-5: {round(sum(testing0_5) / len(testing0_5), 2)} | "
          f"3-5: {round(sum(testing3_5) / len(testing3_5), 2)}")


x, y = None, None
click = False  # debounce flag: one pinch fires a single click

while True:
    success, img = cap.read()
    if not success:
        continue
    h, w, c = img.shape
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(imgRGB)
    if results.multi_hand_landmarks:
        dist_palm = round(get_distance(0, 5, h, w))
        if x is None and y is None:
            x, y = results.multi_hand_landmarks[0].landmark[4].x * w, results.multi_hand_landmarks[0].landmark[4].y * h
        dis_1 = get_distance(12, 8, h, w)
        cv2.putText(img, f"MOVE: {round(dis_1)}/{round(mov_dis * dist_palm, 2)} - {dis_1 < mov_dis * dist_palm}",
                    (0, 15), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"CLICK: {round(get_distance(5, 3, h, w))}/{round(click_dis * dist_palm, 2)} - "
                         f"{get_distance(5, 3, h, w) < click_dis * dist_palm}",
                    (0, 40), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        cv2.putText(img, f"DRAG: {dis_1 < mov_dis * dist_palm and get_distance(5, 3, h, w) < click_dis * dist_palm}",
                    (0, 65), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 255), thickness=2)
        for handLms in results.multi_hand_landmarks:
            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)
        if dis_1 < mov_dis * dist_palm and get_distance(5, 3, h, w) < click_dis * dist_palm:
            # Both gestures at once: hold the button down and drag.
            pyautogui.mouseDown()
            pyautogui.moveRel(-(results.multi_hand_landmarks[0].landmark[8].x * w - x) * sensitivity,
                              (results.multi_hand_landmarks[0].landmark[8].y * h - y) * sensitivity, duration=0.001)
        elif dis_1 < mov_dis * dist_palm:
            # Index and middle tips together: move the cursor.
            pyautogui.mouseUp()
            pyautogui.moveRel(-(results.multi_hand_landmarks[0].landmark[8].x * w - x) * sensitivity,
                              (results.multi_hand_landmarks[0].landmark[8].y * h - y) * sensitivity, duration=0.001)
        elif get_distance(5, 3, h, w) < click_dis * dist_palm and not click:
            # Thumb pulled in toward the index base: single click.
            pyautogui.mouseUp()
            pyautogui.click()
            click = True
        else:
            pyautogui.mouseUp()
            click = False
        x, y = results.multi_hand_landmarks[0].landmark[8].x * w, results.multi_hand_landmarks[0].landmark[8].y * h
        dist(8, 12, 0, 5, 3, 5)
    else:
        x, y = None, None
    cv2.imshow("Image", img)
    cv2.waitKey(1)
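The dist(8, 12, 0, 5, 3, 5) call at the end of each frame prints running average distances for the 8-12, 0-5, and 3-5 landmark pairs. The hard-coded mov_dis and click_dis look like those averages expressed as ratios of the 0-5 palm width, though the commit does not state this; assuming that reading, recalibrating would amount to:

# Assumed derivation (hypothetical, not in the commit): hold each gesture
# for a few seconds, then divide the logged averages by the palm width.
avg_8_12 = sum(testing8_12) / len(testing8_12)
avg_0_5 = sum(testing0_5) / len(testing0_5)
avg_3_5 = sum(testing3_5) / len(testing3_5)
mov_dis = round(avg_8_12 / avg_0_5, 4)    # e.g. 0.2379
click_dis = round(avg_3_5 / avg_0_5, 4)   # e.g. 0.2823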
45
hand tracking/smoothing.py
Normal file
@@ -0,0 +1,45 @@
import math


def smoothing_factor(t_e, cutoff):
    r = 2 * math.pi * cutoff * t_e
    return r / (r + 1)


def exponential_smoothing(a, x, x_prev):
    return a * x + (1 - a) * x_prev


class OneEuroFilter:
    def __init__(self, t0, x0, dx0=0.0, min_cutoff=1.0, beta=0.0,
                 d_cutoff=1.0):
        """Initialize the one euro filter."""
        # The parameters.
        self.min_cutoff = float(min_cutoff)
        self.beta = float(beta)
        self.d_cutoff = float(d_cutoff)
        # Previous values.
        self.x_prev = float(x0)
        self.dx_prev = float(dx0)
        self.t_prev = float(t0)

    def __call__(self, t, x):
        """Compute the filtered signal."""
        t_e = t - self.t_prev

        # The filtered derivative of the signal.
        a_d = smoothing_factor(t_e, self.d_cutoff)
        dx = (x - self.x_prev) / t_e
        dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)

        # The filtered signal.
        cutoff = self.min_cutoff + self.beta * abs(dx_hat)
        a = smoothing_factor(t_e, cutoff)
        x_hat = exponential_smoothing(a, x, self.x_prev)

        # Memorize the previous values.
        self.x_prev = x_hat
        self.dx_prev = dx_hat
        self.t_prev = t

        return x_hat
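smoothing.py is the standard 1€ (one euro) filter, but neither tracker imports it in this commit. A minimal sketch of how it could steady the cursor, one filter per axis (hypothetical wiring; the min_cutoff and beta values are illustrative, not tuned):

import time
from smoothing import OneEuroFilter

# Seed each filter with the first observed fingertip position.
t0 = time.time()
fx = OneEuroFilter(t0, x0=320.0, min_cutoff=1.0, beta=0.007)
fy = OneEuroFilter(t0, x0=240.0, min_cutoff=1.0, beta=0.007)

# Inside the tracking loop, filter the raw landmark position before
# computing the moveRel delta. Timestamps must strictly increase,
# since the filter divides by the elapsed time.
t = time.time()
raw_x = results.multi_hand_landmarks[0].landmark[8].x * w
raw_y = results.multi_hand_landmarks[0].landmark[8].y * h
smooth_x, smooth_y = fx(t, raw_x), fy(t, raw_y)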