Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

models_update (update 1 of 2) #34

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -41,3 +41,12 @@ app.*.map.json
/android/app/debug
/android/app/profile
/android/app/release


# Model Related
/models/MLvenv
/models/ASL_Alphabet_Dataset
/models/dataset.zip
/models/data.pickle
/models/tempTesting
/models/model.p
4 changes: 1 addition & 3 deletions models/TrainData_preprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@
import matplotlib.pyplot as plt

#defining dataset directories
DATA_DIR_1 = './ASL_Alphabet_Dataset/asl_alphabet_train'
#DATA_DIR_1 = './temp'
#DATA_DIR_1 = './temp'
DATA_DIR_1 = './ASL_Alphabet_Dataset/asl_alphabet_train/asl_alphabet_train'

#mp_holistic = mp.solutions.holistic
mp_hands = mp.solutions.hands
Expand Down
20 changes: 20 additions & 0 deletions models/faceRecog.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
from deepface import DeepFace


def recog(img_1_path, img_2_path):
    '''
    Returns a truth value after comparing the faces in two images.

    Parameters:
        img_1_path: path of image to be checked
        img_2_path: path of original image

    Returns:
        A truth value:
        True if the faces match, else False
    '''
    # DeepFace.verify runs face detection (retinaface) + embedding (ArcFace)
    # and returns a dict; the boolean verdict lives under "verified".
    obj = DeepFace.verify(img_1_path, img_2_path,
                          model_name='ArcFace', detector_backend='retinaface')

    return obj["verified"]

120 changes: 120 additions & 0 deletions models/gestureClassify.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
import pickle
import numpy as np
import cv2
import mediapipe as mp
from zipfile import ZipFile


# Cached classifier: the original extracted model.zip and unpickled model.p on
# every call, which is a significant per-frame overhead. Load once per process.
_model = None


def _load_model():
    """Extract model.p from ./model.zip (first call only) and return the
    unpickled classifier object stored under the 'model' key."""
    global _model
    if _model is None:
        with ZipFile("./model.zip", 'r') as zObject:
            zObject.extract("model.p", path="./")
        # Use a context manager so the pickle file handle is closed promptly.
        with open('./model.p', 'rb') as f:
            _model = pickle.load(f)['model']
    return _model


def detectGesture(img):
    '''
    Returns class of gesture detected in the image.

    Parameters:
        img: cv2 image or np array (BGR order, as produced by cv2.imread —
             assumed; confirm against callers)

    Returns:
        1. if prediction made by model: string with class
        2. 0 if hand not detected
        3. 1 if hand detected, but gesture not detected
    '''
    model = _load_model()

    mp_hands = mp.solutions.hands
    mp_drawing = mp.solutions.drawing_utils

    # static_image_mode=True: treat each call as an independent still image.
    hands = mp_hands.Hands(static_image_mode=True, min_detection_confidence=0.5)

    frame_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    results = hands.process(frame_rgb)
    if not results.multi_hand_landmarks:
        # No hand found in the frame.
        return 0

    # Draw the landmark overlay onto the caller's image (side effect kept
    # from the original implementation).
    for hand_landmarks in results.multi_hand_landmarks:
        mp_drawing.draw_landmarks(
            img,
            hand_landmarks,
            mp_hands.HAND_CONNECTIONS,
            mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
            mp_drawing.DrawingSpec(color=(121, 44, 250), thickness=2, circle_radius=2))

    for hand_landmarks in results.multi_hand_landmarks:
        # Collect normalized landmark coordinates for this hand.
        x_ = [lm.x for lm in hand_landmarks.landmark]
        y_ = [lm.y for lm in hand_landmarks.landmark]

        # Translate landmarks so the hand's bounding-box origin is (0, 0);
        # min() hoisted out of the loop (the original recomputed it per point).
        min_x, min_y = min(x_), min(y_)
        data_aux = []
        for lm in hand_landmarks.landmark:
            data_aux.append(lm.x - min_x)
            data_aux.append(lm.y - min_y)

        # The model was trained on 21 landmarks * 2 coords = 42 features;
        # truncate in case more than one hand's points accumulated.
        data_aux = data_aux[:42]
        prediction = model.predict([np.asarray(data_aux)])
        if len(prediction):
            return str(prediction[0])

        # Hand detected but the model produced no prediction.
        return 1

    return 0

def compare(detectOutput, gestureClass):
    '''
    Compares the detected gesture against the expected gesture class.

    Parameters:
        detectOutput: str or int (output from detectGesture function)
        gestureClass: str

    Returns:
        1. True if gesture matches
        2. False if there's some error, or gesture does not match,
           along with a string with the description of the error
    '''
    # detectGesture returns 0 when no hand was found; 0 is falsy.
    if not detectOutput:
        return False, 'Hand Not Detected'
    if detectOutput == 1:
        return False, 'Gesture Not Inferred'
    if detectOutput == gestureClass:
        # Bug fix: the original had a bare `True` expression here (no
        # `return`), so a correct gesture fell through and returned None.
        return True
    return False, 'Incorrect Gesture'

# print(compare(detectGesture(cv2.imread('./tempTesting/gesture.jpeg')), 'A'))
75 changes: 0 additions & 75 deletions models/main.py

This file was deleted.

Binary file added models/model.zip
Binary file not shown.
9 changes: 9 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
numpy
pandas
matplotlib
scikit-learn
mediapipe
opencv-python
tf-keras
deepface
flask