import cv2

# Reference size for a resized hand ROI (kept from the original, unused).
#(width, height) = (130, 100)

# Capture from the default webcam (index 0).  Swap in an IP-camera URL
# (e.g. cv2.VideoCapture('/video')) if streaming from a phone.
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, img = cap.read()
    # Bug fix: bail out cleanly when the camera yields no frame instead of
    # letting cv2.flip() crash on a None image.
    if not ret:
        break
    img = cv2.flip(img, 1)  # mirror view so hand movement feels natural
    cv2.rectangle(img, (20, 20), (250, 250), (255, 0, 0), 4, cv2.LINE_AA)
    cv2.imshow("RGB Output", img)

    # Crop the region of interest inside the blue rectangle.
    img1 = img[20:250, 20:250]
    imCopy = img1.copy()
    gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (35, 35), 0)

    cv2.imshow("Grayscale", gray)
    cv2.imshow('Blur', blur)

    # Compare three Otsu-assisted thresholding modes side by side.
    ret, thresh1 = cv2.threshold(blur, 10, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #hand_resize = cv2.resize(thresh1, (width, height))
    cv2.imshow("Thresh binary ", thresh1)

    ret, thresh2 = cv2.threshold(blur, 10, 255, cv2.THRESH_TOZERO + cv2.THRESH_OTSU)
    cv2.imshow("Tozero ", thresh2)

    ret, thresh3 = cv2.threshold(blur, 10, 255, cv2.THRESH_TRUNC + cv2.THRESH_OTSU)
    cv2.imshow("Trunc ", thresh3)

    # Compare three contour-retrieval modes on the binary threshold image.
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(imCopy, contours, -1, (0, 255, 0))
    cv2.imshow('Contours_tree', imCopy)

    imCopy3 = img1.copy()
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imCopy3, contours, -1, (0, 255, 0))
    cv2.imshow('Contours_list', imCopy3)

    imCopy2 = img1.copy()
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imCopy2, contours, -1, (0, 255, 0))
    cv2.imshow('Contours_external', imCopy2)

    # 'q' quits the demo.
    k = 0xFF & cv2.waitKey(10)
    if k == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
import cv2
import os

# One sub-directory (and capture key) per gesture label.
LABELS = ['0', '1', '2', '3', '4', '5']
# Display/count names, in the same order as LABELS.
NAMES = ['zero', 'one', 'two', 'three', 'four', 'five']

# Robustness fix: create any missing label directory even when "data"
# already exists (the original skipped creation entirely in that case and
# later crashed in os.listdir on the missing sub-directory).
for label in LABELS:
    os.makedirs(os.path.join("data", "train", label), exist_ok=True)

mode = 'train'
directory = 'data/' + mode + '/'

# url = '/video'
# cap=cv2.VideoCapture(url)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # Bug fix: a failed camera read previously crashed cv2.flip(None, 1).
    if not ret:
        break
    frame = cv2.flip(frame, 1)

    cv2.putText(frame, "subha-18", (int(0.35*frame.shape[1]), int(0.25*frame.shape[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (147, 20, 255), 3)

    # Live image count per label, re-read from disk every frame.
    count = {name: len(os.listdir(directory + "/" + label))
             for name, label in zip(NAMES, LABELS)}

    cv2.putText(frame, "MODE : "+mode, (10, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,0), 1)
    cv2.putText(frame, "IMAGE COUNT", (10, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,0), 1)
    cv2.putText(frame, "ZERO : "+str(count['zero']), (10, 120), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,153,51), 1)
    cv2.putText(frame, "ONE : "+str(count['one']), (10, 140), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (204,102,0), 1)
    cv2.putText(frame, "TWO : "+str(count['two']), (10, 160), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,102,102), 1)
    cv2.putText(frame, "THREE : "+str(count['three']), (10, 180), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,51,51), 1)
    cv2.putText(frame, "FOUR : "+str(count['four']), (10, 200), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (204,0,0), 1)
    cv2.putText(frame, "FIVE : "+str(count['five']), (10, 220), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)

    # Square ROI in the top-right quadrant of the frame.
    x1 = int(0.5*frame.shape[1])
    y1 = 10
    x2 = frame.shape[1] - 10
    y2 = int(0.5*frame.shape[1])
    cv2.rectangle(frame, (x1-1, y1-1), (x2+1, y2+1), (255, 0, 0), 3)
    roi = frame[y1:y2, x1:x2]
    roi = cv2.resize(roi, (300, 300))
    cv2.putText(frame, "R.O.I", (int(0.75*frame.shape[1]), int(0.55*frame.shape[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 225, 0), 3)
    cv2.imshow("Frame", frame)

    # Binarise the ROI (grayscale -> blur -> Otsu threshold) before saving,
    # matching what the training pipeline expects.
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    roi = cv2.GaussianBlur(roi, (15, 15), 0)
    _, roi = cv2.threshold(roi, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow("ROI", roi)

    interrupt = cv2.waitKey(10) & 0xFF
    if interrupt == 27:  # ESC quits
        break
    # Pressing a digit key saves the current ROI under that label; the file
    # name is the current count, so images are numbered sequentially.
    for name, label in zip(NAMES, LABELS):
        if interrupt == ord(label):
            cv2.imwrite(directory + label + '/' + str(count[name]) + '.jpg', roi)

cap.release()
cv2.destroyAllWindows()
import cv2
import os

# One sub-directory (and capture key) per gesture label, keys '0'-'9'.
LABELS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# Display/count names, in the same order as LABELS.
NAMES = ['zero', 'one(index)', 'two', 'three', 'four', 'five',
         'one_thumb', 'two_2nd_4th', 'three_3rd_4th_5th', 'blank']

# Robustness fix: create any missing label directory even when "data"
# already exists (the original skipped creation entirely in that case and
# later crashed in os.listdir on the missing sub-directory).
for label in LABELS:
    os.makedirs(os.path.join("data", "train", label), exist_ok=True)

mode = 'train'
directory = 'data/' + mode + '/'

# url = '/video'
# cap=cv2.VideoCapture(url)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    # Bug fix: a failed camera read previously crashed cv2.flip(None, 1).
    if not ret:
        break
    frame = cv2.flip(frame, 1)

    cv2.putText(frame, "subha-18", (int(0.35*frame.shape[1]), int(0.25*frame.shape[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (147, 20, 255), 3)

    # Live image count per label, re-read from disk every frame.
    count = {name: len(os.listdir(directory + "/" + label))
             for name, label in zip(NAMES, LABELS)}

    cv2.putText(frame, "MODE : "+mode, (10, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,0), 1)
    cv2.putText(frame, "IMAGE COUNT", (10, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,0), 1)
    cv2.putText(frame, "ZERO : "+str(count['zero']), (10, 120), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,153,51), 1)
    cv2.putText(frame, "ONE : "+str(count['one(index)']), (10, 140), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (204,102,0), 1)
    cv2.putText(frame, "TWO : "+str(count['two']), (10, 160), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,102,102), 1)
    cv2.putText(frame, "THREE : "+str(count['three']), (10, 180), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,51,51), 1)
    cv2.putText(frame, "FOUR : "+str(count['four']), (10, 200), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (204,0,0), 1)
    cv2.putText(frame, "FIVE : "+str(count['five']), (10, 220), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)
    cv2.putText(frame, "one_thumb(6) : "+str(count['one_thumb']), (10, 240), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)
    cv2.putText(frame, "two_2nd_4th(7) : "+str(count['two_2nd_4th']), (10, 260), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)
    cv2.putText(frame, "three_3rd_4th_5th(8) : "+str(count['three_3rd_4th_5th']), (10, 280), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)
    cv2.putText(frame, "blank(9): "+str(count['blank']), (10, 300), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (153,0,76), 1)

    # Square ROI in the top-right quadrant of the frame.
    x1 = int(0.5*frame.shape[1])
    y1 = 10
    x2 = frame.shape[1] - 10
    y2 = int(0.5*frame.shape[1])
    cv2.rectangle(frame, (x1-1, y1-1), (x2+1, y2+1), (255, 0, 0), 3)
    roi = frame[y1:y2, x1:x2]
    roi = cv2.resize(roi, (300, 300))
    cv2.putText(frame, "R.O.I", (int(0.75*frame.shape[1]), int(0.55*frame.shape[1])),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 225, 0), 3)
    cv2.imshow("Frame", frame)

    # Binarise the ROI (grayscale -> blur -> Otsu threshold) before saving.
    # Note the lighter (5, 5) blur vs collect-data.py's (15, 15).
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    roi = cv2.GaussianBlur(roi, (5, 5), 0)
    _, roi = cv2.threshold(roi, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow("ROI", roi)

    interrupt = cv2.waitKey(10) & 0xFF
    if interrupt == 27:  # ESC quits
        break
    # Pressing a digit key saves the current ROI under that label.
    for name, label in zip(NAMES, LABELS):
        if interrupt == ord(label):
            cv2.imwrite(directory + label + '/' + str(count[name]) + '.jpg', roi)

cap.release()
cv2.destroyAllWindows()
from keras.models import model_from_json
import cv2
import numpy as np

# Rebuild the trained CNN: architecture from JSON, weights from HDF5.
# Fix: use a context manager so the JSON file handle is always closed.
with open("model-bw.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model-bw.h5")
print("Loaded model from disk")

cap = cv2.VideoCapture(0)

# Class index -> display label; must match the alphabetical directory order
# used by flow_from_directory during training.
categories = {0: 'ZERO', 1: 'ONE', 2: 'TWO', 3: 'THREE', 4: 'FOUR', 5: 'FIVE',
              6: 'One', 7: 'Two', 8: 'Three', 9: 'Blank'}

while True:
    ret, frame = cap.read()
    # Bug fix: a failed camera read previously crashed cv2.flip(None, 1).
    if not ret:
        break
    frame = cv2.flip(frame, 1)

    # Square ROI in the top-right quadrant of the frame.
    x1 = int(0.5*frame.shape[1])
    y1 = 10
    x2 = frame.shape[1] - 10
    y2 = int(0.5*frame.shape[1])

    cv2.putText(frame, "Expressando - TDOC 2021", (175, 450), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (225,255,0), 3)
    cv2.rectangle(frame, (x1-1, y1-1), (x2+1, y2+1), (255, 255, 255), 3)
    roi = frame[y1:y2, x1:x2]

    # Preprocess exactly like the training data: 64x64 grayscale, Otsu-binarised.
    roi = cv2.resize(roi, (64, 64))
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    cv2.putText(frame, "R.O.I", (440, 350), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,225,0), 3)

    _, test_image = cv2.threshold(roi, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow("ROI", test_image)

    # Bug fix: rescale pixels to [0, 1] to match training, which used
    # ImageDataGenerator(rescale=1./255).  Feeding raw 0/255 values gives
    # the network inputs 255x larger than it was trained on.
    result = loaded_model.predict(test_image.reshape(1, 64, 64, 1) / 255.0)
    # argmax replaces the original build-dict-then-sort dance.
    best = categories[int(np.argmax(result[0]))]

    cv2.putText(frame, "PREDICTION:", (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
    cv2.putText(frame, best, (180, 130), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
    cv2.imshow("Frame", frame)

    interrupt = cv2.waitKey(10)
    if interrupt & 0xFF == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator


def _build_model():
    """Two conv/pool blocks + dense head for 64x64 grayscale, 10 classes."""
    model = Sequential()
    model.add(Convolution2D(32, (3, 3), input_shape=(64, 64, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(units=128, activation='relu'))
    model.add(Dense(units=10, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model


def _flow(generator, folder):
    """Stream 64x64 grayscale batches with one-hot labels from *folder*."""
    return generator.flow_from_directory(folder,
                                         target_size=(64, 64),
                                         batch_size=10,
                                         color_mode='grayscale',
                                         class_mode='categorical')


classifier = _build_model()

# Training images are augmented; validation images are only rescaled.
training_set = _flow(ImageDataGenerator(rescale=1./255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True),
                     'data/train')
test_set = _flow(ImageDataGenerator(rescale=1./255), 'data/test')

classifier.fit_generator(
    training_set,
    epochs=10,
    validation_data=test_set)

# Persist the architecture (JSON) and weights (HDF5) for prediction.py.
model_json = classifier.to_json()
with open("model-bw.json", "w") as json_file:
    json_file.write(model_json)
classifier.save_weights('model-bw.h5')