-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathDashbard.py
209 lines (155 loc) · 6.69 KB
/
Dashbard.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
# >py file of CNN_DSB.ipynb file
# Foul Detection Model
import os
import joblib
from sklearn.feature_extraction.text import CountVectorizer

# CountVectorizer whose fitted vocabulary is restored from disk below so that
# transform() produces the same feature layout the model was trained with.
cv = CountVectorizer()

# Trained foul-detector model. Pre-initialized to None so that a failed load
# leaves a clear "model unavailable" sentinel instead of an undefined name
# (the original raised NameError in detect() after any load failure).
md = None

try:
    md = joblib.load("foul_detector_model.joblib")
    # Resolve the vocabulary file relative to the current working directory.
    current_directory = os.path.abspath(os.getcwd())
    vocabulary_path = os.path.join(current_directory, "vocabulary.joblib")
    # Check if the vocabulary file exists before loading.
    if os.path.exists(vocabulary_path):
        vocabulary = joblib.load(vocabulary_path)
        # Restore the fitted vocabulary onto the (otherwise unfitted) vectorizer.
        cv.vocabulary_ = vocabulary
    else:
        print("Vocabulary file not found.")
except (OSError, ValueError, KeyError) as e:
    # OSError covers a missing/unreadable model file (FileNotFoundError is a
    # subclass); ValueError/KeyError cover joblib/pickle compatibility issues.
    # The original only caught ValueError/KeyError, so a missing model file
    # crashed the whole script at import time.
    print(f"Error loading the model or vocabulary: {e}")
    # You might want to retrain and save the model if there are no compatibility issues
def detect(test_data):
    """Classify a text string with the foul-detector model.

    Parameters
    ----------
    test_data : str
        Raw text to vectorize and classify.

    Returns
    -------
    int
        Predicted class label; 0 is treated as "no foul" by the caller
        in the __main__ block.
    """
    # Vectorize with the same vocabulary the model was trained on
    # (cv and md are module-level, set up in the loading section above).
    features = cv.transform([test_data]).toarray()
    # predict() returns a 1-element array; index it explicitly — calling
    # int() directly on an ndarray is deprecated in NumPy and raises for
    # arrays with ndim > 0 in recent versions.
    return int(md.predict(features)[0])
# Voice detection model
import os
import numpy as np
import librosa
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import layers, models, callbacks
# Function to extract features from audio data
def extract_features(file_path):
    """Load an audio file and return the mean of its 13 MFCC coefficients.

    Parameters
    ----------
    file_path : str
        Path to an audio file readable by librosa.

    Returns
    -------
    numpy.ndarray
        1-D array of 13 time-averaged MFCC values.
    """
    signal, rate = librosa.load(file_path, res_type='kaiser_fast')
    mfcc_matrix = librosa.feature.mfcc(y=signal, sr=rate, n_mfcc=13)
    # Collapse the time axis: one averaged value per MFCC coefficient.
    return mfcc_matrix.mean(axis=1)
# Load the trained speaker-identification Keras model from disk.
loaded_model = models.load_model('speaker_identification_model.h5')
# Rebuild the label encoder by restoring its fitted classes_ array directly;
# this avoids re-fitting while keeping the index -> speaker-name mapping.
loaded_label_encoder = LabelEncoder()
# NOTE(review): assumes label_encoder.npy was saved from classes_ as a plain
# (non-object) string array — np.load would need allow_pickle=True otherwise.
loaded_label_encoder.classes_ = np.load('label_encoder.npy')
def Recog_voice(new_audio_file_wav):
    """Identify the speaker of a WAV recording with the loaded Keras model.

    Parameters
    ----------
    new_audio_file_wav : str
        Path to the WAV file to classify.

    Returns
    -------
    numpy.ndarray
        1-element array holding the decoded speaker label; callers index
        element 0 to obtain the name (this shape is part of the contract).
    """
    # Time-averaged MFCC features for the recording.
    features = extract_features(new_audio_file_wav)
    # Model expects input shaped (batch, n_features, 1).
    batch = features.reshape(1, len(features), 1)
    scores = loaded_model.predict(batch)
    # Winning class index across the prediction scores.
    best_index = np.argmax(scores)
    # Map the class index back to the speaker's name via the label encoder.
    predicted_label_wav = loaded_label_encoder.inverse_transform(
        np.array([best_index])
    ).reshape(1, -1)
    print(f'Predicted Speaker: {predicted_label_wav[0]}')
    return predicted_label_wav[0]
# Storing Database
import cv2
import librosa
import numpy as np
import mysql.connector
def storedata(audio_file_path, speaker_name=None):
    """Record a foul-language incident for a student in the MySQL database.

    Looks up the student's id by name, then inserts a row into
    Defaulters_list with the offending audio file path and a fixed fine.

    Parameters
    ----------
    audio_file_path : str
        Path of the saved audio recording to associate with the incident.
    speaker_name : sequence, optional
        Result of Recog_voice(); element 0 is the student's name. When
        omitted, falls back to the module-level S_name set in __main__
        (preserves the original call signature for existing callers).
    """
    if speaker_name is None:
        # Backward compatibility: the original read the global directly.
        speaker_name = S_name
    # NOTE(review): credentials are hard-coded; move them to config/env vars.
    mydb = mysql.connector.connect(
        host="localhost",
        user="root",
        password="Anushka@123",
        database="foul_db"
    )
    print(mydb)
    cur = mydb.cursor()
    try:
        # Parameterized query — the original interpolated the recognized name
        # into the SQL string with an f-string, which is SQL-injectable.
        # str() matches the implicit conversion the old f-string performed.
        cur.execute(
            "SELECT id FROM Students WHERE name=%s",
            (str(speaker_name[0]),),
        )
        result = cur.fetchone()
        if result:
            student_id = result[0]
            fine = 100  # flat fine per incident
            insert_query = "INSERT INTO Defaulters_list (ID,audio,fine) VALUES (%s, %s,%s)"
            cur.execute(insert_query, (student_id, audio_file_path, fine))
            # Persist the insert.
            mydb.commit()
        else:
            print("No student found with the given name.")
    finally:
        # Always release the cursor and the connection (the original leaked
        # the connection on every call and the cursor on query errors).
        cur.close()
        mydb.close()
# Dashboard
import os
import speech_recognition as sr
class Dashboard:
    """Capture microphone audio, save it to disk, and transcribe it via Google STT."""

    def __init__(self, save_folder="audio_inputs"):
        self.recognizer = sr.Recognizer()
        self.microphone = sr.Microphone()
        self.save_folder = save_folder
        # Ensure the destination folder exists before any recording is saved.
        os.makedirs(self.save_folder, exist_ok=True)
        # Retained for backward compatibility; uniquecode() uses count.txt,
        # not this attribute.
        self.count = 0

    @staticmethod  # Static: the counter lives in count.txt, not on the instance.
    def uniquecode():
        """Return the next persistent sequence number, backed by count.txt.

        The original crashed with FileNotFoundError on the very first run
        (no count.txt yet); a missing or corrupt counter file now restarts
        the sequence at 0, so the first code issued is 1.
        """
        count_file_path = "count.txt"
        try:
            with open(count_file_path, "r") as count_file:
                count = int(count_file.read())
        except (FileNotFoundError, ValueError):
            # First run, or corrupted counter file: restart the sequence.
            count = 0
        count += 1
        with open(count_file_path, "w") as count_file:
            count_file.write(str(count))
        return count

    def convert_audio_to_text(self):
        """Record one utterance, save it as a WAV file, and transcribe it.

        Returns
        -------
        tuple
            (transcribed text or error message, path of the saved WAV file,
            or None for the path when recognition failed).
        """
        current_uniquecode = self.uniquecode()
        # Unique per-recording file name inside the configured save folder.
        audio_file_path = os.path.join(
            self.save_folder, f"audio_input_{current_uniquecode}.wav"
        )
        with self.microphone as source:
            print("Listening...")
            try:
                audio_data = self.recognizer.listen(source, timeout=5)  # Adjust timeout as needed
                # Persist the raw recording so it can be attached to a DB record.
                with open(audio_file_path, "wb") as audio_file:
                    audio_file.write(audio_data.get_wav_data())
                text = self.recognizer.recognize_google(audio_data)
                return text, audio_file_path
            except sr.UnknownValueError:
                return "Could not understand audio", None
            except sr.RequestError as e:
                return f"Error connecting to Google API: {e}", None
# Example usage:
if __name__ == "__main__":
    # Record into the capture directory used throughout the app.
    custom_folder_path = "audio_inputs"
    dashboard = Dashboard(save_folder=custom_folder_path)

    # Capture one utterance from the microphone and transcribe it.
    result, audio_file_path = dashboard.convert_audio_to_text()
    if audio_file_path:
        print("Text from audio:")
        print(result)
        # Run the foul-language classifier over the transcript.
        output = detect(result)
        if output == 0:
            print("Status: All Good")
        else:
            print("Status: Foul Detected")
            # Identify the speaker; S_name stays module-level because
            # storedata() falls back to reading it as a global.
            S_name = Recog_voice(audio_file_path)
            storedata(audio_file_path)
#