-
Notifications
You must be signed in to change notification settings - Fork 36
/
add_persons.py
175 lines (137 loc) · 5.78 KB
/
add_persons.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
import argparse
import os
import shutil
import cv2
import numpy as np
import torch
from torchvision import transforms
from face_detection.scrfd.detector import SCRFD
from face_detection.yolov5_face.detector import Yolov5Face
from face_recognition.arcface.model import iresnet_inference
from face_recognition.arcface.utils import read_features
# Check if CUDA is available and set the device accordingly.
# Both the recognizer below and get_feature() run on this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialize the face detector (choose one of the two detectors).
# The YOLOv5-face alternative is kept here, commented out, for easy swapping:
# detector = Yolov5Face(model_file="face_detection/yolov5_face/weights/yolov5n-face.pt")
detector = SCRFD(model_file="face_detection/scrfd/weights/scrfd_2.5g_bnkps.onnx")

# Initialize the face recognizer (ArcFace iResNet-100).
# NOTE(review): weight files are loaded at import time from relative paths,
# so this script assumes it is run from the project root — confirm.
recognizer = iresnet_inference(
    model_name="r100", path="face_recognition/arcface/weights/arcface_r100.pth", device=device
)
# Preprocessing pipeline, built once at import time — the original rebuilt
# this transforms.Compose object on every call, which was pure overhead.
_FACE_PREPROCESS = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Resize((112, 112)),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
)


@torch.no_grad()
def get_feature(face_image):
    """
    Extract an L2-normalized facial embedding from a face crop.

    Args:
        face_image (numpy.ndarray): Face crop in BGR channel order (as
            produced by OpenCV), any spatial size.

    Returns:
        numpy.ndarray: 1-D unit-norm feature vector from the recognizer.
    """
    # OpenCV decodes images as BGR; the recognizer expects RGB.
    face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)

    # To tensor, resize to the 112x112 ArcFace input size, normalize to
    # roughly [-1, 1], then add a batch dimension and move to the device.
    batch = _FACE_PREPROCESS(face_image).unsqueeze(0).to(device)

    # Forward pass; take the single embedding out of the batch.
    emb_img_face = recognizer(batch)[0].cpu().numpy()

    # L2-normalize so cosine similarity reduces to a plain dot product.
    return emb_img_face / np.linalg.norm(emb_img_face)
def add_persons(backup_dir, add_persons_dir, faces_save_dir, features_path):
    """
    Add new persons to the face recognition database.

    Scans ``add_persons_dir`` (one sub-directory per person, each containing
    images), detects faces, saves the crops under ``faces_save_dir``,
    appends their embeddings to the feature store at ``features_path``,
    and finally moves the processed person folders into ``backup_dir``.

    Args:
        backup_dir (str): Directory to move processed person data into.
        add_persons_dir (str): Directory containing images of new persons.
        faces_save_dir (str): Directory to save the extracted face crops.
        features_path (str): Path (without extension) of the .npz feature file.

    Returns:
        None
    """
    images_name = []  # person label per saved face
    images_emb = []   # embedding per saved face (grows in lockstep with names)

    for name_person in os.listdir(add_persons_dir):
        person_image_path = os.path.join(add_persons_dir, name_person)

        # Directory that will hold this person's extracted face crops.
        person_face_path = os.path.join(faces_save_dir, name_person)
        os.makedirs(person_face_path, exist_ok=True)

        for image_name in os.listdir(person_image_path):
            # Case-insensitive extension check so e.g. ".JPG"/".PNG" files
            # are not silently skipped (original check was case-sensitive).
            if not image_name.lower().endswith(("png", "jpg", "jpeg")):
                continue

            input_image = cv2.imread(os.path.join(person_image_path, image_name))
            # cv2.imread returns None for unreadable/corrupt files; skip
            # those instead of crashing inside detector.detect.
            if input_image is None:
                print(f"Warning: could not read image {image_name}, skipping.")
                continue

            # Detect faces and landmarks using the face detector.
            bboxes, landmarks = detector.detect(image=input_image)

            for i in range(len(bboxes)):
                # Current crop count determines the next file name.
                number_files = len(os.listdir(person_face_path))

                # Detector coordinates may be floats; cast to int and clamp
                # to the image bounds before slicing.
                x1, y1, x2, y2, score = bboxes[i]
                img_h, img_w = input_image.shape[:2]
                x1, y1 = max(int(x1), 0), max(int(y1), 0)
                x2, y2 = min(int(x2), img_w), min(int(y2), img_h)
                if x2 <= x1 or y2 <= y1:
                    continue  # degenerate box, nothing to crop

                face_image = input_image[y1:y2, x1:x2]

                # Save the crop to the faces database.
                path_save_face = os.path.join(person_face_path, f"{number_files}.jpg")
                cv2.imwrite(path_save_face, face_image)

                # Extract and record the embedding for this face.
                images_emb.append(get_feature(face_image=face_image))
                images_name.append(name_person)

    # Nothing new found — the two lists always grow together, so checking
    # one is enough (original compared both against []).
    if not images_emb:
        print("No new person found!")
        return None

    images_emb = np.array(images_emb)
    images_name = np.array(images_name)

    # Merge with any previously stored features.
    features = read_features(features_path)
    if features is not None:
        old_images_name, old_images_emb = features
        images_name = np.hstack((old_images_name, images_name))
        images_emb = np.vstack((old_images_emb, images_emb))
        print("Update features!")

    # Persist the combined feature store.
    np.savez_compressed(features_path, images_name=images_name, images_emb=images_emb)

    # Move processed person folders into the backup directory; copytree as
    # the copy function lets shutil.move handle directories across devices.
    for sub_dir in os.listdir(add_persons_dir):
        dir_to_move = os.path.join(add_persons_dir, sub_dir)
        shutil.move(dir_to_move, backup_dir, copy_function=shutil.copytree)

    print("Successfully added new person!")
if __name__ == "__main__":
    # Command-line interface: each option mirrors a parameter of add_persons
    # (argparse converts "--backup-dir" to the attribute "backup_dir").
    parser = argparse.ArgumentParser()
    cli_options = [
        ("--backup-dir", "./datasets/backup", "Directory to save person data."),
        ("--add-persons-dir", "./datasets/new_persons", "Directory to add new persons."),
        ("--faces-save-dir", "./datasets/data/", "Directory to save faces."),
        ("--features-path", "./datasets/face_features/feature", "Path to save face features."),
    ]
    for flag, default_value, help_text in cli_options:
        parser.add_argument(flag, type=str, default=default_value, help=help_text)

    opt = parser.parse_args()

    # Run the main function with the parsed options.
    add_persons(**vars(opt))