Showing 1 changed file with 72 additions and 0 deletions.
import os
import json
import cv2  # used in process_video for drawing and display
import numpy as np
from typing import Dict, List
from tensorflow import keras
from tensorflow.keras import layers
from edgeiq import EdgeIQ
from edgeiq.object_detection import ObjectDetector
from edgeiq.video import VideoStream


class EdgeFunctions:
    """Wraps a Keras model with classification, detection, video, and edge-deployment helpers."""

    def __init__(self, model_path: str, device: str = "CPU"):
        self.model_path = model_path
        self.device = device
        self.model = keras.models.load_model(model_path)
        self.object_detector = ObjectDetector(self.model, device)

    def classify_image(self, image_path: str) -> Dict[str, float]:
        # Load and batch the image, then map each label to its predicted confidence.
        # Assumes the loaded model exposes a `labels` attribute listing its class names.
        image = keras.preprocessing.image.load_img(image_path, target_size=(224, 224))
        input_data = keras.preprocessing.image.img_to_array(image)
        input_data = np.expand_dims(input_data, axis=0)
        predictions = self.model.predict(input_data)
        return {label: float(confidence) for label, confidence in zip(self.model.labels, predictions[0])}

    def detect_objects(self, image_path: str) -> List[Dict[str, float]]:
        # Assumes detect() returns parallel arrays: confidences first, then box coordinates.
        image = keras.preprocessing.image.load_img(image_path, target_size=(224, 224))
        input_data = keras.preprocessing.image.img_to_array(image)
        input_data = np.expand_dims(input_data, axis=0)
        predictions = self.object_detector.detect(input_data)
        return [
            {"label": label, "confidence": confidence, "bbox": [x, y, w, h]}
            for label, confidence, x, y, w, h in zip(
                self.object_detector.labels, predictions[0], *predictions[1:]
            )
        ]

    def process_video(self, video_path: str) -> None:
        # Assumes detect() yields dicts with "label" and "bbox" ([x, y, w, h]) entries here.
        video_stream = VideoStream(video_path)
        while True:
            frame = video_stream.read()
            if frame is None:
                break
            # Pre-process a copy for inference; keep the resized frame for drawing and display
            display_frame = cv2.resize(frame, (224, 224))
            input_data = np.expand_dims(display_frame / 255.0, axis=0)
            # Make predictions
            predictions = self.object_detector.detect(input_data)
            # Draw bounding boxes and labels on the display frame
            for prediction in predictions:
                x, y, w, h = prediction["bbox"]
                cv2.rectangle(display_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(display_frame, prediction["label"], (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # Display the frame
            cv2.imshow("Frame", display_frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        video_stream.release()
        cv2.destroyAllWindows()

    def optimize_model(self) -> None:
        # Optimize the model for edge deployment using EdgeIQ
        edgeiq_model = EdgeIQ.optimize_model(self.model, "edgeiq_config.json", device=self.device)
        edgeiq_model.save("optimized_model.h5")

    def deploy_model(self) -> None:
        # Deploy the optimized model to the edge device
        EdgeIQ.deploy_model("optimized_model.h5", self.device)
        print("Model deployed to edge device")


# Example usage:
if __name__ == "__main__":
    edge_functions = EdgeFunctions("model.h5", device="GPU")
    print(edge_functions.classify_image("image.jpg"))
    print(edge_functions.detect_objects("image.jpg"))
    edge_functions.process_video("video.mp4")
    edge_functions.optimize_model()
    edge_functions.deploy_model()