diff --git a/ai/risk_assessment/model_evaluation.py b/ai/risk_assessment/model_evaluation.py
index ef356aade..3005f223a 100644
--- a/ai/risk_assessment/model_evaluation.py
+++ b/ai/risk_assessment/model_evaluation.py
@@ -1,6 +1,7 @@
 import pandas as pd
 from sklearn.metrics import classification_report, confusion_matrix
 
+
 def evaluate_model_performance(model, X_test, y_test):
     """
     Evaluates the performance of the trained model using various metrics.
@@ -8,5 +9,9 @@ def evaluate_model_performance(model, X_test, y_test):
     y_pred = model.predict(X_test)
     report = classification_report(y_test, y_pred)
     matrix = confusion_matrix(y_test, y_pred)
-    df = pd.DataFrame(matrix, columns=['Predicted No', 'Predicted Yes'], index=['Actual No', 'Actual Yes'])
+    df = pd.DataFrame(
+        matrix,
+        columns=["Predicted No", "Predicted Yes"],
+        index=["Actual No", "Actual Yes"],
+    )
     return report, df
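
For reference, a minimal usage sketch of the helper touched by this diff. The synthetic dataset, the LogisticRegression classifier, and the import path are illustrative assumptions, not part of the change; any scikit-learn-style estimator exposing `.predict()` should work the same way.

```python
# Hypothetical usage sketch; dataset, model, and import path are assumptions.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Assumes the repo root is on PYTHONPATH so the module is importable.
from ai.risk_assessment.model_evaluation import evaluate_model_performance

# Small synthetic binary-classification problem.
X, y = make_classification(n_samples=200, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Any classifier with a .predict() method works here.
model = LogisticRegression().fit(X_train, y_train)

report, matrix_df = evaluate_model_performance(model, X_test, y_test)
print(report)     # per-class precision/recall/F1 text report
print(matrix_df)  # 2x2 confusion matrix as a labelled DataFrame
```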