### Performance_assessment.py — methods to assess prediction accuracy ###
### Methods to assess accuracy of prediction ####
from math import sqrt
####### Accuracy for classification problems ######
# Get accuracy of prediction #
def getAccuracy(actual, predicted):
    """Return classification accuracy as a percentage in [0.0, 100.0].

    actual / predicted: equal-length sequences of class labels.
    Counts positions where the labels match exactly.
    Returns 0.0 for empty input (the original raised ZeroDivisionError).
    """
    if not actual:
        return 0.0
    # zip pairs each actual label with its prediction; sum counts matches
    correct = sum(1 for a, p in zip(actual, predicted) if a == p)
    return correct / float(len(actual)) * 100.00
# Calculate a Confusion Matrix #
def confusion_matrix(actual, predicted):
    """Build a confusion matrix from parallel label sequences.

    Rows index the actual class, columns the predicted class.
    Returns (labels, matrix): labels is the sorted list of unique actual
    labels — sorted so the row/column order is deterministic (iterating a
    raw set is order-unstable across interpreter runs) — and matrix[i][j]
    counts samples of actual class labels[i] predicted as labels[j].

    Raises KeyError if `predicted` contains a label absent from `actual`
    (same as the original behavior).
    """
    labels = sorted(set(actual))
    n = len(labels)
    matrix = [[0] * n for _ in range(n)]
    # label -> row/column index
    lookup = {value: i for i, value in enumerate(labels)}
    for a, p in zip(actual, predicted):
        matrix[lookup[a]][lookup[p]] += 1
    return labels, matrix
# Printing a confusion matrix
def print_confusion_matrix(unique, matrix):
    """Pretty-print a confusion matrix to stdout.

    Predicted labels run across the header line ('(P)'); each subsequent
    row is an actual label ('(A)' axis) followed by its counts.
    """
    header = ' '.join(str(label) for label in unique)
    print('Unique prediction values:')
    print('(P)' + header)
    print('(A)---')
    print("Confusion Matrix:")
    for i, row_label in enumerate(unique):
        cells = ' '.join(str(count) for count in matrix[i])
        print("%s| %s" % (row_label, cells))
# Recall classification estimator
def recall_precision_calc(matrix):
    """Compute (recall, precision, F1) for the LAST class of a square
    confusion matrix whose rows are actual classes and whose columns are
    predicted classes (the convention produced by confusion_matrix above).

    NOTE: the loop overwrites its results every iteration, so — as in the
    original — only the metrics of the final class index are returned.

    Bug fix: fp and fn were swapped. With rows = actual / columns =
    predicted, false positives live in class i's COLUMN (predicted i,
    actually something else) and false negatives in its ROW (actually i,
    predicted something else).

    Raises ZeroDivisionError if a class has no true positives and no
    errors of the relevant kind (tp + fn == 0 or tp + fp == 0).
    """
    for i in range(len(matrix[0])):
        row_values = matrix[i]                    # actual class i
        col_values = [row[i] for row in matrix]   # predicted class i
        tp = matrix[i][i]                         # diagonal: correct hits
        fp = sum(col_values) - tp   # predicted i but actually another class
        fn = sum(row_values) - tp   # actual i but predicted another class
        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        F1_score = 2 * (precision * recall) / (precision + recall)
    return recall, precision, F1_score
###### Accuracy methods for Regression problems ########
# Calculate mean absolute error (MAE) #
def mae_metric(actual, predicted):
    """Return the mean absolute error between two parallel numeric sequences."""
    total_error = sum(abs(p - a) for a, p in zip(actual, predicted))
    return total_error / float(len(actual))
# Calculate root mean squared error #
def rmse_metric(actual, predicted):
    """Return the root mean squared error between two parallel numeric sequences."""
    squared_errors = [(p - a) ** 2 for a, p in zip(actual, predicted)]
    mean_error = sum(squared_errors) / float(len(actual))
    return sqrt(mean_error)