-
Notifications
You must be signed in to change notification settings - Fork 2
/
voxel_uncertainty_measures.py
73 lines (57 loc) · 2.5 KB
/
voxel_uncertainty_measures.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
"""
Implementation of standard predictive uncertainty measures for image segmentation (in 3D)
"""
import numpy as np
def renyi_entropy_of_expected(probs, alpha=0.8):
    """
    Renyi entropy of the ensemble-mean predictive distribution.

    Renyi entropy is a generalised version of Shannon - the two are equivalent
    in the limit alpha -> 1.  The generic formula divides by (1 - alpha), so the
    alpha == 1 case is handled explicitly (previously it raised ZeroDivisionError).

    :param probs: array [num_models, num_voxels_X, num_voxels_Y, num_voxels_Z, num_classes]
    :param alpha: Renyi order (alpha > 0); alpha == 1 falls back to Shannon entropy
    :return: array [num_voxels_X, num_voxels_Y, num_voxels_Z,]
    """
    mean_probs = np.mean(probs, axis=0)
    if alpha == 1.:
        # Shannon limit of the Renyi entropy; 1e-10 guards log(0),
        # matching the epsilon convention used elsewhere in this module.
        return -np.sum(mean_probs * np.log(mean_probs + 1e-10), axis=-1)
    scale = 1. / (1. - alpha)
    return scale * np.log(np.sum(mean_probs ** alpha, axis=-1))
def renyi_expected_entropy(probs, alpha=0.8):
    """
    Mean over ensemble members of the per-model Renyi entropy.

    The generic Renyi formula divides by (1 - alpha), so the alpha == 1
    (Shannon) limit is handled explicitly (previously it raised
    ZeroDivisionError).

    :param probs: array [num_models, num_voxels_X, num_voxels_Y, num_voxels_Z, num_classes]
    :param alpha: Renyi order (alpha > 0); alpha == 1 falls back to Shannon entropy
    :return: array [num_voxels_X, num_voxels_Y, num_voxels_Z,]
    """
    if alpha == 1.:
        # Shannon limit; 1e-10 guards log(0), matching the epsilon
        # convention used elsewhere in this module.
        return np.mean(-np.sum(probs * np.log(probs + 1e-10), axis=-1), axis=0)
    scale = 1. / (1. - alpha)
    return np.mean(scale * np.log(np.sum(probs ** alpha, axis=-1)), axis=0)
def entropy_of_expected(probs, epsilon=1e-10):
    """
    Shannon entropy of the ensemble-mean predictive distribution.

    :param probs: array [num_models, num_voxels_X, num_voxels_Y, num_voxels_Z, num_classes]
    :param epsilon: small constant added inside the log to avoid log(0)
    :return: array [num_voxels_X, num_voxels_Y, num_voxels_Z,]
    """
    avg = probs.mean(axis=0)
    # H(avg) = -sum_c avg_c * log(avg_c), summed over the class axis
    return -(avg * np.log(avg + epsilon)).sum(axis=-1)
def expected_entropy(probs, epsilon=1e-10):
    """
    Mean over ensemble members of each model's Shannon entropy.

    :param probs: array [num_models, num_voxels_X, num_voxels_Y, num_voxels_Z, num_classes]
    :param epsilon: small constant added inside the log to avoid log(0)
    :return: array [num_voxels_X, num_voxels_Y, num_voxels_Z,]
    """
    # Per-model entropy over the class axis, then average across models.
    per_model = -(probs * np.log(probs + epsilon)).sum(axis=-1)
    return per_model.mean(axis=0)
def ensemble_uncertainties_classification(probs: np.ndarray, epsilon: float = 1e-10) -> dict:
    """
    Compute voxel scale uncertainty measures from an ensemble of class-probability maps.

    :param probs: array [num_models, num_voxels_X, num_voxels_Y, num_voxels_Z, num_classes]
    :param epsilon: small float to avoid 0 in logs
    :return: Dictionary of uncertainties, each an array [num_voxels_X, num_voxels_Y, num_voxels_Z,]
    """
    mean_p = probs.mean(axis=0)
    mean_logp = np.log(probs + epsilon).mean(axis=0)

    # Entropy of the mean prediction (same computation as entropy_of_expected).
    total = np.sum(mean_p * -np.log(mean_p + epsilon), axis=-1)
    # Mean per-model entropy (same computation as expected_entropy).
    data = np.mean(np.sum(probs * -np.log(probs + epsilon), axis=-1), axis=0)

    mutual_info = total - data
    # Expected pairwise KL surrogate: cross-entropy of mean log-probs minus mean entropy.
    epkl = -np.sum(mean_p * mean_logp, axis=-1) - data

    return {
        'confidence': -np.max(mean_p, axis=-1),
        'entropy_of_expected': total,
        'expected_entropy': data,
        'mutual_information': mutual_info,
        'epkl': epkl,
        'reverse_mutual_information': epkl - mutual_info,
    }