
Commit 8a8bfd0

merge
FabianIsensee committed Oct 6, 2023
2 parents f2474ef + b8a3749 commit 8a8bfd0
Showing 13 changed files with 27 additions and 27 deletions.
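The changes are a single mechanical refactor: every occurrence of len(x.shape) is replaced with the equivalent x.ndim. A minimal sketch of the equivalence (illustrative only; the array and tensor shapes below are invented and assume NumPy and PyTorch are installed):

import numpy as np
import torch

a = np.zeros((3, 64, 64, 32))         # e.g. (c, x, y, z)
t = torch.zeros(1, 3, 64, 64, 32)     # e.g. (b, c, x, y, z)

# .ndim is equivalent to len(.shape) for numpy arrays and torch tensors alike
assert a.ndim == len(a.shape) == 4
assert t.ndim == len(t.shape) == t.dim() == 5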
@@ -44,8 +44,8 @@ def collect_foreground_intensities(segmentation: np.ndarray, images: np.ndarray,
         """
         images=image with multiple channels = shape (c, x, y(, z))
         """
-        assert len(images.shape) == 4
-        assert len(segmentation.shape) == 4
+        assert images.ndim == 4
+        assert segmentation.ndim == 4

         assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
         assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("
4 changes: 2 additions & 2 deletions nnunetv2/imageio/natural_image_reader_writer.py
@@ -37,15 +37,15 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
         images = []
         for f in image_fnames:
             npy_img = io.imread(f)
-            if len(npy_img.shape) == 3:
+            if npy_img.ndim == 3:
                 # rgb image, last dimension should be the color channel and the size of that channel should be 3
                 # (or 4 if we have alpha)
                 assert npy_img.shape[-1] == 3 or npy_img.shape[-1] == 4, "If image has three dimensions then the last " \
                                                                          "dimension must have shape 3 or 4 " \
                                                                          f"(RGB or RGBA). Image shape here is {npy_img.shape}"
                 # move RGB(A) to front, add additional dim so that we have shape (1, c, X, Y), where c is either 3 or 4
                 images.append(npy_img.transpose((2, 0, 1))[:, None])
-            elif len(npy_img.shape) == 2:
+            elif npy_img.ndim == 2:
                 # grayscale image
                 images.append(npy_img[None, None])

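As a side note on the reshaping in this reader, a small sketch of what the two branches produce (shapes invented for illustration; not code from the repository):

import numpy as np

rgb = np.zeros((480, 640, 3), dtype=np.uint8)      # H x W x RGB, as skimage.io.imread returns it
assert rgb.ndim == 3 and rgb.shape[-1] in (3, 4)
# color channels move to the leading (modality) axis, plus a singleton spatial axis
assert rgb.transpose((2, 0, 1))[:, None].shape == (3, 1, 480, 640)

gray = np.zeros((480, 640), dtype=np.uint8)
assert gray[None, None].shape == (1, 1, 480, 640)  # grayscale becomes a single modality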
4 changes: 2 additions & 2 deletions nnunetv2/imageio/nibabel_reader_writer.py
@@ -41,7 +41,7 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
         spacings_for_nnunet = []
         for f in image_fnames:
             nib_image = nibabel.load(f)
-            assert len(nib_image.shape) == 3, 'only 3d images are supported by NibabelIO'
+            assert nib_image.ndim == 3, 'only 3d images are supported by NibabelIO'
             original_affine = nib_image.affine

             original_affines.append(original_affine)
@@ -120,7 +120,7 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
         spacings_for_nnunet = []
         for f in image_fnames:
             nib_image = nibabel.load(f)
-            assert len(nib_image.shape) == 3, 'only 3d images are supported by NibabelIO'
+            assert nib_image.ndim == 3, 'only 3d images are supported by NibabelIO'
             original_affine = nib_image.affine
             reoriented_image = nib_image.as_reoriented(io_orientation(original_affine))
             reoriented_affine = reoriented_image.affine
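The substitution also relies on nibabel image objects exposing .ndim alongside .shape; a minimal sketch with an in-memory image (assuming a reasonably recent nibabel):

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((64, 64, 32), dtype=np.float32), affine=np.eye(4))
assert img.ndim == len(img.shape) == 3   # same contract as a numpy array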
10 changes: 5 additions & 5 deletions nnunetv2/imageio/simpleitk_reader_writer.py
@@ -39,21 +39,21 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
             origins.append(itk_image.GetOrigin())
             directions.append(itk_image.GetDirection())
             npy_image = sitk.GetArrayFromImage(itk_image)
-            if len(npy_image.shape) == 2:
+            if npy_image.ndim == 2:
                 # 2d
                 npy_image = npy_image[None, None]
                 max_spacing = max(spacings[-1])
                 spacings_for_nnunet.append((max_spacing * 999, *list(spacings[-1])[::-1]))
-            elif len(npy_image.shape) == 3:
+            elif npy_image.ndim == 3:
                 # 3d, as in original nnunet
                 npy_image = npy_image[None]
                 spacings_for_nnunet.append(list(spacings[-1])[::-1])
-            elif len(npy_image.shape) == 4:
+            elif npy_image.ndim == 4:
                 # 4d, multiple modalities in one file
                 spacings_for_nnunet.append(list(spacings[-1])[::-1][1:])
                 pass
             else:
-                raise RuntimeError(f"Unexpected number of dimensions: {len(npy_image.shape)} in file {f}")
+                raise RuntimeError(f"Unexpected number of dimensions: {npy_image.ndim} in file {f}")

             images.append(npy_image)
             spacings_for_nnunet[-1] = list(np.abs(spacings_for_nnunet[-1]))
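The branching above is driven purely by the array's dimensionality. A stripped-down sketch of the same normalization to nnU-Net's (c, x, y, z) layout (the to_cxyz helper is hypothetical and the spacing bookkeeping is omitted):

import numpy as np

def to_cxyz(npy_image: np.ndarray) -> np.ndarray:
    if npy_image.ndim == 2:       # single 2d slice -> (1, 1, x, y)
        return npy_image[None, None]
    if npy_image.ndim == 3:       # 3d volume -> (1, x, y, z)
        return npy_image[None]
    if npy_image.ndim == 4:       # already (c, x, y, z)
        return npy_image
    raise RuntimeError(f"Unexpected number of dimensions: {npy_image.ndim}")

assert to_cxyz(np.zeros((64, 64))).shape == (1, 1, 64, 64)
assert to_cxyz(np.zeros((32, 64, 64))).shape == (1, 32, 64, 64)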
@@ -115,7 +115,7 @@ def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
         return self.read_images((seg_fname, ))

     def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
-        assert len(seg.shape) == 3, 'segmentation must be 3d. If you are exporting a 2d segmentation, please provide it as shape 1,x,y'
+        assert seg.ndim == 3, 'segmentation must be 3d. If you are exporting a 2d segmentation, please provide it as shape 1,x,y'
         output_dimension = len(properties['sitk_stuff']['spacing'])
         assert 1 < output_dimension < 4
         if output_dimension == 2:
4 changes: 2 additions & 2 deletions nnunetv2/imageio/tif_reader_writer.py
@@ -45,7 +45,7 @@ def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[
         images = []
         for f in image_fnames:
             image = tifffile.imread(f)
-            if len(image.shape) != 3:
+            if image.ndim != 3:
                 raise RuntimeError(f"Only 3D images are supported! File: {f}")
             images.append(image[None])

@@ -83,7 +83,7 @@ def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
         ending_length = len(ending)

         seg = tifffile.imread(seg_fname)
-        if len(seg.shape) != 3:
+        if seg.ndim != 3:
             raise RuntimeError(f"Only 3D images are supported! File: {seg_fname}")
         seg = seg[None]

4 changes: 2 additions & 2 deletions nnunetv2/inference/predict_from_raw_data.py
@@ -546,7 +546,7 @@ def _internal_maybe_mirror_and_predict(self, x: torch.Tensor) -> torch.Tensor:
         if mirror_axes is not None:
             # check for invalid numbers in mirror_axes
             # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3
-            assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'
+            assert max(mirror_axes) <= x.ndim - 3, 'mirror_axes does not match the dimension of the input!'

             num_predictons = 2 ** len(mirror_axes)
             if 0 in mirror_axes:
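A worked example for the assertion above (invented shapes): a 3d patch enters the network as a 5d tensor (b, c, x, y, z), so valid mirror axes are the spatial indices 0..ndim-3:

import torch

x = torch.zeros(1, 2, 48, 96, 96)        # (b, c, x, y, z) -> x.ndim == 5
mirror_axes = (0, 1, 2)                  # spatial axes, counted without batch and channel dims
assert max(mirror_axes) <= x.ndim - 3    # 2 <= 2, so this configuration is valid
num_predictions = 2 ** len(mirror_axes)  # 8 mirrored forward passes would be combined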
@@ -582,7 +582,7 @@ def predict_sliding_window_return_logits(self, input_image: torch.Tensor) \
         # So autocast will only be active if we have a cuda device.
         with torch.no_grad():
             with torch.autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
-                assert len(input_image.shape) == 4, 'input_image must be a 4D np.ndarray or torch.Tensor (c, x, y, z)'
+                assert input_image.ndim == 4, 'input_image must be a 4D np.ndarray or torch.Tensor (c, x, y, z)'

                 if self.verbose: print(f'Input shape: {input_image.shape}')
                 if self.verbose: print("step_size:", self.tile_step_size)
2 changes: 1 addition & 1 deletion nnunetv2/preprocessing/cropping/cropping.py
@@ -12,7 +12,7 @@ def create_nonzero_mask(data):
     :return: the mask is True where the data is nonzero
     """
     from scipy.ndimage import binary_fill_holes
-    assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
+    assert data.ndim in (3, 4), "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
     nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
     for c in range(data.shape[0]):
         this_mask = data[c] != 0
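For context, a self-contained sketch of what create_nonzero_mask computes, on toy data (the union loop is simplified relative to the repository code):

import numpy as np
from scipy.ndimage import binary_fill_holes

data = np.zeros((2, 16, 16, 16), dtype=np.float32)   # (C, X, Y, Z)
data[0, 4:12, 4:12, 4:12] = 1.0
assert data.ndim in (3, 4)

nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
for c in range(data.shape[0]):
    nonzero_mask |= data[c] != 0                     # union of nonzero voxels over channels
nonzero_mask = binary_fill_holes(nonzero_mask)
assert nonzero_mask.sum() == 8 ** 3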
8 changes: 4 additions & 4 deletions nnunetv2/preprocessing/resampling/default_resampling.py
@@ -65,7 +65,7 @@ def resample_data_or_seg_to_spacing(data: np.ndarray,
         pass

     if data is not None:
-        assert len(data.shape) == 4, "data must be c x y z"
+        assert data.ndim == 4, "data must be c x y z"

     shape = np.array(data[0].shape)
     new_shape = compute_new_shape(shape[1:], current_spacing, new_spacing)
@@ -116,7 +116,7 @@ def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray],
         pass

     if data is not None:
-        assert len(data.shape) == 4, "data must be c x y z"
+        assert data.ndim == 4, "data must be c x y z"

     data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z)
     return data_reshaped
@@ -136,8 +136,8 @@ def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float, ...], L
     :param order_z: only applies if do_separate_z is True
     :return:
     """
-    assert len(data.shape) == 4, "data must be (c, x, y, z)"
-    assert len(new_shape) == len(data.shape) - 1
+    assert data.ndim == 4, "data must be (c, x, y, z)"
+    assert len(new_shape) == data.ndim - 1

     if is_seg:
         resize_fn = resize_segmentation
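The shape contract enforced above: data is (c, x, y, z), so new_shape must list exactly ndim - 1 spatial sizes. A per-channel sketch using skimage as a stand-in (illustrative only; nnU-Net's real resampling additionally handles anisotropic spacing and segmentation maps):

import numpy as np
from skimage.transform import resize

data = np.random.rand(2, 16, 16, 8)      # (c, x, y, z)
new_shape = (32, 32, 16)                 # one entry per spatial axis
assert data.ndim == 4 and len(new_shape) == data.ndim - 1

resampled = np.stack([resize(data[c], new_shape, order=3, preserve_range=True)
                      for c in range(data.shape[0])])
assert resampled.shape == (2, 32, 32, 16)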
@@ -26,7 +26,7 @@ def __init__(self, ds_scales: Union[List, Tuple],

     def __call__(self, **data_dict):
         if self.axes is None:
-            axes = list(range(2, len(data_dict[self.input_key].shape)))
+            axes = list(range(2, data_dict[self.input_key].ndim))
         else:
             axes = self.axes

2 changes: 1 addition & 1 deletion nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py
@@ -935,7 +935,7 @@ def validation_step(self, batch: dict) -> dict:
             target = target[0]

         # the following is needed for online evaluation. Fake dice (green line)
-        axes = [0] + list(range(2, len(output.shape)))
+        axes = [0] + list(range(2, output.ndim))

         if self.label_manager.has_regions:
             predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()
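What the axes computation yields for a typical 3d validation batch (shapes invented for illustration): batch and spatial axes are kept for the reduction, so one value per class remains:

import torch

output = torch.zeros(2, 3, 48, 96, 96)        # (b, num_classes, x, y, z)
axes = [0] + list(range(2, output.ndim))
assert axes == [0, 2, 3, 4]                   # tp/fp/fn are summed over these axes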
@@ -74,7 +74,7 @@ def validation_step(self, batch: dict) -> dict:
         l = self.loss(output, target)

         # the following is needed for online evaluation. Fake dice (green line)
-        axes = [0] + list(range(2, len(output.shape)))
+        axes = [0] + list(range(2, output.ndim))

         if self.label_manager.has_regions:
             predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()
2 changes: 1 addition & 1 deletion nnunetv2/utilities/json_export.py
@@ -18,7 +18,7 @@ def recursive_fix_for_json_export(my_dict: dict):
         if isinstance(my_dict[k], dict):
             recursive_fix_for_json_export(my_dict[k])
         elif isinstance(my_dict[k], np.ndarray):
-            assert len(my_dict[k].shape) == 1, 'only 1d arrays are supported'
+            assert my_dict[k].ndim == 1, 'only 1d arrays are supported'
             my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
         elif isinstance(my_dict[k], (np.bool_,)):
             my_dict[k] = bool(my_dict[k])
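Why the 1d restriction matters here: json cannot serialize numpy arrays, so 1d arrays are turned into plain lists first. A sketch using .tolist() as a stand-in for fix_types_iterable:

import json
import numpy as np

my_dict = {"spacing": np.array([0.5, 0.5, 1.0])}
assert my_dict["spacing"].ndim == 1
my_dict["spacing"] = my_dict["spacing"].tolist()   # 1d array -> plain list of floats
json.dumps(my_dict)                                # now serializable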
6 changes: 3 additions & 3 deletions nnunetv2/utilities/overlay_plots.py
@@ -65,9 +65,9 @@ def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping:
     # create a copy of image
     image = np.copy(input_image)

-    if len(image.shape) == 2:
+    if image.ndim == 2:
         image = np.tile(image[:, :, None], (1, 1, 3))
-    elif len(image.shape) == 3:
+    elif image.ndim == 3:
         if image.shape[2] == 1:
             image = np.tile(image, (1, 1, 3))
         else:
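The 2d branch simply broadcasts a grayscale slice to RGB before the overlay is drawn; a minimal sketch:

import numpy as np

gray = np.random.rand(128, 128)               # 2d grayscale slice
rgb = np.tile(gray[:, :, None], (1, 1, 3))    # replicate intensity into three color channels
assert rgb.ndim == 3 and rgb.shape == (128, 128, 3)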
@@ -139,7 +139,7 @@ def plot_overlay(image_file: str, segmentation_file: str, image_reader_writer: B
     assert image.shape == seg.shape, "image and seg do not have the same shape: %s, %s" % (
         image_file, segmentation_file)

-    assert len(image.shape) == 3, 'only 3D images/segs are supported'
+    assert image.ndim == 3, 'only 3D images/segs are supported'

     selected_slice = select_slice_to_plot2(image, seg)
     # print(image.shape, selected_slice)

0 comments on commit 8a8bfd0
