Skip to content

Commit

Permalink
Remove copied froms for deprecated models (#31153)
Browse files Browse the repository at this point in the history
* Remove copied froms for deprecated models

* Remove automatically in script
  • Loading branch information
amyeroberts authored Jun 3, 2024
1 parent 97e5a70 commit 5b5b48b
Show file tree
Hide file tree
Showing 21 changed files with 23 additions and 185 deletions.
21 changes: 0 additions & 21 deletions src/transformers/models/deprecated/deta/image_processing_deta.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,6 @@
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)


# Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
"""
Computes the output image size given the input image size and the desired output size.
Expand Down Expand Up @@ -110,7 +109,6 @@ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, in
return (oh, ow)


# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
def get_resize_output_image_size(
input_image: np.ndarray,
size: Union[int, Tuple[int, int], List[int]],
Expand Down Expand Up @@ -139,7 +137,6 @@ def get_resize_output_image_size(
return get_size_with_aspect_ratio(image_size, size, max_size)


# Copied from transformers.models.detr.image_processing_detr.get_image_size_for_max_height_width
def get_image_size_for_max_height_width(
input_image: np.ndarray,
max_height: int,
Expand Down Expand Up @@ -175,7 +172,6 @@ def get_image_size_for_max_height_width(
return new_height, new_width


# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
def get_numpy_to_framework_fn(arr) -> Callable:
"""
Returns a function that converts a numpy array to the framework of the input array.
Expand All @@ -200,7 +196,6 @@ def get_numpy_to_framework_fn(arr) -> Callable:
raise ValueError(f"Cannot convert arrays of type {type(arr)}")


# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
"""
Squeezes an array, but only if the axis specified has dim 1.
Expand All @@ -214,7 +209,6 @@ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
return arr


# Copied from transformers.models.detr.image_processing_detr.normalize_annotation
def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
image_height, image_width = image_size
norm_annotation = {}
Expand All @@ -229,15 +223,13 @@ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
return norm_annotation


# Copied from transformers.models.detr.image_processing_detr.max_across_indices
def max_across_indices(values: Iterable[Any]) -> List[Any]:
    """
    Compute the per-position maximum over an iterable of equal-length sequences.

    `zip(*values)` transposes the input so each tuple holds the values at one
    index across all sequences; `max` is then applied to each tuple.
    """
    per_index_groups = zip(*values)
    return list(map(max, per_index_groups))


# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
def get_max_height_width(
images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> List[int]:
Expand All @@ -256,7 +248,6 @@ def get_max_height_width(
return (max_height, max_width)


# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
def make_pixel_mask(
image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
Expand All @@ -275,7 +266,6 @@ def make_pixel_mask(
return mask


# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
"""
Convert a COCO polygon annotation to a mask.
Expand Down Expand Up @@ -310,7 +300,6 @@ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndar
return masks


# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA
def prepare_coco_detection_annotation(
image,
target,
Expand Down Expand Up @@ -371,7 +360,6 @@ def prepare_coco_detection_annotation(
return new_target


# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
"""
Compute the bounding boxes around the provided panoptic segmentation masks.
Expand Down Expand Up @@ -406,7 +394,6 @@ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
return np.stack([x_min, y_min, x_max, y_max], 1)


# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA
def prepare_coco_panoptic_annotation(
image: np.ndarray,
target: Dict,
Expand Down Expand Up @@ -448,7 +435,6 @@ def prepare_coco_panoptic_annotation(
return new_target


# Copied from transformers.models.detr.image_processing_detr.resize_annotation
def resize_annotation(
annotation: Dict[str, Any],
orig_size: Tuple[int, int],
Expand Down Expand Up @@ -594,7 +580,6 @@ def __init__(
self.do_pad = do_pad
self.pad_size = pad_size

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA
def prepare_annotation(
self,
image: np.ndarray,
Expand Down Expand Up @@ -683,7 +668,6 @@ def resize(
)
return image

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
def resize_annotation(
self,
annotation,
Expand All @@ -697,7 +681,6 @@ def resize_annotation(
"""
return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
def rescale(
self,
image: np.ndarray,
Expand Down Expand Up @@ -726,15 +709,13 @@ def rescale(
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
    """
    Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
    `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
    """
    # Delegate to the module-level helper of the same name; inside the method
    # body the bare name resolves to the module function, not this method.
    normalized = normalize_annotation(annotation, image_size=image_size)
    return normalized

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
def _update_annotation_for_padded_image(
self,
annotation: Dict,
Expand Down Expand Up @@ -778,7 +759,6 @@ def _update_annotation_for_padded_image(
new_annotation[key] = value
return new_annotation

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
Expand Down Expand Up @@ -812,7 +792,6 @@ def _pad_image(
)
return padded_image, annotation

# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
def pad(
self,
images: List[np.ndarray],
Expand Down
Loading

0 comments on commit 5b5b48b

Please sign in to comment.