use isinstance instead of type comparison #26748

Closed · wants to merge 10 commits (changes shown from 5 commits)
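This PR replaces direct type comparisons (type(x) == T / type(x) is T) with isinstance(x, T), the form PEP 8 recommends and that linters such as ruff flag (rule E721); a few hunks also rename local helpers (evaluate to evaluation, time.sleep to sleep), apparently to silence related shadowing warnings. Below is a minimal sketch of the behavioral difference between the two checks; the Config class is a hypothetical illustration, not code from the PR:

class Config(dict):
    """A dict subclass, standing in for the library's many config-like objects."""


cfg = Config(num_layers=12)

print(type(cfg) == dict)      # False: exact-type comparison ignores inheritance
print(isinstance(cfg, dict))  # True: isinstance accepts subclasses

# isinstance also takes a tuple of types, collapsing chained checks such as
# `(type(x) is float) or (type(x) is int)` into a single call:
print(isinstance(0.5, (float, int)))  # True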
6 changes: 3 additions & 3 deletions examples/flax/image-captioning/run_image_captioning_flax.py
@@ -1174,7 +1174,7 @@ def evaluation_loop(
with open(output_file, "w", encoding="UTF-8") as fp:
json.dump(generations, fp, ensure_ascii=False, indent=4)

-def evaluate(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""):
+def evaluation(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""):
evaluation_loop(rng, dataset, metric_key_prefix="eval", ckpt_dir=ckpt_dir)

def predict(rng: jax.random.PRNGKey, dataset: Dataset):
@@ -1247,7 +1247,7 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset):
):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
-evaluate(input_rng, eval_dataset, ckpt_dir)
+evaluation(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)

# ======================== Epoch End ==============================
@@ -1270,7 +1270,7 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset):
if training_args.do_eval and (training_args.eval_steps is None or training_args.eval_steps <= 0):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
-evaluate(input_rng, eval_dataset, ckpt_dir)
+evaluation(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)

# ======================== Evaluating | Predicting ==============================
2 changes: 1 addition & 1 deletion examples/flax/text-classification/run_flax_glue.py
@@ -212,7 +212,7 @@ def __post_init__(self):
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
-self.task_name = self.task_name.lower() if type(self.task_name) == str else self.task_name
+self.task_name = self.task_name.lower() if isinstance(self.task_name, str) else self.task_name


def create_train_state(
2 changes: 1 addition & 1 deletion examples/legacy/pytorch-lightning/run_glue.py
@@ -23,7 +23,7 @@ class GLUETransformer(BaseTransformer):
mode = "sequence-classification"

def __init__(self, hparams):
-if type(hparams) == dict:
+if isinstance(hparams, dict):
hparams = Namespace(**hparams)
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
2 changes: 1 addition & 1 deletion examples/legacy/pytorch-lightning/run_ner.py
@@ -25,7 +25,7 @@ class NERTransformer(BaseTransformer):
mode = "token-classification"

def __init__(self, hparams):
-if type(hparams) == dict:
+if isinstance(hparams, dict):
hparams = Namespace(**hparams)
module = import_module("tasks")
try:
@@ -32,7 +32,7 @@ def __init__(self, config):
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

def set_early_exit_entropy(self, x):
-if (type(x) is float) or (type(x) is int):
+if isinstance(x, (float, int)):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
@@ -239,7 +239,7 @@ def print_model_summary(model, name_width=25, line_width=180, ignore=None):
continue
if type(mod) in ignore:
continue
-if [True for s in ignore if type(s) is str and s in name]:
+if [True for s in ignore if isinstance(s, str) and s in name]:
continue
act_str = f"Act:{input_q.extra_repr()}"
wgt_str = f"Wgt:{weight_q.extra_repr()}"
2 changes: 1 addition & 1 deletion setup.py
@@ -156,7 +156,7 @@
"rhoknp>=1.1.0,<1.3.1",
"rjieba",
"rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
-"ruff>=0.0.241,<=0.0.259",
+"ruff>=0.0.241",
"sacrebleu>=1.4.12,<2.0.0",
"sacremoses",
"safetensors>=0.3.1",
4 changes: 2 additions & 2 deletions src/transformers/data/data_collator.py
@@ -121,7 +121,7 @@ def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
-dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
+dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)

# Handling of all other possible keys.
@@ -196,7 +196,7 @@ def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any
if isinstance(first["label_ids"], np.ndarray):
batch["labels"] = np.stack([f["label_ids"] for f in features])
else:
-dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32
+dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)

# Handling of all other possible keys.
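One subtlety in the collator hunks above, noted as my own observation rather than anything raised in the PR: bool is a subclass of int in Python, so the converted check accepts booleans where the exact-type check did not. For label ids this rarely matters, but the rewrite is not strictly behavior-preserving:

x = True
print(type(x) is int)      # False: the exact type is bool
print(isinstance(x, int))  # True: bool is a subclass of int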
2 changes: 1 addition & 1 deletion src/transformers/dependency_versions_table.py
@@ -62,7 +62,7 @@
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
-"ruff": "ruff>=0.0.241,<=0.0.259",
+"ruff": "ruff>=0.0.241",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
4 changes: 2 additions & 2 deletions src/transformers/models/blip/modeling_blip_text.py
@@ -747,13 +747,13 @@ def forward(
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
-if type(encoder_hidden_states) == list:
+if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

-if type(encoder_attention_mask) == list:
+if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
4 changes: 2 additions & 2 deletions src/transformers/models/blip/modeling_tf_blip_text.py
@@ -741,13 +741,13 @@ def call(
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
-if type(encoder_hidden_states) == list:
+if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states[0])
else:
encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states)
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

-if type(encoder_attention_mask) == list:
+if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = tf.ones(encoder_hidden_shape)
4 changes: 2 additions & 2 deletions src/transformers/models/blip_2/modeling_blip_2.py
@@ -1140,13 +1140,13 @@ def forward(
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
-if type(encoder_hidden_states) == list:
+if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

-if type(encoder_attention_mask) == list:
+if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
2 changes: 1 addition & 1 deletion src/transformers/models/deberta/configuration_deberta.py
@@ -148,7 +148,7 @@ def __init__(
self.position_biased_input = position_biased_input

# Backwards compatibility
-if type(pos_att_type) == str:
+if isinstance(pos_att_type, str):
pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

self.pos_att_type = pos_att_type
@@ -150,7 +150,7 @@ def __init__(
self.position_biased_input = position_biased_input

# Backwards compatibility
-if type(pos_att_type) == str:
+if isinstance(pos_att_type, str):
pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

self.pos_att_type = pos_att_type
8 changes: 4 additions & 4 deletions src/transformers/models/esm/modeling_esmfold.py
@@ -229,7 +229,7 @@ def dict_multimap(fn, dicts):
new_dict = {}
for k, v in first.items():
all_v = [d[k] for d in dicts]
-if type(v) is dict:
+if isinstance(v, dict):
new_dict[k] = dict_multimap(fn, all_v)
else:
new_dict[k] = fn(all_v)
@@ -1060,7 +1060,7 @@ def __init__(self, r: float, batch_dim: Union[int, List[int]]):
super().__init__()

self.r = r
-if type(batch_dim) == int:
+if isinstance(batch_dim, int):
batch_dim = [batch_dim]
self.batch_dim = batch_dim
self.dropout = nn.Dropout(self.r)
@@ -2254,7 +2254,7 @@ def infer(
seqs: Union[str, List[str]],
position_ids=None,
):
-if type(seqs) is str:
+if isinstance(seqs, str):
lst = [seqs]
else:
lst = seqs
@@ -2312,7 +2312,7 @@ def output_to_pdb(output: Dict) -> List[str]:

def infer_pdb(self, seqs, *args, **kwargs) -> str:
"""Returns the pdb (file) string from the model given an input sequence."""
-assert type(seqs) is str
+assert isinstance(seqs, str)
output = self.infer(seqs, *args, **kwargs)
return self.output_to_pdb(output)[0]
@@ -1288,7 +1288,7 @@ def prepare_inputs_for_generation(
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
**kwargs,
):
-if type(spout) is list:
+if isinstance(spout, list):
spout = torch.tensor(spout).float()
if input_ids is not None:
spout = spout.to(input_ids.device)
@@ -375,7 +375,7 @@ def _batch_encode_plus(
verbose: bool = True,
) -> BatchEncoding:
# This tokenizer converts input text pairs into Prefix input and subsequent input
-if type(batch_text_or_text_pairs[0]) is tuple or type(batch_text_or_text_pairs[0]) is list:
+if type(batch_text_or_text_pairs[0]) is tuple or isinstance(batch_text_or_text_pairs[0], list):
# As a single text with an explicit un-prefix position
batch_prefix_texts = []
for pref, txt in batch_text_or_text_pairs:
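In the tokenizer hunk above only the list check was converted; the exact-type comparison `type(...) is tuple` was kept. The resolved review thread is not visible here, but a plausible reason is that isinstance would also match tuple subclasses such as namedtuples, which may not be valid (prefix, text) pairs. A small illustration with a hypothetical namedtuple:

from collections import namedtuple

Pair = namedtuple("Pair", ["prefix", "text"])  # hypothetical pair type
p = Pair("a prefix", "the main text")

print(type(p) is tuple)      # False: the exact-type check rejects subclasses
print(isinstance(p, tuple))  # True: every namedtuple is a tuple subclass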
@@ -1193,13 +1193,13 @@ def forward(
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
-if type(encoder_hidden_states) == list:
+if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

-if type(encoder_attention_mask) == list:
+if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
@@ -251,7 +251,7 @@ def remove_numbers(lines):
def _clean(s):
return re.sub(r"(?:[\d_]|\*\*)", "", s).strip()

-if type(lines) is str:
+if isinstance(lines, str):
return _clean(lines)
out = []
for l in lines:
@@ -192,7 +192,7 @@ def convert_weight_and_push(
)

from_output = from_model(x)
-from_output = from_output[-1] if type(from_output) is list else from_output
+from_output = from_output[-1] if isinstance(from_output, list) else from_output

# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
@@ -212,7 +212,7 @@ def __init__(
# Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
# Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
# We therefore load it with torch, if it's available.
-if type(vocab_dict) == int:
+if isinstance(vocab_dict, int):
if not is_torch_available():
raise ImportError(
"Not trying to load dict with PyTorch as you need to install pytorch to load "
2 changes: 1 addition & 1 deletion src/transformers/models/yoso/modeling_yoso.py
@@ -88,7 +88,7 @@ def to_contiguous(input_tensors):


def normalize(input_tensors):
-if type(input_tensors) is list:
+if isinstance(input_tensors, list):
out = []
for tensor in input_tensors:
out.append(nn.functional.normalize(tensor, p=2, dim=-1))
2 changes: 1 addition & 1 deletion src/transformers/trainer_pt_utils.py
@@ -896,7 +896,7 @@ def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]:
metrics_copy[k] = _secs2timedelta(v)
elif k == "total_flos":
metrics_copy[k] = f"{ int(v) >> 30 }GF"
-elif type(metrics_copy[k]) == float:
+elif isinstance(metrics_copy[k], float):
metrics_copy[k] = round(v, 4)

return metrics_copy
@@ -137,7 +137,7 @@ class {{cookiecutter.camelcase_modelname}}Config(PretrainedConfig):
{% else -%}
keys_to_ignore_at_inference = ["past_key_values"]
{% endif -%}

{% if cookiecutter.is_encoder_decoder_model == "False" %}
{%- else %}
attribute_map = {
@@ -238,4 +238,3 @@ def __init__(
**kwargs
)
-
@@ -541,7 +541,7 @@ def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(
class Flax{{cookiecutter.camelcase_modelname}}ModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,
Flax{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
Flax{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
Flax{{cookiecutter.camelcase_modelname}}Model,
@@ -474,7 +474,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict = {}
for name, key in tf_inputs_dict.items():
-if type(key) == bool:
+if isinstance(key, bool):
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
2 changes: 1 addition & 1 deletion tests/models/lxmert/test_modeling_lxmert.py
@@ -751,7 +751,7 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
tf_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value)
elif isinstance(value, (list, tuple)):
tf_inputs_dict[key] = (self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value)
-elif type(value) == bool:
+elif isinstance(value, bool):
tf_inputs_dict[key] = value
elif key == "input_values":
tf_inputs_dict[key] = tf.convert_to_tensor(value.cpu().numpy(), dtype=tf.float32)
2 changes: 1 addition & 1 deletion tests/models/lxmert/test_modeling_tf_lxmert.py
@@ -499,7 +499,7 @@ def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value)
elif isinstance(value, (list, tuple)):
pt_inputs_dict[key] = (self.prepare_pt_inputs_from_tf_inputs(iter_value) for iter_value in value)
-elif type(key) == bool:
+elif isinstance(key, bool):
pt_inputs_dict[key] = value
elif key == "input_values":
pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
@@ -407,7 +407,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict = {}
for name, key in tf_inputs_dict.items():
-if type(key) == bool:
+if isinstance(key, bool):
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
2 changes: 1 addition & 1 deletion tests/test_modeling_common.py
@@ -1990,7 +1990,7 @@ def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
tf_inputs_dict = {}
for key, tensor in pt_inputs_dict.items():
# skip key that does not exist in tf
-if type(tensor) == bool:
+if isinstance(tensor, bool):
tf_inputs_dict[key] = tensor
elif key == "input_values":
tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
2 changes: 1 addition & 1 deletion tests/test_modeling_tf_common.py
@@ -576,7 +576,7 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam
def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
pt_inputs_dict = {}
for name, key in tf_inputs_dict.items():
-if type(key) == bool:
+if isinstance(key, bool):
pt_inputs_dict[name] = key
elif name == "input_values":
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
6 changes: 3 additions & 3 deletions utils/notification_service.py
@@ -20,7 +20,7 @@
import os
import re
import sys
-import time
+from time import sleep
Collaborator: Personal opinion, but I think the previous version was better. Using time.sleep makes it more obvious which functionality and library is being used.

Contributor (author): Right, the issue was that there is a function time defined in the Message class, which gives a "Redefinition" warning...
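A minimal sketch of the clash described above, with hypothetical bodies standing in for the real Message class in utils/notification_service.py (the thread does not name the exact linter rule):

import time  # module-level import of the time module


class Message:
    # A method sharing the module's name; the author reports that this
    # pattern triggers a "redefinition" warning in the linter.
    def time(self) -> str:
        return "time spent by the run"


# Importing only the needed symbol avoids reusing the module name:
from time import sleep

sleep(1)  # behaves exactly like time.sleep(1)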

from typing import Dict, List, Optional, Union

import requests
@@ -649,7 +649,7 @@ def post_reply(self):
thread_ts=self.thread_ts["ts"],
)

-time.sleep(1)
+sleep(1)

for job, job_result in self.additional_results.items():
if len(job_result["failures"]):
@@ -672,7 +672,7 @@ def post_reply(self):
thread_ts=self.thread_ts["ts"],
)

-time.sleep(1)
+sleep(1)


def retrieve_artifact(artifact_path: str, gpu: Optional[str]):