
Commit

Merge pull request #1253 from hanhainebula/master
Fix bugs
hanhainebula authored Nov 22, 2024
2 parents ce3a9f8 + dfdace3 commit 90ac133
Showing 10 changed files with 31 additions and 14 deletions.
7 changes: 5 additions & 2 deletions FlagEmbedding/abc/inference/AbsEmbedder.py
@@ -81,9 +81,12 @@ def stop_self_pool(self):
         if self.pool is not None:
             self.stop_multi_process_pool(self.pool)
             self.pool = None
-        self.model.to('cpu')
+        try:
+            self.model.to('cpu')
+            torch.cuda.empty_cache()
+        except:
+            pass
         gc.collect()
-        torch.cuda.empty_cache()
 
     @staticmethod
     def get_target_devices(devices: Union[str, int, List[str], List[int]]) -> List[str]:
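The same hardened cleanup recurs in AbsReranker and in the ICL embedder's stop_self_query_pool below. A minimal, self-contained sketch of the pattern follows; the class and pool helper are stand-ins for illustration, not FlagEmbedding's actual AbsEmbedder. The idea: during teardown (for example after the worker pool is shut down), model.to('cpu') and torch.cuda.empty_cache() can fail, so the commit tolerates errors there instead of letting cleanup crash, and gc.collect() now runs unconditionally.

import gc
import torch
import torch.nn as nn

class PooledEmbedder:
    # Stand-in for an embedder that owns an optional multi-process pool.
    def __init__(self, model: nn.Module):
        self.model = model
        self.pool = None  # set while a multi-process pool is alive

    @staticmethod
    def stop_multi_process_pool(pool):
        pass  # placeholder: the real method terminates worker processes

    def stop_self_pool(self):
        if self.pool is not None:
            self.stop_multi_process_pool(self.pool)
            self.pool = None
        # These two calls can fail during teardown, so the commit
        # swallows the error rather than aborting cleanup (the bare
        # except mirrors the diff verbatim).
        try:
            self.model.to('cpu')
            torch.cuda.empty_cache()
        except:
            pass
        gc.collect()  # always runs, even if the GPU calls failed

# PooledEmbedder(nn.Linear(4, 4)).stop_self_pool() now completes
# whether or not a CUDA device is present.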
7 changes: 5 additions & 2 deletions FlagEmbedding/abc/inference/AbsReranker.py
@@ -82,9 +82,12 @@ def stop_self_pool(self):
         if self.pool is not None:
             self.stop_multi_process_pool(self.pool)
             self.pool = None
-        self.model.to('cpu')
+        try:
+            self.model.to('cpu')
+            torch.cuda.empty_cache()
+        except:
+            pass
         gc.collect()
-        torch.cuda.empty_cache()
 
     @staticmethod
     def get_target_devices(devices: Union[str, int, List[str], List[int]]) -> List[str]:
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/embedder/decoder_only/base.py
@@ -224,7 +224,8 @@ def encode_single_device(
 
         # tokenize without padding to get the correct length
         all_inputs = []
-        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize'):
+        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize',
+                                  disable=len(sentences) < 256):
             sentences_batch = sentences[start_index:start_index + batch_size]
             inputs_batch = self.tokenizer(
                 sentences_batch,
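trange here is tqdm's range wrapper, and disable is a standard tqdm keyword that suppresses the progress bar entirely when truthy, so small encode jobs no longer print a 'pre tokenize' bar. A quick illustration of the changed line in isolation (the sentence list and batch size are invented for the example; the 256 threshold is the one from the diff):

from tqdm import trange

sentences = ["some text"] * 100   # fewer than 256 sentences
batch_size = 16

# disable evaluates to True here, so no bar is printed for this small
# job; with 256 or more sentences the bar appears as before.
for start_index in trange(0, len(sentences), batch_size,
                          desc='pre tokenize',
                          disable=len(sentences) < 256):
    sentences_batch = sentences[start_index:start_index + batch_size]
    # ... tokenization of sentences_batch would happen here ...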
10 changes: 7 additions & 3 deletions FlagEmbedding/inference/embedder/decoder_only/icl.py
@@ -178,9 +178,12 @@ def stop_self_query_pool(self):
         if self.query_pool is not None:
             self.stop_multi_process_pool(self.query_pool)
             self.query_pool = None
-        self.model.to('cpu')
+        try:
+            self.model.to('cpu')
+            torch.cuda.empty_cache()
+        except:
+            pass
         gc.collect()
-        torch.cuda.empty_cache()
 
     def encode_queries(
         self,
@@ -483,7 +486,8 @@ def encode_single_device(
 
         # tokenize without padding to get the correct length
         all_inputs = []
-        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize'):
+        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize',
+                                  disable=len(sentences) < 256):
             sentences_batch = sentences[start_index:start_index + batch_size]
             inputs_batch = self.tokenizer(
                 sentences_batch,
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/embedder/encoder_only/base.py
@@ -205,7 +205,8 @@ def encode_single_device(
 
         # tokenize without padding to get the correct length
         all_inputs = []
-        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize'):
+        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize',
+                                  disable=len(sentences) < 256):
             sentences_batch = sentences[start_index:start_index + batch_size]
             inputs_batch = self.tokenizer(
                 sentences_batch,
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/embedder/encoder_only/m3.py
@@ -369,7 +369,8 @@ def _process_colbert_vecs(colbert_vecs: np.ndarray, attention_mask: list):
 
         # tokenize without padding to get the correct length
         all_inputs = []
-        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize'):
+        for start_index in trange(0, len(sentences), batch_size, desc='pre tokenize',
+                                  disable=len(sentences) < 256):
             sentences_batch = sentences[start_index:start_index + batch_size]
             inputs_batch = self.tokenizer(
                 sentences_batch,
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/reranker/decoder_only/base.py
@@ -309,7 +309,8 @@ def compute_score_single_gpu(
         # tokenize without padding to get the correct length
         all_queries_inputs = []
         all_passages_inputs = []
-        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize"):
+        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize",
+                                  disable=len(sentence_pairs) < 128):
             sentences_batch = sentence_pairs[start_index:start_index + batch_size]
             queries = [s[0] for s in sentences_batch]
             passages = [s[1] for s in sentences_batch]
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/reranker/decoder_only/layerwise.py
@@ -191,7 +191,8 @@ def compute_score_single_gpu(
         # tokenize without padding to get the correct length
         all_queries_inputs = []
         all_passages_inputs = []
-        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize"):
+        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize",
+                                  disable=len(sentence_pairs) < 128):
             sentences_batch = sentence_pairs[start_index:start_index + batch_size]
             queries = [s[0] for s in sentences_batch]
             passages = [s[1] for s in sentences_batch]
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/reranker/decoder_only/lightweight.py
@@ -262,7 +262,8 @@ def compute_score_single_gpu(
         # tokenize without padding to get the correct length
         all_queries_inputs = []
         all_passages_inputs = []
-        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize"):
+        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize",
+                                  disable=len(sentence_pairs) < 128):
             sentences_batch = sentence_pairs[start_index:start_index + batch_size]
             queries = [s[0] for s in sentences_batch]
             passages = [s[1] for s in sentences_batch]
3 changes: 2 additions & 1 deletion FlagEmbedding/inference/reranker/encoder_only/base.py
@@ -121,7 +121,8 @@ def compute_score_single_gpu(
 
         # tokenize without padding to get the correct length
         all_inputs = []
-        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize"):
+        for start_index in trange(0, len(sentence_pairs), batch_size, desc="pre tokenize",
+                                  disable=len(sentence_pairs) < 128):
             sentences_batch = sentence_pairs[start_index:start_index + batch_size]
             queries = [s[0] for s in sentences_batch]
             passages = [s[1] for s in sentences_batch]
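The four reranker files apply the same progress-bar fix with a lower threshold of 128, since each item is a (query, passage) pair rather than a single sentence. Pulled out of its class, the shared shape of those loops is roughly the following; the function name and tokenizer arguments are illustrative, not FlagEmbedding's exact signature:

from tqdm import trange

def pre_tokenize_pairs(tokenizer, sentence_pairs, batch_size=32):
    # Tokenize without padding to get the correct lengths, silencing
    # the progress bar for small workloads (fewer than 128 pairs).
    all_queries_inputs, all_passages_inputs = [], []
    for start_index in trange(0, len(sentence_pairs), batch_size,
                              desc="pre tokenize",
                              disable=len(sentence_pairs) < 128):
        sentences_batch = sentence_pairs[start_index:start_index + batch_size]
        queries = [s[0] for s in sentences_batch]
        passages = [s[1] for s in sentences_batch]
        all_queries_inputs.append(tokenizer(queries, padding=False, truncation=True))
        all_passages_inputs.append(tokenizer(passages, padding=False, truncation=True))
    return all_queries_inputs, all_passages_inputs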
