
Commit

Merge pull request #15 from magic-research/dev
fix bug in extracting answers from LLM output
ermu2001 authored May 3, 2024
2 parents 9ac002d + d2f478e commit e2032aa
Showing 1 changed file with 4 additions and 3 deletions.
7 changes: 4 additions & 3 deletions tasks/eval/model_utils.py
@@ -157,16 +157,17 @@ def pllava_answer(conv: Conversation, model, processor, img_list, do_sample=True
                                  top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature,
                                  stopping_criteria=stopping_criteria,)
     output_text = processor.batch_decode(output_token, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     if "###" in output_text:
         output_text = "###".join(output_text.split('###')[:-1]) # remove the stop sign '###'
 
     if print_res: # debug usage
         print('### PROMPTING LM WITH: ', prompt)
         print('### LM OUTPUT TEXT: ', output_text)
     if conv.roles[-1] == "<|im_start|>assistant\n":
         split_tag = "<|im_start|> assistant\n"
     else:
         split_tag = conv.roles[-1]
-    output_text = output_text.split(split_tag)[-1].rstrip(conv.sep if isinstance(conv.sep, str) else conv.sep[1]).strip()
+    output_text = output_text.split(split_tag)[-1]
+    ending = conv.sep if isinstance(conv.sep, str) else conv.sep[1]
+    output_text = output_text.removesuffix(ending)
     conv.messages[-1][1] = output_text
     return output_text, conv
