# flant5-inference.py
import json
import os
import sys
import zipfile

import datasets
from transformers import pipeline

# Paths are passed on the command line: data dir, model dir, results dir.
path_data = sys.argv[1]
path_model = sys.argv[2]
path_result = sys.argv[3]
print(f"Running inference with {path_data}, {path_model}, {path_result}")
def prepare_data(data_name):
    """Read {data_name}_test.jsonl from path_data into a dict of record lists."""
    dataset = {}
    for dtyp in ['test']:
        dataset[dtyp] = []
        with open(path_data + f'/{data_name}_{dtyp}.jsonl', 'r') as file:
            for line in file:
                data = json.loads(line)
                dataset[dtyp].append(data)
    return dataset
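# Each JSONL line is assumed to be one JSON object per article; only the
# "article" field is relied on downstream, e.g. (other keys hypothetical):
#   {"article": "Full text of the paper ...", "lay_summary": "...", ...}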
data_PLOS = prepare_data('PLOS')
data_eLife = prepare_data('eLife')

# Re-serialize each split as a single JSON file so that
# datasets.load_dataset("json", ..., field=split) can read it below.
file_path = path_data + '/PLOS_test.json'
with open(file_path, 'w') as json_file:
    json.dump(data_PLOS, json_file)
file_path = path_data + '/eLife_test.json'
with open(file_path, 'w') as json_file:
    json.dump(data_eLife, json_file)
ddict_elife_test = datasets.DatasetDict()
for split in ["test"]:
    ddict_elife_test.update(datasets.load_dataset("json", data_files={split: path_data + "/eLife_test.json"}, field=split))

ddict_plos_test = datasets.DatasetDict()
for split in ["test"]:
    ddict_plos_test.update(datasets.load_dataset("json", data_files={split: path_data + "/PLOS_test.json"}, field=split))
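# Sanity-check sketch: each DatasetDict should now expose a "test" split, e.g.
#   print(ddict_plos_test)                        # DatasetDict({'test': ...})
#   print(ddict_plos_test['test'][0]['article'][:200])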
def unzip_file(zip_file, extract_dir):
    with zipfile.ZipFile(zip_file, 'r') as zip_ref:
        zip_ref.extractall(extract_dir)

# Unpack the merged model checkpoint once, if it is still zipped.
zip_file = path_model + '/flan-t5-base-merged.zip'
zip_folder = path_model + '/flan-t5-base-merged'
extract_dir = path_model
if os.path.exists(zip_file) and not os.path.exists(zip_folder):
    unzip_file(zip_file, extract_dir)
# min_length/max_length bound the generated summary length in tokens;
# truncation=True clips over-long input articles to the model's input limit.
summarizer = pipeline("summarization", model=path_model + "/flan-t5-base-merged", min_length=260, max_length=300, truncation=True)  # pass device=2 to pin a GPU
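# Smoke-test sketch (hypothetical input string); the pipeline returns a list
# of dicts keyed by 'summary_text', as used below:
#   print(summarizer("Some article text ...")[0]['summary_text'])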
def write_strings_to_file(strings, filename):
    """Write one string per line to filename."""
    with open(filename, 'w') as file:
        for string in strings:
            file.write(string + '\n')
# Summarize every eLife test article and write one summary per line.
out_list = summarizer(ddict_elife_test['test']['article'], batch_size=8)
out_list_summ = [o['summary_text'] for o in out_list]
output_file = path_result + "/elife.txt"
write_strings_to_file(out_list_summ, output_file)

# Same for the PLOS test split.
out_list = summarizer(ddict_plos_test['test']['article'], batch_size=8)
out_list_summ = [o['summary_text'] for o in out_list]
output_file = path_result + "/plos.txt"
write_strings_to_file(out_list_summ, output_file)