dataloader.py
import pandas as pd
import torch
from tqdm.auto import tqdm
from transformers import AutoTokenizer


class CustomDataset(torch.utils.data.Dataset):
    """Reads a CSV file and tokenizes the configured text columns for training or inference."""

    def __init__(
        self,
        data_file,
        state,
        text_columns,
        target_columns=None,
        delete_columns=None,
        max_length=512,
        model_name="klue/roberta-small",
    ):
        self.state = state
        self.data = pd.read_csv(data_file)
        self.text_columns = text_columns
        self.delete_columns = delete_columns if delete_columns is not None else []
        self.max_length = max_length
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        if self.state == "test":
            # Inference data has no labels, so only the inputs are prepared.
            self.inputs = self.preprocessing(self.data)
        else:
            self.target_columns = target_columns if target_columns is not None else []
            self.inputs, self.targets = self.preprocessing(self.data)

    def __getitem__(self, idx):
        if self.state == "test":
            return {"input_ids": torch.tensor(self.inputs[idx])}
        else:
            if len(self.targets) == 0:
                # No target columns are available (e.g. an unlabeled prediction set).
                return torch.tensor(self.inputs[idx])
            else:
                return {
                    "input_ids": torch.tensor(self.inputs[idx]),
                    "labels": torch.tensor(self.targets[idx]),
                }

    def __len__(self):
        return len(self.inputs)

    def tokenizing(self, dataframe: pd.DataFrame) -> list:
        '''
        Tokenize the text columns of each row.
        Args:
            dataframe (DataFrame): data to tokenize
        Returns:
            data (list): list of token id sequences for the sentences to train on
        '''
        data = []
        for _, item in tqdm(
            dataframe.iterrows(), desc="Tokenizing", total=len(dataframe)
        ):
            # Join the configured text columns into one sequence separated by [SEP].
            text = "[SEP]".join(
                [item[text_column] for text_column in self.text_columns]
            )
            outputs = self.tokenizer(
                text,
                add_special_tokens=True,
                padding="max_length",
                truncation=True,
                max_length=self.max_length,
            )
            data.append(outputs["input_ids"])
        return data

    def preprocessing(self, data):
        inputs = self.tokenizing(data)
        if self.state == "test":
            return inputs
        else:
            try:
                targets = data[self.target_columns].values.tolist()
            except KeyError:
                # Target columns are missing from the dataframe, so labels are left empty.
                targets = []
            return inputs, targets
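

if __name__ == "__main__":
    # Usage sketch (an assumption, not part of the original repository): build a train
    # and a test dataset and wrap them in DataLoaders. The file paths and column names
    # below ("train.csv", "test.csv", "sentence_1", "sentence_2", "label") are
    # hypothetical placeholders for whatever the surrounding project actually uses.
    from torch.utils.data import DataLoader

    train_dataset = CustomDataset(
        data_file="train.csv",
        state="train",
        text_columns=["sentence_1", "sentence_2"],
        target_columns=["label"],
    )
    test_dataset = CustomDataset(
        data_file="test.csv",
        state="test",
        text_columns=["sentence_1", "sentence_2"],
    )

    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=16, shuffle=False)

    # Each train batch is a dict with stacked "input_ids" and "labels" tensors.
    batch = next(iter(train_loader))
    print(batch["input_ids"].shape, batch["labels"].shape)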