dataset.py
import os
import json

import numpy as np
import torch
from torch.utils.data import DataLoader
class LMDataset(torch.utils.data.Dataset):
    """Plain language-modeling dataset backed by pre-tokenized .npy arrays."""

    def __init__(self, filepath):
        self.input_ids, self.labels = self.process_data(filepath)

    def process_data(self, filepath):
        # Both arrays are expected to share the same [num_samples, seq_len] shape.
        input_ids = torch.from_numpy(np.load(os.path.join(filepath, 'inputs.npy')))
        labels = torch.from_numpy(np.load(os.path.join(filepath, 'labels.npy')))
        return input_ids, labels

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'labels': self.labels[idx]
        }

    def __len__(self):
        return self.input_ids.size(0)
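
# A minimal usage sketch (not part of the original file; 'data/train' is a
# hypothetical placeholder for whatever directory holds inputs.npy and
# labels.npy):
#
#     dataset = LMDataset('data/train')
#     loader = DataLoader(dataset, batch_size=8, shuffle=True)
#     for batch in loader:
#         batch['input_ids'], batch['labels']  # each [batch_size, seq_len]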
class LMSortDataset(torch.utils.data.Dataset):
    """Identical to LMDataset, but reads the length-sorted variants of the arrays."""

    def __init__(self, filepath):
        self.input_ids, self.labels = self.process_data(filepath)

    def process_data(self, filepath):
        input_ids = torch.from_numpy(np.load(os.path.join(filepath, 'inputs_sort.npy')))
        labels = torch.from_numpy(np.load(os.path.join(filepath, 'labels_sort.npy')))
        return input_ids, labels

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'labels': self.labels[idx]
        }

    def __len__(self):
        return self.input_ids.size(0)
class LMPackDataset(torch.utils.data.Dataset):
    """Packed dataset: several short sequences are concatenated into each row.

    attention_masks_pack.json stores per-row boundary offsets of the packed
    sequences; weights_pack.npy carries per-token loss weights.
    """

    def __init__(self, filepath):
        self.input_ids, self.attention_masks, self.labels, self.weights, self.nums = self.process_data(filepath)
        self.num_gpus = torch.cuda.device_count()

    def process_data(self, filepath):
        input_ids = torch.from_numpy(np.load(os.path.join(filepath, 'inputs_pack.npy')))
        labels = torch.from_numpy(np.load(os.path.join(filepath, 'labels_pack.npy')))
        weights = torch.from_numpy(np.load(os.path.join(filepath, 'weights_pack.npy')))
        with open(os.path.join(filepath, 'attention_masks_pack.json')) as f:
            attention_masks = json.load(f)
        num_gpus = torch.cuda.device_count()
        # Trim to a multiple of num_gpus so every step distributes whole
        # groups of rows across the GPUs.
        l = (input_ids.size(0) // num_gpus) * num_gpus
        input_ids, labels, weights, attention_masks = input_ids[:l, :], labels[:l, :], weights[:l, :], attention_masks[:l]
        # nums[i] is the total loss weight of the i-th group of num_gpus rows.
        nums = [weights[i*num_gpus:(i+1)*num_gpus, :].sum() for i in range(l//num_gpus)]
        return input_ids, attention_masks, labels, weights, nums
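
    # Worked example (hypothetical numbers, not from the original file): with
    # num_gpus = 4 and 10 packed rows, l = 8, rows 8-9 are dropped, and nums
    # has two entries: nums[0] = weights[0:4].sum(), nums[1] = weights[4:8].sum().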
    def __getitem__(self, idx):
        if idx < 32:  # truncate the first few samples to reduce GPU memory usage during warmup
            max_length_tmp = 32768
            # Keep only the sequence boundaries that fall inside the truncated
            # window, then close the window with a final boundary.
            attention_mask_tmp = []
            for pos in self.attention_masks[idx]:
                if pos < max_length_tmp:
                    attention_mask_tmp.append(pos)
            attention_mask_tmp.append(max_length_tmp)
            return {
                'input_ids': self.input_ids[idx, :max_length_tmp],
                'attention_mask': torch.tensor(attention_mask_tmp, dtype=torch.int32),
                # Per-token weights are doubled for these truncated samples.
                'labels': (self.labels[idx, :max_length_tmp], self.weights[idx, :max_length_tmp]*2, self.nums[idx//self.num_gpus])
            }
        else:
            return {
                'input_ids': self.input_ids[idx],
                'attention_mask': torch.tensor(self.attention_masks[idx], dtype=torch.int32),
                'labels': (self.labels[idx], self.weights[idx], self.nums[idx//self.num_gpus])
            }

    def __len__(self):
        return self.input_ids.size(0)
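
# A minimal usage sketch (assumptions, not part of the original file): because
# self.nums is computed over consecutive groups of num_gpus rows, the packed
# dataset is presumably consumed without shuffling, one row per GPU, so that
# rank r sees rows with idx % num_gpus == r. A hypothetical single-process
# sketch ('data/train' is a placeholder path):
#
#     pack_dataset = LMPackDataset('data/train')
#     loader = DataLoader(pack_dataset, batch_size=1, shuffle=False)
#     for batch in loader:
#         input_ids = batch['input_ids']        # [1, seq_len]
#         boundaries = batch['attention_mask']  # packed-sequence offsets
#         labels, weights, group_weight = batch['labels']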