# trainer.py (forked from lxe/simple-llm-finetuner)
import os
import gc
import torch
import transformers
import peft
import datasets
from contextlib import nullcontext
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from config import (
    HAS_CUDA,
    MODEL,
    DEVICE_MAP,
    TRAINING_PARAMS,
    LORA_TRAINING_PARAMS,
    GENERATION_PARAMS
)
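
# A note on config (assumptions, not verified here): config.py is expected to
# provide HAS_CUDA (bool), MODEL (a default model id), DEVICE_MAP, and dicts of
# default TRAINING_PARAMS, LORA_TRAINING_PARAMS, and GENERATION_PARAMS that the
# methods below merge with caller-supplied kwargs.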

class Trainer():
    def __init__(self):
        self.model = None
        self.model_name = None
        self.lora_name = None
        self.loras = {}

        self.tokenizer = None
        self.trainer = None

        self.should_abort = False

    def unload_model(self):
        # Drop references and reclaim GPU memory before loading a new model.
        del self.model
        del self.tokenizer

        self.model = None
        self.model_name = None
        self.tokenizer = None

        if (HAS_CUDA):
            with torch.no_grad():
                torch.cuda.empty_cache()

        gc.collect()

    def load_model(self, model_name, force=False, **kwargs):
        assert model_name is not None

        if (model_name == self.model_name and not force):
            return

        if (self.model is not None):
            self.unload_model()

        if 'GPTQ' in model_name:
            # Load an already-quantized GPTQ checkpoint for the requested model.
            quantize_config = BaseQuantizeConfig(bits=4, group_size=128, desc_act=False)
            self.model = AutoGPTQForCausalLM.from_quantized(
                model_name,
                use_safetensors=True,
                device="cuda:0",
                quantize_config=quantize_config
            )
        else:
            self.model = transformers.AutoModelForCausalLM.from_pretrained(
                model_name,
                device_map=DEVICE_MAP,
                load_in_8bit=True,
                torch_dtype=torch.float16,
            )

        # Clear the collection that tracks which adapters are loaded,
        # as they are associated with self.model
        self.loras = {}

        if model_name.startswith('decapoda-research/llama'):
            self.tokenizer = transformers.LlamaTokenizer.from_pretrained(model_name)
        else:
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)

        self.tokenizer.pad_token_id = 0
        self.model_name = model_name
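
    # Usage sketch (hypothetical model id, not shipped with this repo): load_model
    # caches the current model, so repeated calls with the same name are no-ops
    # unless force=True is passed.
    #
    #   trainer = Trainer()
    #   trainer.load_model('decapoda-research/llama-7b-hf')
    #   trainer.load_model('decapoda-research/llama-7b-hf')              # returns immediately
    #   trainer.load_model('decapoda-research/llama-7b-hf', force=True)  # reloads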

    def load_lora(self, lora_name, replace_model=True):
        assert self.model is not None
        assert lora_name is not None

        if (lora_name == self.lora_name):
            return

        # Adapter was loaded before; just make it the active one.
        if lora_name in self.loras:
            self.lora_name = lora_name
            self.model.set_adapter(lora_name)
            return

        peft_config = peft.PeftConfig.from_pretrained(lora_name)
        if not replace_model:
            assert peft_config.base_model_name_or_path == self.model_name

        # Swap in the adapter's base model if it differs from the current one.
        if peft_config.base_model_name_or_path != self.model_name:
            self.load_model(peft_config.base_model_name_or_path)

        assert self.model_name is not None
        assert self.model is not None

        if hasattr(self.model, 'load_adapter'):
            self.model.load_adapter(lora_name, adapter_name=lora_name)
        else:
            self.model = peft.PeftModel.from_pretrained(self.model, lora_name, adapter_name=lora_name)

        self.model.set_adapter(lora_name)
        if (self.model_name.startswith('cerebras')):
            self.model.half()

        self.lora_name = lora_name
        self.loras[lora_name] = True
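
    # Usage sketch (hypothetical adapter path): previously loaded adapters are
    # cached in self.loras, so switching back to one only calls set_adapter();
    # loading a new adapter may also swap the base model via its PeftConfig.
    #
    #   trainer.load_lora('lora/my-adapter')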

    def unload_lora(self):
        self.lora_name = None

    def generate(self, prompt, **kwargs):
        assert self.model is not None
        assert self.model_name is not None
        assert self.tokenizer is not None

        kwargs = { **GENERATION_PARAMS, **kwargs }

        inputs = self.tokenizer(str(prompt), return_tensors="pt")
        input_ids = inputs["input_ids"].to(self.model.device)

        if self.model.config.pad_token_id is None:
            kwargs['pad_token_id'] = self.model.config.eos_token_id

        # Drop num_beams when sampling so the sampling and beam-search settings
        # don't conflict.
        if (kwargs['do_sample']):
            del kwargs['num_beams']

        generation_config = transformers.GenerationConfig(
            use_cache=False,
            **kwargs
        )

        # Temporarily disable any attached adapter when no LoRA is selected.
        disable_lora = nullcontext()
        if self.lora_name is None and hasattr(self.model, 'disable_adapter'):
            disable_lora = self.model.disable_adapter()

        with torch.no_grad(), disable_lora:
            output = self.model.generate(
                input_ids=input_ids,
                attention_mask=torch.ones_like(input_ids),
                generation_config=generation_config
            )[0].to(self.model.device)

        return self.tokenizer.decode(output, skip_special_tokens=True).strip()
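
    # Usage sketch: keyword arguments override GENERATION_PARAMS for a single
    # call (the parameter values below are illustrative, not defaults from config).
    #
    #   text = trainer.generate(
    #       "Human: How is cheese made?\n\nAssistant:",
    #       max_new_tokens=64,
    #       do_sample=True,
    #   )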

    def tokenize_sample(self, item, max_seq_length, add_eos_token=True):
        assert self.tokenizer is not None

        result = self.tokenizer(
            item["text"],
            truncation=True,
            max_length=max_seq_length,
            padding="max_length",
        )

        # Drop the last token, then append an explicit EOS token (with its
        # attention mask) when the sample is not already EOS-terminated and
        # there is still room within max_seq_length.
        result = {
            "input_ids": result["input_ids"][:-1],
            "attention_mask": result["attention_mask"][:-1],
        }

        if (
            result["input_ids"][-1] != self.tokenizer.eos_token_id
            and len(result["input_ids"]) < max_seq_length
            and add_eos_token
        ):
            result["input_ids"].append(self.tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        return result

    def tokenize_training_text(self, training_text, max_seq_length, separator="\n\n\n", **kwargs):
        # Split the raw training text into individual samples on the separator.
        samples = training_text.split(separator)
        samples = [x.strip() for x in samples]

        def to_dict(text):
            return { 'text': text }

        samples = [to_dict(x) for x in samples]
        training_dataset = datasets.Dataset.from_list(samples)

        training_dataset = training_dataset.shuffle().map(
            lambda x: self.tokenize_sample(x, max_seq_length),
            batched=False
        )

        return training_dataset
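
    # Usage sketch: training samples are delimited by the separator (three
    # consecutive newlines by default).
    #
    #   ds = trainer.tokenize_training_text(
    #       "First sample\n\n\nSecond sample",
    #       max_seq_length=256,
    #   )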

    def train(self, training_text=None, new_peft_model_name=None, **kwargs):
        assert self.should_abort is False
        assert self.model is not None
        assert self.model_name is not None
        assert self.tokenizer is not None

        kwargs = { **TRAINING_PARAMS, **LORA_TRAINING_PARAMS, **kwargs }

        self.lora_name = None
        self.loras = {}

        train_dataset = self.tokenize_training_text(training_text, **kwargs)

        # Start from a clean base model if a PEFT adapter is currently attached.
        if hasattr(self.model, 'disable_adapter'):
            self.load_model(self.model_name, force=True)

        self.model = peft.prepare_model_for_int8_training(self.model)
        self.model = peft.get_peft_model(self.model, peft.LoraConfig(
            r=kwargs['lora_r'],
            lora_alpha=kwargs['lora_alpha'],
            lora_dropout=kwargs['lora_dropout'],
            bias="none",
            task_type="CAUSAL_LM",
        ))

        if not os.path.exists('lora'):
            os.makedirs('lora')

        sanitized_model_name = self.model_name.replace('/', '_').replace('.', '_')
        output_dir = f"lora/{sanitized_model_name}_{new_peft_model_name}"

        training_args = transformers.TrainingArguments(
            per_device_train_batch_size=kwargs['micro_batch_size'],
            gradient_accumulation_steps=kwargs['gradient_accumulation_steps'],
            num_train_epochs=kwargs['epochs'],
            learning_rate=kwargs['learning_rate'],
            fp16=True,
            optim='adamw_torch',
            logging_steps=20,
            save_total_limit=3,
            output_dir=output_dir,
        )

        # _trainer = self
        # class LoggingCallback(transformers.TrainerCallback):
        #     def on_log(self, args, state, control, logs=None, **kwargs):
        #         _trainer.log += json.dumps(logs) + '\n'

        def should_abort():
            return self.should_abort

        def reset_abort():
            self.should_abort = False

        class AbortCallback(transformers.TrainerCallback):
            def on_step_end(self, args, state, control, **kwargs):
                if should_abort():
                    print("Stopping training...")
                    control.should_training_stop = True

            def on_train_end(self, args, state, control, **kwargs):
                if should_abort():
                    control.should_save = False

        # class CustomTrainer(transformers.Trainer):
        #     def __init__(self, *args, **kwargs):
        #         super().__init__(*args, **kwargs)
        #         self.abort_training = False
        #
        #     def stop_training(self):
        #         print("Stopping training...")
        #         self.abort_training = True
        #
        #     def training_step(self, model, inputs):
        #         if self.abort_training:
        #             raise RuntimeError("Training aborted.")
        #         return super().training_step(model, inputs)

        self.trainer = transformers.Trainer(
            model=self.model,
            train_dataset=train_dataset,
            args=training_args,
            data_collator=transformers.DataCollatorForLanguageModeling(
                self.tokenizer,
                mlm=False,
            ),
            callbacks=[AbortCallback()]
        )

        self.model.config.use_cache = False
        result = self.trainer.train(resume_from_checkpoint=False)

        # Only persist the adapter if training ran to completion.
        if not should_abort():
            self.model.save_pretrained(output_dir)

        reset_abort()
        return result
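
    # Usage sketch (hypothetical adapter name): caller kwargs override the
    # defaults from TRAINING_PARAMS / LORA_TRAINING_PARAMS, e.g. epochs,
    # learning_rate, lora_r; the adapter is saved under lora/<model>_<name>.
    #
    #   trainer.train(
    #       training_text=raw_text,
    #       new_peft_model_name='my-adapter',
    #       epochs=1,
    #   )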

    def abort_training(self):
        self.should_abort = True


if __name__ == '__main__':
    t = Trainer()
    t.load_model(MODEL)

    prompt = "Human: How is cheese made?\n\nAssistant:"
    print(t.generate(prompt))

    t.load_lora('lora/melon-mango-orange')
    print(t.generate(prompt))

    t.unload_lora()
    print(t.generate(prompt))