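"""execute.py

Entry point for training and evaluating classification ('c') and policy
learning ('pl') models: generates IAD/ITR input files, builds the requested
model, trains and evaluates it, and writes per-run results to CSV.
"""
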
import pandas as pd
import os
from enums import suffix_dict, model_dict, Suffix
from parameter_parser import default_model_params, application_list
from model.classifier import Classifier
from model.policy_learner import PolicyLearner
from run_classification import train as train_c_iad, evaluate as evaluate_c_iad
from run_classification_gcn import train as train_c_itr, evaluate as evaluate_c_itr
from run_policy_learning import train as train_pl_iad, evaluate_action_trace as evaluate_pl_iad
from run_policy_learning_gcn import train as train_pl_itr, evaluate_action_trace as evaluate_pl_itr
from run_classification import generate_iad_files as generate_iad_files_code
#from run_classification import generate_iad_files_long as generate_iad_files_code
from run_ditrl_pipeline import train_pipeline as train_pipeline_code, generate_itr_files_gcn as generate_itr_files_code


def make_model_name(args, lfd_params, backbone=False):
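    """Build the save id for this run; unless `backbone` is set or no saved
    model exists, copy the previously saved model files into the new save
    directory."""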
# saved backbone models
save_id = lfd_params.model.save_id
old_save_dir = os.path.join(lfd_params.base_model_dir, save_id)
new_save_id = f"{args.app}_{args.suffix}_{args.model}_{args.cur_repeat}"
new_save_dir = os.path.join(lfd_params.model_save_dir, new_save_id)
    if backbone or not os.path.exists(old_save_dir):
        if not os.path.exists(old_save_dir):
            print("directory [" + old_save_dir + "] does not exist, proceeding anyway")
        return new_save_id
if not os.path.exists(new_save_dir):
os.makedirs(new_save_dir)
from shutil import copy2
for f in os.listdir(old_save_dir):
copy2(os.path.join(old_save_dir, f), new_save_dir)
return new_save_id


def define_model(args, lfd_params, train, app=None, suffix=None, use_bottleneck=False, backbone=False):
    '''
    The models are modular; this function describes how they are constructed and
    which layers are or are not included in the model, depending on the inference
    architecture.
    '''
backbone_id = model_dict[args.model]
filename = make_model_name(args, lfd_params, backbone=backbone)
if app is None:
app = args.app
use_feature_extractor = False
use_spatial = False
use_pipeline = False
use_temporal = False
train_feature_extractor = False
train_spatial = False
train_pipeline = False
train_temporal = False
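    # layer configuration by suffix (train_* flags follow the `train` argument):
    #   BACKBONE                 -> feature extractor + spatial head
    #   GENERATE_IAD             -> feature extractor only
    #   PIPELINE                 -> D-ITR-L pipeline
    #   LINEAR/LSTM/TCN (+ _IAD) -> spatial head
    #   DITRL                    -> temporal head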
if suffix == Suffix.BACKBONE:
use_feature_extractor = True
use_spatial = True
train_feature_extractor = train
train_spatial = train
elif suffix == Suffix.GENERATE_IAD:
use_feature_extractor = True
use_spatial = False
elif suffix == Suffix.PIPELINE:
use_pipeline = True
train_pipeline = train
elif suffix in [Suffix.LINEAR, Suffix.LINEAR_IAD, Suffix.LSTM_IAD, Suffix.LSTM, Suffix.TCN]:
use_spatial = True
train_spatial = train
elif suffix == Suffix.DITRL:
use_temporal = True
train_temporal = train
else:
print(f"ERROR: execute.py: suffix '{suffix}' not available")
return None

    # classifier
    if app == 'c' or suffix in [Suffix.PIPELINE, Suffix.GENERATE_IAD]:
return Classifier(lfd_params, filename, backbone_id, suffix,
use_feature_extractor=use_feature_extractor, train_feature_extractor=train_feature_extractor,
use_bottleneck=use_bottleneck,
use_spatial=use_spatial, train_spatial=train_spatial,
use_pipeline=use_pipeline, train_pipeline=train_pipeline,
use_temporal=use_temporal, train_temporal=train_temporal)

    # policy_learner
return PolicyLearner(lfd_params, filename, backbone_id, suffix,
use_feature_extractor=use_feature_extractor, train_feature_extractor=train_feature_extractor,
use_bottleneck=use_bottleneck,
use_spatial=use_spatial, train_spatial=train_spatial,
use_pipeline=use_pipeline, train_pipeline=train_pipeline,
use_temporal=use_temporal, train_temporal=train_temporal,
train_policy=train)


def generate_iad_files(args, lfd_params, model):
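    """Generate IAD files for both the train and evaluation splits."""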
backbone_id = args.model
for mode in ['train', 'evaluation']:
generate_iad_files_code(lfd_params, model, mode, backbone=backbone_id)


def generate_itr_files(args, lfd_params, model):
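    """Fit and save the D-ITR-L pipeline, then generate ITR files for both the
    train and evaluation splits."""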
backbone_id = args.model
model = train_pipeline_code(lfd_params, model)
model.save_model()
for mode in ['train', 'evaluation']:
generate_itr_files_code(lfd_params, model, mode, backbone=backbone_id)


def train(args, lfd_params, model):
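    """Dispatch to the training routine matching the application
    (classifier vs. policy learner) and the model suffix."""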
print("train suffix:", args.suffix)
if args.app == 'c':
if args.suffix in ['backbone']:
return train_c_iad(lfd_params, model, verbose=True, input_dtype="video")
elif args.suffix in ['linear', 'lstm', 'tcn']:
return train_c_iad(lfd_params, model, verbose=False, input_dtype="iad")
elif args.suffix in ['ditrl']:
return train_c_itr(lfd_params, model, verbose=False, input_dtype="gcn")
else:
if args.suffix in ['linear', 'lstm', 'tcn']:
return train_pl_iad(lfd_params, model, verbose=False, input_dtype="iad")
elif args.suffix in ['ditrl']:
return train_pl_itr(lfd_params, model, verbose=False, input_dtype="gcn")
else:
print(f"suffix '{args.suffix}' is not intended for use with policy learning")


def evaluate(args, lfd_params, model, mode):
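    """Evaluate the model on the given split and return a results DataFrame."""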
if args.app == 'c':
if args.suffix in ['backbone']:
return evaluate_c_iad(lfd_params, model, verbose=True, mode=mode, input_dtype="video")
elif args.suffix in ['linear', 'lstm', 'tcn']:
return evaluate_c_iad(lfd_params, model, verbose=False, mode=mode, input_dtype="iad")
elif args.suffix in ['ditrl']:
return evaluate_c_itr(lfd_params, model, verbose=False, mode=mode, input_dtype="gcn")
else:
if args.suffix in ['linear', 'lstm', 'tcn']:
return evaluate_pl_iad(lfd_params, model, verbose=False, mode=mode, input_dtype="iad", ablation=False)
elif args.suffix in ['ditrl']:
return evaluate_pl_itr(lfd_params, model, verbose=False, mode=mode, input_dtype="gcn", ablation=False)
else:
print(f"suffix '{args.suffix}' is not intended for use with policy learning")


def generate_files(args, lfd_params, backbone=False):
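    """Generate the IAD files (and, for D-ITR-L, the ITR files) needed for
    training and evaluation."""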
print("Generate Files...")
if args.generate_files:
print("Generate IAD...")
use_bottleneck = False
if suffix_dict[args.suffix] not in [Suffix.LINEAR, Suffix.LSTM]:
use_bottleneck = True
model = define_model(args, lfd_params, train=False, app='c', suffix=Suffix.GENERATE_IAD,
use_bottleneck=use_bottleneck,
backbone=backbone)
generate_iad_files(args, lfd_params, model)
if args.suffix in ['ditrl'] and (args.generate_gcn_files or args.generate_files):
print("Generate ITR...")
model = define_model(args, lfd_params, train=True, app='c', suffix=Suffix.PIPELINE)
generate_itr_files(args, lfd_params, model)
print("Done!")


def execute_func(args, lfd_params, cur_repeat, backbone=False):
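    """Run a single repeat: generate input files if requested, train the model
    (unless --eval was passed), evaluate on both splits, and save the combined
    results to CSV."""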
suffix = suffix_dict[args.suffix]
args.cur_repeat = cur_repeat

    # generate files
if (args.generate_files or args.generate_gcn_files) and args.suffix not in ['backbone']:
generate_files(args, lfd_params, backbone)

    # train
if not args.eval_only:
print("Train Model...")
model = define_model(args, lfd_params, train=True, suffix=suffix)
model = train(args, lfd_params, model)
model.save_model()
print("Done!")

    # eval
print("Evaluate Model...")
model = define_model(args, lfd_params, train=False, suffix=suffix)
train_df = evaluate(args, lfd_params, model, mode="train")
eval_df = evaluate(args, lfd_params, model, mode="evaluation")
print("Done!")

    # generate output
    train_df["mode"] = "train"
    eval_df["mode"] = "evaluation"
df = pd.concat([train_df, eval_df])
save_path = os.path.join(model.filename, "results.csv")
print("save_path:", save_path)
df.to_csv(save_path)
return df


def parse_exec_args():
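    """Parse the command-line arguments for execute.py."""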
import argparse
parser = argparse.ArgumentParser(description='Execute file')
parser.add_argument('app', help='classifier(c)/policy_learner(pl)', choices=['c', 'pl'])
parser.add_argument('model', help='model_id', choices=model_dict.keys())
parser.add_argument('suffix', help='suffix', choices=['backbone', 'linear', 'lstm', 'tcn', 'ditrl'])
parser.set_defaults(generate_gcn_files=False)
parser.add_argument('--gen_gcn', help='generate only the GCN files (requires IAD files to already exist)',
dest='generate_gcn_files', action='store_true')
parser.set_defaults(generate_files=False)
parser.add_argument('--gen', help='generates IAD files', dest='generate_files', action='store_true')
parser.set_defaults(eval_only=False)
parser.add_argument('--eval', help='evaluate only', dest='eval_only', action='store_true')
parser.add_argument('--frames', help='number of frames', default=64, type=int)
parser.add_argument('--repeat', help='repeat code runs', default=1, type=int)
parser.add_argument('--application', help='application', default="block_construction_timed",
choices=application_list)
parser.add_argument('--gpu', help='which gpu to run on', default=0, type=int)
return parser.parse_args()


########
def exec_repeats(args, lfd_params):
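    """Run execute_func once for each requested repeat."""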
for r in range(args.repeat):
execute_func(args, lfd_params, r)
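
# example invocations (<model_id> is a placeholder; valid ids are the keys of
# model_dict in enums.py):
#   python execute.py c <model_id> backbone
#   python execute.py pl <model_id> ditrl --gen --repeat 3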


if __name__ == '__main__':
# set model parameters
args = parse_exec_args()
lfd_params = default_model_params()
lfd_params.gpus = [args.gpu]
lfd_params.set_application(args.application)
lfd_params.set_model_params(model_dict[args.model], end_point=-1)
lfd_params.input_frames = args.frames
lfd_params.model.iad_frames = args.frames

    # run code
exec_repeats(args, lfd_params)