driver_clouseg.py
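"""Pipeline driver: runs YOLO inference, differencing, and per-frame metric
evaluation over video segments, caching results in MongoDB and dumping them
as JSON under data/inference/ and data/evaluation/.

Usage:

    python driver_clouseg.py --pipeline pipeline.yaml

A minimal pipeline.yaml sketch, inferred only from the keys this script
reads (all values below are illustrative assumptions, not the project's
actual configuration):

    environs:
      dataset_root: /data/videos   # root directory holding the dataset
      dataset_name: clouseg        # also used as the mongo database name
      subsets: [subset0, subset1]  # subdirectories scanned for segment???.mp4
      thresh_root: /data/thresh    # holds <dataset_name>.json differencer thresholds
    mongo:
      host: localhost
      port: 27017
    differencer:
      types: [pixel, edge, area]   # differencer names; assumed values
    evaluator: {}                  # metric config consumed by MetricComposer.from_json
"""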
import argparse
import functools
import multiprocessing as mp
from pathlib import Path

import mongoengine
import yaml
from tqdm import tqdm

from reducto.data_loader import dump_json
from reducto.differencer import DiffComposer
from reducto.evaluator import MetricComposer
from reducto.inferencer import Yolo
from reducto.model import Segment, Inference, InferenceResult, DiffVector, FrameEvaluation

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pipeline', type=str, default='pipeline.yaml')
    parser.add_argument('-s', '--segment_pattern', default='segment???.mp4')
    parser.add_argument('--no_session', action='store_true')
    parser.add_argument('--skip_inference', action='store_true')
    parser.add_argument('--skip_diffeval', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    configuration = args.pipeline
    with open(configuration, 'r') as y:
        config = yaml.load(y, Loader=yaml.FullLoader)

    # data preparation
    dataset_root = config['environs']['dataset_root']
    dataset_name = config['environs']['dataset_name']
    subsets = config['environs']['subsets']
    segments = []
    for ss in subsets:
        p = Path(dataset_root) / dataset_name / ss
        segments += [f for f in sorted(p.iterdir()) if f.match(args.segment_pattern)]

    mongo_host = config['mongo']['host']
    mongo_port = config['mongo']['port']
    mongoengine.connect(dataset_name, host=mongo_host, port=mongo_port)
    print(f'connected to {mongo_host}:{mongo_port} on dataset {dataset_name}')

    differ_dict_path = Path(config['environs']['thresh_root']) / f'{dataset_name}.json'
    differ_types = config['differencer']['types']

    # component preparation
    model = Yolo(no_session=args.no_session)
    differ = DiffComposer.from_jsonfile(differ_dict_path, differ_types)
    evaluator = MetricComposer.from_json(config['evaluator'])

    # pipeline running
    pbar = tqdm(total=len(segments))
    for segment in segments:
        # -- segment ---------------------------------------------------
        segment_record = Segment.find_or_save(segment.parent.name, segment.name)

        # -- inference -------------------------------------------------
        inference_record = Inference.objects(
            segment=segment_record,
            model=model.name,
        ).first()
        if inference_record:
            inference = inference_record.to_json()
        else:
            inference = model.infer_video(segment)
            inference_record = Inference(
                segment=segment_record,
                model=model.name,
                result=[InferenceResult.from_json(inf) for _, inf in inference.items()],
            )
            inference_record.save()
            dump_json(inference, f'data/inference/{dataset_name}/{segment.parent.name}/{segment.stem}.json', mkdir=True)

        # -- skip if required ------------------------------------------
        if args.skip_diffeval:
            pbar.update()
            continue
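
        # -- differencing ----------------------------------------------
        # NOTE: the evaluation step below consumes `diff_results`, which this
        # file never defines, so a diffing step has evidently gone missing.
        # This block is a reconstructed sketch: the DiffVector field names
        # (`differencer`, `vector`) and the DiffComposer methods
        # `get_diff_vector` / `process_video` are assumptions about the
        # reducto API, not confirmed signatures.
        for differ_type in differ_types:
            diff_vector_record = DiffVector.objects(
                segment=segment_record,
                differencer=differ_type,
            ).first()
            if diff_vector_record is None:
                diff_vector = differ.get_diff_vector(differ_type, segment)
                diff_vector_record = DiffVector(
                    segment=segment_record,
                    differencer=differ_type,
                    vector=diff_vector,
                )
                diff_vector_record.save()
        diff_vectors = {dv.differencer: dv.vector for dv in DiffVector.objects(segment=segment_record)}
        diff_results = differ.process_video(segment, diff_vectors)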
        # -- evaluation ------------------------------------------------
        frame_pairs = evaluator.get_frame_pairs(inference, diff_results)
        per_frame_evaluations = {}
        for metric in evaluator.keys:
            # reuse evaluations already stored in mongo; compute only the rest
            metric_evaluations = FrameEvaluation.objects(segment=segment_record, evaluator=metric)
            pairs = [(me.ground_truth, me.comparision) for me in metric_evaluations]
            pairs_pending = [p for p in frame_pairs if p not in pairs]
            with mp.Pool() as pool:
                eval_f = functools.partial(evaluator.evaluate_frame_pair, inference=inference, metric=metric)
                metric_evaluations_new = pool.map(eval_f, pairs_pending)
            pair_evaluations_new = {
                pair: evaluation
                for pair, evaluation in zip(pairs_pending, metric_evaluations_new)
            }
            for pair, evaluation in pair_evaluations_new.items():
                frame_evaluation_record = FrameEvaluation(
                    segment=segment_record,
                    model=model.name,
                    evaluator=metric,
                    ground_truth=pair[0],
                    comparision=pair[1],  # field name as spelled in the model
                    result=evaluation[metric],
                )
                frame_evaluation_record.save()
            # merge cached and freshly computed results per frame pair
            for me in metric_evaluations:
                per_frame_evaluations.setdefault((me.ground_truth, me.comparision), {})[metric] = me.result
            for pair, evaluation in pair_evaluations_new.items():
                per_frame_evaluations.setdefault(pair, {})[metric] = evaluation[metric]

        evaluations = evaluator.evaluate(inference, diff_results, per_frame_evaluations, segment)
        dump_json(evaluations, f'data/evaluation/{dataset_name}/{segment.parent.name}/{segment.stem}.json', mkdir=True)
        pbar.update()

    pbar.close()