-
Notifications
You must be signed in to change notification settings - Fork 11
/
export_model.py
146 lines (109 loc) · 5.45 KB
/
export_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import os
import argparse
from pathlib import Path
import tensorflow as tf
import tf2onnx
from model_export import dump_graph
from models.model_v4 import Model_v4
from models.utils import load_weights
from run_model_v4 import load_config
def main():
    """Export a trained Model_v4 generator to pbtxt or ONNX.

    Loads a checkpoint from ``saved_models/<checkpoint_name>``, wraps the
    generator with pre-/post-processing, and dumps the resulting graph.
    Optionally uploads the ONNX model (plus its config) to an MLFlow registry.
    """
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('--checkpoint_name', type=str, required=True)
    parser.add_argument('--output_path', type=str, default=None)
    parser.add_argument('--hack_upsampling_op', default=False, action='store_true')
    parser.add_argument('--test_input', type=float, nargs=4, default=None)
    parser.add_argument('--latent_space', choices=['normal', 'uniform', 'constant', 'none'], default='normal')
    parser.add_argument('--export_format', choices=['pbtxt', 'onnx'], default='pbtxt')
    parser.add_argument('--upload_to_mlflow', action='store_true')
    parser.add_argument('--aws_access_key_id', type=str, required=False)
    parser.add_argument('--aws_secret_access_key', type=str, required=False)
    parser.add_argument('--mlflow_url', type=str, required=False)
    parser.add_argument('--s3_url', type=str, required=False)
    parser.add_argument('--mlflow_model_name', type=str, required=False)
    args, _ = parser.parse_known_args()

    # Validate argument combinations via parser.error (unlike `assert`, this
    # survives `python -O` and exits with a proper usage message).
    if args.upload_to_mlflow:
        if args.export_format != 'onnx':
            parser.error('Only onnx export format is supported when uploading to MLFlow')
        for required_arg in ('aws_access_key_id', 'aws_secret_access_key', 'mlflow_url', 's3_url', 'mlflow_model_name'):
            if not getattr(args, required_arg):
                parser.error(f'You need to specify {required_arg} to upload model to MLFlow')

    if args.export_format == 'onnx':
        if args.hack_upsampling_op:
            parser.error('--hack_upsampling_op is only applicable for the pbtxt format')
        if args.test_input is not None:
            parser.error('--test_input is only applicable for the pbtxt format')

    print("")
    print("----" * 10)
    print("Arguments:")
    for k, v in vars(args).items():
        print(f"    {k} : {v}")
    print("----" * 10)
    print("")

    if args.output_path is None:
        if args.export_format == 'pbtxt':
            args.output_path = Path('model_export/model_v4')
        else:
            args.output_path = Path('model_export/onnx')
    else:
        # BUG FIX: argparse delivers --output_path as a str; the .mkdir()
        # call below (and the `/` joins) require a Path object.
        args.output_path = Path(args.output_path)
    args.output_path.mkdir(parents=True, exist_ok=True)

    model_path = Path('saved_models') / args.checkpoint_name

    full_model = Model_v4(load_config(model_path / 'config.yaml'))
    load_weights(full_model, model_path)
    model = full_model.generator

    # Stash model dimensions on args so construct_preprocess can read them.
    args.latent_dim = full_model.latent_dim
    args.features_dim = 4
    if getattr(full_model, "full_feature_space", False):
        args.features_dim = 6

    input_signature, preprocess = construct_preprocess(args, full_model._f)

    # TODO: make this configurable through the model config
    def postprocess(x):
        # Invert the log10(1 + x) training transform; zero out sub-unit noise.
        x = 10**x - 1
        return tf.where(x < 1.0, 0.0, x)

    @tf.function(input_signature=input_signature)
    def to_save(x):
        return postprocess(model(preprocess(x)))

    if args.export_format == 'pbtxt':
        dump_graph.model_to_graph(
            to_save,
            output_file=Path(args.output_path) / "graph.pbtxt",
            test_input=args.test_input,
            hack_upsampling=args.hack_upsampling_op,
        )
    else:
        onnx_model, _ = tf2onnx.convert.from_function(
            to_save,
            input_signature=input_signature,
            output_path=Path(args.output_path) / f'{args.checkpoint_name}.onnx',
        )
        if args.upload_to_mlflow:
            import mlflow

            # Credentials/endpoints are read by mlflow/boto3 from the environment.
            os.environ['AWS_ACCESS_KEY_ID'] = args.aws_access_key_id
            os.environ['AWS_SECRET_ACCESS_KEY'] = args.aws_secret_access_key
            os.environ['MLFLOW_S3_ENDPOINT_URL'] = args.s3_url
            mlflow.set_tracking_uri(args.mlflow_url)
            mlflow.set_experiment('model_export')
            mlflow.log_artifact(str(model_path / 'config.yaml'), artifact_path='model_onnx')
            mlflow.onnx.log_model(onnx_model, artifact_path='model_onnx', registered_model_name=args.mlflow_model_name)
def construct_preprocess(args, func):
    """Build the TF input signature and preprocessing closure for export.

    Depending on ``args.latent_space``, either the caller supplies the full
    input vector (features + latent part together) or a latent sample is
    drawn inside the graph and concatenated to the transformed features.

    Args:
        args: parsed namespace; reads ``export_format``, ``latent_space``,
            ``latent_dim`` and ``features_dim``.
        func: feature transform applied to the first ``features_dim`` columns.

    Returns:
        A ``(input_signature, preprocess)`` pair suitable for ``tf.function``.

    Raises:
        NotImplementedError: for the 'constant' latent space (not implemented).
    """
    # ONNX export needs a fixed batch size; pbtxt keeps it dynamic (None).
    batch_dim = None if args.export_format == 'pbtxt' else 1

    sampler = None
    if args.latent_space == 'normal':
        def sampler(num):
            return tf.random.normal(shape=(num, args.latent_dim), dtype='float32')
    elif args.latent_space == 'uniform':
        def sampler(num):
            return tf.random.uniform(shape=(num, args.latent_dim), dtype='float32')
    elif args.latent_space == 'constant':
        raise NotImplementedError()  # TODO: implement this

    if sampler is None:
        # 'none' latent space: the caller passes features and latent together.
        input_signature = [tf.TensorSpec(shape=[batch_dim, 36], dtype=tf.float32)]

        def preprocess(x):
            features = x[..., : args.features_dim]
            latent_part = x[..., args.features_dim :]
            return tf.concat([func(features), latent_part], axis=-1)

    else:
        # Only features are supplied; sample the latent input in-graph.
        input_signature = [tf.TensorSpec(shape=[batch_dim, args.features_dim], dtype=tf.float32)]

        def preprocess(x):
            return tf.concat([func(x), sampler(tf.shape(x)[0])], axis=-1)

    return input_signature, preprocess
# Script entry point: run the export only when invoked directly.
if __name__ == '__main__':
    main()