Skip to content

Commit

Permalink
【PPSCI Export&Infer No.3】euler_beam (#796)
Browse files Browse the repository at this point in the history
* 【PPSCI Export&Infer No.3】euler_beam

* Update examples/euler_beam/euler_beam.py

Co-authored-by: HydrogenSulfate <[email protected]>

* update config

---------

Co-authored-by: HydrogenSulfate <[email protected]>
  • Loading branch information
GreatV and HydrogenSulfate authored Mar 4, 2024
1 parent f0aacff commit b014b24
Show file tree
Hide file tree
Showing 3 changed files with 87 additions and 1 deletion.
12 changes: 12 additions & 0 deletions docs/zh/examples/euler_beam.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,18 @@
python euler_beam.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/euler_beam/euler_beam_pretrained.pdparams
```

=== "模型导出命令"

``` sh
python euler_beam.py mode=export
```

=== "模型推理命令"

``` sh
python euler_beam.py mode=infer
```

| 预训练模型 | 指标 |
|:--| :--|
| [euler_beam_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/euler_beam/euler_beam_pretrained.pdparams) | loss(L2Rel_Metric): 0.00000<br>L2Rel.u(L2Rel_Metric): 0.00080 |
Expand Down
20 changes: 20 additions & 0 deletions examples/euler_beam/conf/euler_beam.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ hydra:
- TRAIN.checkpoint_path
- TRAIN.pretrained_model_path
- EVAL.pretrained_model_path
- INFER.pretrained_model_path
- INFER.export_path
- mode
- output_dir
- log_freq
Expand Down Expand Up @@ -56,3 +58,21 @@ EVAL:
pretrained_model_path: null
eval_with_no_grad: true
total_size: 100

INFER:
pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/euler_beam/euler_beam_pretrained.pdparams"
export_path: ./inference/euler_beam
pdmodel_path: ${INFER.export_path}.pdmodel
pdpiparams_path: ${INFER.export_path}.pdiparams
device: gpu
engine: native
precision: fp32
onnx_path: ${INFER.export_path}.onnx
ir_optim: true
min_subgraph_size: 10
gpu_mem: 4000
gpu_id: 0
max_batch_size: 100
num_cpu_threads: 4
total_size: 100
batch_size: 100
56 changes: 55 additions & 1 deletion examples/euler_beam/euler_beam.py
Original file line number Diff line number Diff line change
Expand Up @@ -219,14 +219,68 @@ def u_solution_func(out):
solver.visualize()


def export(cfg: DictConfig):
    """Export the pretrained MLP to a static-graph inference model.

    Loads weights from ``cfg.INFER.pretrained_model_path`` and writes the
    exported model files under ``cfg.INFER.export_path``.
    """
    from paddle.static import InputSpec

    # build the network from the model section of the config
    model = ppsci.arch.MLP(**cfg.MODEL)

    # solver restores the pretrained weights before export
    solver = ppsci.solver.Solver(
        model,
        pretrained_model_path=cfg.INFER.pretrained_model_path,
    )

    # one (batch, 1) float32 placeholder per model input key
    # assumes every input is a scalar coordinate column — TODO confirm
    specs = {
        key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys
    }
    solver.export([specs], cfg.INFER.export_path)


def inference(cfg: DictConfig):
    """Run the exported model on evenly sampled points and plot the result.

    Samples ``cfg.INFER.total_size`` interior points on the unit interval,
    predicts the beam deflection, and saves a 1-D plot comparing the
    prediction against the analytic solution.
    """
    from deploy.python_infer import pinn_predictor

    predictor = pinn_predictor.PINNPredictor(cfg)

    # evenly spaced sample points on [0, 1]
    geom = {"interval": ppsci.geometry.Interval(0, 1)}
    input_dict = geom["interval"].sample_interior(cfg.INFER.total_size, evenly=True)

    output_dict = predictor.predict({"x": input_dict["x"]}, cfg.INFER.batch_size)

    # rename the predictor's raw output keys to the configured model output keys
    # (relies on both having the same ordering)
    output_dict = {
        model_key: output_dict[raw_key]
        for model_key, raw_key in zip(cfg.MODEL.output_keys, output_dict.keys())
    }

    def u_solution_func(out):
        """compute ground truth for u as label data"""
        x = out["x"]
        return -(x**4) / 24 + x**3 / 6 - x**2 / 4

    ppsci.visualize.save_plot_from_1d_dict(
        "./euler_beam_pred",
        {**input_dict, **output_dict, "u_label": u_solution_func(input_dict)},
        ("x",),
        ("u", "u_label"),
    )


@hydra.main(version_base=None, config_path="./conf", config_name="euler_beam.yaml")
def main(cfg: DictConfig):
    """Dispatch to train / eval / export / infer based on ``cfg.mode``.

    Raises:
        ValueError: if ``cfg.mode`` is not one of the supported modes.
    """
    if cfg.mode == "train":
        train(cfg)
    elif cfg.mode == "eval":
        evaluate(cfg)
    elif cfg.mode == "export":
        export(cfg)
    elif cfg.mode == "infer":
        inference(cfg)
    else:
        # NOTE: the diff artifact left two consecutive raise statements in the
        # else branch (the second was unreachable); only the updated message
        # listing all four modes is kept.
        raise ValueError(
            f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'"
        )


if __name__ == "__main__":
Expand Down

0 comments on commit b014b24

Please sign in to comment.