Skip to content

Commit

Permalink
Fix git mistake with config files
Browse files — browse the repository at this point in the history
  • Loading branch information
arthurfeeney committed Nov 4, 2023
1 parent 0e5abb5 commit 7266019
Show file tree
Hide file tree
Showing 21 changed files with 80 additions and 434 deletions.
29 changes: 0 additions & 29 deletions conf/experiment/ffno/pb_temp.yaml

This file was deleted.

29 changes: 0 additions & 29 deletions conf/experiment/ffno/pb_vel.yaml

This file was deleted.

29 changes: 0 additions & 29 deletions conf/experiment/fno/pb_vel.yaml

This file was deleted.

28 changes: 0 additions & 28 deletions conf/experiment/gfno/pb_temp.yaml

This file was deleted.

29 changes: 0 additions & 29 deletions conf/experiment/gfno_test/cosine.yaml

This file was deleted.

30 changes: 0 additions & 30 deletions conf/experiment/gfno_test/pb_temp_mode_96_width_20.yaml

This file was deleted.

29 changes: 0 additions & 29 deletions conf/experiment/gfno_test/vel_cosine.yaml

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
torch_dataset_name: temp_input_dataset

# torch distributed does not support complex parameters
distributed: False

train:
Expand All @@ -12,18 +11,21 @@ train:
push_forward_steps: 1
use_coords: True
noise: True
downsample_factor: 1
downsample_factor: 2

model:
model_name: gcnn
width: 32
reflection: False
model_name: factorized_fno
modes: 64
width: 256
dropout: 0.0
n_layers: 7
layer_norm: True

optimizer:
initial_lr: 1e-3
weight_decay: 1e-5
weight_decay: 0.01

lr_scheduler:
name: step
patience: 75
name: 'step'
factor: 0.5
patience: 75
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
torch_dataset_name: temp_input_dataset

# torch distributed does not support complex parameters
distributed: False

train:
Expand All @@ -12,19 +11,21 @@ train:
push_forward_steps: 1
use_coords: True
noise: True
downsample_factor: 2

model:
model_name: gfno
modes: 96
width: 8
reflection: False
model_name: factorized_fno
modes: 64
width: 256
dropout: 0.0
n_layers: 8
layer_norm: True

optimizer:
initial_lr: 1e-3
weight_decay: 1e-5
weight_decay: 0.01

lr_scheduler:
name: step
patience: 75
name: 'step'
factor: 0.5
patience: 75
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,22 @@ train:
push_forward_steps: 1
use_coords: True
noise: True
downsample_factor: 4
downsample_factor: 2

model:
model_name: fno
fmode_frac: [0.66, 0.66]
hidden_channels: 64
modes: [64, 64]
hidden_channels: 256
domain_padding: [0.1, 0.1]
n_layers: 4
norm: 'group_norm'
n_layers: 6
norm: 'instance_norm'
rank: 0.1

optimizer:
initial_lr: 1e-3
weight_decay: 1e-6
weight_decay: 0.01

lr_scheduler:
name: cosine
name: step
factor: 0.5
patience: 75
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,15 @@ train:
model:
model_name: gfno
modes: 64
width: 28
width: 128
reflection: False
domain_padding: 0.1

optimizer:
initial_lr: 1e-3
weight_decay: 1e-5
weight_decay: 0.01

lr_scheduler:
name: step
patience: 75
factor: 0.5
patience: 75
Original file line number Diff line number Diff line change
Expand Up @@ -12,19 +12,20 @@ train:
push_forward_steps: 1
use_coords: True
noise: True
downsample_factor: 2
downsample_factor: 4

model:
model_name: gfno
modes: 64
width: 16
width: 64
reflection: False
domain_padding: 0.0

optimizer:
initial_lr: 1e-3
weight_decay: 1e-5
weight_decay: 0.01

lr_scheduler:
name: step
patience: 75
factor: 0.5
patience: 75
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,17 @@ train:
push_forward_steps: 1
use_coords: False
noise: True
downsample_factor: 1

model:
model_name: unet_arena
hidden_channels: 32

optimizer:
initial_lr: 1e-3
weight_decay: 1e-6
weight_decay: 0.01

lr_scheduler:
name: cosine
name: step
factor: 0.5
patience: 75
Loading

0 comments on commit 7266019

Please sign in to comment.