# semantic_kitti_unet32_spherical_transformer.yaml

DATA:
  data_name: semantic_kitti
  data_root: YOUR_DATA_ROOT/SemanticKITTI/dataset  # Fill in the data path
  label_mapping: util/semantic-kitti.yaml
  classes: 19
  fea_dim: 6
  voxel_size: [0.05, 0.05, 0.05]
  voxel_max: 120000
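
# Notes on DATA: SemanticKITTI single-scan segmentation evaluates 19 classes,
# matching classes: 19 above. voxel_size is in meters (5 cm cubes), and
# voxel_max caps the number of voxels kept per scan. fea_dim is read here as
# the feature dimension assembled by the data loader, which is an assumption.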

TRAIN:
  # arch
  arch: unet_spherical_transformer
  input_c: 4
  m: 32
  block_reps: 2
  block_residual: True
  layers: [32, 64, 128, 256, 256]
  quant_size_scale: 24
  patch_size: 1
  window_size: 6
  use_xyz: True
  sync_bn: True  # use synchronized BatchNorm across GPUs or not
  rel_query: True
  rel_key: True
  rel_value: True
  drop_path_rate: 0.3
  max_batch_points: 1000000
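
  # Notes on the arch block: input_c: 4 matches per-point (x, y, z, intensity)
  # on SemanticKITTI, and m: 32 is the base UNet channel width (hence "unet32"),
  # expanded per stage by layers. rel_query/rel_key/rel_value enable relative
  # positional encodings inside window attention, drop_path_rate is stochastic
  # depth, and max_batch_points caps the total points per batch. The reading of
  # quant_size_scale (position quantization for the relative encodings) follows
  # the SphereFormer implementation and is an assumption here.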
  class_weight: [3.1557, 8.7029, 7.8281, 6.1354, 6.3161, 7.9937, 8.9704,
                 10.1922, 1.6155, 4.2187, 1.9385, 5.5455, 2.0198, 2.6261,
                 1.3212, 5.1102, 2.5492, 5.8585, 7.3929]
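
  # The 19 class_weight entries weight the cross-entropy loss per class, one
  # per SemanticKITTI class in label_mapping order; rarer classes get larger
  # weights. How they were computed (commonly inverse log frequency) is not
  # stated in this file.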
  xyz_norm: False
  pc_range: [[-51.2, -51.2, -4], [51.2, 51.2, 2.4]]
  window_size_sphere: [2, 2, 80]
  window_size_scale: [2.0, 1.5]
  sphere_layers: [1, 2, 3, 4, 5]
  grad_checkpoint_layers: []
  a: 0.0125
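
  # pc_range is [[x_min, y_min, z_min], [x_max, y_max, z_max]] in meters.
  # window_size_sphere sets the attention window extents in spherical
  # coordinates (two angular extents plus a radial one) for the stages listed
  # in sphere_layers, and window_size_scale controls how windows grow across
  # stages. `a` is read as the exponential-split parameter of the radial
  # partition; that interpretation is an assumption based on the arch name.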
  loss_name: ce_loss
  use_tta: False
  vote_num: 4
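
  # use_tta toggles test-time augmentation: vote_num augmented forward passes
  # per scan whose predictions are fused by voting. It is disabled for this
  # training config.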

  # training
  aug: True
  transformer_lr_scale: 0.1
  scheduler_update: step
  scheduler: Poly
  power: 0.9
  use_amp: True
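
  # scheduler: Poly with scheduler_update: step is assumed to follow the
  # standard polynomial decay, lr = base_lr * (1 - iter / max_iter) ** power,
  # applied every optimizer step. transformer_lr_scale: 0.1 presumably trains
  # the transformer parameters at 0.1 * base_lr to stabilize attention layers,
  # and use_amp enables mixed-precision training.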
  train_gpu: [0, 1, 2, 3]
  workers: 16  # data loader workers
  batch_size: 8  # batch size for training
  batch_size_val: 8  # batch size for validation during training (memory/speed tradeoff)
  base_lr: 0.006
  epochs: 50
  start_epoch: 0
  momentum: 0.9
  weight_decay: 0.02
  drop_rate: 0.5
  ignore_label: 255
  manual_seed: 123
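
  # momentum and weight_decay parameterize the optimizer, which is constructed
  # in the training script rather than named here. ignore_label: 255 marks
  # labels excluded from the loss, and manual_seed fixes the RNG for
  # reproducibility.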
  print_freq: 10
  save_freq: 1
  save_path: runs/semantic_kitti_unet32_spherical_transformer
  weight:  # path to initial weight (default: none)
  resume:  # path to latest checkpoint (default: none)
  evaluate: True  # evaluate on the validation set; needs extra GPU memory, so a small batch_size_val is recommended
  eval_freq: 1
  val: False
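
  # Checkpoints go to save_path every save_freq epochs. Leaving weight and
  # resume empty trains from scratch; evaluate: True runs validation every
  # eval_freq epochs during training. val: False is read as "this is not a
  # validation-only run", an assumption based on the flag name.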

Distributed:
  dist_url: tcp://127.0.0.1:6789
  dist_backend: 'nccl'
  multiprocessing_distributed: True
  world_size: 1
  rank: 0
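
# Single-node distributed training: one process per GPU in train_gpu, spawned
# when multiprocessing_distributed is True, rendezvousing at dist_url over the
# NCCL backend, with world_size: 1 node and node rank 0. Whether batch_size: 8
# is global or per GPU depends on the training script; the global reading is
# an assumption here.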