# train_nusc_radar.py
# Training script for radar-assisted monocular depth estimation on nuScenes.
# (Removed: GitHub page-scrape header and line-number gutter that preceded
#  the actual source and made the file invalid Python.)
from datetime import datetime
import os
import math
import time
import torch
import torch.nn as nn
import numpy as np
import utils
import random
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
from utils import PolynomialLRDecay
from dataloader.nusc_loader1 import NuScenesLoader, NuScenesLoader1
from loss import OrdinalRegressionLoss
from valid_loader import train_one_epoch, validation
# ---- training configuration / hyper-parameters ----
BATCH_SIZE = 8
EPOCHS = 30
LR = 0.01                # initial SGD learning rate
END_LR = 0.00001         # final learning rate of the polynomial decay schedule
POLY_POWER = 0.9         # exponent of the polynomial decay
LR_PATIENCE = 10         # NOTE(review): unused in this script — kept for compatibility
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0005
MAX_ITER = 300000        # total decay steps handed to PolynomialLRDecay
WORKERS = 3              # dataloader worker processes for training
SEED = 1984
PRINT_FREQ = 2500        # iterations between log prints inside train_one_epoch
SIZE = (350, 800)        # network input size; presumably (height, width) — TODO confirm
NSWEEPS = 5              # number of aggregated radar sweeps per sample
RGB_ONLY = False         # if True, train_one_epoch is told to ignore the radar input
# min value (meter) for benchmark training set: 1.9766
# max value (meter) for benchmark training set: 90.4414
# min value (meter) for eigen training set: 0.704
# max value (meter) for eigen training set: 79.729
ORD_NUM = 80             # number of ordinal-regression depth bins
GAMMA = 0.3
ALPHA = 1
BETA = 80                # depth upper bound passed to the ordinal loss
                         # (a duplicate `BETA = 80` assignment was removed)
# ---- seed every RNG the script touches, for reproducibility ----
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)   # safe no-op when CUDA is unavailable
np.random.seed(SEED)
random.seed(SEED)
# ---- create per-configuration output directory tree ----
# Encode the configuration in the directory name. The original
# 'radar'.format(NSWEEPS, SIZE[0], SIZE[1]) had no placeholders in the format
# string, so all three arguments were silently discarded and every run wrote
# into the same './result/radar' directory.
output_dir = os.path.join('./result', 'radar_{}sweeps_{}x{}'.format(NSWEEPS, SIZE[0], SIZE[1]))
train_dir = os.path.join(output_dir, 'train')   # per-iteration training artifacts
val_dir = os.path.join(output_dir, 'valid')     # validation artifacts
logdir = os.path.join(output_dir, 'log')        # tensorboard event files
# exist_ok=True replaces the racy check-then-create pattern
for _d in (output_dir, train_dir, val_dir, logdir):
    os.makedirs(_d, exist_ok=True)
print('OUTPUT_DIR = {}'.format(output_dir))
# set dataloader
# NOTE(review): machine-specific absolute path — adjust per host or move to an argument.
DATA_ROOT = '/home/auto/work/data/sets/nuscenes'
# Training set aggregates NSWEEPS radar sweeps; NuScenesLoader1 is the
# validation variant (project-local class — exact differences not visible here).
train_set = NuScenesLoader(data_root=DATA_ROOT, mode='train', nsweeps=NSWEEPS)
val_set = NuScenesLoader1(data_root=DATA_ROOT)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=WORKERS)
# Validation runs one sample at a time, in dataset order.
val_loader = torch.utils.data.DataLoader(val_set, batch_size=1, shuffle=False, num_workers=2)
# create model
from model import rvmde
# RVMDE network sized to the configured input; pretrained=True presumably loads
# pretrained backbone weights — confirm against model/rvmde.py.
model = rvmde.RVMDE(input_size=SIZE, pretrained=True)
# Alternative checkpoint-loading paths, currently disabled:
# data=torch.load('./paper_weights_rvmde.pth')
# model.load_state_dict(data)
# model=model.cuda()
#model = torch.load("/home/auto/work/RVMDE/paper_weights_rvmde.pth")
print('GPU number: {}'.format(torch.cuda.device_count()))
# Single-device training on the first GPU when available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# if GPU number > 1, then use multiple GPUs
#if torch.cuda.device_count() > 1:
#    print("Let's use", torch.cuda.device_count(), "GPUs!")
#    model = nn.DataParallel(model)
model.to(device)
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Polynomial decay from LR down to END_LR over MAX_ITER steps.
# NOTE(review): `scheduler` is never stepped in this script — confirm that
# train_one_epoch steps it internally, otherwise the LR stays constant at LR.
scheduler = PolynomialLRDecay(optimizer, max_decay_steps=MAX_ITER, end_learning_rate=END_LR, power=POLY_POWER)
# loss function
# Ordinal regression over ORD_NUM depth bins spanning up to BETA meters.
ord_loss = OrdinalRegressionLoss(ord_num=ORD_NUM, beta=BETA)
# Tensorboard writer; events land under <output_dir>/log.
logger = SummaryWriter(logdir)
# ---- main loop: one training pass + one validation pass per epoch ----
epochbar = tqdm(total=EPOCHS)
for epoch in range(EPOCHS):
    # Pass the ORD_NUM constant through (as a float, matching the original
    # literal 80.0) instead of hard-coding it, so the loss bin count stays in
    # sync with the configuration above.
    train_one_epoch(device, train_loader, model, train_dir, ord_loss, optimizer,
                    epoch, logger, PRINT_FREQ, BETA=BETA, GAMMA=GAMMA,
                    ORD_NUM=float(ORD_NUM), RGB_ONLY=RGB_ONLY)
    validation(val_loader, model)
    # Save the whole model object each epoch. Saving the full module (rather
    # than model.state_dict()) is deliberate here: the disabled resume path in
    # this script reloads checkpoints with a bare torch.load(...).
    checkpoint_filename = os.path.join(output_dir, 'checkpoint-{}.pth.tar'.format(str(epoch)))
    torch.save(model, checkpoint_filename)
    epochbar.update(1)
# Release the progress bar and flush/close the tensorboard writer.
epochbar.close()
logger.close()