-
Notifications
You must be signed in to change notification settings - Fork 1
/
main.py
135 lines (103 loc) · 5.01 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import os
import random
import numpy as np
import torch
from glob import glob
import time
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
from segmentation_models_pytorch import Unet
import numpy as np
import pandas as pd
from classes import Model_Training as MT
from classes import DataDrive as DD
from classes import loss_functions as LF
from classes import Unet as U
""" Seeding the randomness. """
""" Seeding the randomness. """
def seeding(seed):
    """Seed every RNG source (Python, hash, NumPy, Torch CPU/CUDA) for reproducible runs.

    Args:
        seed (int): value applied to all random-number generators.
    """
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # also seed every device in multi-GPU setups
    # Force deterministic cuDNN kernels and disable the autotuner, which may
    # otherwise select non-deterministic algorithms from run to run.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
""" Create a directory. """
def create_dir(path):
if not os.path.exists(path):
os.makedirs(path)
""" Calculate the time taken """
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
if __name__ == "__main__":
    # Train one U-Net per encoder variant and record per-epoch losses.
    # Fix all RNG sources so runs are reproducible.
    seeding(42)

    # Output directories for model checkpoints and per-epoch loss CSVs.
    create_dir("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/files")
    create_dir("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/train_data")

    # Paired image/mask paths; sorted() keeps images aligned with their masks.
    train_x = sorted(glob("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/Datasets/new_dataset/train/image/*"))
    train_y = sorted(glob("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/Datasets/new_dataset/train/mask/*"))
    valid_x = sorted(glob("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/Datasets/new_dataset/test/image/*"))
    valid_y = sorted(glob("/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/Datasets/new_dataset/test/mask/*"))
    data_ = f"Dataset size:\nTrain: {len(train_x)} - Valid: {len(valid_x)}\n"
    print(data_)

    # Hyper-parameters.
    size = (512, 512)  # NOTE(review): unused here; resizing presumably happens in DataDrive — TODO confirm
    batch_size = 3
    epochs = 20
    learning_rate = 1e-4
    # Whether smp encoders start from ImageNet weights (ignored for the custom U-Net).
    pre_trained = False

    # Dataset and loaders.
    train_data = DD.DataDrive(train_x, train_y)
    valid_data = DD.DataDrive(valid_x, valid_y)
    train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=2)
    valid_loader = DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=False, num_workers=2)

    # "None" selects the hand-written U-Net from classes.Unet; the rest are
    # segmentation_models_pytorch encoder backbones.
    encoders = ["None", "resnet50", "resnet101", "resnext50_32x4d", "resnext101_32x8d", "densenet121", "densenet201"]

    for encoder_name in encoders:
        device = torch.device('cuda')
        if pre_trained and encoder_name != "None":
            model = Unet(encoder_name, encoder_weights="imagenet", classes=1, activation=None)
            model_name = "Unet_" + "pre_trained_" + encoder_name
        elif not pre_trained and encoder_name != "None":
            model = Unet(encoder_name, encoder_weights=None, classes=1, activation=None)
            model_name = "Unet_" + encoder_name
        else:
            model = U.Unet()
            model_name = "Unet_" + encoder_name

        checkpoint_path = "/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/files/" + model_name + ".pth"
        model = model.to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=5, verbose=True)
        loss_function = LF.DiceBCELoss()

        # Training loop: track the best validation loss and checkpoint on improvement.
        best_valid_loss = float("inf")
        loss_rows = []  # one (epoch, train_loss, valid_loss) tuple per epoch
        M = MT.Model_Training(model, train_loader, valid_loader, optimizer, device, loss_function)
        for epoch in range(epochs):
            start_time = time.time()
            train_loss = M.train()
            valid_loss = M.evaluate()
            # BUGFIX: the ReduceLROnPlateau scheduler was created but never
            # stepped, so the learning rate never decayed; feed it the
            # validation loss each epoch.
            scheduler.step(valid_loss)

            # Save a checkpoint whenever validation loss improves.
            if valid_loss < best_valid_loss:
                data_str = f"Valid loss improved from {best_valid_loss:2.4f} to {valid_loss:2.4f}. Saving checkpoint: {checkpoint_path}"
                print(data_str)
                best_valid_loss = valid_loss
                torch.save(model.state_dict(), checkpoint_path)

            end_time = time.time()
            epoch_mins, epoch_secs = epoch_time(start_time, end_time)
            data_str = f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\n'
            data_str += f'\tTrain Loss: {train_loss:.3f}\n'
            data_str += f'\t Val. Loss: {valid_loss:.3f}\n'
            loss_rows.append((epoch, train_loss, valid_loss))
            print(data_str)

        # Persist per-epoch losses. A list of tuples replaces the previous
        # string ndarray + dropped header row, which coerced every value to str.
        C = pd.Index(["Epoch", "Train", "Valid"], name="columns")
        df = pd.DataFrame(data=loss_rows, columns=C)
        csv_path = "/home/surya/projects/Breast-Cancer-Segmentation-using-Deep-Learning/train_data/" + model_name + ".csv"
        df.to_csv(csv_path, index=False)