diff --git a/datasets/cifar.py b/datasets/cifar.py
index e241c55..7a402b5 100644
--- a/datasets/cifar.py
+++ b/datasets/cifar.py
@@ -7,10 +7,7 @@ class CIFAR10Instance(datasets.CIFAR10):
     """CIFAR10Instance Dataset.
     """
     def __getitem__(self, index):
-        if self.train:
-            img, target = self.train_data[index], self.train_labels[index]
-        else:
-            img, target = self.test_data[index], self.test_labels[index]
+        img, target = self.data[index], self.targets[index]
 
         # doing this so that it is consistent with all other datasets
         # to return a PIL Image
diff --git a/main.py b/main.py
index d691467..b3cb57c 100644
--- a/main.py
+++ b/main.py
@@ -217,7 +217,7 @@ def train(train_loader, model, lemniscate, criterion, optimizer, epoch):
         # measure data loading time
         data_time.update(time.time() - end)
 
-        index = index.cuda(async=True)
+        index = index.cuda(non_blocking=True)
 
         # compute output
         feature = model(input)
diff --git a/test.py b/test.py
index bd9f945..3b85add 100644
--- a/test.py
+++ b/test.py
@@ -25,7 +25,7 @@ def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
         trainloader.dataset.transform = testloader.dataset.transform
         temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
         for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
-            targets = targets.cuda(async=True)
+            targets = targets.cuda(non_blocking=True)
             batchSize = inputs.size(0)
             features = net(inputs)
             trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
@@ -35,7 +35,7 @@ def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
     end = time.time()
     with torch.no_grad():
         for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
-            targets = targets.cuda(async=True)
+            targets = targets.cuda(non_blocking=True)
             batchSize = inputs.size(0)
             features = net(inputs)
             net_time.update(time.time() - end)
@@ -75,7 +75,7 @@ def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_mem
     if hasattr(trainloader.dataset, 'imgs'):
         trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
     else:
-        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()
+        trainLabels = torch.LongTensor(trainloader.dataset.targets).cuda()
     C = trainLabels.max() + 1
 
     if recompute_memory:
@@ -83,11 +83,11 @@ def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_mem
         trainloader.dataset.transform = testloader.dataset.transform
         temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
-            targets = targets.cuda(async=True)
+            targets = targets.cuda(non_blocking=True)
             batchSize = inputs.size(0)
             features = net(inputs)
             trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
-        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
+        trainLabels = torch.LongTensor(temploader.dataset.targets).cuda()
         trainloader.dataset.transform = transform_bak
 
     top1 = 0.
@@ -97,7 +97,7 @@ def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_mem
         retrieval_one_hot = torch.zeros(K, C).cuda()
         for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
             end = time.time()
-            targets = targets.cuda(async=True)
+            targets = targets.cuda(non_blocking=True)
             batchSize = inputs.size(0)
             features = net(inputs)
             net_time.update(time.time() - end)
@@ -133,4 +133,3 @@ def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_mem
     print(top1*100./total)
 
     return top1/total
-
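
For context, this patch applies two independent API migrations. First, newer torchvision releases merged the CIFAR10 dataset's separate train_data/train_labels and test_data/test_labels arrays into unified data and targets attributes, with the train flag selecting which split is loaded at construction time. Second, async became a reserved keyword in Python 3.7, so tensor.cuda(async=True) is now a SyntaxError; non_blocking=True is the replacement with the same semantics. A minimal standalone sketch of both changes (not part of the patch; assumes a torchvision version with the unified attributes and PyTorch >= 0.4):

import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms

# New-style attribute access: `data` and `targets` replace the old
# `train_data`/`train_labels` (and `test_data`/`test_labels`) pairs;
# the same two attributes work for both the train and test splits.
dataset = datasets.CIFAR10(root='./data', train=True, download=True,
                           transform=transforms.ToTensor())
img, target = dataset.data[0], dataset.targets[0]

# `.cuda(async=True)` is a SyntaxError on Python 3.7+ because `async`
# is a keyword; `non_blocking=True` requests the same asynchronous
# host-to-device copy (effective when the source is in pinned memory).
if torch.cuda.is_available():
    t = torch.tensor(target)
    t = t.cuda(non_blocking=True)

Because the kNN helper falls back to hasattr(trainloader.dataset, 'imgs') for folder-style datasets, only the CIFAR branch needed the attribute rename.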