
the worst result ! #4

Open
aligoglos opened this issue Jul 8, 2019 · 1 comment

@aligoglos

I tried to test your model with my own images and wrote some code, but the results are awful!
Can you check it and tell me where my mistake is?

First, I added dataset.py:

import os
import numpy as np
import skimage.io as io
import skimage.color as color

from torch.utils.data import DataLoader
import torch.utils.data as data
import torchvision.transforms as transforms


IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']

def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def _get_paths_from_images(path):
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = []
    for dirpath, _, fnames in sorted(os.walk(path)):
        for fname in sorted(fnames):
            if is_image_file(fname):
                img_path = os.path.join(dirpath, fname)
                images.append(img_path)
    assert images, '{:s} has no valid image file'.format(path)
    return images



def get_image_paths(dataroot):
    env, paths = None, None
    if dataroot is not None:
        paths = sorted(_get_paths_from_images(dataroot))
    return env, paths

def generate_loader(
    path, scale,
    batch_size=64, num_workers=1,
    shuffle=True, drop_last=False
):
    dataset = TestDataset(path, scale)

    return DataLoader(
        dataset,
        batch_size=batch_size, num_workers=num_workers,
        shuffle=shuffle, drop_last=drop_last
    )

class TestDataset(data.Dataset):
    def __init__(self, dirname, scale):
        super(TestDataset, self).__init__()
        self.name = dirname.split("/")[-1]
        self.scale = scale
        all_files = get_image_paths(dataroot=dirname)
        self.lr = all_files[1]
        self.transform = transforms.Compose([
            transforms.ToTensor()
        ])

    def __getitem__(self, index):
        filename = self.lr[index].split("\\")[-1]
        lr = io.imread(self.lr[index])
        if len(lr.shape) == 2:
            lr = color.gray2rgb(lr)
        return self.transform(lr), filename

    def __len__(self):
        return len(self.lr)

Then I wrote the tester class:

import torch
from dataset import generate_loader
import utility


class Tester:

    def __init__(self, args, my_model, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.model = my_model
        self.data_path = args.dir_data
        self.error_last = 1e8
        del args, ckp, my_model

    def test_on_images(self):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(device)
        loader = generate_loader(
            self.data_path,
            self.scale,
            batch_size=1, num_workers=1,
            shuffle=False, drop_last=False
        )
        self.model.eval()
        with torch.no_grad():
            for i, (LR, filename) in enumerate(loader):
                lr = LR.to(device, dtype=torch.float)
                sr = self.model(lr, self.scale)
                sr = utility.quantize(sr, self.args.rgb_range)
                save_list = [sr]
                self.ckp.save_results(filename, save_list, self.scale, 1)

And I changed main.py:

if __name__ == '__main__':
    if checkpoint.ok:
        model = model.Model(args, checkpoint)
        t = Tester(args, model, checkpoint)
        t.test_on_images()
        checkpoint.done()

But the results were very bad. When rgb_range = 255 the result is a completely black image,
and when rgb_range = 1 the results are:

[attached output images: ('1 jpg',)_x1_SR_1 and ('2 jpg',)_x1_SR_1]

Also, I set scale = 4 but the output size did not increase.

@yyknight (Owner)

Actually, this work was implemented for the NTIRE 2019 Real-SR competition. The competition provided coupled LR and SR images of the same size, so we removed the upsampling module from our network.
About your question: it looks like your input images are normalized to (0, 1) somewhere, while our model is trained with rgb_range = 255, so you cannot get good results with rgb_range = 1. And you get the black image when rgb_range = 255 because all the pixel values lie in (0, 1).
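
For anyone hitting the same problem: a minimal sketch of a fix, assuming the pretrained model really expects raw 0–255 inputs as described above, is to undo the scaling that torchvision's ToTensor() applies (it converts uint8 HWC images to CHW floats in [0, 1]). For example, the transform in TestDataset above could be replaced with something like:

import torchvision.transforms as transforms

# Hypothetical replacement for self.transform in the TestDataset shown above.
# ToTensor() rescales uint8 images to [0, 1], so multiply back by 255
# to match a model trained with rgb_range = 255.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Lambda(lambda t: t * 255.0),
])

With this (and rgb_range left at 255) the input range matches training; note that the output will still have the same resolution as the input, since the upsampling module was removed.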
