Merge pull request #96 from idealo/dev
Dev
datitran authored Jan 8, 2020
2 parents 5ee2a14 + 13a2eb8 commit d1b979f
Showing 34 changed files with 685 additions and 805 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 2.1.1
+current_version = 2.2.0
commit = False
tag = True

10 changes: 5 additions & 5 deletions .travis.yml
@@ -2,13 +2,13 @@ git:
lfs_skip_smudge: true
language: python
python:
-- 3.6
+- 3.6
install:
-- pip install flake8 -e ".[tests, docs]"
+- pip install flake8 -e ".[tests, docs]"
script:
-- flake8 . --count --show-source --statistics --select=E9,F63,F7,F82
-- pytest -vs --cov=ISR --show-capture=no --disable-pytest-warnings tests/
-- cd mkdocs && sh build_docs.sh
+- flake8 . --count --show-source --statistics --select=E9,F63,F7,F82
+- pytest -vs --cov=ISR --show-capture=no --disable-pytest-warnings tests/
+- cd mkdocs && sh build_docs.sh
deploy:
provider: pages
skip_cleanup: true
2 changes: 1 addition & 1 deletion ISR/__init__.py
@@ -1,3 +1,3 @@
from . import assistant

-__version__ = '2.1.1'
+__version__ = '2.2.0'
24 changes: 13 additions & 11 deletions ISR/assistant.py
@@ -1,6 +1,8 @@
import os
-import numpy as np
from importlib import import_module

+import numpy as np

from ISR.utils.utils import setup, parse_args
from ISR.utils.logger import get_logger

@@ -13,37 +15,37 @@ def run(config_file, default=False, training=False, prediction=False):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = get_logger(__name__)
session_type, generator, conf, dataset = setup(config_file, default, training, prediction)

lr_patch_size = conf['session'][session_type]['patch_size']
scale = conf['generators'][generator]['x']

module = _get_module(generator)
gen = module.make_model(conf['generators'][generator], lr_patch_size)
if session_type == 'prediction':
from ISR.predict.predictor import Predictor

pr_h = Predictor(input_dir=conf['test_sets'][dataset])
pr_h.get_predictions(gen, conf['weights_paths']['generator'])

elif session_type == 'training':
from ISR.train.trainer import Trainer

hr_patch_size = lr_patch_size * scale
if conf['default']['feature_extractor']:
from ISR.models.cut_vgg19 import Cut_VGG19

out_layers = conf['feature_extractor']['vgg19']['layers_to_extract']
f_ext = Cut_VGG19(patch_size=hr_patch_size, layers_to_extract=out_layers)
else:
f_ext = None

if conf['default']['discriminator']:
from ISR.models.discriminator import Discriminator

discr = Discriminator(patch_size=hr_patch_size, kernel_size=3)
else:
discr = None

trainer = Trainer(
generator=gen,
discriminator=discr,
@@ -73,7 +75,7 @@ def run(config_file, default=False, training=False, prediction=False):
batch_size=conf['session'][session_type]['batch_size'],
monitored_metrics=conf['session'][session_type]['monitored_metrics'],
)

else:
logger.error('Invalid choice.')

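run() resolves the generator architecture by name via _get_module, whose body sits outside this hunk. A minimal sketch of what it presumably does, given the import_module import moved to the top of the file; the 'ISR.models.' + generator module path is an assumption, not confirmed by this diff:

from importlib import import_module

def _get_module(generator):
    # Hypothetical resolver: a config key such as 'rdn' or 'rrdn' maps to
    # ISR.models.rdn / ISR.models.rrdn, each exposing make_model(arch_params, patch_size).
    return import_module('ISR.models.' + generator)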
9 changes: 5 additions & 4 deletions ISR/models/cut_vgg19.py
@@ -1,5 +1,6 @@
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19

from ISR.utils.logger import get_logger


@@ -16,25 +17,25 @@ class Cut_VGG19:
Attributes:
loss_model: multi-output vgg architecture with <layers_to_extract> as output layers.
"""

def __init__(self, patch_size, layers_to_extract):
self.patch_size = patch_size
self.input_shape = (patch_size,) * 2 + (3,)
self.layers_to_extract = layers_to_extract
self.logger = get_logger(__name__)

if len(self.layers_to_extract) > 0:
self._cut_vgg()
else:
self.logger.error('Invalid VGG instantiation: extracted layer must be > 0')
raise ValueError('Invalid VGG instantiation: extracted layer must be > 0')

def _cut_vgg(self):
"""
Loads pre-trained VGG, declares as output the intermediate
layers selected by self.layers_to_extract.
"""

vgg = VGG19(weights='imagenet', include_top=False, input_shape=self.input_shape)
vgg.trainable = False
outputs = [vgg.layers[i].output for i in self.layers_to_extract]
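The hunk cuts off before the extractor is assembled; a self-contained sketch of the multi-output model that _cut_vgg builds, with an illustrative patch size and layer indices (not the project defaults):

from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.models import Model

patch_size = 96               # illustrative HR patch size
layers_to_extract = [5, 9]    # illustrative layer indices

vgg = VGG19(weights='imagenet', include_top=False, input_shape=(patch_size, patch_size, 3))
vgg.trainable = False
outputs = [vgg.layers[i].output for i in layers_to_extract]
loss_model = Model(inputs=vgg.input, outputs=outputs)  # one output per extracted layer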
16 changes: 8 additions & 8 deletions ISR/models/discriminator.py
@@ -1,5 +1,5 @@
import tensorflow as tf
-from tensorflow.keras.layers import concatenate, Flatten, Input, Activation, Dense, Conv2D, BatchNormalization, LeakyReLU
+from tensorflow.keras.layers import Input, Activation, Dense, Conv2D, BatchNormalization, \
+    LeakyReLU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

@@ -22,7 +22,7 @@ class Discriminator:
conv block.
"""

def __init__(self, patch_size, kernel_size=3):
self.patch_size = patch_size
self.kernel_size = kernel_size
@@ -35,10 +35,10 @@ def __init__(self, patch_size, kernel_size=3):
self.model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
self.model._name = 'discriminator'
self.name = 'srgan-large'

def _conv_block(self, input, filters, strides, batch_norm=True, count=None):
""" Convolutional layer + Leaky ReLU + conditional BN. """

x = Conv2D(
filters,
kernel_size=self.kernel_size,
@@ -50,10 +50,10 @@ def _conv_block(self, input, filters, strides, batch_norm=True, count=None):
if batch_norm:
x = BatchNormalization(momentum=0.8)(x)
return x

def _build_disciminator(self):
""" Puts the discriminator's layers together. """

HR = Input(shape=(self.patch_size, self.patch_size, 3))
x = self._conv_block(HR, filters=64, strides=1, batch_norm=False, count=1)
for i in range(self.block_num):
@@ -68,6 +68,6 @@ def _build_disciminator(self):
# x = Flatten()(x)
x = Dense(1, name='Dense_last')(x)
HR_v_SR = Activation('sigmoid')(x)

discriminator = Model(inputs=HR, outputs=HR_v_SR)
return discriminator
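A quick hedged usage sketch for the class above; the 96px patch size is illustrative, not a project default:

from ISR.models.discriminator import Discriminator

discr = Discriminator(patch_size=96, kernel_size=3)  # patch size must match the HR patches
discr.model.summary()  # compiled with binary cross-entropy and Adam, per the __init__ shown above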
13 changes: 7 additions & 6 deletions ISR/models/imagemodel.py
@@ -1,4 +1,5 @@
import numpy as np

from ISR.utils.image_processing import (
process_array,
process_output,
@@ -12,7 +13,7 @@ class ImageModel:
Contains functions that are common across the super-scaling models.
"""

def predict(self, input_image_array, by_patch_of_size=None, batch_size=10, padding_size=2):
"""
Processes the image array into a suitable format
@@ -29,20 +30,20 @@ def predict(self, input_image_array, by_patch_of_size=None, batch_size=10, padding_size=2):
Returns:
sr_img: image output.
"""

if by_patch_of_size:
lr_img = process_array(input_image_array, expand=False)
patches, p_shape = split_image_into_overlapping_patches(
lr_img, patch_size=by_patch_of_size, padding_size=padding_size
)
# return patches
for i in range(0, len(patches), batch_size):
-batch = self.model.predict(patches[i : i + batch_size])
+batch = self.model.predict(patches[i: i + batch_size])
if i == 0:
collect = batch
else:
collect = np.append(collect, batch, axis=0)

scale = self.scale
padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,)
scaled_image_shape = tuple(np.multiply(input_image_array.shape[0:2], scale)) + (3,)
Expand All @@ -52,10 +53,10 @@ def predict(self, input_image_array, by_patch_of_size=None, batch_size=10, paddi
target_shape=scaled_image_shape,
padding_size=padding_size * scale,
)

else:
lr_img = process_array(input_image_array)
sr_img = self.model.predict(lr_img)[0]

sr_img = process_output(sr_img)
return sr_img
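A hedged usage sketch of the patch-wise path above, exercised through a subclass such as RDN; the array contents and parameter values are illustrative:

import numpy as np
from ISR.models.rdn import RDN

rdn = RDN(arch_params={'C': 3, 'D': 10, 'G': 64, 'G0': 64, 'x': 2})
lr_img = np.zeros((200, 200, 3), dtype=np.uint8)  # placeholder low-resolution image

# Splits the image into overlapping 50px patches, predicts in batches of 10,
# then stitches the upscaled patches back into one array.
sr_img = rdn.predict(lr_img, by_patch_of_size=50, batch_size=10, padding_size=2)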
81 changes: 61 additions & 20 deletions ISR/models/rdn.py
@@ -2,18 +2,50 @@
from tensorflow.keras.initializers import RandomUniform
from tensorflow.keras.layers import concatenate, Input, Activation, Add, Conv2D, Lambda, UpSampling2D
from tensorflow.keras.models import Model

from ISR.models.imagemodel import ImageModel

WEIGHTS_URLS = {
'psnr-large': {
'arch_params': {'C': 6, 'D': 20, 'G': 64, 'G0': 64, 'x': 2},
'url': 'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/ISR/rdn-C6-D20-G64-G064-x2/PSNR-driven/rdn-C6-D20-G64-G064-x2_PSNR_epoch086.hdf5',
'name': 'rdn-C6-D20-G64-G064-x2_PSNR_epoch086.hdf5'
},
'psnr-small': {
'arch_params': {'C': 3, 'D': 10, 'G': 64, 'G0': 64, 'x': 2},
'url': 'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/ISR/rdn-C3-D10-G64-G064-x2/PSNR-driven/rdn-C3-D10-G64-G064-x2_PSNR_epoch134.hdf5',
'name': 'rdn-C3-D10-G64-G064-x2_PSNR_epoch134.hdf5',
},
'noise-cancel': {
'arch_params': {'C': 6, 'D': 20, 'G': 64, 'G0': 64, 'x': 2},
'url': 'https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/ISR/rdn-C6-D20-G64-G064-x2/ArtefactCancelling/rdn-C6-D20-G64-G064-x2_ArtefactCancelling_epoch219.hdf5',
'name': 'rdn-C6-D20-G64-G064-x2_ArtefactCancelling_epoch219.hdf5',
}
}


def make_model(arch_params, patch_size):
""" Returns the model.
Used to select the model.
"""

return RDN(arch_params, patch_size)


def get_network(weights):
if weights in WEIGHTS_URLS.keys():
arch_params = WEIGHTS_URLS[weights]['arch_params']
url = WEIGHTS_URLS[weights]['url']
name = WEIGHTS_URLS[weights]['name']
else:
raise ValueError('Available RDN network weights: {}'.format(list(WEIGHTS_URLS.keys())))
c_dim = 3
kernel_size = 3
upscaling = 'ups'
return arch_params, c_dim, kernel_size, upscaling, url, name


class RDN(ImageModel):
"""Implementation of the Residual Dense Network for image super-scaling.
@@ -28,6 +60,8 @@ class RDN(ImageModel):
upscaling: string, 'ups' or 'shuffle', determines which implementation
of the upscaling layer to use.
init_extreme_val: extreme values for the RandomUniform initializer.
weights: string, if not empty, download and load pre-trained weights.
Overrides other parameters.
Attributes:
C: integer, number of conv layer inside each residual dense blocks (RDB).
@@ -40,16 +74,20 @@ class RDN(ImageModel):
model._name: identifies this network as the generator network
in the compound model built by the trainer class.
"""

def __init__(
-self,
-arch_params={},
-patch_size=None,
-c_dim=3,
-kernel_size=3,
-upscaling='ups',
-init_extreme_val=0.05,
+self,
+arch_params={},
+patch_size=None,
+c_dim=3,
+kernel_size=3,
+upscaling='ups',
+init_extreme_val=0.05,
+weights=''
):
if weights:
arch_params, c_dim, kernel_size, upscaling, url, fname = get_network(weights)

self.params = arch_params
self.C = self.params['C']
self.D = self.params['D']
@@ -66,10 +104,13 @@ def __init__(
self.model = self._build_rdn()
self.model._name = 'generator'
self.name = 'rdn'

if weights:
weights_path = tf.keras.utils.get_file(fname=fname, origin=url)
self.model.load_weights(weights_path)

def _upsampling_block(self, input_layer):
""" Upsampling block for old weights. """

x = Conv2D(
self.c_dim * self.scale ** 2,
kernel_size=3,
@@ -78,10 +119,10 @@ def _upsampling_block(self, input_layer):
kernel_initializer=self.initializer,
)(input_layer)
return UpSampling2D(size=self.scale, name='UPsample')(x)

def _pixel_shuffle(self, input_layer):
""" PixelShuffle implementation of the upscaling layer. """

x = Conv2D(
self.c_dim * self.scale ** 2,
kernel_size=3,
@@ -93,10 +134,10 @@ def _pixel_shuffle(self, input_layer):
lambda x: tf.nn.depth_to_space(x, block_size=self.scale, data_format='NHWC'),
name='PixelShuffle',
)(x)

def _UPN(self, input_layer):
""" Upscaling layers. With old weights use _upsampling_block instead of _pixel_shuffle. """

x = Conv2D(
64,
kernel_size=5,
@@ -116,7 +157,7 @@ def _UPN(self, input_layer):
return self._upsampling_block(x)
else:
raise ValueError('Invalid choice of upscaling layer.')

def _RDBs(self, input_layer):
"""RDBs blocks.
@@ -149,11 +190,11 @@ def _RDBs(self, input_layer):
# Local Residual Learning F_{i,LF} + F_{i-1}
rdb_in = Add(name='LRL_%d' % (d))([x, rdb_in])
rdb_concat.append(rdb_in)

assert len(rdb_concat) == self.D

return concatenate(rdb_concat, axis=3, name='LRLs_Concat')

def _build_rdn(self):
LR_input = Input(shape=(self.patch_size, self.patch_size, 3), name='LR')
F_m1 = Conv2D(
@@ -199,5 +240,5 @@ def _build_rdn(self):
kernel_initializer=self.initializer,
name='SR',
)(FU)

return Model(inputs=LR_input, outputs=SR)
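Putting the new weights path together, a usage sketch grounded in the diff above; any of the three WEIGHTS_URLS keys works:

from ISR.models.rdn import RDN

# A known key overrides arch_params and downloads the checkpoint via
# tf.keras.utils.get_file before loading it into the network.
rdn = RDN(weights='psnr-small')  # or 'psnr-large' / 'noise-cancel'
# rdn.model is now initialized with the downloaded weights, ready for rdn.predict(...).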