# render_multiview_images.py
import argparse
import os

import matplotlib.pyplot as plt
import torch
from torchvision.utils import save_image
from tqdm import tqdm

import curriculums

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def show(tensor_img):
    """Debugging helper: display a (possibly batched) CHW tensor with matplotlib."""
    if len(tensor_img.shape) > 3:
        tensor_img = tensor_img.squeeze(0)
    tensor_img = tensor_img.permute(1, 2, 0).squeeze().cpu().numpy()
    plt.imshow(tensor_img)
    plt.show()

def generate_img(gen, z, **kwargs):
    """Render a single view from latent z.

    Returns the normalized numpy image, the raw image tensor, and the depth map.
    """
    with torch.no_grad():
        img, depth_map = gen.staged_forward(z, **kwargs)
        tensor_img = img.detach()
        # Rescale to [0, 1] for display/saving.
        img_min = img.min()
        img_max = img.max()
        img = (img - img_min) / (img_max - img_min)
        img = img.permute(0, 2, 3, 1).squeeze().cpu().numpy()
    return img, tensor_img, depth_map

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=str)
    parser.add_argument('--seeds', type=int, nargs='+', default=[0, 1, 2])
    parser.add_argument('--output_dir', type=str, default='imgs')
    parser.add_argument('--max_batch_size', type=int, default=2400000)
    parser.add_argument('--lock_view_dependence', action='store_true')
    parser.add_argument('--image_size', type=int, default=256)
    parser.add_argument('--ray_step_multiplier', type=int, default=2)
    parser.add_argument('--curriculum', type=str, default='CelebA')
    opt = parser.parse_args()

    # Configure the curriculum for evaluation: more ray steps per pixel,
    # no camera-pose jitter, and no NeRF noise.
    curriculum = getattr(curriculums, opt.curriculum)
    curriculum['num_steps'] = curriculum[0]['num_steps'] * opt.ray_step_multiplier
    curriculum['img_size'] = opt.image_size
    curriculum['psi'] = 0.7
    curriculum['v_stddev'] = 0
    curriculum['h_stddev'] = 0
    curriculum['lock_view_dependence'] = opt.lock_view_dependence
    curriculum['last_back'] = curriculum.get('eval_last_back', False)
    curriculum['nerf_noise'] = 0
    curriculum = {key: value for key, value in curriculum.items() if type(key) is str}
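    # (The filter above works because a curriculum mixes integer keys for
    # per-stage training settings with string keys for global settings; the
    # rough shape assumed here, based on how the keys are used above, is
    # {0: {'num_steps': ..., ...}, ..., 'h_mean': ..., 'img_size': ...}.)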

    os.makedirs(opt.output_dir, exist_ok=True)

    # Load the generator and swap in its EMA weights for evaluation.
    generator = torch.load(opt.path, map_location=torch.device(device))
    ema_file = opt.path.split('generator')[0] + 'ema.pth'
    ema = torch.load(ema_file, map_location=torch.device(device))
    ema.copy_to(generator.parameters())
    generator.set_device(device)
    generator.eval()

    # Yaw offsets around the curriculum's mean horizontal camera angle.
    face_angles = [-0.5, -0.25, 0., 0.25, 0.5]
    face_angles = [a + curriculum['h_mean'] for a in face_angles]

    for seed in tqdm(opt.seeds):
        images = []
        for yaw in face_angles:
            curriculum['h_mean'] = yaw
            # Re-seed before sampling so every view shares the same latent code.
            torch.manual_seed(seed)
            z = torch.randn((1, 256), device=device)
            img, tensor_img, depth_map = generate_img(generator, z, **curriculum)
            images.append(tensor_img)
        save_image(torch.cat(images), os.path.join(opt.output_dir, f'grid_{seed}.png'), normalize=True)
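
# Example invocation (hypothetical checkpoint path; the script expects the EMA
# weights to sit beside the generator checkpoint as '<prefix>ema.pth', derived
# from the positional path argument above):
#   python render_multiview_images.py checkpoints/generator.pth \
#       --seeds 0 1 2 --image_size 256 --curriculum CelebA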