util.py (forked from Acly/comfyui-inpaint-nodes)

from __future__ import annotations
import torch
import torch.nn.functional as F
import numpy as np
from torch import Tensor


def to_torch(image: Tensor, mask: Tensor | None = None):
    if len(image.shape) == 3:
        image = image.unsqueeze(0)
    image = image.permute(0, 3, 1, 2)  # BHWC -> BCHW
    if mask is not None:
        if len(mask.shape) == 3:  # BHW -> B1HW
            mask = mask.unsqueeze(1)
        elif len(mask.shape) == 2:  # HW -> B1HW
            mask = mask.unsqueeze(0).unsqueeze(0)
        if image.shape[2:] != mask.shape[2:]:
            raise ValueError(
                f"Image and mask must be the same size. {image.shape[2:]} != {mask.shape[2:]}"
            )
    return image, mask


def to_comfy(image: Tensor):
    return image.permute(0, 2, 3, 1)  # BCHW -> BHWC
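
# Usage note (added; shapes are illustrative, not from the original file):
# a ComfyUI IMAGE tensor of shape (1, 512, 512, 3) becomes (1, 3, 512, 512)
# after to_torch, and to_comfy permutes it back to BHWC for ComfyUI outputs.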


# F.pad with mode="reflect" cannot pad by more than the input size in one call,
# so reflect as far as possible and zero-pad the remainder.
def pad_reflect_once(x: Tensor, original_padding: tuple[int, int, int, int]):
    _, _, h, w = x.shape
    padding = np.array(original_padding)
    size = np.array([w, w, h, h])  # limits for (left, right, top, bottom)
    initial_padding = np.minimum(padding, size - 1)
    additional_padding = padding - initial_padding
    x = F.pad(x, tuple(initial_padding), mode="reflect")
    if np.any(additional_padding > 0):
        x = F.pad(x, tuple(additional_padding), mode="constant")
    return x
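
# Example (added, illustrative): padding a (1, 3, 8, 8) tensor by 10 on the
# right reflects by at most 7 pixels and fills the remaining 3 with zeros,
# e.g. pad_reflect_once(x, (0, 10, 0, 0)) -> shape (1, 3, 8, 18).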


def resize_square(image: Tensor, mask: Tensor, size: int):
    _, _, h, w = image.shape
    pad_w, pad_h, prev_size = 0, 0, w
    if w == size and h == size:
        return image, mask, (pad_w, pad_h, prev_size)
    if w < h:
        pad_w = h - w
        prev_size = h
    elif h < w:
        pad_h = w - h
        prev_size = w
    image = pad_reflect_once(image, (0, pad_w, 0, pad_h))
    mask = pad_reflect_once(mask, (0, pad_w, 0, pad_h))
    if image.shape[-1] != size:
        image = F.interpolate(image, size=size, mode="nearest-exact")
        mask = F.interpolate(mask, size=size, mode="nearest-exact")
    return image, mask, (pad_w, pad_h, prev_size)
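
# Example (added, illustrative): a (1, 3, 384, 512) image and its (1, 1, 384, 512)
# mask are padded to 512x512 and then resampled to the requested square size; the
# returned (pad_w, pad_h, prev_size) tuple is what undo_resize_square needs to
# invert the operation.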


def undo_resize_square(image: Tensor, original_size: tuple[int, int, int]):
    _, _, h, w = image.shape
    pad_w, pad_h, prev_size = original_size
    if prev_size != w or prev_size != h:
        image = F.interpolate(image, size=prev_size, mode="bilinear")
    return image[:, :, 0 : prev_size - pad_h, 0 : prev_size - pad_w]
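
# Round-trip note (added): continuing the example above, the image is resized
# back to prev_size x prev_size and the reflected padding is cropped away,
# restoring the original 384x512 resolution.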


def _gaussian_kernel(radius: int, sigma: float):
    x = torch.linspace(-radius, radius, steps=radius * 2 + 1)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    return pdf / pdf.sum()


def gaussian_blur(image: Tensor, radius: int, sigma: float = 0):
    c = image.shape[-3]
    if sigma <= 0:
        sigma = 0.3 * (radius - 1) + 0.8
    kernel = _gaussian_kernel(radius, sigma).to(image.device)
    kernel_x = kernel[..., None, :].repeat(c, 1, 1).unsqueeze(1)
    kernel_y = kernel[..., None].repeat(c, 1, 1).unsqueeze(1)
    image = F.pad(image, (radius, radius, radius, radius), mode="reflect")
    image = F.conv2d(image, kernel_x, groups=c)
    image = F.conv2d(image, kernel_y, groups=c)
    return image
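
# Note (added): the blur is separable, i.e. one horizontal and one vertical 1-D
# convolution per channel instead of a full 2-D kernel. With the default sigma
# heuristic, gaussian_blur(x, radius=5) uses sigma = 0.3 * 4 + 0.8 = 2.0 and
# returns a tensor with the same shape as x.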


def binary_erosion(mask: Tensor, radius: int):
    kernel = torch.ones(1, 1, radius * 2 + 1, radius * 2 + 1, device=mask.device)
    mask = F.pad(mask, (radius, radius, radius, radius), mode="constant", value=1)
    mask = F.conv2d(mask, kernel, groups=1)
    mask = (mask == kernel.numel()).to(mask.dtype)
    return mask
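
# Note (added): a pixel survives the erosion only if every pixel in its
# (2 * radius + 1)^2 neighborhood is 1; padding with value=1 treats the border
# as foreground, so the mask only shrinks at interior edges.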


def make_odd(x):
    if x > 0 and x % 2 == 0:
        return x + 1
    return x
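

# Minimal smoke test (added as a hedged sketch, not part of the original file).
# Shapes and values below are illustrative assumptions; it exercises the
# resize/undo round trip plus the blur and erosion helpers.
if __name__ == "__main__":
    image = torch.rand(1, 3, 120, 200)  # BCHW
    mask = torch.ones(1, 1, 120, 200)

    resized_image, resized_mask, original_size = resize_square(image, mask, 256)
    restored = undo_resize_square(resized_image, original_size)
    assert restored.shape == image.shape

    blurred = gaussian_blur(image, radius=5)
    assert blurred.shape == image.shape

    eroded = binary_erosion(mask, radius=3)
    assert eroded.shape == mask.shape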