Skip to content

Commit

Permalink
Pull request (Debug checkbox) from magic-research#28
Browse files Browse the repository at this point in the history
  • Loading branch information
andygock committed Dec 7, 2023
1 parent fe6e2fc commit b3f53e2
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 51 deletions.
22 changes: 11 additions & 11 deletions demo/animate.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ def __init__(self, config="configs/prompts/animation.yaml") -> None:

print("Initialization Done!")

def __call__(self, source_image, motion_sequence, random_seed, step, guidance_scale, size=512):
def __call__(self, source_image, motion_sequence, random_seed, step, guidance_scale, debug, size=512):
prompt = n_prompt = ""
random_seed = int(random_seed)
step = int(step)
Expand Down Expand Up @@ -171,17 +171,17 @@ def __call__(self, source_image, motion_sequence, random_seed, step, guidance_sc
source_image = source_image,
).videos

source_images = np.array([source_image] * original_length)
source_images = rearrange(torch.from_numpy(source_images), "t h w c -> 1 c t h w") / 255.0
samples_per_video.append(source_images)

control = control / 255.0
control = rearrange(control, "t h w c -> 1 c t h w")
control = torch.from_numpy(control)
samples_per_video.append(control[:, :, :original_length])
if debug:
source_images = np.array([source_image] * original_length)
source_images = rearrange(torch.from_numpy(source_images), "t h w c -> 1 c t h w") / 255.0
samples_per_video.append(source_images)

control = control / 255.0
control = rearrange(control, "t h w c -> 1 c t h w")
control = torch.from_numpy(control)
samples_per_video.append(control[:, :, :original_length])

samples_per_video.append(sample[:, :, :original_length])

samples_per_video = torch.cat(samples_per_video)

time_str = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
Expand All @@ -192,4 +192,4 @@ def __call__(self, source_image, motion_sequence, random_seed, step, guidance_sc
save_videos_grid(samples_per_video, animation_path)

return animation_path


9 changes: 5 additions & 4 deletions demo/gradio_animate.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@

animator = MagicAnimate()

def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale):
return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale, debug):
    """Bridge the Gradio inputs to the shared MagicAnimate instance.

    Forwards every UI value (reference image, motion sequence path, seed,
    sampling steps, guidance scale, debug flag) unchanged and returns the
    path of the generated animation video.
    """
    result = animator(
        reference_image,
        motion_sequence_state,
        seed,
        steps,
        guidance_scale,
        debug,
    )
    return result

with gr.Blocks() as demo:

Expand Down Expand Up @@ -49,6 +49,7 @@ def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale)
random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
debug = gr.Checkbox(label="Debug", value=True)
submit = gr.Button("Animate")

def read_video(video):
Expand All @@ -74,7 +75,7 @@ def read_image(image, size=512):
# when the `submit` button is clicked
submit.click(
animate,
[reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
[reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale, debug],
animation
)

Expand All @@ -94,4 +95,4 @@ def read_image(image, size=512):
)


demo.launch(share=False)
demo.launch(share=True)
51 changes: 15 additions & 36 deletions demo/gradio_animate_dist.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,31 +10,16 @@
# its affiliates is strictly prohibited.
import argparse
import imageio
import os, datetime
import numpy as np
import gradio as gr
from PIL import Image
from subprocess import PIPE, run

os.makedirs("./demo/tmp", exist_ok=True)
savedir = f"demo/outputs"
os.makedirs(savedir, exist_ok=True)
from demo.animate import MagicAnimate

def animate(reference_image, motion_sequence, seed, steps, guidance_scale):
    """Run the distributed animation pipeline in a subprocess.

    Saves the uploaded reference image to a fixed temp path, launches
    ``demo.animate_dist`` as a separate process with the UI parameters,
    and returns the timestamped path where the output video is written.

    Args:
        reference_image: uploaded image as an array (converted via PIL).
        motion_sequence: path of the driving motion video.
        seed: random seed forwarded to the pipeline.
        steps: number of sampling steps.
        guidance_scale: classifier-free guidance scale.

    Returns:
        Path of the generated ``.mp4`` animation.
    """
    time_str = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    animation_path = f"{savedir}/{time_str}.mp4"
    save_path = "./demo/tmp/input_reference_image.png"
    Image.fromarray(reference_image).save(save_path)
    # Build the command as an argument list and run with shell=False:
    # string-formatting into a shell command breaks on paths containing
    # spaces/quotes and allows shell injection via the motion-sequence path.
    command = [
        "python", "-m", "demo.animate_dist",
        "--reference_image", str(save_path),
        "--motion_sequence", str(motion_sequence),
        "--random_seed", str(seed),
        "--step", str(steps),
        "--guidance_scale", str(guidance_scale),
        "--save_path", str(animation_path),
    ]
    run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    return animation_path
animator = MagicAnimate()

def animate(reference_image, motion_sequence_state, seed, steps, guidance_scale, debug):
    """Delegate a UI animation request to the module-level MagicAnimate
    instance and return the resulting animation path unchanged."""
    args = (reference_image, motion_sequence_state, seed, steps, guidance_scale, debug)
    return animator(*args)

with gr.Blocks() as demo:

Expand Down Expand Up @@ -64,23 +49,17 @@ def animate(reference_image, motion_sequence, seed, steps, guidance_scale):
random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1")
sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
debug = gr.Checkbox(label="Debug", value=True)
submit = gr.Button("Animate")

def read_video(video, size=512):
size = int(size)
def read_video(video):
reader = imageio.get_reader(video)
# fps = reader.get_meta_data()['fps']
frames = []
for img in reader:
frames.append(np.array(Image.fromarray(img).resize((size, size))))
save_path = "./demo/tmp/input_motion_sequence.mp4"
imageio.mimwrite(save_path, frames, fps=25)
return save_path
fps = reader.get_meta_data()['fps']
return video

def read_image(image, size=512):
    """Resize an uploaded image to a square of `size` x `size` pixels.

    Returns the resized image as a numpy array, ready for the pipeline.
    """
    target = (size, size)
    resized = Image.fromarray(image).resize(target)
    return np.array(resized)

# when user uploads a new video
motion_sequence.upload(
read_video,
Expand All @@ -96,15 +75,15 @@ def read_image(image, size=512):
# when the `submit` button is clicked
submit.click(
animate,
[reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
[reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale, debug],
animation
)

# Examples
gr.Markdown("## Examples")
gr.Examples(
examples=[
["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"],
["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"],
["inputs/applications/source_image/0002.png", "inputs/applications/driving/densepose/demo4.mp4"],
["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"],
Expand All @@ -115,5 +94,5 @@ def read_image(image, size=512):
outputs=animation,
)

demo.queue(max_size=10)
demo.launch(share=True)

demo.launch(share=True)

0 comments on commit b3f53e2

Please sign in to comment.