From e0793be6fac54af708358c928e0a6b0f2e2f6eda Mon Sep 17 00:00:00 2001
From: transpchan
Date: Sat, 18 Feb 2023 19:59:50 +0800
Subject: [PATCH] add webui

---
 infer.sh       |  16 -----
 notebook.ipynb |  14 ++--
 train.py       |  10 +--
 webui.py       | 178 +++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 189 insertions(+), 29 deletions(-)
 delete mode 100644 infer.sh
 create mode 100644 webui.py

diff --git a/infer.sh b/infer.sh
deleted file mode 100644
index 3a4ecc8..0000000
--- a/infer.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-rm -r "./results"
-mkdir "./results"
-
-#note that this version uses a different folder structure than CoNRv1 or CoNR on the Megvii github
-
-torchrun \
---nproc_per_node=1 train.py --mode=test --dataloaders=2 \
---test_input_poses_images=./poses/ \
---test_input_person_images=./character_sheet/char1/ \
---test_output_dir=./results/ \
---test_checkpoint_dir=./weights/
-
-echo Generating Video...
-ffmpeg -r 30 -y -i ./results/%d.png -r 30 -c:v libx264 output.mp4 -r 30
-echo DONE.
\ No newline at end of file
diff --git a/notebook.ipynb b/notebook.ipynb
index 0c6309c..d3fe4b5 100644
--- a/notebook.ipynb
+++ b/notebook.ipynb
@@ -51,8 +51,8 @@
 "#@markdown You may also replace the link to the latest weight on https://github.com/transpchan/Live3D-v2/releases.\n",
 "\n",
 "!mkdir weights\n",
-"#!wget https://github.com/transpchan/Live3D-v2/releases/download/checkpoints-2.1/checkpoints.zip\n",
-"!wget https://github.com/transpchan/Live3D-v2/releases/download/checkpoints-2.2/checkpoints.zip\n",
+"#!curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/checkpoints-2.1/checkpoints.zip\n",
+"!curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/checkpoints-2.2/checkpoints.zip\n",
 "!unzip checkpoints.zip -d ./weights/"
 ]
 },
@@ -106,18 +106,18 @@
 "!rm -r poses/\n",
 "!mkdir character_sheet/\n",
 "if character == 'short_hair':\n",
-"    !wget https://github.com/transpchan/Live3D-v2/releases/download/samples/short_hair_images.zip\n",
+"    !curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/samples/short_hair_images.zip\n",
 "    !unzip -j short_hair_images.zip -x '__MACOSX/*' -d character_sheet/character/ \n",
 "elif character == 'double_ponytail':\n",
-"    !wget https://github.com/transpchan/Live3D-v2/releases/download/samples/double_ponytail_images.zip\n",
+"    !curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/samples/double_ponytail_images.zip\n",
 "    !unzip -j double_ponytail_images.zip -x '__MACOSX/*' -d character_sheet/character/\n",
 "else:\n",
 "    print(\"Please upload your character sheets to /content/CoNR/character_sheet/ \")\n",
 "if character == 'short_hair':\n",
-"    !wget https://github.com/transpchan/Live3D-v2/releases/download/samples/short_hair.zip\n",
+"    !curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/samples/short_hair.zip\n",
 "    !unzip -j short_hair.zip -d poses/\n",
 "elif character == 'double_ponytail':\n",
-"    !wget https://github.com/transpchan/Live3D-v2/releases/download/samples/double_ponytail.zip\n",
+"    !curl -O -J -L https://github.com/transpchan/Live3D-v2/releases/download/samples/double_ponytail.zip\n",
 "    !unzip -j double_ponytail.zip -d poses/ \n",
 "else:\n",
 "    print(\"Please upload your UDP sequences or poses images to /content/CoNR/poses/ .\")"
@@ -152,7 +152,6 @@
 "!pip install open3d\n",
 "!mkdir results\n",
 "!python3 train.py --mode=test \\\n",
-"--dataloaders=2 \\\n",
 "--test_input_poses_images=./character_sheet/character/ \\\n",
"--test_input_person_images=./character_sheet/ \\\n", "--test_output_dir=./results/ \\\n", @@ -288,7 +287,6 @@ "#@markdown For sample data, this process may take about 40 minutes. You can stop earlier to get an shorter result (by clicking stop on the left).\n", "!mkdir results\n", "!python3 train.py --mode=test \\\n", - "--dataloaders=2 \\\n", "--test_input_poses_images=./poses/ \\\n", "--test_input_person_images=./character_sheet/ \\\n", "--test_output_dir=./results/ \\\n", diff --git a/train.py b/train.py index fdc37ad..8f1a094 100644 --- a/train.py +++ b/train.py @@ -157,23 +157,23 @@ def build_args(): parser.add_argument('--batch_size', type=int, default=1, help='minibatch size') - parser.add_argument('--dataloaders', type=int, default=2, + parser.add_argument('--dataloaders', type=int, default=0, help='Num of dataloaders') parser.add_argument('--mode', default="test", choices=['train', 'test'], help='Training mode or Testing mode') parser.add_argument('--test_input_person_images', - type=str, default="./test_data/test_images/", + type=str, default="./character_sheet/", help='Directory to input character sheets') parser.add_argument('--test_input_poses_images', type=str, - default="./test_data/test_poses_images/", + default="./poses/", help='Directory to input UDP sequences or pose images') parser.add_argument('--test_checkpoint_dir', type=str, - default=None, + default="./weights/", help='Directory to model weights') parser.add_argument('--test_output_dir', type=str, - default="./saved_models/resu2/images/test/", + default="./results/", help='Directory to output images') parser.add_argument('--test_rnn_iterate_on_last_frames', type=int, default=0) diff --git a/webui.py b/webui.py new file mode 100644 index 0000000..f0a1336 --- /dev/null +++ b/webui.py @@ -0,0 +1,178 @@ +# Copyright 2023 (c) Live3D-v2 transpchan. + +# Licensed under the GPL License, Version 3.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.gnu.org/licenses/gpl-3.0.html +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Do NOT remove copyright notice when redistributing or reusing this GUI for other purposes. + +import gradio as gr +import os +import sys +import shutil +from pathlib import Path +import subprocess +if __name__ == "__main__": + langdata = {"NO_CHAR_SHEET": ["## Please select at least two character design sheets", "## 请选择至少两张角色设计图"], + "NO_UDP": ["## MMD2UDP plugin fails to run, check if your .vmd or .pmx files are correct.", "## MMD2UDP插件运行失败,请检查您的.vmd或.pmx文件是否正确"], + "NO_PMX": ["## Please select a .zip file containing PMX mesh", "## 请选择一个包含 PMX模型 的.zip 文件"], + "NO_MOTION": ["## Please select a MMD motion file .vmd", "## 请选择一个MMD动作文件.vmd"], + "NO_CAMERA": ["## Please select a MMD camera file .vmd", "## 请选择一个MMD相机文件.vmd"], + "MMD2UDP_FAIL": ["## MMD2UDP plugin fails to run, check if your .vmd or .pmx files are correct.", "## MMD2UDP插件运行失败,请检查您的.vmd或.pmx文件是否正确"], + "GEN_FAIL": ["## An error occurs during video generation process. 
Please post the full command line to https://github.com/transpchan/Live3D-v2/issues.", "## 在视频生成过程中发生错误。请将完整的命令行发布到https://github.com/transpchan/Live3D-v2/issues."], + "CONV_FAIL": ["## An error occurs during video conversion process. Please post the full command line to https://github.com/transpchan/Live3D-v2/issues.", "## 在视频转换过程中发生错误。请将完整的命令行发布到 https://github.com/transpchan/Live3D-v2/issues."], + "DONE": ["## Done! There is also a output_adobe_premiere.mov with transparent background in the folder. Please include our link github.com/transpchan/Live3D-v2 when sharing the video.", "## 完成!文件夹中还有一个output_adobe_premiere.mov,是透明背景的视频。根据中国大陆的AIGC相关法律法规,本程序生成视频需要在醒目位置增加水印。使用者检查生成结果并对其负责"], + + "SELECT_CHAR_SHEET": ["Select at least two .PNG files with transparent background", "选择至少两张透明背景的.PNG文件"], + + "SELECT_PMX": ["Select a .zip file containing a MMD mesh (.pmx) file and texture files", "选择一个包含MMD模型(.pmx)文件和纹理文件的.zip文件"], + "SELECT_MOTION": ["Select a MMD motion file .vmd", "选择一个MMD动作文件.vmd"], + "SELECT_CAMERA": ["Select a MMD camera file .vmd", "选择一个MMD相机文件.vmd"], + "RUN": ["Run", "运行"], + "EXAMPLE": ["### Example (select to run)", "### 例子(点击运行)"], + "UDP_POINTCLOUD": ["UDP Pointcloud Generation", "UDP点云生成"], + "UDP_POINTCLOUD_DESC": ["## UDP Pointcloud Generation from set of character design images", "## 从一组角色设计图生成UDP点云"], + "VIDEO_GEN":["Video Generation", "视频生成"], + "VIDEO_GEN_DESC": ["## Animate your hand-drawn character with a MikuMikuDance-covnerted UDP sequence", "## 使用MikuMikuDance转换的UDP序列为手绘角色添加动作"], + "COLORIZE_MESH": ["Colorize Mesh", "模型上色"], + "COLORIZE_MESH_DESC": ["## Colorize mesh (change skin) using a set of character design sheets. ", "## 使用一组角色设计图为模型上色(更改皮肤)。"], + } + + if len(sys.argv) > 1 and sys.argv[1] == "zh": + languageid = 1 + else: + languageid = 0 + Lang = {} + for each in langdata: + Lang[each] = langdata[each][languageid] + + def conr_fn(examples=None, character_sheets=None, mmd_pmx=None, mmd_motion=None, mmd_camera=None): + if (examples is None or examples == "") and (character_sheets is None or len(character_sheets) < 2): + return Lang["NO_CHAR_SHEET"], None + + if Path("character_sheet").exists(): + shutil.rmtree(Path("character_sheet")) + if Path("./MMD2UDP/output").exists(): + shutil.rmtree(Path("./MMD2UDP/output")) + if Path("results").exists(): + shutil.rmtree(Path("results")) + Path("character_sheet").mkdir(parents=True, exist_ok=True) + Path("./MMD2UDP/output").mkdir(parents=True, exist_ok=True) + Path("results").mkdir(parents=True, exist_ok=True) + if examples is not None and examples != "": + poses = Path(examples) + character = Path(examples+"_images") + + else: + for i, e in enumerate(character_sheets): + with open(f"character_sheet/{i}.png", "wb") as f: + e.seek(0) + f.write(e.read()) + e.seek(0) + poses = Path("./MMD2UDP/output") + character = Path("./character_sheet") + if mmd_pmx is not None: + with open("./MMD2UDP/model.zip", "wb") as f: + mmd_pmx.seek(0) + f.write(mmd_pmx.read()) + mmd_pmx.seek(0) + else: + return Lang["NO_PMX"], None + if mmd_motion is not None: + with open("./MMD2UDP/motion.vmd", "wb") as f: + mmd_motion.seek(0) + f.write(mmd_motion.read()) + mmd_motion.seek(0) + else: + return Lang["NO_MOTION"], None + if mmd_camera is not None: + with open("./MMD2UDP/camera.vmd", "wb") as f: + mmd_camera.seek(0) + f.write(mmd_camera.read()) + mmd_camera.seek(0) + cwd = os.getcwd() + os.chdir("MMD2UDP") + if os.name == "nt": + os.system("UltraDensePose.exe") + else: + os.system("udp") + os.chdir(cwd) + if len(list(poses.glob("*"))) == 0: + 
return Lang["NO_UDP"], None + + else: + ret = os.system( + "python train.py --test_input_poses_images={} --test_input_person_images={}".format(poses, character)) + + if ret != 0: + return Lang["GEN_FAIL"], None + torun = 'ffmpeg -r 30 -y -i ./results/%d.png -i watermark.png -filter_complex "overlay=x=(main_w-overlay_w)/2:y=(overlay_h)/2" -c:v libx264 -strict -2 -pix_fmt yuv420p output.mp4' + + ret = os.system(torun) + if ret != 0: + return Lang["CONV_FAIL"], None + + torun = 'ffmpeg -r 30 -y -i ./results/%d.png -i watermark.png -filter_complex "overlay=x=(main_w-overlay_w)/2:y=(overlay_h)/2" -c:v qtrle output_adobe_premiere.mov' + ret = os.system(torun) + + return Lang["DONE"], "output.mp4" + + +with gr.Blocks(title="Live3D-v2") as ui: + gr.Markdown("## [Live3D-v2](https://github.com/transpchan/Live3D-v2)") + gr.Markdown( + " [Live3D-v2](https://github.com/transpchan/Live3D-v2) (Oct. 2022) is an incremental update to the MIT-Licenced CoNR (Jul. 2022). CoNR is also known as [Live3D-v1 public beta](https://github.com/transpchan/Live3D) (Sep. 2021). Credits: MMD2UDP plugin by [KurisuMakise004](https://github.com/KurisuMakise004/MMD2UDP). Drawings from MIT-Licenced CoNR. ") + + with gr.Tab(Lang["VIDEO_GEN"]): + ret = gr.Markdown(Lang["VIDEO_GEN_DESC"]) + + with gr.Row(): + with gr.Column(variant="panel"): + gr.Markdown(Lang["SELECT_CHAR_SHEET"]) + character_sheets = gr.File( + label="Character Design Sheet", file_count="multiple", file_types=[".png", ".webp"]) + + with gr.Column(variant="panel"): + + # string input + examples = gr.Text(label="UDP_Sequence", visible=False) + gr.Markdown(Lang["SELECT_PMX"]) + mmd_pmx = gr.UploadButton( + label="Zip (.zip)", file_count="single", file_types=[".zip"]) + gr.Markdown(Lang["SELECT_MOTION"]) + mmd_motion = gr.UploadButton( + label="MMD motion (.vmd)", file_count="single", file_types=[".vmd"]) + gr.Markdown(Lang["SELECT_CAMERA"]) + mmd_camera = gr.UploadButton( + label="MMD camera (.vmd)", file_count="single", file_types=[".vmd"]) + + # os.system("sh download.sh") + + with gr.Column(): + run = gr.Button(Lang["RUN"]) + video = gr.Video() + run.click(fn=conr_fn, inputs=[examples, + character_sheets, mmd_pmx, mmd_motion, mmd_camera], outputs=[ret, video]) + + with gr.Row(): + gr.Examples(fn=conr_fn, inputs=[examples], outputs=[ret, video], + examples=["short_hair", "double_ponytail"], cache_examples=False, examples_per_page=10, run_on_click=True, label=Lang["EXAMPLE"]) + + with gr.Tab(Lang["UDP_POINTCLOUD"]): + + gr.Markdown(Lang["UDP_POINTCLOUD_DESC"]) + gr.Markdown( + "## Not supported on Windows yet. Use the [Colab](https://colab.research.google.com/github/transpchan/Live3D-v2/blob/main/notebook.ipynb) instead.") + with gr.Tab(Lang["COLORIZE_MESH"]): + + gr.Markdown(Lang["COLORIZE_MESH_DESC"]) + gr.Markdown( + "## Not supported on Windows yet. This is usually for game/mod developers [1](https://www.bilibili.com/video/BV1ae4y1Y7ga/), [2](https://www.bilibili.com/video/BV1fB4y1V7ga), [3](https://www.youtube.com/watch?v=HK7dhP7UXzs).") + +ui.launch(inbrowser=True)