From 90d871f163b8da1316c3859a2d7bd8433f0aacbf Mon Sep 17 00:00:00 2001
From: DGX
Date: Tue, 9 Jan 2024 17:13:16 +0000
Subject: [PATCH] reorganization

---
 latentblending/__init__.py                    |  4 ++
 .../blending_engine.py                        |  6 +--
 .../diffusers_holder.py                       |  2 +-
 .../example1_standard.py                      |  2 +-
 .../example2_multitrans.py                    | 19 +------
 gradio_ui.py => latentblending/gradio_ui.py   |  0
 movie_util.py => latentblending/movie_util.py |  0
 utils.py => latentblending/utils.py           |  0
 requirements.txt                              |  8 +--
 setup.py                                      | 20 ++++++++
 test_latentblending.py                        | 54 -------------------
 11 files changed, 35 insertions(+), 80 deletions(-)
 create mode 100644 latentblending/__init__.py
 rename latent_blending.py => latentblending/blending_engine.py (99%)
 rename diffusers_holder.py => latentblending/diffusers_holder.py (99%)
 rename example1_standard.py => latentblending/example1_standard.py (95%)
 rename example2_multitrans.py => latentblending/example2_multitrans.py (70%)
 rename gradio_ui.py => latentblending/gradio_ui.py (100%)
 rename movie_util.py => latentblending/movie_util.py (100%)
 rename utils.py => latentblending/utils.py (100%)
 create mode 100644 setup.py
 delete mode 100644 test_latentblending.py

diff --git a/latentblending/__init__.py b/latentblending/__init__.py
new file mode 100644
index 0000000..c681970
--- /dev/null
+++ b/latentblending/__init__.py
@@ -0,0 +1,4 @@
+from .blending_engine import BlendingEngine
+from .diffusers_holder import DiffusersHolder
+from .movie_util import MovieSaver
+from .utils import interpolate_spherical, add_frames_linear_interp, interpolate_linear, get_spacing, get_time, yml_load, yml_save
diff --git a/latent_blending.py b/latentblending/blending_engine.py
similarity index 99%
rename from latent_blending.py
rename to latentblending/blending_engine.py
index 1ba5da1..034b173 100644
--- a/latent_blending.py
+++ b/latentblending/blending_engine.py
@@ -5,16 +5,16 @@ import warnings
 import time
 from tqdm.auto import tqdm
 from PIL import Image
-from movie_util import MovieSaver
+from latentblending.movie_util import MovieSaver
 from typing import List, Optional
 import lpips
-from utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
+from latentblending.utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
 warnings.filterwarnings('ignore')
 torch.backends.cudnn.benchmark = False
 torch.set_grad_enabled(False)
 
 
-class LatentBlending():
+class BlendingEngine():
     def __init__(
             self,
             dh: None,
diff --git a/diffusers_holder.py b/latentblending/diffusers_holder.py
similarity index 99%
rename from diffusers_holder.py
rename to latentblending/diffusers_holder.py
index cd9f3f1..2f50950 100644
--- a/diffusers_holder.py
+++ b/latentblending/diffusers_holder.py
@@ -3,7 +3,7 @@ import numpy as np
 import warnings
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
-from utils import interpolate_spherical
+from latentblending.utils import interpolate_spherical
 from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
diff --git a/example1_standard.py b/latentblending/example1_standard.py
similarity index 95%
rename from example1_standard.py
rename to latentblending/example1_standard.py
index 706b642..7b5bda2 100644
--- a/example1_standard.py
+++ b/latentblending/example1_standard.py
@@ -1,6 +1,6 @@
 import torch
 import warnings
-from latent_blending import LatentBlending
+from blending_engine import BlendingEngine
 from diffusers_holder import DiffusersHolder
 from diffusers import AutoPipelineForText2Image
 
diff --git a/example2_multitrans.py b/latentblending/example2_multitrans.py
similarity index 70%
rename from example2_multitrans.py
rename to latentblending/example2_multitrans.py
index 320923f..191b1de 100644
--- a/example2_multitrans.py
+++ b/latentblending/example2_multitrans.py
@@ -1,21 +1,6 @@
-# Copyright 2022 Lunar Ring. All rights reserved.
-# Written by Johannes Stelzer, email stelzer@lunar-ring.ai twitter @j_stelzer
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
 import torch
 import warnings
-from latent_blending import LatentBlending
+from blending_engine import BlendingEngine
 from diffusers_holder import DiffusersHolder
 from diffusers import AutoPipelineForText2Image
 from movie_util import concatenate_movies
@@ -42,7 +27,7 @@ list_prompts.append("photo of a house, high detail")
 # You can optionally specify the seeds
 list_seeds = [95437579, 33259350, 956051013]
 fp_movie = 'movie_example2.mp4'
-lb = LatentBlending(dh)
+lb = BlendingEngine(dh)
 
 list_movie_parts = []
 for i in range(len(list_prompts) - 1):
diff --git a/gradio_ui.py b/latentblending/gradio_ui.py
similarity index 100%
rename from gradio_ui.py
rename to latentblending/gradio_ui.py
diff --git a/movie_util.py b/latentblending/movie_util.py
similarity index 100%
rename from movie_util.py
rename to latentblending/movie_util.py
diff --git a/utils.py b/latentblending/utils.py
similarity index 100%
rename from utils.py
rename to latentblending/utils.py
diff --git a/requirements.txt b/requirements.txt
index 8d812e6..3f4291a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 lpips==0.1.4
-opencv-python==4.7.0.68
+opencv-python
 ffmpeg-python
-diffusers["torch"]==0.23.0
-transformers==4.35.2
-pytest
\ No newline at end of file
+diffusers==0.25.0
+transformers
+pytest
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..8268a93
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,20 @@
+from setuptools import setup, find_packages
+
+# Read requirements.txt and store its contents in a list
+with open('requirements.txt') as f:
+    required = f.read().splitlines()
+
+setup(
+    name='latentblending',
+    version='0.2',
+    url='https://github.com/lunarring/latentblending',
+    description='Butter-smooth video transitions',
+    long_description=open('README.md').read(),
+    packages=find_packages(),
+    install_requires=required,
+    dependency_links=[
+        'git+https://github.com/lunarring/lunar_tools#egg=lunar_tools'
+    ],
+    include_package_data=False,
+)
+
diff --git a/test_latentblending.py b/test_latentblending.py
deleted file mode 100644
index 78f1688..0000000
--- a/test_latentblending.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import unittest
-from latent_blending import LatentBlending
-from diffusers_holder import DiffusersHolder
-from diffusers import DiffusionPipeline
-import torch
-
-default_pipe = "stabilityai/stable-diffusion-xl-base-1.0"
-
-
-class TestDiffusersHolder(unittest.TestCase):
-
-    def test_load_diffusers_holder(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        self.assertIsNotNone(dh, "Failed to load DiffusersHolder")
-
-
-class TestSingleImageGeneration(unittest.TestCase):
-
-    def test_single_image_generation(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        dh.set_dimensions((1024, 704))
-        dh.set_num_inference_steps(40)
-        prompt = "Your prompt here"
-        text_embeddings = dh.get_text_embedding(prompt)
-        generator = torch.Generator(device=dh.device).manual_seed(int(420))
-        latents_start = dh.get_noise()
-        list_latents_1 = dh.run_diffusion(text_embeddings, latents_start)
-        img_orig = dh.latent2image(list_latents_1[-1])
-        self.assertIsNotNone(img_orig, "Failed to generate an image")
-
-
-class TestImageTransition(unittest.TestCase):
-
-    def test_image_transition(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        lb = LatentBlending(dh)
-
-        lb.set_prompt1('photo of my first prompt1')
-        lb.set_prompt2('photo of my second prompt')
-        depth_strength = 0.6
-        t_compute_max_allowed = 10
-        num_inference_steps = 30
-        imgs_transition = lb.run_transition(
-            depth_strength=depth_strength,
-            num_inference_steps=num_inference_steps,
-            t_compute_max_allowed=t_compute_max_allowed)
-
-        self.assertTrue(len(imgs_transition) > 0, "No transition images generated")
-
-if __name__ == '__main__':
-    unittest.main()
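
Note: after this patch, downstream code imports from the `latentblending` package namespace instead of the old top-level modules. Below is a minimal usage sketch of the new layout; it assumes the set_prompt1/set_prompt2/run_transition API from the deleted test_latentblending.py carries over unchanged to BlendingEngine, and the prompts are illustrative (the model id is the `default_pipe` of the removed tests).

    import torch
    from diffusers import AutoPipelineForText2Image
    from latentblending import BlendingEngine, DiffusersHolder

    # Build the underlying diffusers pipeline and wrap it in DiffusersHolder.
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
    ).to("cuda")
    dh = DiffusersHolder(pipe)
    lb = BlendingEngine(dh)  # renamed from LatentBlending in this patch

    # Blend between two prompts; parameter values mirror the deleted test.
    lb.set_prompt1("photo of a beautiful forest, high detail")
    lb.set_prompt2("photo of a house, high detail")
    imgs_transition = lb.run_transition(
        depth_strength=0.6,
        num_inference_steps=30,
        t_compute_max_allowed=10,
    )
    print(f"generated {len(imgs_transition)} transition frames")

Since the examples were moved inside the package but keep bare imports (from blending_engine import BlendingEngine), they still need to be run from within the latentblending/ directory; installed consumers should use the package-level imports shown above.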