reorganization
parent 145569519d
commit 90d871f163
@@ -0,0 +1,4 @@
+from .blending_engine import BlendingEngine
+from .diffusers_holder import DiffusersHolder
+from .movie_utils import MovieSaver
+from .utils import interpolate_spherical, add_frames_linear_interp, interpolate_linear, get_spacing, get_time, yml_load, yml_save
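
This first hunk appears to be a new package-level __init__ that re-exports the public classes and helpers, so downstream code can import everything from one namespace instead of reaching into individual modules. A minimal usage sketch, assuming the hunk is indeed the package __init__ and the package is installed under the name latentblending (as the setup.py added later in this commit suggests):

    # Hypothetical usage after installing the package; names taken from the hunk above.
    from latentblending import BlendingEngine, DiffusersHolder, MovieSaver
    from latentblending import interpolate_spherical, yml_load, yml_save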
@@ -5,16 +5,16 @@ import warnings
 import time
 from tqdm.auto import tqdm
 from PIL import Image
-from movie_util import MovieSaver
+from latentblending.movie_util import MovieSaver
 from typing import List, Optional
 import lpips
-from utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
+from latentblending.utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
 warnings.filterwarnings('ignore')
 torch.backends.cudnn.benchmark = False
 torch.set_grad_enabled(False)


-class LatentBlending():
+class BlendingEngine():
     def __init__(
             self,
             dh: None,
@@ -3,7 +3,7 @@ import numpy as np
 import warnings

 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-from utils import interpolate_spherical
+from latentblending.utils import interpolate_spherical
 from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
@@ -1,6 +1,6 @@
 import torch
 import warnings
-from latent_blending import LatentBlending
+from blending_engine import BlendingEngine
 from diffusers_holder import DiffusersHolder
 from diffusers import AutoPipelineForText2Image

@@ -1,21 +1,6 @@
-# Copyright 2022 Lunar Ring. All rights reserved.
-# Written by Johannes Stelzer, email stelzer@lunar-ring.ai twitter @j_stelzer
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
 import torch
 import warnings
-from latent_blending import LatentBlending
+from blending_engine import BlendingEngine
 from diffusers_holder import DiffusersHolder
 from diffusers import AutoPipelineForText2Image
 from movie_util import concatenate_movies
@@ -42,7 +27,7 @@ list_prompts.append("photo of a house, high detail")
 # You can optionally specify the seeds
 list_seeds = [95437579, 33259350, 956051013]
 fp_movie = 'movie_example2.mp4'
-lb = LatentBlending(dh)
+lb = BlendingEngine(dh)

 list_movie_parts = []
 for i in range(len(list_prompts) - 1):
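
Apart from the class rename, the construction pattern in the example stays the same: the engine still wraps a DiffusersHolder around a diffusers pipeline. A brief sketch of the renamed call, assembled only from names visible elsewhere in this commit (the DiffusionPipeline/DiffusersHolder pairing and the SDXL checkpoint id come from the test file removed below); exact signatures are not verified here:

    # Sketch of the renamed construction; model setup follows the removed test file below.
    import torch
    from diffusers import DiffusionPipeline
    from diffusers_holder import DiffusersHolder
    from blending_engine import BlendingEngine  # was: from latent_blending import LatentBlending

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to('cuda')
    dh = DiffusersHolder(pipe)
    lb = BlendingEngine(dh)  # was: lb = LatentBlending(dh)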
@@ -1,6 +1,6 @@
 lpips==0.1.4
-opencv-python==4.7.0.68
+opencv-python
 ffmpeg-python
-diffusers["torch"]==0.23.0
-transformers==4.35.2
+diffusers==0.25.0
+transformers
 pytest
@@ -0,0 +1,19 @@
+from setuptools import setup, find_packages
+
+# Read requirements.txt and store its contents in a list
+with open('requirements.txt') as f:
+    required = f.read().splitlines()
+
+setup(
+    name='latentblending',
+    version='0.2',
+    url='https://github.com/lunarring/latentblending',
+    description='Butter-smooth video transitions',
+    long_description=open('README.md').read(),
+    install_requires=required,
+    dependency_links=[
+        'git+https://github.com/lunarring/lunar_tools#egg=lunar_tools'
+    ],
+    include_package_data=False,
+)
+
@@ -1,54 +0,0 @@
-import unittest
-from latent_blending import LatentBlending
-from diffusers_holder import DiffusersHolder
-from diffusers import DiffusionPipeline
-import torch
-
-default_pipe = "stabilityai/stable-diffusion-xl-base-1.0"
-
-
-class TestDiffusersHolder(unittest.TestCase):
-
-    def test_load_diffusers_holder(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        self.assertIsNotNone(dh, "Failed to load DiffusersHolder")
-
-
-class TestSingleImageGeneration(unittest.TestCase):
-
-    def test_single_image_generation(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        dh.set_dimensions((1024, 704))
-        dh.set_num_inference_steps(40)
-        prompt = "Your prompt here"
-        text_embeddings = dh.get_text_embedding(prompt)
-        generator = torch.Generator(device=dh.device).manual_seed(int(420))
-        latents_start = dh.get_noise()
-        list_latents_1 = dh.run_diffusion(text_embeddings, latents_start)
-        img_orig = dh.latent2image(list_latents_1[-1])
-        self.assertIsNotNone(img_orig, "Failed to generate an image")
-
-
-class TestImageTransition(unittest.TestCase):
-
-    def test_image_transition(self):
-        pipe = DiffusionPipeline.from_pretrained(default_pipe, torch_dtype=torch.float16).to('cuda')
-        dh = DiffusersHolder(pipe)
-        lb = LatentBlending(dh)
-
-        lb.set_prompt1('photo of my first prompt1')
-        lb.set_prompt2('photo of my second prompt')
-        depth_strength = 0.6
-        t_compute_max_allowed = 10
-        num_inference_steps = 30
-        imgs_transition = lb.run_transition(
-            depth_strength=depth_strength,
-            num_inference_steps=num_inference_steps,
-            t_compute_max_allowed=t_compute_max_allowed)
-
-        self.assertTrue(len(imgs_transition) > 0, "No transition images generated")
-
-if __name__ == '__main__':
-    unittest.main()
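
The unittest suite above, which still referenced the old LatentBlending name and the flat module layout, is deleted by this commit without a visible replacement. If it were re-created against the reorganized package, a pytest-style sketch (pytest is already listed in requirements.txt) might look like the following; everything here is hypothetical and only reuses calls from the deleted file:

    # Hypothetical replacement test; not part of this commit.
    import torch
    from diffusers import DiffusionPipeline
    from latentblending import BlendingEngine, DiffusersHolder


    def test_image_transition():
        pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to('cuda')
        dh = DiffusersHolder(pipe)
        be = BlendingEngine(dh)
        be.set_prompt1('photo of my first prompt1')
        be.set_prompt2('photo of my second prompt')
        imgs_transition = be.run_transition(
            depth_strength=0.6,
            num_inference_steps=30,
            t_compute_max_allowed=10)
        assert len(imgs_transition) > 0, "No transition images generated"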