adapted example2

Johannes Stelzer 2024-01-09 15:59:06 +01:00
parent 7d3fa4b3a0
commit 57e4de2d6c
1 changed file with 3 additions and 13 deletions

@@ -17,24 +17,20 @@ import torch
 import warnings
 from latent_blending import LatentBlending
 from diffusers_holder import DiffusersHolder
-from diffusers import DiffusionPipeline
+from diffusers import AutoPipelineForText2Image
 from movie_util import concatenate_movies
 torch.set_grad_enabled(False)
 torch.backends.cudnn.benchmark = False
 warnings.filterwarnings('ignore')
 
 # %% First let us spawn a stable diffusion holder. Uncomment your version of choice.
-pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
-pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16)
+pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
 pipe.to('cuda')
 dh = DiffusersHolder(pipe)
 
 # %% Let's setup the multi transition
 fps = 30
-duration_single_trans = 20
-depth_strength = 0.25  # Specifies how deep (in terms of diffusion iterations) the first branching happens
-size_output = (1280, 768)
-num_inference_steps = 30
+duration_single_trans = 10
 
 # Specify a list of prompts below
 list_prompts = []
@@ -45,12 +41,8 @@ list_prompts.append("photo of a house, high detail")
 # You can optionally specify the seeds
 list_seeds = [95437579, 33259350, 956051013]
-t_compute_max_allowed = 20  # per segment
 fp_movie = 'movie_example2.mp4'
 lb = LatentBlending(dh)
-lb.set_dimensions(size_output)
-lb.dh.set_num_inference_steps(num_inference_steps)
 
 list_movie_parts = []
 for i in range(len(list_prompts) - 1):
@@ -69,8 +61,6 @@ for i in range(len(list_prompts) - 1):
     # Run latent blending
     lb.run_transition(
         recycle_img1=recycle_img1,
-        depth_strength=depth_strength,
-        t_compute_max_allowed=t_compute_max_allowed,
         fixed_seeds=fixed_seeds)
 
     # Save movie
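
For context, the pipeline introduced by this commit can be smoke-tested on its own before it is wired into LatentBlending. Below is a minimal sketch (not part of the commit) using the standard diffusers text-to-image call; the prompt is taken from the example's prompt list, and the step/guidance values reflect the usual recommendation for the distilled SDXL-Turbo checkpoint (1-4 denoising steps, guidance disabled).

import torch
from diffusers import AutoPipelineForText2Image

# Load the same checkpoint the adapted example now uses.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to('cuda')

# 4 steps with guidance_scale=0.0 are typical settings for SDXL-Turbo.
image = pipe(
    "photo of a house, high detail",
    num_inference_steps=4,
    guidance_scale=0.0).images[0]
image.save('sdxl_turbo_test.png')  # hypothetical output path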