moved examples
56
examples/multi_trans.py
Normal file
@@ -0,0 +1,56 @@
import torch
import warnings
from diffusers import AutoPipelineForText2Image
from latentblending.movie_util import concatenate_movies
from latentblending.blending_engine import BlendingEngine
from latentblending.diffusers_holder import DiffusersHolder

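# Inference-only setup: disable autograd and the cudnn autotuner, silence warnings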
torch.set_grad_enabled(False)
torch.backends.cudnn.benchmark = False
warnings.filterwarnings('ignore')

# %% First let us spawn a stable diffusion holder, here wrapping an SDXL-Turbo pipeline.
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to('cuda')
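# The holder wraps the diffusers pipeline so the blending engine can drive its denoising steps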
dh = DiffusersHolder(pipe)

# %% Let's set up the multi-transition
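# Playback settings: frame rate of the rendered movie and duration (in seconds) of each single transition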
fps = 30
duration_single_trans = 10

# Specify a list of prompts below
list_prompts = []
list_prompts.append("Photo of a house, high detail")
list_prompts.append("Photo of an elephant in african savannah")
list_prompts.append("photo of a house, high detail")


# You can optionally specify the seeds
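# One seed per prompt; each transition uses the seed pair of its two prompts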
list_seeds = [95437579, 33259350, 956051013]
fp_movie = 'movie_example2.mp4'
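# The blending engine computes the transition frames between two prompts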
be = BlendingEngine(dh)
list_movie_parts = []
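# Render one transition per consecutive prompt pair; each part is saved as its own movie file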
for i in range(len(list_prompts) - 1):
    # For a multi-transition we can save computation time by recycling the latents
    if i == 0:
        be.set_prompt1(list_prompts[i])
        be.set_prompt2(list_prompts[i + 1])
        recycle_img1 = False
    else:
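        # Recycle: the end point of the previous transition becomes the new start point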
        be.swap_forward()
        be.set_prompt2(list_prompts[i + 1])
        recycle_img1 = True

    fp_movie_part = f"tmp_part_{str(i).zfill(3)}.mp4"
    fixed_seeds = list_seeds[i:i + 2]
    # Run latent blending
    be.run_transition(
        recycle_img1=recycle_img1,
        fixed_seeds=fixed_seeds)

    # Save movie
    be.write_movie_transition(fp_movie_part, duration_single_trans)
    list_movie_parts.append(fp_movie_part)

# Finally, concatenate the result
concatenate_movies(fp_movie, list_movie_parts)
26
examples/single_trans.py
Normal file
@@ -0,0 +1,26 @@
import torch
import warnings
from diffusers import AutoPipelineForText2Image
from latentblending.blending_engine import BlendingEngine
from latentblending.diffusers_holder import DiffusersHolder

warnings.filterwarnings('ignore')
torch.set_grad_enabled(False)
torch.backends.cudnn.benchmark = False

# %% First let us spawn a stable diffusion holder, here wrapping an SDXL-Turbo pipeline.
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipe.to("cuda")

dh = DiffusersHolder(pipe)

be = BlendingEngine(dh)
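# Define the two endpoint prompts and a shared negative prompt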
be.set_prompt1("photo of underwater landscape, fish, and the sea, incredible detail, high resolution")
be.set_prompt2("rendering of an alien planet, strange plants, strange creatures, surreal")
be.set_negative_prompt("blurry, ugly, pale")
# Run latent blending
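# This computes the blended frames between prompt1 and prompt2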
be.run_transition()
# Save movie
be.write_movie_transition('movie_example1.mp4', duration_transition=12)