diff --git a/example1_standard.py b/example1_standard.py
index eac5bfb..b8500d4 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -29,33 +29,14 @@ pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch
 pipe.to("cuda")
 dh = DiffusersHolder(pipe)
 
-# %% Next let's set up all parameters
-depth_strength = 0.55  # Specifies how deep (in terms of diffusion iterations the first branching happens)
-t_compute_max_allowed = 10  # Determines the quality of the transition in terms of compute time you grant it
-num_inference_steps = 4
-size_output = (1024, 1024)
-
-prompt1 = "underwater landscape, fish, und the sea, incredible detail, high resolution"
-prompt2 = "rendering of an alien planet, strange plants, strange creatures, surreal"
-negative_prompt = "blurry, ugly, pale"  # Optional
-
-fp_movie = 'movie_example1.mp4'
-duration_transition = 12  # In seconds
-
-# Spawn latent blending
 lb = LatentBlending(dh)
-lb.set_prompt1(prompt1)
-lb.set_prompt2(prompt2)
-lb.set_dimensions(size_output)
-lb.set_negative_prompt(negative_prompt)
-lb.set_guidance_scale(0)
+lb.set_prompt1("photo of underwater landscape, fish, und the sea, incredible detail, high resolution")
+lb.set_prompt2("rendering of an alien planet, strange plants, strange creatures, surreal")
+lb.set_negative_prompt("blurry, ugly, pale")
 
 # Run latent blending
-lb.run_transition(
-    depth_strength=depth_strength,
-    num_inference_steps=num_inference_steps,
-    t_compute_max_allowed=t_compute_max_allowed)
+lb.run_transition()
 
 # Save movie
-lb.write_movie_transition(fp_movie, duration_transition)
+lb.write_movie_transition('movie_example1.mp4', duration_transition=12)
 