From 76475d890b128e33bdfda2eaddead0a1ade4f3fc Mon Sep 17 00:00:00 2001
From: lunar
Date: Sat, 3 Dec 2022 11:18:23 +0000
Subject: [PATCH] cleanup

---
 example1_standard.py   |  2 +-
 example2_inpaint.py    | 30 ++++++++++++++++++------------
 example3_multitrans.py | 16 ++++++++++------
 3 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/example1_standard.py b/example1_standard.py
index 12df741..bc67c3e 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -40,7 +40,7 @@ sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 #%% Next let's set up all parameters
 quality = 'medium'
-deepth_strength = 0.65
+deepth_strength = 0.65 # Specifies how deep (in terms of diffusion iterations) the first branching happens
 fixed_seeds = [69731932, 504430820]
 
 prompt1 = "photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic"
diff --git a/example2_inpaint.py b/example2_inpaint.py
index 7e6fc2e..91e8855 100644
--- a/example2_inpaint.py
+++ b/example2_inpaint.py
@@ -35,23 +35,27 @@ torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
 device = "cuda"
-deepth_strength = 0.65
 fp_ckpt= "../stable_diffusion_models/ckpt/512-inpainting-ema.ckpt"
 fp_config = '../stablediffusion/configs//stable-diffusion/v2-inpainting-inference.yaml'
-
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 
-#%% Let's make a source image and mask.
-quality = 'low'
+#%% Let's first make a source image and mask.
+quality = 'medium'
+deepth_strength = 0.65 # Specifies how deep (in terms of diffusion iterations) the first branching happens
+duration_transition = 7 # In seconds
+fps = 30
 seed0 = 190791709
 
+# Spawn latent blending
 lb = LatentBlending(sdh)
 lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
 
 prompt1 = "photo of a futuristic alien temple in a desert, mystic, glowing, organic, intricate, sci-fi movie, mesmerizing, scary"
 lb.set_prompt1(prompt1)
 lb.init_inpainting(init_empty=True)
 lb.set_seed(seed0)
+
+# Run diffusion
 list_latents = lb.run_diffusion(lb.text_embedding1)
 image_source = lb.sdh.latent2image(list_latents[-1])
@@ -60,25 +64,27 @@ mask_image[340:420, 170:280] = 0
 mask_image = Image.fromarray(mask_image)
 
 
-#%% Next let's set up all parameters
+#%% Now let us compute a transition video with inpainting
+# First inject back the latents that we already computed for our source image.
 lb.inject_latents(list_latents, inject_img1=True)
+# Then set up the seeds. Keep the one from the first image
 fixed_seeds = [seed0, 6579436]
-
-prompt1 = "photo of a futuristic alien temple in a desert, mystic, glowing, organic, intricate, sci-fi movie, mesmerizing, scary"
+
+# Fix the prompts for the target
 prompt2 = "aerial photo of a futuristic alien temple in a blue coastal area, the sun is shining with a bright light"
 
 lb.set_prompt1(prompt1)
 lb.set_prompt2(prompt2)
 lb.init_inpainting(image_source, mask_image)
+
+# Run latent blending
 imgs_transition = lb.run_transition(recycle_img1=True, fixed_seeds=fixed_seeds)
 
-#% let's get more cheap frames via linear interpolation
-duration_transition = 3
-fps = 60
+# Let's get more cheap frames via linear interpolation (duration_transition*fps frames)
 imgs_transition_ext = add_frames_linear_interp(imgs_transition, duration_transition, fps)
 
-# movie saving
-fp_movie = "/home/lugo/git/latentblending/test.mp4"
+# Save as MP4
+fp_movie = "movie_example2.mp4"
 if os.path.isfile(fp_movie):
     os.remove(fp_movie)
 ms = MovieSaver(fp_movie, fps=fps, shape_hw=[lb.height, lb.width])
diff --git a/example3_multitrans.py b/example3_multitrans.py
index cb5772b..c87b3b7 100644
--- a/example3_multitrans.py
+++ b/example3_multitrans.py
@@ -31,20 +31,19 @@ from stable_diffusion_holder import StableDiffusionHolder
 torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
-device = "cuda:0"
+device = "cuda"
 fp_ckpt = "../stable_diffusion_models/ckpt/768-v-ema.ckpt"
 fp_config = '../stablediffusion/configs/stable-diffusion/v2-inference-v.yaml'
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 
-#%% MULTITRANS
+#%% Let's set up the multi transition
 fps = 30
 duration_single_trans = 15
-quality = 'high'
-deepth_strength = 0.55
-lb = LatentBlending(sdh)
-lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
+quality = 'medium'
+deepth_strength = 0.55 # Specifies how deep (in terms of diffusion iterations) the first branching happens
 
+# Specify a list of prompts below
 list_prompts = []
 list_prompts.append("surrealistic statue made of glitter and dirt, standing in a lake, atmospheric light, strange glow")
 list_prompts.append("statue of a mix between a tree and human, made of marble, incredibly detailed")
@@ -53,9 +52,14 @@ list_prompts.append("statue of a spider that looked like a human")
 list_prompts.append("statue of a bird that looked like a scorpion")
 list_prompts.append("statue of an ancient cybernetic messenger annoucing good news, golden, futuristic")
 
+# You can optionally specify the seeds
 list_seeds = [954375479, 332539350, 956051013, 408831845, 250009012, 675588737]
 
+lb = LatentBlending(sdh)
+lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
+
 fp_movie = "movie_example3.mp4"
+
 ms = MovieSaver(fp_movie, fps=fps)
 lb.run_multi_transition(
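
Note: for reviewers who want to see the flow these three examples converge on after the cleanup, below is a minimal sketch of the shared single-transition pattern. It only uses calls that appear in the patch itself; the import paths (latent_blending, movie_util), the MovieSaver.write_frame/finalize calls, and the checkpoint/config paths are assumptions about this repository's layout, not something the patch confirms.

import os
import torch
from latent_blending import LatentBlending, add_frames_linear_interp  # assumed module path
from movie_util import MovieSaver  # assumed module path
from stable_diffusion_holder import StableDiffusionHolder

torch.set_grad_enabled(False)

# Spawn a stable diffusion holder (checkpoint/config paths are placeholders)
device = "cuda"
fp_ckpt = "../stable_diffusion_models/ckpt/768-v-ema.ckpt"
fp_config = "../stablediffusion/configs/stable-diffusion/v2-inference-v.yaml"
sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)

# Set up the transition parameters
quality = 'medium'
deepth_strength = 0.65  # how deep (in terms of diffusion iterations) the first branching happens
duration_transition = 7  # in seconds
fps = 30

# Run latent blending between two prompts
lb = LatentBlending(sdh)
lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
lb.set_prompt1("photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic")
lb.set_prompt2("aerial photo of a futuristic alien temple in a blue coastal area, the sun is shining with a bright light")
imgs_transition = lb.run_transition(fixed_seeds=[69731932, 504430820])

# Get more cheap frames via linear interpolation (duration_transition*fps frames)
imgs_transition_ext = add_frames_linear_interp(imgs_transition, duration_transition, fps)

# Save as MP4
fp_movie = "movie_example.mp4"
if os.path.isfile(fp_movie):
    os.remove(fp_movie)
ms = MovieSaver(fp_movie, fps=fps, shape_hw=[lb.height, lb.width])
for img in imgs_transition_ext:
    ms.write_frame(img)  # assumed MovieSaver API; the patch only shows its constructor
ms.finalize()  # assumed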