cleanup
commit 76475d890b
parent f914ad45e7
@@ -40,7 +40,7 @@ sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 #%% Next let's set up all parameters
 quality = 'medium'
-deepth_strength = 0.65
+deepth_strength = 0.65  # Specifies how deep (in terms of diffusion iterations) the first branching happens
 fixed_seeds = [69731932, 504430820]
 
 prompt1 = "photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic"
 
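Note: the comment added above is worth unpacking. deepth_strength sets the fraction of the diffusion trajectory that both prompts share before the blending tree first branches. A minimal sketch of that relationship (num_inference_steps is an assumed value here; the real count comes from the quality preset passed to autosetup_branching):

    # Minimal sketch: deepth_strength as a fraction of the diffusion trajectory.
    # num_inference_steps is an assumption, not taken from this commit.
    num_inference_steps = 30
    deepth_strength = 0.65

    # Steps before first_branch_step are computed once and shared by both
    # prompts; branching (and the extra compute) only starts after it.
    first_branch_step = int(round(num_inference_steps * deepth_strength))
    print(f"branching begins at step {first_branch_step} of {num_inference_steps}")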
@@ -35,23 +35,27 @@ torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
 device = "cuda"
-deepth_strength = 0.65
 fp_ckpt = "../stable_diffusion_models/ckpt/512-inpainting-ema.ckpt"
 fp_config = '../stablediffusion/configs/stable-diffusion/v2-inpainting-inference.yaml'
 
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 
-#%% Let's make a source image and mask.
-quality = 'low'
+#%% Let's first make a source image and mask.
+quality = 'medium'
+deepth_strength = 0.65  # Specifies how deep (in terms of diffusion iterations) the first branching happens
+duration_transition = 7  # In seconds
+fps = 30
 seed0 = 190791709
 
+# Spawn latent blending
 lb = LatentBlending(sdh)
 lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
 prompt1 = "photo of a futuristic alien temple in a desert, mystic, glowing, organic, intricate, sci-fi movie, mesmerizing, scary"
 lb.set_prompt1(prompt1)
 lb.init_inpainting(init_empty=True)
 lb.set_seed(seed0)
 
+# Run diffusion
 list_latents = lb.run_diffusion(lb.text_embedding1)
 image_source = lb.sdh.latent2image(list_latents[-1])
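Note: the last two lines of this hunk rely on lb.run_diffusion returning the latent for every diffusion step, with the final entry fully denoised; only that entry is decoded to pixels, while the whole list is kept for re-injection in the next hunk. A stand-in sketch of that contract (the tensor shape is an assumption for illustration):

    import torch

    # Stand-in for the output of lb.run_diffusion(...): one latent per step,
    # final entry fully denoised. The 1x4x64x64 shape is an assumption.
    list_latents = [torch.randn(1, 4, 64, 64) for _ in range(30)]

    # Decode only the final latent; keep the full list so it can later be
    # injected back via lb.inject_latents(list_latents, inject_img1=True).
    final_latent = list_latents[-1]
    print(final_latent.shape)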
@@ -60,25 +64,27 @@ mask_image[340:420, 170:280] = 0
 mask_image = Image.fromarray(mask_image)
 
 
-#%% Next let's set up all parameters
+#%% Now let us compute a transition video with inpainting
+# First inject back the latents that we already computed for our source image.
 lb.inject_latents(list_latents, inject_img1=True)
 
+# Then set up the seeds. Keep the one from the first image.
 fixed_seeds = [seed0, 6579436]
 
-prompt1 = "photo of a futuristic alien temple in a desert, mystic, glowing, organic, intricate, sci-fi movie, mesmerizing, scary"
+# Fix the prompts for the target
 prompt2 = "aerial photo of a futuristic alien temple in a blue coastal area, the sun is shining with a bright light"
 lb.set_prompt1(prompt1)
 lb.set_prompt2(prompt2)
 lb.init_inpainting(image_source, mask_image)
 
+# Run latent blending
 imgs_transition = lb.run_transition(recycle_img1=True, fixed_seeds=fixed_seeds)
 
-#% let's get more cheap frames via linear interpolation
-duration_transition = 3
-fps = 60
+# Let's get more cheap frames via linear interpolation (duration_transition*fps frames)
 imgs_transition_ext = add_frames_linear_interp(imgs_transition, duration_transition, fps)
 
-# movie saving
-fp_movie = "/home/lugo/git/latentblending/test.mp4"
+# Save as MP4
+fp_movie = "movie_example2.mp4"
 if os.path.isfile(fp_movie):
     os.remove(fp_movie)
 ms = MovieSaver(fp_movie, fps=fps, shape_hw=[lb.height, lb.width])
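Note: the renamed comment makes the frame math explicit: interpolation stretches the transition to duration_transition*fps frames. A sketch of that kind of linear crossfade between consecutive keyframes (the function body is illustrative, not the library's add_frames_linear_interp):

    import numpy as np

    def linear_interp_frames(imgs, duration_s, fps):
        # Blend neighbouring keyframes so the sequence plays for duration_s
        # seconds at fps; illustrative stand-in for add_frames_linear_interp.
        n_total = int(duration_s * fps)
        out = []
        for t in np.linspace(0, len(imgs) - 1, n_total):
            i = int(np.floor(t))
            j = min(i + 1, len(imgs) - 1)
            a = t - i  # blend weight between keyframes i and j
            out.append(((1 - a) * imgs[i] + a * imgs[j]).astype(np.uint8))
        return out

    # With duration_transition = 7 and fps = 30 this yields 210 frames.
    frames = linear_interp_frames(
        [np.zeros((8, 8, 3)), np.full((8, 8, 3), 255.0)], 7, 30)
    print(len(frames))  # 210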
@@ -31,20 +31,19 @@ from stable_diffusion_holder import StableDiffusionHolder
 torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
-device = "cuda:0"
+device = "cuda"
 fp_ckpt = "../stable_diffusion_models/ckpt/768-v-ema.ckpt"
 fp_config = '../stablediffusion/configs/stable-diffusion/v2-inference-v.yaml'
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 
-#%% MULTITRANS
+#%% Let's set up the multi transition
 fps = 30
 duration_single_trans = 15
-quality = 'high'
-deepth_strength = 0.55
-lb = LatentBlending(sdh)
-lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
+quality = 'medium'
+deepth_strength = 0.55  # Specifies how deep (in terms of diffusion iterations) the first branching happens
 
+# Specify a list of prompts below
 list_prompts = []
 list_prompts.append("surrealistic statue made of glitter and dirt, standing in a lake, atmospheric light, strange glow")
 list_prompts.append("statue of a mix between a tree and human, made of marble, incredibly detailed")
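Note: the prompt list set up here is consumed together with list_seeds from the next hunk; transition n blends prompt n into prompt n+1, so N prompts give N-1 transitions. A small sketch of that pairing (the loop is illustrative, not the library's run_multi_transition):

    # Illustrative pairing: N prompts yield N-1 transitions.
    list_prompts = ["statue A", "statue B", "statue C"]  # placeholder prompts
    list_seeds = [954375479, 332539350, 956051013]       # one seed per prompt

    for (p1, s1), (p2, s2) in zip(zip(list_prompts, list_seeds),
                                  zip(list_prompts[1:], list_seeds[1:])):
        print(f"transition: {p1!r} (seed {s1}) -> {p2!r} (seed {s2})")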
@@ -53,9 +52,14 @@ list_prompts.append("statue of a spider that looked like a human")
 list_prompts.append("statue of a bird that looked like a scorpion")
 list_prompts.append("statue of an ancient cybernetic messenger announcing good news, golden, futuristic")
 
+# You can optionally specify the seeds
 list_seeds = [954375479, 332539350, 956051013, 408831845, 250009012, 675588737]
 
+lb = LatentBlending(sdh)
+lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
 
 fp_movie = "movie_example3.mp4"
 
 ms = MovieSaver(fp_movie, fps=fps)
 
 lb.run_multi_transition(
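Note: the hunk ends mid-call, so the arguments of lb.run_multi_transition are not visible here. What the surrounding settings do pin down is the movie length: six prompts (counting the spider one from the hunk header) make five transitions. Back-of-envelope with the values above:

    # Movie length implied by the settings above.
    n_prompts = 6                  # prompts appended across the two hunks
    n_transitions = n_prompts - 1  # consecutive prompt pairs
    duration_single_trans = 15     # seconds per transition
    fps = 30

    total_frames = n_transitions * duration_single_trans * fps
    total_seconds = n_transitions * duration_single_trans
    print(total_frames, "frames,", total_seconds, "seconds")  # 2250 frames, 75 seconds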