diff --git a/example1_standard.py b/example1_standard.py
index ea042e2..c32e4dc 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -40,18 +40,19 @@ sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 #%% Next let's set up all parameters
 quality = 'medium'
-depth_strength = 0.65 # Specifies how deep (in terms of diffusion iterations the first branching happens)
+depth_strength = 0.35 # Specifies how deep (in terms of diffusion iterations) the first branching happens
 fixed_seeds = [69731932, 504430820]
 
-prompt1 = "photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic"
-prompt2 = "photo of an golden statue with a funny hat, surrounded by ferns and vines, grainy analog photograph, mystical ambience, incredible detail"
+# prompt1 = "A person in an open field of grass watching a television, red colors dominate the scene, eerie light, dark clouds on the horizon, artistically rendered by Richter"
+prompt1 = "A person in a bar, people around him, a glass of beer, artistically rendered in the style of Hopper"
+prompt2 = "A person with a sad expression, looking at a painting of an older man, all in the style of Lucien Freud"
 
 duration_transition = 12 # In seconds
 fps = 30
 
 # Spawn latent blending
 lb = LatentBlending(sdh)
-lb.autosetup_branching(quality=quality, depth_strength=depth_strength)
+lb.load_branching_profile(quality=quality, depth_strength=depth_strength)
 lb.set_prompt1(prompt1)
 lb.set_prompt2(prompt2)
 
diff --git a/example2_inpaint.py b/example2_inpaint.py
index 91e8855..2fa4c95 100644
--- a/example2_inpaint.py
+++ b/example2_inpaint.py
@@ -25,7 +25,6 @@ import torch
 from tqdm.auto import tqdm
 from diffusers import StableDiffusionInpaintPipeline
 from PIL import Image
-import matplotlib.pyplot as plt
 import torch
 from movie_util import MovieSaver
 from typing import Callable, List, Optional, Union
@@ -42,21 +41,21 @@ sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
 #%% Let's first make a source image and mask.
 quality = 'medium'
-deepth_strength = 0.65 #Specifies how deep (in terms of diffusion iterations the first branching happens)
+depth_strength = 0.65 # Specifies how deep (in terms of diffusion iterations) the first branching happens
 duration_transition = 7 # In seconds
 fps = 30
 seed0 = 190791709
 
 # Spawn latent blending
 lb = LatentBlending(sdh)
-lb.autosetup_branching(quality=quality, deepth_strength=deepth_strength)
+lb.load_branching_profile(quality=quality, depth_strength=depth_strength)
 prompt1 = "photo of a futuristic alien temple in a desert, mystic, glowing, organic, intricate, sci-fi movie, mesmerizing, scary"
 lb.set_prompt1(prompt1)
 lb.init_inpainting(init_empty=True)
 lb.set_seed(seed0)
 
 # Run diffusion
-list_latents = lb.run_diffusion(lb.text_embedding1)
+list_latents = lb.run_diffusion([lb.text_embedding1])
 image_source = lb.sdh.latent2image(list_latents[-1])
 
 mask_image = 255*np.ones([512,512], dtype=np.uint8)
diff --git a/example3_multitrans.py b/example3_multitrans.py
index 28499b7..96e2cc4 100644
--- a/example3_multitrans.py
+++ b/example3_multitrans.py
@@ -22,7 +22,6 @@ import warnings
 import torch
 from tqdm.auto import tqdm
 from PIL import Image
-import matplotlib.pyplot as plt
 import torch
 from movie_util import MovieSaver
 from typing import Callable, List, Optional, Union
@@ -32,8 +31,8 @@ torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
 device = "cuda"
-fp_ckpt = "../stable_diffusion_models/ckpt/768-v-ema.ckpt"
-fp_config = '../stablediffusion/configs/stable-diffusion/v2-inference-v.yaml'
+fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
+fp_config = 'configs/v2-inference-v.yaml'
 
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
@@ -56,16 +55,14 @@ list_prompts.append("statue of an ancient cybernetic messenger annoucing good ne
 list_seeds = [954375479, 332539350, 956051013, 408831845, 250009012, 675588737]
 
 lb = LatentBlending(sdh)
-lb.autosetup_branching(quality=quality, depth_strength=depth_strength)
+lb.load_branching_profile(quality=quality, depth_strength=depth_strength)
 
 fp_movie = "movie_example3.mp4"
-ms = MovieSaver(fp_movie, fps=fps)
-
 lb.run_multi_transition(
+        fp_movie,
         list_prompts,
         list_seeds,
-        ms=ms,
         fps=fps,
         duration_single_trans=duration_single_trans
         )
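For reference, the call pattern all three examples converge on after this change, as a minimal sketch. The import paths are an assumption about the repo layout; the prompts, seeds, and output path are placeholders; every call mirrors the + lines above:

    # Minimal sketch of the post-change API (assumed imports, hypothetical data).
    import torch
    torch.set_grad_enabled(False)
    from latent_blending import LatentBlending                  # assumed module name
    from stable_diffusion_holder import StableDiffusionHolder   # assumed module name

    fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
    fp_config = 'configs/v2-inference-v.yaml'
    sdh = StableDiffusionHolder(fp_ckpt, fp_config, "cuda")

    lb = LatentBlending(sdh)
    # load_branching_profile replaces the old autosetup_branching
    lb.load_branching_profile(quality='medium', depth_strength=0.35)

    list_prompts = ["photo of a desert at dawn", "photo of a glacier"]  # hypothetical
    list_seeds = [421337, 133742]                                       # hypothetical

    # run_multi_transition now takes the output movie path as its first
    # argument; the MovieSaver is handled internally instead of being
    # constructed by the caller and passed in as ms=...
    lb.run_multi_transition(
        "movie_sketch.mp4",
        list_prompts,
        list_seeds,
        fps=30,
        duration_single_trans=12,
    )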