fixes for SDXL 1.0
This commit is contained in:
parent
512fd56afa
commit
40ee8700ad
@@ -108,10 +108,10 @@ class DiffusersHolder():
         pr_encoder = self.pipe._encode_prompt

         prompt_embeds = pr_encoder(
-            prompt,
-            self.device,
-            1,
-            do_classifier_free_guidance,
+            prompt=prompt,
+            device=self.device,
+            num_images_per_prompt=1,
+            do_classifier_free_guidance=do_classifier_free_guidance,
             negative_prompt=self.negative_prompt,
             prompt_embeds=None,
             negative_prompt_embeds=None,
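Side note: the positional-to-keyword change above guards against the encoder's signature shifting between diffusers releases. Recent diffusers versions expose a public encode_prompt on the SDXL pipeline that returns four tensors; a minimal sketch under that assumption (the method name and availability per version are the assumption here):

    # Sketch: SDXL prompt encoding returns per-token and pooled embeddings
    # for both the positive and the negative prompt.
    prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
        prompt=prompt,
        device=pipe.device,
        num_images_per_prompt=1,
        do_classifier_free_guidance=True,
        negative_prompt="",
    )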
@@ -132,12 +132,14 @@ class DiffusersHolder():
     @torch.no_grad()
     def latent2image(
             self,
-            latents: torch.FloatTensor):
+            latents: torch.FloatTensor,
+            convert_numpy=True):
         r"""
         Returns an image provided a latent representation from diffusion.
         Args:
             latents: torch.FloatTensor
                 Result of the diffusion process.
+            convert_numpy: whether to convert the decoded image to a numpy array (a PIL image is returned otherwise)
         """
         if self.use_sd_xl:
             # make sure the VAE is in float32 mode, as it overflows in float16
@@ -162,8 +164,12 @@ class DiffusersHolder():
             latents = latents.float()

         image = self.pipe.vae.decode(latents / self.pipe.vae.config.scaling_factor, return_dict=False)[0]
-        image = self.pipe.image_processor.postprocess(image, output_type="pil", do_denormalize=[True] * image.shape[0])
-        return np.asarray(image[0])
+        image = self.pipe.image_processor.postprocess(image, output_type="pil", do_denormalize=[True] * image.shape[0])[0]
+        if convert_numpy:
+            return np.asarray(image)
+        else:
+            return image
+

     def prepare_mixing(self, mixing_coeffs, list_latents_mixing):
         if type(mixing_coeffs) == float:
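A quick usage sketch of the new flag (dh and list_latents are illustrative names, not part of the commit):

    img_np = dh.latent2image(list_latents[-1])                        # np.ndarray, H x W x 3, uint8
    img_pil = dh.latent2image(list_latents[-1], convert_numpy=False)  # PIL.Image
    img_pil.save("frame.png")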
@@ -266,7 +272,7 @@ class DiffusersHolder():
             return_image: Optional[bool] = False):

         # 0. Default height and width to unet
-        original_size = (1024, 1024) # FIXME
+        original_size = (self.width_img, self.height_img) # FIXME
         crops_coords_top_left = (0, 0) # FIXME
         target_size = original_size
         batch_size = 1
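For context: SDXL conditions generation on original_size, crops_coords_top_left and target_size through extra "micro-conditioning" embeddings, which is why deriving original_size from the configured width/height matters here. A sketch of how these tuples are typically packed, modeled on the stock SDXL pipeline (treat the exact plumbing as an assumption):

    # Sketch: the three tuples are flattened into one tensor and handed to
    # the UNet as added_cond_kwargs["time_ids"].
    add_time_ids = torch.tensor(
        [list(original_size) + list(crops_coords_top_left) + list(target_size)],
        dtype=prompt_embeds.dtype,
        device=prompt_embeds.device,
    )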
@@ -277,7 +283,7 @@ class DiffusersHolder():
         do_classifier_free_guidance = self.guidance_scale > 1.0

         # 1. Check inputs. Raise error if not correct & 2. Define call parameters
-        list_mixing_coeffs = self.prepare_mixing()
+        list_mixing_coeffs = self.prepare_mixing(mixing_coeffs, list_latents_mixing)

         # 3. Encode input prompt (already encoded outside bc of mixing, just split here)
         prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = text_embeddings
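Downstream of this split, classifier-free guidance typically batches the negative and positive halves (negative first), following the diffusers SDXL convention; a minimal sketch:

    # Sketch: one UNet forward pass yields both the unconditional and the
    # conditional prediction.
    if do_classifier_free_guidance:
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)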
@@ -517,39 +523,27 @@ steps:
 if __name__ == "__main__":


-    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16)
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
-    ).to("cuda")
+    #%%
+    pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
+    pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16)
+    pipe.to('cuda:1') # xxx

+    #%%
     self = DiffusersHolder(pipe)
+    # xxx
+    self.set_dimensions(1024, 704)
+    self.set_num_inference_steps(40)
+    # self.set_dimensions(1536, 1024)
+    prompt = "Surreal painting of eerie, nebulous glow of an indigo moon, a spine-chilling spectacle unfolds; a baroque, marbled hand reaches out from a viscous, purple lake clutching a melting clock, its face distorted in a never-ending scream of hysteria, while a cluster of laughing orchids, their petals morphed into grotesque human lips, festoon a crimson tree weeping blood instead of sap, a psychedelic cat with an unnaturally playful grin and mismatched eyes lounges atop a floating vintage television showing static, an albino peacock with iridescent, crystalline feathers dances around a towering, inverted pyramid on top of which a humanoid figure with an octopus head lounges seductively, all against the backdrop of a sprawling cityscape where buildings are inverted and writhing as if alive, and the sky is punctuated by floating aquatic creatures glowing neon, adding a touch of haunting beauty to this otherwise deeply unsettling tableau"
+    text_embeddings = self.get_text_embedding(prompt)
+    generator = torch.Generator(device=self.device).manual_seed(int(420))
+    latents_start = self.get_noise()
+    list_latents_1 = self.run_diffusion(text_embeddings, latents_start)
+    img_orig = self.latent2image(list_latents_1[-1])

-    # get text encoding
-
-    # get image encoding
-
-
-
-
-    #%%
-    # # pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-0.9"
-    # pretrained_model_name_or_path = "stabilityai/stable-diffusion-2-1"
-    # pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16)
-    # pipe.to('cuda')
-    # # xxx
-    # self = DiffusersHolder(pipe)
-    # # xxx
-    # self.set_num_inference_steps(50)
-    # # self.set_dimensions(1536, 1024)
-    # prompt = "photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic"
-    # text_embeddings = self.get_text_embedding(prompt)
-    # generator = torch.Generator(device=self.device).manual_seed(int(420))
-    # latents_start = self.get_noise()
-    # list_latents_1 = self.run_diffusion(text_embeddings, latents_start)
-    # img_orig = self.latent2image(list_latents_1[-1])



 # %%

 """
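For anyone reproducing the demo above, a minimal standalone sketch of the SDXL 1.0 load path, assuming a diffusers version with SDXL support (>= 0.19):

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,  # fp16 weights keep VRAM manageable
    )
    pipe.to("cuda")  # the commit pins 'cuda:1'; any CUDA device works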
@@ -20,34 +20,37 @@ import warnings
 warnings.filterwarnings('ignore')
 import warnings
 from latent_blending import LatentBlending
-from stable_diffusion_holder import StableDiffusionHolder
+from diffusers_holder import DiffusersHolder
+from diffusers import DiffusionPipeline
 from movie_util import concatenate_movies
 from huggingface_hub import hf_hub_download

 # %% First let us spawn a stable diffusion holder. Uncomment your version of choice.
-fp_ckpt = hf_hub_download(repo_id="stabilityai/stable-diffusion-2-1-base", filename="v2-1_512-ema-pruned.ckpt")
-# fp_ckpt = hf_hub_download(repo_id="stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.ckpt")
-sdh = StableDiffusionHolder(fp_ckpt)
+pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch.float16)
+pipe.to('cuda:1')
+dh = DiffusersHolder(pipe)

 # %% Let's setup the multi transition
 fps = 30
-duration_single_trans = 6
-depth_strength = 0.55 # Specifies how deep (in terms of diffusion iterations) the first branching happens
+duration_single_trans = 20
+depth_strength = 0.25 # Specifies how deep (in terms of diffusion iterations) the first branching happens

 # Specify a list of prompts below
 list_prompts = []
-list_prompts.append("surrealistic statue made of glitter and dirt, standing in a lake, atmospheric light, strange glow")
-list_prompts.append("statue of a mix between a tree and human, made of marble, incredibly detailed")
-list_prompts.append("weird statue of a frog monkey, many colors, standing next to the ruins of an ancient city")
-# list_prompts.append("statue of a spider that looked like a human")
-# list_prompts.append("statue of a bird that looked like a scorpion")
-# list_prompts.append("statue of an ancient cybernetic messenger annoucing good news, golden, futuristic")
+list_prompts.append("A panoramic photo of a sentient mirror maze amidst a neon-lit forest, where bioluminescent mushrooms glow eerily, reflecting off the mirrors, and cybernetic crows, with silver wings and ruby eyes, perch ominously, David Lynch, Gaspar Noé, Photograph.")
+list_prompts.append("An unsettling tableau of spectral butterflies with clockwork wings, swirling around an antique typewriter perched precariously atop a floating, gnarled tree trunk, a stormy twilight sky, David Lynch's dreamscape, meticulously crafted.")
+# list_prompts.append("A haunting tableau of an antique dollhouse swallowed by a giant venus flytrap under the neon glow of an alien moon, its uncanny light reflecting from shattered porcelain faces and marbles, in a quiet, abandoned amusement park.")

 # You can optionally specify the seeds
-list_seeds = [954375479, 332539350, 956051013, 408831845, 250009012, 675588737]
-t_compute_max_allowed = 12 # per segment
+list_seeds = [95437579, 33259350, 956051013, 408831845, 250009012, 675588737]
+t_compute_max_allowed = 20 # per segment
 fp_movie = 'movie_example2.mp4'
-lb = LatentBlending(sdh)
+lb = LatentBlending(dh)
+lb.dh.set_dimensions(1024, 704)
+lb.dh.set_num_inference_steps(40)


 list_movie_parts = []
 for i in range(len(list_prompts) - 1):
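The body of this loop falls outside the hunk. Based on the imports and the set_prompt1/set_prompt2 methods visible elsewhere in this diff, a plausible sketch of how each segment is rendered and stitched (run_transition and write_movie_transition are assumptions about the surrounding code, not shown in this commit):

    # Hypothetical per-segment loop: render each transition, write it to a
    # temporary clip, then concatenate all clips into the final movie.
    for i in range(len(list_prompts) - 1):
        fp_part = f"tmp_part_{i:03d}.mp4"
        lb.set_prompt1(list_prompts[i])
        lb.set_prompt2(list_prompts[i + 1])
        lb.run_transition(
            depth_strength=depth_strength,
            t_compute_max_allowed=t_compute_max_allowed,
            fixed_seeds=list_seeds[i:i + 2],
        )
        lb.write_movie_transition(fp_part, duration_single_trans)
        list_movie_parts.append(fp_part)
    concatenate_movies(fp_movie, list_movie_parts)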
@@ -111,18 +111,6 @@ class LatentBlending():
         self.set_prompt1("")
         self.set_prompt2("")

-        # def init_mode(self):
-        #     r"""
-        #     Sets the operational mode. Currently supported are standard, inpainting and x4 upscaling.
-        #     """
-        #     if isinstance(self.dh.model, LatentUpscaleDiffusion):
-        #         self.mode = 'upscale'
-        #     elif isinstance(self.dh.model, LatentInpaintDiffusion):
-        #         self.dh.image_source = None
-        #         self.dh.mask_image = None
-        #         self.mode = 'inpaint'
-        #     else:
-        #         self.mode = 'standard'

     def set_dimensions(self, width=None, height=None):
         self.dh.set_dimensions(width, height)
@@ -449,6 +437,7 @@ class LatentBlending():
         list_compute_steps = self.num_inference_steps - list_idx_injection
         list_compute_steps *= list_nmb_stems
         t_compute = np.sum(list_compute_steps) * self.dt_per_diff + 0.15 * np.sum(list_nmb_stems)
+        t_compute += 2*self.num_inference_steps*self.dt_per_diff # outer branches
         increase_done = False
         for s_idx in range(len(list_nmb_stems) - 1):
             if list_nmb_stems[s_idx + 1] / list_nmb_stems[s_idx] >= 2:
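To gauge the new term: with num_inference_steps = 40 and an illustrative dt_per_diff of 0.1 s per step, the correction adds 2 × 40 × 0.1 = 8 s to the estimated budget, accounting for the two outer branches (the full-depth renders at either end of the transition) that the stem-based sum previously left out.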