auto config reading

This commit is contained in:
Johannes Stelzer 2023-01-12 10:06:02 +01:00
parent c3cdab663b
commit d5c1c1f428
5 changed files with 40 additions and 28 deletions

View File

@ -32,11 +32,9 @@ from stable_diffusion_holder import StableDiffusionHolder
torch.set_grad_enabled(False)
#%% First let us spawn a stable diffusion holder
device = "cuda"
fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
fp_config = 'configs/v2-inference-v.yaml'
sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
sdh = StableDiffusionHolder(fp_ckpt)
#%% Next let's set up all parameters

View File

@ -34,11 +34,8 @@ from stable_diffusion_holder import StableDiffusionHolder
torch.set_grad_enabled(False)
#%% First let us spawn a stable diffusion holder
device = "cuda"
fp_ckpt= "../stable_diffusion_models/ckpt/512-inpainting-ema.ckpt"
fp_config = '../stablediffusion/configs//stable-diffusion/v2-inpainting-inference.yaml'
sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
sdh = StableDiffusionHolder(fp_ckpt)
#%% Let's first make a source image and mask.
quality = 'medium'

View File

@ -31,10 +31,8 @@ from stable_diffusion_holder import StableDiffusionHolder
torch.set_grad_enabled(False)
#%% First let us spawn a stable diffusion holder
device = "cuda"
fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
fp_config = 'configs/v2-inference-v.yaml'
sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
sdh = StableDiffusionHolder(fp_ckpt)
#%% Let's setup the multi transition

View File

@ -45,17 +45,15 @@ depth_strength_lores = 0.5
device = "cuda"
fp_ckpt_lores = "../stable_diffusion_models/ckpt/v2-1_512-ema-pruned.ckpt"
fp_config_lores = 'configs/v2-inference.yaml'
#%% Define vars for high-resolution pass
fp_ckpt_hires = "../stable_diffusion_models/ckpt/x4-upscaler-ema.ckpt"
fp_config_hires = 'configs/x4-upscaling.yaml'
depth_strength_hires = 0.65
num_inference_steps_hires = 100
nmb_branches_final_hires = 6
#%% Run low-res pass
sdh = StableDiffusionHolder(fp_ckpt_lores, fp_config_lores, device)
sdh = StableDiffusionHolder(fp_ckpt_lores)
lb = LatentBlending(sdh)
lb.set_prompt1(prompt1)
lb.set_prompt2(prompt2)
@ -64,6 +62,6 @@ lb.set_height(height)
lb.run_upscaling_step1(dp_img, depth_strength_lores, num_inference_steps_lores, nmb_branches_final_lores, fixed_seeds)
#%% Run high-res pass
sdh = StableDiffusionHolder(fp_ckpt_hires, fp_config_hires)
sdh = StableDiffusionHolder(fp_ckpt_hires)
lb = LatentBlending(sdh)
lb.run_upscaling_step2(dp_img, depth_strength_hires, num_inference_steps_hires, nmb_branches_final_hires)

View File

@ -163,8 +163,27 @@ class StableDiffusionHolder:
"""
assert os.path.isfile(fp_ckpt), f"Your model checkpoint file does not exist: {fp_ckpt}"
assert os.path.isfile(fp_config), f"Your config file does not exist: {fp_config}"
self.fp_ckpt = fp_ckpt
# Auto init the config?
if fp_config is None:
fn_ckpt = os.path.basename(fp_ckpt)
if 'depth' in fn_ckpt:
fp_config = 'configs/v2-midas-inference.yaml'
elif 'inpain' in fn_ckpt:
fp_config = 'configs/v2-inpainting-inference.yaml'
elif 'upscaler' in fn_ckpt:
fp_config = 'configs/x4-upscaling.yaml'
elif '512' in fn_ckpt:
fp_config = 'configs/v2-inference.yaml'
elif '768'in fn_ckpt:
fp_config = 'configs/v2-inference-v.yaml'
else:
raise ValueError("auto detect of config failed. please specify fp_config manually!")
assert os.path.isfile(fp_config), f"Your config file does not exist: {fp_config}"
config = OmegaConf.load(fp_config)
@ -174,6 +193,17 @@ class StableDiffusionHolder:
self.model = self.model.to(self.device)
self.sampler = DDIMSampler(self.model)
def init_auto_res(self):
    r"""Automatically set the resolution to the one used in training.

    Inspects the checkpoint path: checkpoints trained at 768x768 carry
    '768' in their name; everything else falls back to 512x512.
    Sets ``self.height`` and ``self.width`` in place.
    """
    resolution = 768 if '768' in self.fp_ckpt else 512
    self.height = resolution
    self.width = resolution
def set_negative_prompt(self, negative_prompt):
r"""Set the negative prompt. Currently only one negative prompt is supported
"""
@ -185,17 +215,6 @@ class StableDiffusionHolder:
if len(self.negative_prompt) > 1:
self.negative_prompt = [self.negative_prompt[0]]
def init_auto_res(self):
r"""Automatically set the resolution to the one used in training.

Sets ``self.height`` and ``self.width`` in place; returns nothing.
"""
# The checkpoint path is the only signal used here: v2-1 768 models
# embed '768' in their filename. NOTE(review): this matches anywhere
# in the full path, so a directory named '768' would also trigger the
# high-res branch — verify fp_ckpt naming upstream.
if '768' in self.fp_ckpt:
self.height = 768
self.width = 768
else:
# All other known checkpoints were trained at 512x512.
self.height = 512
self.width = 512
def init_inpainting(
self,
@ -571,10 +590,12 @@ if __name__ == "__main__":
# fp_config = '../stablediffusion/configs//stable-diffusion/v2-inpainting-inference.yaml'
fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
fp_config = 'configs/v2-inference-v.yaml'
# fp_config = 'configs/v2-inference-v.yaml'
self = StableDiffusionHolder(fp_ckpt, fp_config, num_inference_steps)
self = StableDiffusionHolder(fp_ckpt, num_inference_steps=num_inference_steps)
xxx
#%%
self.width = 1536