diff --git a/example1_standard.py b/example1_standard.py
index bc67c3e..83a40aa 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -32,8 +32,8 @@ torch.set_grad_enabled(False)
 
 #%% First let us spawn a stable diffusion holder
 device = "cuda:0"
-fp_ckpt = "../stable_diffusion_models/ckpt/768-v-ema.ckpt"
-fp_config = '../stablediffusion/configs/stable-diffusion/v2-inference-v.yaml'
+fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
+fp_config = 'configs/v2-inference-v.yaml'
 
 sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
 
diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/data/util.py b/ldm/data/util.py
old mode 100755
new mode 100644
diff --git a/ldm/ldm b/ldm/ldm
new file mode 120000
index 0000000..213a179
--- /dev/null
+++ b/ldm/ldm
@@ -0,0 +1 @@
+ldm
\ No newline at end of file
diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py
old mode 100755
new mode 100644
index 63bd3e8..6090212
--- a/ldm/models/diffusion/ddpm.py
+++ b/ldm/models/diffusion/ddpm.py
@@ -17,8 +17,7 @@ from functools import partial
 import itertools
 from tqdm import tqdm
 from torchvision.utils import make_grid
-from pytorch_lightning.utilities.rank_zero import rank_zero_only
-# from pytorch_lightning.utilities.distributed import rank_zero_only
+from pytorch_lightning.utilities.distributed import rank_zero_only
 from omegaconf import ListConfig
 
 from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
@@ -391,7 +390,7 @@ class DDPM(pl.LightningModule):
         elif self.parameterization == "v":
             target = self.get_v(x_start, noise, t)
         else:
-            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
+            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
 
         loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
 
diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
old mode 100755
new mode 100644
diff --git a/ldm/models/diffusion/sampling_util.py b/ldm/models/diffusion/sampling_util.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
old mode 100755
new mode 100644
index d504d93..509cd87
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -16,6 +16,9 @@ try:
 except:
     XFORMERS_IS_AVAILBLE = False
 
+# CrossAttn precision handling
+import os
+_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
 
 def exists(val):
     return val is not None
@@ -167,9 +170,16 @@ class CrossAttention(nn.Module):
 
         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
 
-        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        # force cast to fp32 to avoid overflowing
+        if _ATTN_PRECISION =="fp32":
+            with torch.autocast(enabled=False, device_type = 'cuda'):
+                q, k = q.float(), k.float()
+                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        else:
+            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
         del q, k
-
+
         if exists(mask):
             mask = rearrange(mask, 'b ... -> b (...)')
             max_neg_value = -torch.finfo(sim.dtype).max
diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/diffusionmodules/upscaling.py b/ldm/modules/diffusionmodules/upscaling.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png
old mode 100755
new mode 100644
diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/__init__.py b/ldm/modules/midas/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/api.py b/ldm/modules/midas/api.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/__init__.py b/ldm/modules/midas/midas/__init__.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/base_model.py b/ldm/modules/midas/midas/base_model.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/blocks.py b/ldm/modules/midas/midas/blocks.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/dpt_depth.py b/ldm/modules/midas/midas/dpt_depth.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/midas_net.py b/ldm/modules/midas/midas/midas_net.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/midas_net_custom.py b/ldm/modules/midas/midas/midas_net_custom.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/transforms.py b/ldm/modules/midas/midas/transforms.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/midas/vit.py b/ldm/modules/midas/midas/vit.py
old mode 100755
new mode 100644
diff --git a/ldm/modules/midas/utils.py b/ldm/modules/midas/utils.py
old mode 100755
new mode 100644
diff --git a/ldm/util.py b/ldm/util.py
old mode 100755
new mode 100644
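
Note: the attention.py hunk above reads the ATTN_PRECISION environment variable once, at module import, so it must be set before ldm.modules.attention is loaded; any value other than "fp32" (e.g. "fp16") skips the fp32 cast in CrossAttention. A minimal, illustrative sketch (not part of the diff) of exercising that toggle from Python rather than the shell:

    import os

    # ATTN_PRECISION is consulted at import time of ldm.modules.attention,
    # so set it before importing anything from ldm.
    os.environ["ATTN_PRECISION"] = "fp32"  # "fp32" (default) forces the q@k matmul to fp32

    from ldm.modules.attention import CrossAttention  # module-level _ATTN_PRECISION is now "fp32"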