From 1ba4b578a0a81c718dd94c252440b5f2ca7d1d57 Mon Sep 17 00:00:00 2001
From: Johannes Stelzer <jsdmail@gmail.com>
Date: Tue, 9 Jan 2024 21:11:40 +0100
Subject: [PATCH 1/5] Update README.md

---
 README.md | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 2806407..0f0ce40 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,6 @@
 
 Latent blending enables video transitions with incredible smoothness between prompts, computed within seconds. Powered by [stable diffusion XL](https://stability.ai/stable-diffusion), this method involves specific mixing of intermediate latent representations to create a seamless transition – with users having the option to fully customize the transition directly in high-resolution. The new version also supports SDXL Turbo, allowing to generate transitions faster than they are typically played back!
 
-
 ```python
 pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to("cuda")
 dh = DiffusersHolder(pipe)
@@ -18,6 +17,13 @@ lb.run_transition()
 lb.write_movie_transition('movie_example1.mp4', duration_transition=12)
 
 ```
+
+# Installation
+```commandline
+pip install git+https://github.com/lunarring/latentblending
+```
+
+
 ## Gradio UI
 Coming soon again :)
 
@@ -90,12 +96,6 @@ lb.set_parental_crossfeed(crossfeed_power, crossfeed_range, crossfeed_decay)
 ```
 
 
-# Installation
-#### Packages
-```commandline
-pip install -r requirements.txt
-```
-
 # How does latent blending work?
 ## Method
 ![](animation.gif)

From 6e138c54a2a69c32f5dba4373c90588add035b3e Mon Sep 17 00:00:00 2001
From: Johannes Stelzer <jsdmail@gmail.com>
Date: Tue, 9 Jan 2024 21:12:14 +0100
Subject: [PATCH 2/5] Update README.md

---
 README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 0f0ce40..d4b5bcb 100644
--- a/README.md
+++ b/README.md
@@ -5,16 +5,16 @@ Latent blending enables video transitions with incredible smoothness between pro
 ```python
 pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to("cuda")
 dh = DiffusersHolder(pipe)
-lb = LatentBlending(dh)
-lb.set_prompt1("photo of underwater landscape, fish, und the sea, incredible detail, high resolution")
-lb.set_prompt2("rendering of an alien planet, strange plants, strange creatures, surreal")
-lb.set_negative_prompt("blurry, ugly, pale")
+be = BlendingEngine(dh)
+be.set_prompt1("photo of underwater landscape, fish, und the sea, incredible detail, high resolution")
+be.set_prompt2("rendering of an alien planet, strange plants, strange creatures, surreal")
+be.set_negative_prompt("blurry, ugly, pale")
 
 # Run latent blending
-lb.run_transition()
+be.run_transition()
 
 # Save movie
-lb.write_movie_transition('movie_example1.mp4', duration_transition=12)
+be.write_movie_transition('movie_example1.mp4', duration_transition=12)
 
 ```
 

From 4501d800442b4729946fa53018f1dd49a00dcf5e Mon Sep 17 00:00:00 2001
From: Johannes Stelzer <jsdmail@gmail.com>
Date: Tue, 9 Jan 2024 21:16:10 +0100
Subject: [PATCH 3/5] Update README.md

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index d4b5bcb..1679b54 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,10 @@
 Latent blending enables video transitions with incredible smoothness between prompts, computed within seconds. Powered by [stable diffusion XL](https://stability.ai/stable-diffusion), this method involves specific mixing of intermediate latent representations to create a seamless transition – with users having the option to fully customize the transition directly in high-resolution. The new version also supports SDXL Turbo, allowing to generate transitions faster than they are typically played back!
 
 ```python
+from diffusers import AutoPipelineForText2Image
+from latentblending.blending_engine import BlendingEngine
+from latentblending.diffusers_holder import DiffusersHolder
+
 pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16").to("cuda")
 dh = DiffusersHolder(pipe)
 be = BlendingEngine(dh)

From b83d3ee0a0707ab555bf91d749e91a3716aa9822 Mon Sep 17 00:00:00 2001
From: Johannes Stelzer <jsdmail@gmail.com>
Date: Tue, 9 Jan 2024 21:21:23 +0100
Subject: [PATCH 4/5] Skip CUDA placement of LPIPS model on Darwin (macOS)

---
 latentblending/blending_engine.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/latentblending/blending_engine.py b/latentblending/blending_engine.py
index 034b173..8514fe8 100644
--- a/latentblending/blending_engine.py
+++ b/latentblending/blending_engine.py
@@ -8,7 +8,9 @@ from PIL import Image
 from latentblending.movie_util import MovieSaver
 from typing import List, Optional
 import lpips
-from latentblending.utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
+import platform
+from latentblending.utils import interpolate_spherical, interpolate_linear,
+ add_frames_linear_interp, yml_load, yml_save
 warnings.filterwarnings('ignore')
 torch.backends.cudnn.benchmark = False
 torch.set_grad_enabled(False)
@@ -64,7 +66,10 @@ class BlendingEngine():
         self.multi_transition_img_first = None
         self.multi_transition_img_last = None
         self.dt_unet_step = 0
-        self.lpips = lpips.LPIPS(net='alex').cuda(self.device)
+        if platform.system() == "Darwin":
+            self.lpips = lpips.LPIPS(net='alex')
+        else:
+            self.lpips = lpips.LPIPS(net='alex').cuda(self.device)
 
         self.set_prompt1("")
         self.set_prompt2("")

From f5965154ba0f7cfef8146a2d1de70ecd968a7a52 Mon Sep 17 00:00:00 2001
From: Johannes Stelzer <jsdmail@gmail.com>
Date: Tue, 9 Jan 2024 21:30:37 +0100
Subject: [PATCH 5/5] Fix invalid line-split import (trailing comma) from previous commit

---
 latentblending/blending_engine.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/latentblending/blending_engine.py b/latentblending/blending_engine.py
index 8514fe8..b06277d 100644
--- a/latentblending/blending_engine.py
+++ b/latentblending/blending_engine.py
@@ -9,8 +9,7 @@ from latentblending.movie_util import MovieSaver
 from typing import List, Optional
 import lpips
 import platform
-from latentblending.utils import interpolate_spherical, interpolate_linear,
- add_frames_linear_interp, yml_load, yml_save
+from latentblending.utils import interpolate_spherical, interpolate_linear, add_frames_linear_interp, yml_load, yml_save
 warnings.filterwarnings('ignore')
 torch.backends.cudnn.benchmark = False
 torch.set_grad_enabled(False)