diff --git a/example1_standard.py b/example1_standard.py
index 9089e54..8f0928c 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -31,10 +31,10 @@ from latent_blending import LatentBlending, add_frames_linear_interp
 from stable_diffusion_holder import StableDiffusionHolder
 torch.set_grad_enabled(False)
 
+
 #%% First let us spawn a stable diffusion holder
 fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
 sdh = StableDiffusionHolder(fp_ckpt)
-
     
 #%% Next let's set up all parameters
 depth_strength = 0.65 # Specifies how deep (in terms of diffusion iterations) the first branching happens
@@ -44,8 +44,8 @@ fixed_seeds = [69731932, 504430820]
 prompt1 = "photo of a beautiful cherry forest covered in white flowers, ambient light, very detailed, magic"
 prompt2 = "photo of an golden statue with a funny hat, surrounded by ferns and vines, grainy analog photograph, mystical ambience, incredible detail"
 
+fp_movie = 'movie_example1.mp4'
 duration_transition = 12 # In seconds
-fps = 30
 
 # Spawn latent blending
 lb = LatentBlending(sdh)
@@ -53,22 +53,11 @@ lb.set_prompt1(prompt1)
 lb.set_prompt2(prompt2)
 
 # Run latent blending
-imgs_transition = lb.run_transition(
+lb.run_transition(
     depth_strength = depth_strength,
     t_compute_max_allowed = t_compute_max_allowed,
     fixed_seeds = fixed_seeds
     )
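+# The computed transition frames are stored on the instance as lb.tree_final_imgs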
 
-# Let's get more cheap frames via linear interpolation (duration_transition*fps frames)
-imgs_transition_ext = add_frames_linear_interp(imgs_transition, duration_transition, fps)
-
-# Save as MP4
-fp_movie = "movie_example1.mp4"
-if os.path.isfile(fp_movie):
-    os.remove(fp_movie)
-ms = MovieSaver(fp_movie, fps=fps, shape_hw=[sdh.height, sdh.width])
-for img in tqdm(imgs_transition_ext):
-    ms.write_frame(img)
-ms.finalize()
-
-
+# Save movie
+lb.write_movie_transition(fp_movie, duration_transition)
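+
+# Optionally also save the individual transition images and state
+# (sketch; the folder name is hypothetical):
+# lb.write_imgs_transition("imgs_example1/")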
diff --git a/example2_multitrans.py b/example2_multitrans.py
new file mode 100644
index 0000000..625d93e
--- /dev/null
+++ b/example2_multitrans.py
@@ -0,0 +1,81 @@
+# Copyright 2022 Lunar Ring. All rights reserved.
+# Written by Johannes Stelzer, email stelzer@lunar-ring.ai twitter @j_stelzer
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import torch
+torch.backends.cudnn.benchmark = False
+import numpy as np
+import warnings
+warnings.filterwarnings('ignore')
+from tqdm.auto import tqdm
+from PIL import Image
+from movie_util import MovieSaver, concatenate_movies
+from typing import Callable, List, Optional, Union
+from latent_blending import LatentBlending, add_frames_linear_interp
+from stable_diffusion_holder import StableDiffusionHolder
+torch.set_grad_enabled(False)
+
+#%% First let us spawn a stable diffusion holder
+fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_512-ema-pruned.ckpt"
+# fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
+sdh = StableDiffusionHolder(fp_ckpt)
+
+#%% Let's set up the multi transition
+fps = 30
+duration_single_trans = 6 # In seconds, duration of each transition prompt[i] -> prompt[i+1]
+depth_strength = 0.55 # Specifies how deep (in terms of diffusion iterations) the first branching happens
+
+# Specify a list of prompts below
+list_prompts = []
+list_prompts.append("surrealistic statue made of glitter and dirt, standing in a lake, atmospheric light, strange glow")
+list_prompts.append("statue of a mix between a tree and human, made of marble, incredibly detailed")
+list_prompts.append("weird statue of a frog monkey, many colors, standing next to the ruins of an ancient city")
+list_prompts.append("statue of a spider that looked like a human")
+list_prompts.append("statue of a bird that looked like a scorpion")
+list_prompts.append("statue of an ancient cybernetic messenger annoucing good news, golden, futuristic")
+
+# You can optionally specify the seeds
+list_seeds = [954375479, 332539350, 956051013, 408831845, 250009012, 675588737]
+t_compute_max_allowed = 12 # per segment
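+# (6 prompts -> 5 transitions, so roughly 5 * 12 s = 60 s of compute in total)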
+fp_movie = 'movie_example2.mp4'
+lb = LatentBlending(sdh)
+
+list_movie_parts = []
+for i in range(len(list_prompts)-1):
+    prompt1 = list_prompts[i]
+    prompt2 = list_prompts[i+1]
+    lb.set_prompt1(prompt1)
+    lb.set_prompt2(prompt2)
+    fp_movie_part = f"tmp_part_{str(i).zfill(3)}.mp4"
+    
+    fixed_seeds = list_seeds[i:i+2]
+    
+    # Run latent blending
+    lb.run_transition(
+        depth_strength = depth_strength,
+        t_compute_max_allowed = t_compute_max_allowed,
+        fixed_seeds = fixed_seeds
+        )
+    
+    # Save movie
+    lb.write_movie_transition(fp_movie_part, duration_single_trans, fps=fps)
+    list_movie_parts.append(fp_movie_part)
+
+# Finally, concatenate the parts into the full movie
+concatenate_movies(fp_movie, list_movie_parts)
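+
+# Optionally delete the temporary per-segment files (assumes the final
+# movie was written successfully):
+for fp_movie_part in list_movie_parts:
+    os.remove(fp_movie_part)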
diff --git a/latent_blending.py b/latent_blending.py
index ca32f15..ec070bb 100644
--- a/latent_blending.py
+++ b/latent_blending.py
@@ -727,64 +727,6 @@ class LatentBlending():
         return torch.randn((1, C, H, W), generator=generator, device=self.sdh.device)
     
 
-    def run_multi_transition(
-            self,
-            fp_movie: str, 
-            list_prompts: List[str],
-            list_seeds: List[int] = None,
-            fps: float = 24,
-            duration_single_trans: float = 15,
-        ):
-        r"""
-        Runs multiple transitions and stitches them together. You can supply the seeds for each prompt.
-        Args:
-            fp_movie: file path for movie saving
-            list_prompts: List[float]:
-                list of the prompts. There will be a transition starting from the first to the last.
-            list_seeds: List[int] = None: 
-                Random Seeds for each prompt.
-            fps: float:
-                frames per second
-            duration_single_trans: float:
-                The duration of a single transition prompt[i] -> prompt[i+1].
-                The duration of your movie will be duration_single_trans * len(list_prompts)
-            
-        """
-        
-        if list_seeds is None:
-            list_seeds = list(np.random.randint(0, 10e10, len(list_prompts)))
-        assert len(list_prompts) == len(list_seeds), "Supply the same number of prompts and seeds"
-        
-        ms = MovieSaver(fp_movie, fps=fps)
-        
-        for i in range(len(list_prompts)-1):
-            print(f"Starting movie segment {i+1}/{len(list_prompts)-1}")
-            
-            if i==0:
-                self.set_prompt1(list_prompts[i])
-                self.set_prompt2(list_prompts[i+1])
-                recycle_img1 = False
-            else:
-                self.swap_forward()
-                self.set_prompt2(list_prompts[i+1])
-                recycle_img1 = True    
-            
-            local_seeds = [list_seeds[i], list_seeds[i+1]]
-            list_imgs = self.run_transition(recycle_img1=recycle_img1, fixed_seeds=local_seeds)
-            list_imgs_interp = add_frames_linear_interp(list_imgs, fps, duration_single_trans)
-            
-            if i==0:
-                self.multi_transition_img_first = list_imgs[0]
-            
-            # Save movie frame
-            for img in list_imgs_interp:
-                ms.write_frame(img)
-                
-        ms.finalize()
-        self.multi_transition_img_last = list_imgs[-1]
-        
-        print("run_multi_transition: All completed.")
-
 
     @torch.no_grad()
     def run_diffusion(
@@ -1018,6 +960,10 @@ class LatentBlending():
     def write_imgs_transition(self, dp_img):
         r"""
         Writes the transition images into the folder dp_img.
+        Requires run_transition to be completed.
+        Args:
+            dp_img: str
+                Directory into which the transition images, yaml file and latents are written.
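+        Example:
+            # Minimal sketch; assumes run_transition() was already called on lb
+            # and "transition_imgs/" is a hypothetical output folder.
+            lb.write_imgs_transition("transition_imgs/")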
         """
         imgs_transition = self.tree_final_imgs
         os.makedirs(dp_img, exist_ok=True)
@@ -1028,6 +974,32 @@ class LatentBlending():
         fp_yml = os.path.join(dp_img, "lowres.yaml") 
         self.save_statedict(fp_yml)
         
+    def write_movie_transition(self, fp_movie, duration_transition, fps=30):
+        r"""
+        Writes the transition movie to fp_movie, using the given duration and fps.
+        The missing frames are linearly interpolated.
+        Args:
+            fp_movie: str
+                file path of the final movie.
+            duration_transition: float
+                duration of the movie in seconds
+            fps: int
+                fps of the movie
+                
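+        Example:
+            # Minimal sketch; assumes run_transition() has already produced
+            # the transition images on this instance.
+            lb.write_movie_transition("movie.mp4", duration_transition=12, fps=30)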
+        """
+        
+        # Let's get more cheap frames via linear interpolation (duration_transition*fps frames)
+        imgs_transition_ext = add_frames_linear_interp(self.tree_final_imgs, duration_transition, fps)
+
+        # Save as MP4
+        if os.path.isfile(fp_movie):
+            os.remove(fp_movie)
+        ms = MovieSaver(fp_movie, fps=fps, shape_hw=[self.sdh.height, self.sdh.width])
+        for img in tqdm(imgs_transition_ext):
+            ms.write_frame(img)
+        ms.finalize()
         
     def save_statedict(self, fp_yml):
         # Dump everything relevant into yaml