diff --git a/example1_standard.py b/example1_standard.py
index 99854a3..6856787 100644
--- a/example1_standard.py
+++ b/example1_standard.py
@@ -70,8 +70,6 @@ prompt2 = "photo of an eerie statue surrounded by ferns and vines, analog photog
 lb.set_prompt1(prompt1)
 lb.set_prompt2(prompt2)
 
-
-
 imgs_transition = lb.run_transition(list_nmb_branches, list_injection_strength, fixed_seeds=fixed_seeds)
 
 # let's get more cheap frames via linear interpolation
diff --git a/latent_blending.py b/latent_blending.py
index adca9b7..c34b599 100644
--- a/latent_blending.py
+++ b/latent_blending.py
@@ -267,6 +267,7 @@ class LatentBlending():
             self.tree_fracts = []
             self.tree_status = []
             self.tree_final_imgs = [None]*list_nmb_branches[-1]
+            self.tree_final_imgs_timing = [0]*list_nmb_branches[-1]
             
             nmb_blocks_time = len(list_injection_idx_ext)-1
             for t_block in range(nmb_blocks_time):
@@ -321,6 +322,7 @@ class LatentBlending():
             list_compute.extend(list_local_stem[::-1])        
             
         # Diffusion computations start here
+        time_start = time.time()
         for t_block, idx_branch in tqdm(list_compute, desc="computing transition"):
             # print(f"computing t_block {t_block} idx_branch {idx_branch}")
             idx_stop = list_injection_idx_ext[t_block+1]
@@ -352,6 +354,7 @@ class LatentBlending():
             # Convert latents to image directly for the last t_block
             if t_block == nmb_blocks_time-1:
                 self.tree_final_imgs[idx_branch] = self.latent2image(list_latents[-1])
+                self.tree_final_imgs_timing[idx_branch] = time.time() - time_start
             
         return self.tree_final_imgs
                 
@@ -931,6 +934,24 @@ def get_time(resolution=None):
 #%% le main
 if __name__ == "__main__":
 
+    #%% TMP SURGERY
+    num_inference_steps = 20 # Number of diffusion iterations
+    list_nmb_branches = [2, 3, 10, 24] # Branching structure: how many branches
+    list_injection_strength = [0.0, 0.6, 0.8, 0.9] # Branching structure: how deep is the blending
+    width = 512 
+    height = 512
+    guidance_scale = 5
+    fixed_seeds = [993621550, 280335986]
+        
+    lb = LatentBlending(pipe, device, height, width, num_inference_steps, guidance_scale)
+    prompt1 = "photo of a beautiful forest covered in white flowers, ambient light, very detailed, magic"
+    prompt2 = "photo of an eerie statue surrounded by ferns and vines, analog photograph kodak portra, mystical ambience, incredible detail"
+    lb.set_prompt1(prompt1)
+    lb.set_prompt2(prompt2)
+    
+    imgs_transition = lb.run_transition(list_nmb_branches, list_injection_strength, fixed_seeds=fixed_seeds)    
+    
+
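+    # Hedged sketch (not in the original script): inspect the per-branch timings that
+    # run_transition() records in tree_final_imgs_timing. Each entry is the time in seconds,
+    # measured from the start of the diffusion loop, until that final image was ready.
+    for idx_branch, t_elapsed in enumerate(lb.tree_final_imgs_timing):
+        print(f"final image {idx_branch} ready after {t_elapsed:.1f}s")
+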
     #%% LOOP
     list_prompts = []
     list_prompts.append("paiting of a medieval city")
diff --git a/movie_util.py b/movie_util.py
new file mode 100644
index 0000000..08ccf32
--- /dev/null
+++ b/movie_util.py
@@ -0,0 +1,218 @@
+# Copyright 2022 Lunar Ring. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import os
+import numpy as np
+from tqdm import tqdm
+import cv2
+from typing import Callable, List, Optional, Union
+import ffmpeg # pip install ffmpeg-python. If you get a broken-pipe error: conda update ffmpeg
+
+#%%
+            
+class MovieSaver():
+    def __init__(
+            self, 
+            fp_out: str,  
+            fps: int = 24, 
+            crf: int = 24,
+            codec: str = 'libx264',
+            preset: str = 'fast',
+            pix_fmt: str = 'yuv420p', 
+            silent_ffmpeg: bool = True
+        ):
+        r"""
+        Initializes the movie saver class - a human-friendly ffmpeg wrapper.
+        After you init the class, you can dump numpy arrays x into moviesaver.write_frame(x).
+        Don't forget to finalize the movie file with moviesaver.finalize().
+        Args:
+            fp_out: str
+                Output file name. If it already exists, it will be deleted.
+            fps: int
+                Frames per second.
+            crf: int
+                ffmpeg doc: the range of the CRF scale is 0–51, where 0 is lossless
+                (for 8 bit only, for 10 bit use -qp 0), 23 is the default, and 51 is worst quality possible. 
+                A lower value generally leads to higher quality, and a subjectively sane range is 17–28. 
+                Consider 17 or 18 to be visually lossless or nearly so; 
+                it should look the same or nearly the same as the input but it isn't technically lossless. 
+                The range is exponential, so increasing the CRF value +6 results in 
+                roughly half the bitrate / file size, while -6 leads to roughly twice the bitrate.  
+            codec: str
+                Video codec ffmpeg uses for encoding, e.g. 'libx264'.
+            preset: str
+                Choose between ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
+                ffmpeg doc: A preset is a collection of options that will provide a certain encoding speed 
+                to compression ratio. A slower preset will provide better compression 
+                (compression is quality per filesize). 
+                This means that, for example, if you target a certain file size or constant bit rate, 
+                you will achieve better quality with a slower preset. Similarly, for constant quality encoding,
+                you will simply save bitrate by choosing a slower preset. 
+            pix_fmt: str
+                Pixel format. Run 'ffmpeg -pix_fmts' in your shell to see all options.
+            silent_ffmpeg: bool
+                Suppress the output from ffmpeg.
+        """
+        
+        self.fp_out = fp_out
+        self.fps = fps
+        self.crf = crf
+        self.pix_fmt = pix_fmt
+        self.codec = codec
+        self.preset = preset
+        self.silent_ffmpeg = silent_ffmpeg
+        
+        if os.path.isfile(fp_out):
+            os.remove(fp_out)
+        
+        self.init_done = False
+        self.nmb_frames = 0
+        self.shape_hw = [-1, -1]
+        
+        print(f"MovieSaver initialized. fps={fps} crf={crf} pix_fmt={pix_fmt} codec={codec} preset={preset}")
+        
+        
+    def initialize(self):
+        args = (
+            ffmpeg
+            .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(self.shape_hw[1], self.shape_hw[0]), framerate=self.fps)
+            .output(self.fp_out, crf=self.crf, pix_fmt=self.pix_fmt, c=self.codec, preset=self.preset)
+            .overwrite_output()
+            .compile()
+        )
+        if self.silent_ffmpeg:
+            self.ffmpg_process = subprocess.Popen(args, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
+        else:
+            self.ffmpg_process = subprocess.Popen(args, stdin=subprocess.PIPE)
+        self.init_done = True
+        print(f"First frame initialization done. Movie shape: {self.shape_hw}")
+    
+        
+    def write_frame(self, out_frame: np.ndarray):
+        r"""
+        Function to dump a numpy array as frame of a movie.
+        Args:
+            out_frame: np.ndarray
+                Numpy array, in np.uint8 format. Convert with x.astype(np.uint8).
+                Dim 0: y
+                Dim 1: x
+                Dim 2: RGB
+        """
+        
+        assert out_frame.dtype == np.uint8, "Convert to np.uint8 first"
+        assert len(out_frame.shape) == 3, "out_frame needs to be three-dimensional (Y, X, C)"
+        assert out_frame.shape[2] == 3, f"need three color channels, but you provided {out_frame.shape[2]}."
+        
+        if not self.init_done:
+            self.shape_hw = out_frame.shape
+            self.initialize()
+            
+        assert self.shape_hw == out_frame.shape, f"You cannot change the image size after initialization. Initialized with {self.shape_hw}, got {out_frame.shape}"
+
+        # write frame        
+        self.ffmpg_process.stdin.write(
+            out_frame
+            .astype(np.uint8)
+            .tobytes()
+        )
+
+        self.nmb_frames += 1
+    
+    
+    def finalize(self):
+        r"""
+        Call this function to finalize the movie. If you forget to call it, your movie will be garbage.
+        """
+        self.ffmpg_process.stdin.close()
+        self.ffmpg_process.wait()
+        duration = int(self.nmb_frames / self.fps)
+        print(f"Movie saved, {duration}s playtime, watch it here: {self.fp_out}")
+
+
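+# Minimal usage sketch for MovieSaver (kept as a comment so nothing runs at import time;
+# the __main__ block below has a runnable demo). `frames` is an assumed iterable of
+# uint8 RGB numpy arrays of identical shape:
+#     ms = MovieSaver("/tmp/out.mp4", fps=24)
+#     for img in frames:
+#         ms.write_frame(img)
+#     ms.finalize()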
+
+def concatenate_movies(fp_final: str, list_fp_movies: List[str]):
+    r"""
+    Concatenate multiple movie segments into one long movie, using ffmpeg.
+
+    Parameters
+    ----------
+    fp_final : str
+        Full path of the final movie file. Should end with .mp4
+    list_fp_movies : list[str]
+        List of full paths of movie segments. 
+    """
+    assert fp_final.endswith(".mp4"), "fp_final should end with .mp4"
+    for fp in list_fp_movies:
+        assert os.path.isfile(fp), f"Input movie does not exist: {fp}"
+        assert os.path.getsize(fp) > 100, f"Input movie seems empty: {fp}"
+    
+    if os.path.isfile(fp_final):
+        os.remove(fp_final)
+
+    # make a list for ffmpeg
+    list_concat = []
+    for fp_part in list_fp_movies:
+        list_concat.append(f"""file '{fp_part}'""")
+    
+    # save this list
+    fp_list = fp_final[:-3] + "txt"
+    with open(fp_list, "w") as fa:
+        for item in list_concat:
+            fa.write("%s\n" % item)
+            
+    dp_movie = os.path.split(fp_final)[0]
+    cmd = f'ffmpeg -f concat -safe 0 -i {fp_list} -c copy {fp_final}'
+    subprocess.call(cmd, shell=True, cwd=dp_movie)
+    os.remove(fp_list)
+    print(f"concatenate_movies: success! Watch here: {fp_final}")
+
+            
+class MovieReader():
+    r"""
+    Class to read in a movie.
+    """
+    def __init__(self, fp_movie):
+        self.video_player_object = cv2.VideoCapture(fp_movie)
+        self.nmb_frames = int(self.video_player_object.get(cv2.CAP_PROP_FRAME_COUNT))
+        self.fps_movie = int(self.video_player_object.get(cv2.CAP_PROP_FPS))
+        self.shape = [100,100,3]
+        self.shape_is_set = False
+    
+    def get_next_frame(self):
+        success, image = self.video_player_object.read()
+        if success:
+            if not self.shape_is_set:
+                self.shape_is_set = True
+                self.shape = image.shape
+            return image
+        else:
+            return np.zeros(self.shape, dtype=np.uint8)  # end of movie: return a black frame
+
+#%%
+if __name__ == "__main__": 
+    fps=2
+    list_fp_movies = []
+    for k in range(4):
+        fp_movie = f"/tmp/my_random_movie_{k}.mp4"
+        list_fp_movies.append(fp_movie)
+        ms = MovieSaver(fp_movie, fps=fps)
+        for fn in tqdm(range(30)):
+            img = (np.random.rand(512, 1024, 3)*255).astype(np.uint8)
+            ms.write_frame(img)
+        ms.finalize()
+    
+    fp_final = "/tmp/my_concatenated_movie.mp4"
+    concatenate_movies(fp_final, list_fp_movies)
+
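+    # Hedged sketch (not part of the original demo): read the concatenated movie back
+    # with MovieReader and sanity-check the frame count.
+    mr = MovieReader(fp_final)
+    print(f"frames: {mr.nmb_frames}, fps: {mr.fps_movie}")
+    frame = mr.get_next_frame()  # numpy array in BGR channel order, as delivered by OpenCV
+    print(f"first frame shape: {frame.shape}")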