# Copyright 2022 Lunar Ring. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import torch
torch.backends.cudnn.benchmark = False
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from tqdm.auto import tqdm
from PIL import Image
# import matplotlib.pyplot as plt
from movie_util import MovieSaver
from typing import Callable, List, Optional, Union
from latent_blending import LatentBlending, add_frames_linear_interp
from stable_diffusion_holder import StableDiffusionHolder

torch.set_grad_enabled(False)

#%% First let us spawn a stable diffusion holder
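# The checkpoint and config paths below assume a local copy of the v2-1_768-ema-pruned
# weights and the matching v2-inference-v.yaml config; adjust both to your setup.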
device = "cuda:0"
fp_ckpt = "../stable_diffusion_models/ckpt/v2-1_768-ema-pruned.ckpt"
fp_config = 'configs/v2-inference-v.yaml'

sdh = StableDiffusionHolder(fp_ckpt, fp_config, device)
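# sdh wraps the loaded model; LatentBlending below drives it to render the transition.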
#%% Next let's set up all parameters
quality = 'medium'
depth_strength = 0.35  # Specifies how deep (in terms of diffusion iterations) the first branching happens
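# Roughly: smaller values branch earlier in the diffusion trajectory (stronger visual
# differences between branches), while values close to 1 behave more like a simple blend.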
fixed_seeds = [69731932, 504430820]
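# Pinning both seeds keeps the two endpoint images (and thus the whole transition) reproducible.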

# prompt1 = "A person in an open field of grass watching a television, red colors dominate the scene, eerie light, dark clouds on the horizon, artistically rendered by Richter"
prompt1 = "A person in a bar, people around him, a glass of beer, artistically rendered in the style of Hopper"
prompt2 = "A person with a sad expression, looking at a painting of an older man, all in the style of Lucien Freud"
duration_transition = 12 # In seconds
fps = 30
# Spawn latent blending
lb = LatentBlending(sdh)
lb.load_branching_profile(quality=quality, depth_strength=depth_strength)
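# The quality preset and depth_strength together determine the branching schedule used for the transition.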
lb.set_prompt1(prompt1)
lb.set_prompt2(prompt2)

# Run latent blending
imgs_transition = lb.run_transition(fixed_seeds=fixed_seeds)
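# imgs_transition holds the rendered keyframes, running from the prompt1 image to the prompt2 image.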

# Generate additional frames cheaply via linear interpolation (duration_transition*fps frames in total)
imgs_transition_ext = add_frames_linear_interp(imgs_transition, duration_transition, fps)
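# The extra frames are plain cross-fades between neighboring keyframes, which is far cheaper
# than rendering every frame with the diffusion model.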

# Save as MP4
fp_movie = "movie_example1.mp4"
if os.path.isfile(fp_movie):
    os.remove(fp_movie)

ms = MovieSaver(fp_movie, fps=fps, shape_hw=[sdh.height, sdh.width])
for img in tqdm(imgs_transition_ext):
    ms.write_frame(img)
ms.finalize()
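# finalize() completes the file; the finished clip is written to movie_example1.mp4 in the working directory.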