Mirror of https://github.com/easydiffusion/easydiffusion.git — synced 2024-11-28 11:13:13 +01:00
Installer files for v2.5

commit d07279c266
parent c10411c506
.gitignore (vendored, 2 lines changed)
@@ -1,5 +1,3 @@
 __pycache__
-installer
-installer.tar
 dist
 .idea/*
installer/bin/micromamba_win64.exe (new binary file)
Binary file not shown.
installer/bootstrap/bootstrap.bat (new file, 30 lines)
@@ -0,0 +1,30 @@
@echo off

set MAMBA_ROOT_PREFIX=%SD_BASE_DIR%\env\mamba
set INSTALL_ENV_DIR=%SD_BASE_DIR%\env\installer_env
set INSTALLER_YAML_FILE=%SD_BASE_DIR%\installer\yaml\installer-environment.yaml
set MICROMAMBA_BINARY_FILE=%SD_BASE_DIR%\installer\bin\micromamba_win64.exe

@rem initialize the mamba dir
if not exist "%MAMBA_ROOT_PREFIX%" mkdir "%MAMBA_ROOT_PREFIX%"

copy "%MICROMAMBA_BINARY_FILE%" "%MAMBA_ROOT_PREFIX%\micromamba.exe"

@rem test the mamba binary
echo Micromamba version:
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version

@rem run the shell hook
if not exist "%MAMBA_ROOT_PREFIX%\Scripts" (
    call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook --log-level 4 -s cmd.exe
)

call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"

@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
    call micromamba create -y --prefix "%INSTALL_ENV_DIR%" -f "%INSTALLER_YAML_FILE%"
)

@rem activate
call micromamba activate "%INSTALL_ENV_DIR%"
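For orientation, a small Python sketch (illustration only, not part of this commit) of the layout that bootstrap.bat derives from %SD_BASE_DIR%; the base directory shown is a hypothetical example, not a required path.

# Illustration only: the locations bootstrap.bat computes from %SD_BASE_DIR%.
from pathlib import Path

SD_BASE_DIR = Path(r"C:\stable-diffusion-ui")  # example base dir only

paths = {
    "MAMBA_ROOT_PREFIX": SD_BASE_DIR / "env" / "mamba",
    "INSTALL_ENV_DIR": SD_BASE_DIR / "env" / "installer_env",
    "INSTALLER_YAML_FILE": SD_BASE_DIR / "installer" / "yaml" / "installer-environment.yaml",
    "MICROMAMBA_BINARY_FILE": SD_BASE_DIR / "installer" / "bin" / "micromamba_win64.exe",
}

for name, p in paths.items():
    print(f"{name} = {p}")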
installer/bootstrap/check-install-dir.bat (new file, 19 lines)
@@ -0,0 +1,19 @@
@echo off

set suggested_dir=%~d0\stable-diffusion-ui

echo "Please install Stable Diffusion UI at the root of your drive. This avoids problems with path length limits in Windows." & echo.
set /p answer="Press Enter to install at %suggested_dir%, or type 'c' (without quotes) to install at the current location (press enter or type 'c'): "

if /i "%answer:~,1%" NEQ "c" (
    if exist "%suggested_dir%" (
        echo. & echo "Sorry, %suggested_dir% already exists! Cannot overwrite that folder!" & echo.
        pause
        exit
    )

    xcopy "%SD_BASE_DIR%" "%suggested_dir%" /s /i /Y /Q
    echo Please run the %RUN_CMD_FILENAME% file inside %suggested_dir% . Do not use this folder anymore > "%SD_BASE_DIR%/READ_ME - DO_NOT_USE_THIS_FOLDER.txt"

    cd %suggested_dir%
)
installer/installer/__init__.py (new empty file, 0 lines)
installer/installer/app.py (new file, 46 lines)
@@ -0,0 +1,46 @@
import os
import json

# config
PROJECT_REPO_URL = 'https://github.com/cmdr2/stable-diffusion-ui.git'
DEFAULT_UPDATE_BRANCH = 'main'

PROJECT_REPO_DIR_NAME = 'project_repo'
STABLE_DIFFUSION_REPO_DIR_NAME = 'stable-diffusion'

LOG_FILE_NAME = 'run.log'
CONFIG_FILE_NAME = 'config.json'


# top-level folders
ENV_DIR_NAME = 'env'

INSTALLER_DIR_NAME = 'installer'
UI_DIR_NAME = 'ui'
ENGINE_DIR_NAME = 'engine'


# env
SD_BASE_DIR = os.environ['SD_BASE_DIR']

def get_config():
    config_path = os.path.join(SD_BASE_DIR, CONFIG_FILE_NAME)
    if not os.path.exists(config_path):
        return {}

    with open(config_path, "r") as f:
        return json.load(f)


# references
env_dir_path = os.path.join(SD_BASE_DIR, ENV_DIR_NAME)

installer_dir_path = os.path.join(SD_BASE_DIR, INSTALLER_DIR_NAME)
ui_dir_path = os.path.join(SD_BASE_DIR, UI_DIR_NAME)
engine_dir_path = os.path.join(SD_BASE_DIR, ENGINE_DIR_NAME)

project_repo_dir_path = os.path.join(env_dir_path, PROJECT_REPO_DIR_NAME)
stable_diffusion_repo_dir_path = os.path.join(env_dir_path, STABLE_DIFFUSION_REPO_DIR_NAME)

config = get_config()
log_file = open(LOG_FILE_NAME, 'wb')
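For reference, a minimal sketch (illustration only, not part of this commit) of the optional config.json that get_config() reads from SD_BASE_DIR. The two keys shown are the ones consumed by the tasks in this commit; the values are only examples, and get_config() returns an empty dict when the file is absent.

# Illustrative only: what an optional config.json for this installer could hold.
import json

example_config = {
    "update_branch": "main",      # read by fetch_project_repo (falls back to DEFAULT_UPDATE_BRANCH)
    "is_developer_mode": False,   # read by apply_project_update (falls back to False)
}

print(json.dumps(example_config, indent=4))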
installer/installer/helpers.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import subprocess
import sys

from installer import app

def run(cmd, run_in_folder=None):
    if run_in_folder is not None:
        cmd = f'cd "{run_in_folder}" && {cmd}'

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    for c in iter(lambda: p.stdout.read(1), b""):
        sys.stdout.buffer.write(c)
        sys.stdout.flush()

        if app.log_file is not None:
            app.log_file.write(c)
            app.log_file.flush()

    p.wait()

    return p.returncode == 0

def log(msg):
    print(msg)

    app.log_file.write(bytes(msg + "\n", 'utf-8'))
    app.log_file.flush()

def show_install_error(error_msg):
    log(f'''

Error: {error_msg}. Sorry about that, please try to:
1. Run this installer again.
2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/blob/main/Troubleshooting.md
3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
Thanks!''')
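A short usage sketch of these helpers (illustration only, not part of this commit). It assumes SD_BASE_DIR is set in the environment, since importing installer.app requires it, and that the installer package is on sys.path; the git command is just an example.

# Illustration only: how the tasks below drive the helpers. run() streams the
# command's output to stdout and to app.log_file, then returns True on exit code 0.
from installer import app, helpers

helpers.log("Checking that git is available..")

if helpers.run("git --version", run_in_folder=app.SD_BASE_DIR):
    helpers.log("git works")
else:
    helpers.show_install_error(error_msg="Could not run git")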
installer/installer/main.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from installer.tasks import (
    fetch_project_repo,
    apply_project_update,
)

tasks = [
    fetch_project_repo,
    apply_project_update,
]

def run_tasks():
    for task in tasks:
        task.run()

run_tasks()
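main.py treats each entry in tasks as a module exposing a no-argument run() function; a hypothetical extra task (example_task is not part of this commit) would only need to follow that contract:

# installer/installer/tasks/example_task.py  (hypothetical, for illustration)
from installer import helpers

def run():
    helpers.log("Running an example installation step..")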
installer/installer/tasks/__init__.py (new empty file, 0 lines)
installer/installer/tasks/apply_project_update.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from os import path
import shutil

from installer import app, helpers

def run():
    is_developer_mode = app.config.get('is_developer_mode', False)
    if not is_developer_mode:
        # @xcopy sd-ui-files\ui ui /s /i /Y
        # @copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
        # @copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y

        installer_src_path = path.join(app.project_repo_dir_path, 'installer')
        ui_src_path = path.join(app.project_repo_dir_path, 'ui')
        engine_src_path = path.join(app.project_repo_dir_path, 'engine')

        shutil.copytree(ui_src_path, app.ui_dir_path, dirs_exist_ok=True)
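The copy above relies on shutil.copytree(..., dirs_exist_ok=True), available in Python 3.8+, which merges the source tree into an existing destination instead of raising FileExistsError. A self-contained sketch with temporary folders (paths here are placeholders, not the installer's real layout):

# Standalone illustration of the dirs_exist_ok=True behaviour used above.
import os
import shutil
import tempfile

src = tempfile.mkdtemp()   # stands in for project_repo/ui
dst = tempfile.mkdtemp()   # stands in for an already-present ui/ folder

with open(os.path.join(src, "index.html"), "w") as f:
    f.write("<!-- placeholder -->")

shutil.copytree(src, dst, dirs_exist_ok=True)  # merges instead of failing
print(os.listdir(dst))  # ['index.html']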
installer/installer/tasks/fetch_project_repo.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from os import path

from installer import app, helpers

project_repo_git_path = path.join(app.project_repo_dir_path, '.git')

def run():
    branch_name = app.config.get('update_branch', app.DEFAULT_UPDATE_BRANCH)

    if path.exists(project_repo_git_path):
        helpers.log(f"Stable Diffusion UI's git repository was already installed. Updating from {branch_name}..")

        helpers.run("git reset --hard", run_in_folder=app.project_repo_dir_path)
        helpers.run(f'git checkout "{branch_name}"', run_in_folder=app.project_repo_dir_path)
        helpers.run("git pull", run_in_folder=app.project_repo_dir_path)
    else:
        helpers.log("\nDownloading Stable Diffusion UI..\n")
        helpers.log(f"Using the {branch_name} channel\n")

        if helpers.run(f'git clone -b "{branch_name}" {app.PROJECT_REPO_URL} "{app.project_repo_dir_path}"'):
            helpers.log("Downloaded Stable Diffusion UI")
        else:
            helpers.show_install_error(error_msg="Could not download Stable Diffusion UI")
            exit(1)
@@ -0,0 +1,171 @@
{
  "_name_or_path": "clip-vit-large-patch14/",
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 768,
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 77,
    "min_length": 0,
    "model_type": "clip_text_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 768,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.16.0.dev0",
    "use_bfloat16": false,
    "vocab_size": 49408
  },
  "text_config_dict": {
    "hidden_size": 768,
    "intermediate_size": 3072,
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "projection_dim": 768
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "clip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 768,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.16.0.dev0",
    "use_bfloat16": false
  },
  "vision_config_dict": {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
  }
}
installer/patches/sd_custom.patch (new file, 332 lines)
@@ -0,0 +1,332 @@
diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
index b967b55..35ef520 100644
--- a/optimizedSD/ddpm.py
+++ b/optimizedSD/ddpm.py
@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
 from ldm.modules.diffusionmodules.util import make_beta_schedule
 from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
 from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
 
 def disabled_train(self):
     """Overwrite model.train with this function to make sure train/eval mode
@@ -506,6 +506,8 @@ class UNet(DDPM):
 
         x_latent = noise if x0 is None else x0
         # sampling
+        if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
+            self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
 
         if sampler == "plms":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
@@ -528,39 +530,46 @@ class UNet(DDPM):
         elif sampler == "ddim":
             samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning,
-                                         mask = mask,init_latent=x_T,use_original_steps=False)
+                                         mask = mask,init_latent=x_T,use_original_steps=False,
+                                         callback=callback, img_callback=img_callback)
 
         elif sampler == "euler":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
         elif sampler == "euler_a":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
 
         elif sampler == "dpm2":
             samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
         elif sampler == "heun":
             samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
 
         elif sampler == "dpm2_a":
             samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
 
 
         elif sampler == "lms":
             samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
+
+        yield from samples
 
         if(self.turbo):
             self.model1.to("cpu")
             self.model2.to("cpu")
 
-        return samples
-
     @torch.no_grad()
     def plms_sampling(self, cond,b, img,
                       ddim_use_original_steps=False,
@@ -599,10 +608,10 @@ class UNet(DDPM):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)
 
-        return img
+        yield from img_callback(img, len(iterator)-1)
 
     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -706,7 +715,8 @@ class UNet(DDPM):
 
     @torch.no_grad()
     def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-                      mask = None,init_latent=None,use_original_steps=False):
+                      mask = None,init_latent=None,use_original_steps=False,
+                      callback=None, img_callback=None):
 
         timesteps = self.ddim_timesteps
         timesteps = timesteps[:t_start]
@@ -730,10 +740,13 @@ class UNet(DDPM):
                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                    unconditional_conditioning=unconditional_conditioning)
 
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x_dec, i)
+
         if mask is not None:
-            return x0 * mask + (1. - mask) * x_dec
+            x_dec = x0 * mask + (1. - mask) * x_dec
 
-        return x_dec
+        yield from img_callback(x_dec, len(iterator)-1)
 
 
     @torch.no_grad()
@@ -779,13 +792,16 @@ class UNet(DDPM):
 
 
     @torch.no_grad()
-    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
         cvd = CompVisDenoiser(ac)
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -807,13 +823,18 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             # Euler method
             x = x + d * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
-    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
+    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with Euler method steps."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -822,6 +843,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -837,17 +860,22 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Euler method
             dt = sigma_down - sigmas[i]
             x = x + d * dt
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
 
     @torch.no_grad()
-    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                      img_callback=None):
         """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -855,6 +883,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
+
 
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
@@ -876,6 +906,9 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             if sigmas[i + 1] == 0:
                 # Euler method
@@ -895,11 +928,13 @@ class UNet(DDPM):
                 d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
                 d_prime = (d + d_2) / 2
                 x = x + d_prime * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -907,6 +942,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -924,7 +961,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
-
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigma_hat, denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
@@ -945,11 +982,13 @@ class UNet(DDPM):
 
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
+    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with DPM-Solver inspired second-order steps."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -957,6 +996,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -973,6 +1014,9 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
             sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
@@ -993,11 +1037,13 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
+    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
+                     img_callback=None):
         extra_args = {} if extra_args is None else extra_args
         s_in = x.new_ones([x.shape[0]])
 
@@ -1005,6 +1051,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
+
         ds = []
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -1017,6 +1065,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigmas[i], denoised)
             ds.append(d)
@@ -1027,4 +1076,5 @@ class UNet(DDPM):
             cur_order = min(i + 1, order)
             coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
             x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
index abc3098..7a32ffe 100644
--- a/optimizedSD/openaimodelSplit.py
+++ b/optimizedSD/openaimodelSplit.py
@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
     normalization,
     timestep_embedding,
 )
-from splitAttention import SpatialTransformer
+from .splitAttention import SpatialTransformer
 
 
 class AttentionPool2d(nn.Module):
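This patch threads an img_callback argument through the samplers and replaces their final return with yield from img_callback(...), so sampling becomes a generator that emits whatever the callback yields at each step. A rough consumer-side sketch of that pattern (assumed usage, not code from this commit; sample_fn stands in for a patched sampler):

# Illustration of the generator/callback pattern introduced by the patch.
# img_callback must itself be a generator, because the samplers use
# `yield from img_callback(x, i)`.
def img_callback(x, i):
    # x: current latent/image value, i: step index
    yield {"step": i, "latent": x}

def sample_fn(steps, img_callback=None):
    x = 0  # placeholder for the evolving latent
    for i in range(steps):
        x += 1  # placeholder for one denoising step
        if img_callback:
            yield from img_callback(x, i)

for event in sample_fn(steps=3, img_callback=img_callback):
    print("intermediate result at step", event["step"])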
installer/patches/sd_env_yaml.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/environment.yaml b/environment.yaml
index 7f25da8..306750f 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -23,6 +23,8 @@ dependencies:
     - torch-fidelity==0.3.0
     - transformers==4.19.2
     - torchmetrics==0.6.0
+    - pywavelets==1.3.0
+    - pandas==1.4.4
     - kornia==0.6
     - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
     - -e git+https://github.com/openai/CLIP.git@main#egg=clip
installer/yaml/installer-environment.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
name: stable-diffusion-ui-installer
channels:
  - defaults
  - conda-forge
dependencies:
  - git
  - python=3.8.13