Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-01-25 23:58:48 +01:00)
Commit ab0218050c

@@ -1,101 +0,0 @@
-# this script runs inside the legacy "stable-diffusion" folder
-
-from sdkit.models import download_model, get_model_info_from_db
-from sdkit.utils import hash_file_quick
-
-import os
-import shutil
-from glob import glob
-import traceback
-
-models_base_dir = os.path.abspath(os.path.join("..", "models"))
-
-models_to_check = {
-    "stable-diffusion": [
-        {"file_name": "sd-v1-4.ckpt", "model_id": "1.4"},
-    ],
-    "gfpgan": [
-        {"file_name": "GFPGANv1.4.pth", "model_id": "1.4"},
-    ],
-    "realesrgan": [
-        {"file_name": "RealESRGAN_x4plus.pth", "model_id": "x4plus"},
-        {"file_name": "RealESRGAN_x4plus_anime_6B.pth", "model_id": "x4plus_anime_6"},
-    ],
-    "vae": [
-        {"file_name": "vae-ft-mse-840000-ema-pruned.ckpt", "model_id": "vae-ft-mse-840000-ema-pruned"},
-    ],
-}
-MODEL_EXTENSIONS = {  # copied from easydiffusion/model_manager.py
-    "stable-diffusion": [".ckpt", ".safetensors"],
-    "vae": [".vae.pt", ".ckpt", ".safetensors"],
-    "hypernetwork": [".pt", ".safetensors"],
-    "gfpgan": [".pth"],
-    "realesrgan": [".pth"],
-    "lora": [".ckpt", ".safetensors"],
-}
-
-
-def download_if_necessary(model_type: str, file_name: str, model_id: str):
-    model_path = os.path.join(models_base_dir, model_type, file_name)
-    expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
-
-    other_models_exist = any_model_exists(model_type)
-    known_model_exists = os.path.exists(model_path)
-    known_model_is_corrupt = known_model_exists and hash_file_quick(model_path) != expected_hash
-
-    if known_model_is_corrupt or (not other_models_exist and not known_model_exists):
-        print("> download", model_type, model_id)
-        download_model(model_type, model_id, download_base_dir=models_base_dir)
-
-
-def init():
-    migrate_legacy_model_location()
-
-    for model_type, models in models_to_check.items():
-        for model in models:
-            try:
-                download_if_necessary(model_type, model["file_name"], model["model_id"])
-            except:
-                traceback.print_exc()
-                fail(model_type)
-
-        print(model_type, "model(s) found.")
-
-
-### utilities
-def any_model_exists(model_type: str) -> bool:
-    extensions = MODEL_EXTENSIONS.get(model_type, [])
-    for ext in extensions:
-        if any(glob(f"{models_base_dir}/{model_type}/**/*{ext}", recursive=True)):
-            return True
-
-    return False
-
-
-def migrate_legacy_model_location():
-    'Move the models inside the legacy "stable-diffusion" folder, to their respective folders'
-
-    for model_type, models in models_to_check.items():
-        for model in models:
-            file_name = model["file_name"]
-            if os.path.exists(file_name):
-                dest_dir = os.path.join(models_base_dir, model_type)
-                os.makedirs(dest_dir, exist_ok=True)
-                shutil.move(file_name, os.path.join(dest_dir, file_name))
-
-
-def fail(model_name):
-    print(
-        f"""Error downloading the {model_name} model. Sorry about that, please try to:
-1. Run this installer again.
-2. If that doesn't fix it, please try to download the file manually. The address to download from, and the destination to save to are printed above this message.
-3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
-4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
-Thanks!"""
-    )
-    exit(1)
-
-
-### start
-
-init()

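The core of the deleted script is the verify-or-download gate in download_if_necessary(). A condensed sketch of that gate, assuming sdkit's get_model_info_from_db, hash_file_quick, and download_model behave as used above (simplified: it drops the other_models_exist check and re-fetches whenever the file is missing or its quick-hash mismatches):

import os

from sdkit.models import download_model, get_model_info_from_db
from sdkit.utils import hash_file_quick


def ensure_model(models_dir: str, model_type: str, file_name: str, model_id: str):
    # e.g. ensure_model("../models", "gfpgan", "GFPGANv1.4.pth", "1.4")
    path = os.path.join(models_dir, model_type, file_name)
    expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
    if not os.path.exists(path) or hash_file_quick(path) != expected_hash:
        download_model(model_type, model_id, download_base_dir=models_dir)
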
@@ -18,7 +18,7 @@ os_name = platform.system()
 modules_to_check = {
     "torch": ("1.11.0", "1.13.1", "2.0.0"),
     "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
-    "sdkit": "1.0.98",
+    "sdkit": "1.0.101",
     "stable-diffusion-sdkit": "2.1.4",
     "rich": "12.6.0",
     "uvicorn": "0.19.0",

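Only the pinned-version table is visible in this hunk; the script's comparison logic is not shown. A hypothetical sketch of how such pins can be verified at startup, using only the standard library:

from importlib.metadata import PackageNotFoundError, version

modules_to_check = {
    "sdkit": "1.0.101",  # the pin bumped in this commit
    "rich": "12.6.0",
}

for name, expected in modules_to_check.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name} is not installed (expected {expected})")
        continue
    if installed != expected:
        print(f"{name} is {installed}, expected {expected}")
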
@@ -79,13 +79,6 @@ call WHERE uvicorn > .tmp
     @echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
 )
 
-@rem Download the required models
-call python ..\scripts\check_models.py
-if "%ERRORLEVEL%" NEQ "0" (
-    pause
-    exit /b
-)
-
 @>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
 @if "%ERRORLEVEL%" NEQ "0" (
     @echo sd_weights_downloaded >> ..\scripts\install_status.txt

@@ -51,12 +51,6 @@ if ! command -v uvicorn &> /dev/null; then
     fail "UI packages not found!"
 fi
 
-# Download the required models
-if ! python ../scripts/check_models.py; then
-    read -p "Press any key to continue"
-    exit 1
-fi
-
 if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
     echo sd_weights_downloaded >> ../scripts/install_status.txt
     echo sd_install_complete >> ../scripts/install_status.txt

@@ -90,8 +90,8 @@ def init():
     os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
 
     # https://pytorch.org/docs/stable/storage.html
-    warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
+    warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
 
     load_server_plugins()
 
     update_render_threads()

@@ -221,12 +221,41 @@ def open_browser():
 
     webbrowser.open(f"http://localhost:{port}")
 
-    Console().print(Panel(
-        "\n" +
-        "[white]Easy Diffusion is ready to serve requests.\n\n" +
-        "A new browser tab should have been opened by now.\n" +
-        f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
-        title="Easy Diffusion is ready", style="bold yellow on blue"))
+    Console().print(
+        Panel(
+            "\n"
+            + "[white]Easy Diffusion is ready to serve requests.\n\n"
+            + "A new browser tab should have been opened by now.\n"
+            + f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
+            title="Easy Diffusion is ready",
+            style="bold yellow on blue",
+        )
+    )
+
+
+def fail_and_die(fail_type: str, data: str):
+    suggestions = [
+        "Run this installer again.",
+        "If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB",
+        "If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues",
+    ]
+
+    if fail_type == "model_download":
+        fail_label = f"Error downloading the {data} model"
+        suggestions.insert(
+            1,
+            "If that doesn't fix it, please try to download the file manually. The address to download from, and the destination to save to are printed above this message.",
+        )
+    else:
+        fail_label = "Error while installing Easy Diffusion"
+
+    msg = [f"{fail_label}. Sorry about that, please try to:"]
+    for i, suggestion in enumerate(suggestions):
+        msg.append(f"{i+1}. {suggestion}")
+    msg.append("Thanks!")
+
+    print("\n".join(msg))
+    exit(1)
 
 
 def get_image_modifiers():

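A usage sketch of the new helper: the model manager calls it when a default-model download fails (see the model_manager hunks below), which prints the numbered recovery steps and exits.

from easydiffusion import app

# Prints "Error downloading the stable-diffusion model. Sorry about that, ..."
# followed by the four numbered suggestions, then exits with status 1.
app.fail_and_die(fail_type="model_download", data="stable-diffusion")
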
@@ -1,10 +1,14 @@
 import os
+import shutil
+from glob import glob
+import traceback
 
 from easydiffusion import app
 from easydiffusion.types import TaskData
 from easydiffusion.utils import log
 from sdkit import Context
-from sdkit.models import load_model, scan_model, unload_model
+from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db
+from sdkit.utils import hash_file_quick
 
 KNOWN_MODEL_TYPES = [
     "stable-diffusion",

@@ -13,6 +17,7 @@ KNOWN_MODEL_TYPES = [
     "gfpgan",
     "realesrgan",
     "lora",
+    "codeformer",
 ]
 MODEL_EXTENSIONS = {
     "stable-diffusion": [".ckpt", ".safetensors"],

@@ -21,14 +26,22 @@ MODEL_EXTENSIONS = {
     "gfpgan": [".pth"],
     "realesrgan": [".pth"],
     "lora": [".ckpt", ".safetensors"],
+    "codeformer": [".pth"],
 }
 DEFAULT_MODELS = {
-    "stable-diffusion": [  # needed to support the legacy installations
-        "custom-model",  # only one custom model file was supported initially, creatively named 'custom-model'
-        "sd-v1-4",  # Default fallback.
+    "stable-diffusion": [
+        {"file_name": "sd-v1-4.ckpt", "model_id": "1.4"},
+    ],
+    "gfpgan": [
+        {"file_name": "GFPGANv1.4.pth", "model_id": "1.4"},
+    ],
+    "realesrgan": [
+        {"file_name": "RealESRGAN_x4plus.pth", "model_id": "x4plus"},
+        {"file_name": "RealESRGAN_x4plus_anime_6B.pth", "model_id": "x4plus_anime_6"},
+    ],
+    "vae": [
+        {"file_name": "vae-ft-mse-840000-ema-pruned.ckpt", "model_id": "vae-ft-mse-840000-ema-pruned"},
     ],
-    "gfpgan": ["GFPGANv1.3"],
-    "realesrgan": ["RealESRGAN_x4plus"],
 }
 MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
 

@@ -37,6 +50,8 @@ known_models = {}
 
 def init():
     make_model_folders()
+    migrate_legacy_model_location()  # if necessary
+    download_default_models_if_necessary()
     getModels()  # run this once, to cache the picklescan results
 
 

@@ -75,7 +90,7 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
     default_models = DEFAULT_MODELS.get(model_type, [])
     config = app.getConfig()
 
-    model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
+    model_dir = os.path.join(app.MODELS_DIR, model_type)
     if not model_name:  # When None try user configured model.
         # config = getConfig()
         if "model" in config and model_type in config["model"]:

@@ -83,45 +98,41 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
 
     if model_name:
         # Check models directory
-        models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
+        model_path = os.path.join(model_dir, model_name)
+        if os.path.exists(model_path):
+            return model_path
         for model_extension in model_extensions:
-            if os.path.exists(models_dir_path + model_extension):
-                return models_dir_path + model_extension
+            if os.path.exists(model_path + model_extension):
+                return model_path + model_extension
             if os.path.exists(model_name + model_extension):
                 return os.path.abspath(model_name + model_extension)
 
-        # Default locations
-        if model_name in default_models:
-            default_model_path = os.path.join(app.SD_DIR, model_name)
-            for model_extension in model_extensions:
-                if os.path.exists(default_model_path + model_extension):
-                    return default_model_path + model_extension
-
     # Can't find requested model, check the default paths.
-    for default_model in default_models:
-        for model_dir in model_dirs:
-            default_model_path = os.path.join(model_dir, default_model)
-            for model_extension in model_extensions:
-                if os.path.exists(default_model_path + model_extension):
-                    if model_name is not None:
-                        log.warn(
-                            f"Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}"
-                        )
-                    return default_model_path + model_extension
+    if model_type == "stable-diffusion":
+        for default_model in default_models:
+            default_model_path = os.path.join(model_dir, default_model["file_name"])
+            if os.path.exists(default_model_path):
+                if model_name is not None:
+                    log.warn(
+                        f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
+                    )
+                return default_model_path
 
     return None
 
 
 def reload_models_if_necessary(context: Context, task_data: TaskData):
-    use_upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
+    face_fix_lower = task_data.use_face_correction.lower() if task_data.use_face_correction else ""
+    upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
 
     model_paths_in_req = {
         "stable-diffusion": task_data.use_stable_diffusion_model,
         "vae": task_data.use_vae_model,
         "hypernetwork": task_data.use_hypernetwork_model,
-        "gfpgan": task_data.use_face_correction,
-        "realesrgan": task_data.use_upscale if "realesrgan" in use_upscale_lower else None,
-        "latent_upscaler": True if task_data.use_upscale == "latent_upscaler" else None,
+        "codeformer": task_data.use_face_correction if "codeformer" in face_fix_lower else None,
+        "gfpgan": task_data.use_face_correction if "gfpgan" in face_fix_lower else None,
+        "realesrgan": task_data.use_upscale if "realesrgan" in upscale_lower else None,
+        "latent_upscaler": True if "latent_upscaler" in upscale_lower else None,
        "nsfw_checker": True if task_data.block_nsfw else None,
         "lora": task_data.use_lora_model,
     }

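Illustrative calls against the rewritten resolver (hypothetical model names; actual results depend on what exists under the models folder):

from easydiffusion import model_manager

# Checked as <MODELS_DIR>/stable-diffusion/my-model, then my-model.ckpt /
# my-model.safetensors in that folder, then relative to the working directory.
model_manager.resolve_model_to_use("my-model", model_type="stable-diffusion")

# Only stable-diffusion requests now fall back to a DEFAULT_MODELS entry;
# a missing model of any other type resolves to None.
model_manager.resolve_model_to_use("no-such-vae", model_type="vae")
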
@@ -131,6 +142,11 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
         if context.model_paths.get(model_type) != path
     }
 
+    if task_data.codeformer_upscale_faces and "realesrgan" not in models_to_reload.keys():
+        models_to_reload["realesrgan"] = resolve_model_to_use(
+            DEFAULT_MODELS["realesrgan"][0]["file_name"], "realesrgan"
+        )
+
     if set_vram_optimizations(context) or set_clip_skip(context, task_data):  # reload SD
         models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]
 

@@ -157,7 +173,13 @@ def resolve_model_paths(task_data: TaskData):
     task_data.use_lora_model = resolve_model_to_use(task_data.use_lora_model, model_type="lora")
 
     if task_data.use_face_correction:
-        task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
+        if "gfpgan" in task_data.use_face_correction.lower():
+            model_type = "gfpgan"
+        elif "codeformer" in task_data.use_face_correction.lower():
+            model_type = "codeformer"
+            download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0")
+
+        task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, model_type)
     if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
         task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")
 

@@ -167,7 +189,31 @@ def fail_if_models_did_not_load(context: Context):
         if model_type in context.model_load_errors:
             e = context.model_load_errors[model_type]
             raise Exception(f"Could not load the {model_type} model! Reason: " + e)
-            # concat 'e', don't use in format string (injection attack)
 
+
+def download_default_models_if_necessary():
+    for model_type, models in DEFAULT_MODELS.items():
+        for model in models:
+            try:
+                download_if_necessary(model_type, model["file_name"], model["model_id"])
+            except:
+                traceback.print_exc()
+                app.fail_and_die(fail_type="model_download", data=model_type)
+
+        print(model_type, "model(s) found.")
+
+
+def download_if_necessary(model_type: str, file_name: str, model_id: str):
+    model_path = os.path.join(app.MODELS_DIR, model_type, file_name)
+    expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
+
+    other_models_exist = any_model_exists(model_type)
+    known_model_exists = os.path.exists(model_path)
+    known_model_is_corrupt = known_model_exists and hash_file_quick(model_path) != expected_hash
+
+    if known_model_is_corrupt or (not other_models_exist and not known_model_exists):
+        print("> download", model_type, model_id)
+        download_model(model_type, model_id, download_base_dir=app.MODELS_DIR)
+
 
 def set_vram_optimizations(context: Context):

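The download gate above triggers in exactly two cases. A minimal sketch that pulls the decision out as a pure function, so the cases can be checked without touching the filesystem:

def should_download(known_exists: bool, known_corrupt: bool, others_exist: bool) -> bool:
    # mirrors: known_model_is_corrupt or (not other_models_exist and not known_model_exists)
    return known_corrupt or (not others_exist and not known_exists)

assert should_download(False, False, False)      # fresh install: fetch the default
assert not should_download(False, False, True)   # user brought their own models: leave them be
assert should_download(True, True, True)         # known file fails the quick-hash: re-download
assert not should_download(True, False, False)   # healthy default already present
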
@@ -181,6 +227,26 @@ def set_vram_optimizations(context: Context):
     return False
 
 
+def migrate_legacy_model_location():
+    'Move the models inside the legacy "stable-diffusion" folder, to their respective folders'
+
+    for model_type, models in DEFAULT_MODELS.items():
+        for model in models:
+            file_name = model["file_name"]
+            legacy_path = os.path.join(app.SD_DIR, file_name)
+            if os.path.exists(legacy_path):
+                shutil.move(legacy_path, os.path.join(app.MODELS_DIR, model_type, file_name))
+
+
+def any_model_exists(model_type: str) -> bool:
+    extensions = MODEL_EXTENSIONS.get(model_type, [])
+    for ext in extensions:
+        if any(glob(f"{app.MODELS_DIR}/{model_type}/**/*{ext}", recursive=True)):
+            return True
+
+    return False
+
+
 def set_clip_skip(context: Context, task_data: TaskData):
     clip_skip = task_data.clip_skip
 

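A standalone sketch of the glob check inside any_model_exists(), assuming a hypothetical models root; recursive=True lets model files live in nested subfolders:

import os
from glob import glob

models_dir = os.path.abspath("models")  # hypothetical root

def any_model_exists(model_type: str, extensions) -> bool:
    return any(
        glob(f"{models_dir}/{model_type}/**/*{ext}", recursive=True) for ext in extensions
    )

print(any_model_exists("lora", [".ckpt", ".safetensors"]))
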
@@ -238,17 +304,12 @@ def is_malicious_model(file_path):
 
 def getModels():
     models = {
-        "active": {
-            "stable-diffusion": "sd-v1-4",
-            "vae": "",
-            "hypernetwork": "",
-            "lora": "",
-        },
         "options": {
             "stable-diffusion": ["sd-v1-4"],
             "vae": [],
             "hypernetwork": [],
             "lora": [],
+            "codeformer": ["codeformer"],
         },
     }
 

@@ -309,9 +370,4 @@ def getModels():
     if models_scanned > 0:
         log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")
 
-    # legacy
-    custom_weight_path = os.path.join(app.SD_DIR, "custom-model.ckpt")
-    if os.path.exists(custom_weight_path):
-        models["options"]["stable-diffusion"].append("custom-model")
-
     return models

@@ -34,6 +34,7 @@ def init(device):
     context.temp_images = {}
     context.partial_x_samples = None
     context.model_load_errors = {}
+    context.enable_codeformer = True
 
     from easydiffusion import app
 

@@ -160,7 +161,11 @@ def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list,
     filter_params = {}
     if task_data.block_nsfw:
         filters_to_apply.append("nsfw_checker")
-    if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
+    if task_data.use_face_correction and "codeformer" in task_data.use_face_correction.lower():
+        filters_to_apply.append("codeformer")
+
+        filter_params["upscale_faces"] = task_data.codeformer_upscale_faces
+    elif task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
         filters_to_apply.append("gfpgan")
     if task_data.use_upscale:
         if "realesrgan" in task_data.use_upscale.lower():

@@ -23,7 +23,7 @@ class GenerateImageRequest(BaseModel):
     sampler_name: str = None  # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
     hypernetwork_strength: float = 0
     lora_alpha: float = 0
     tiling: str = "none"  # "none", "x", "y", "xy"
 
 
 class TaskData(BaseModel):

@@ -51,6 +51,7 @@ class TaskData(BaseModel):
     stream_image_progress: bool = False
     stream_image_progress_interval: int = 5
     clip_skip: bool = False
+    codeformer_upscale_faces: bool = False
 
 
 class MergeRequest(BaseModel):

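A hypothetical request fragment tying the new field to the renderer hunk above: a use_face_correction value containing "codeformer" selects the codeformer filter, and codeformer_upscale_faces is forwarded as filter_params["upscale_faces"].

req_fragment = {
    "use_face_correction": "codeformer",  # picks the codeformer branch in filter_images()
    "codeformer_upscale_faces": True,     # becomes filter_params["upscale_faces"]
}
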
@@ -263,7 +263,12 @@
     <div><ul>
         <li><b class="settings-subheader">Render Settings</b></li>
         <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
-        <li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div></li>
+        <li class="pl-5">
+            <input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div>
+            <div id="codeformer_settings" class="displayNone sub-settings">
+                <input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox"><label for="codeformer_upscale_faces">Upscale Faces <small>(improves the resolution of faces)</small></label>
+            </div>
+        </li>
         <li class="pl-5">
             <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
             <select id="upscale_amount" name="upscale_amount">

@@ -276,7 +281,7 @@
                 <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
                 <option value="latent_upscaler">Latent Upscaler 2x</option>
             </select>
-            <div id="latent_upscaler_settings" class="displayNone">
+            <div id="latent_upscaler_settings" class="displayNone sub-settings">
                 <label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td> <input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
             </div>
         </li>

@@ -1303,7 +1303,7 @@ body.wait-pause {
     display:none !important;
 }
 
-#latent_upscaler_settings {
+.sub-settings {
     padding-top: 3pt;
     padding-bottom: 3pt;
     padding-left: 5pt;

@@ -1322,7 +1322,7 @@ body.wait-pause {
     box-shadow: 0 0 10px rgba(0, 0, 0, 0.5);
     z-index: 9999;
     animation: slideInRight 0.5s ease forwards;
-    transition: bottom 0.5s ease; // Add a transition to smoothly reposition the toasts
+    transition: bottom 0.5s ease; /* Add a transition to smoothly reposition the toasts */
 }
 
 .toast-notification-error {

@@ -87,7 +87,7 @@ let promptStrengthField = document.querySelector("#prompt_strength")
 let samplerField = document.querySelector("#sampler_name")
 let samplerSelectionContainer = document.querySelector("#samplerSelection")
 let useFaceCorrectionField = document.querySelector("#use_face_correction")
-let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model"), "gfpgan")
+let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model"), ["codeformer", "gfpgan"])
 let useUpscalingField = document.querySelector("#use_upscale")
 let upscaleModelField = document.querySelector("#upscale_model")
 let upscaleAmountField = document.querySelector("#upscale_amount")

@@ -270,7 +270,9 @@ function shiftOrConfirm(e, prompt, fn) {
         confirm(
             '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
             prompt,
-            () => { fn(e) }
+            () => {
+                fn(e)
+            }
         )
     }
 }

@@ -1261,6 +1263,10 @@ function getCurrentUserRequest() {
     }
     if (useFaceCorrectionField.checked) {
         newTask.reqBody.use_face_correction = gfpganModelField.value
+
+        if (gfpganModelField.value.includes("codeformer")) {
+            newTask.reqBody.codeformer_upscale_faces = document.querySelector("#codeformer_upscale_faces").checked
+        }
     }
     if (useUpscalingField.checked) {
         newTask.reqBody.use_upscale = upscaleModelField.value

@@ -1574,18 +1580,33 @@ metadataOutputFormatField.disabled = !saveToDiskField.checked
 gfpganModelField.disabled = !useFaceCorrectionField.checked
 useFaceCorrectionField.addEventListener("change", function(e) {
     gfpganModelField.disabled = !this.checked
+
+    onFixFaceModelChange()
 })
+
+function onFixFaceModelChange() {
+    let codeformerSettings = document.querySelector("#codeformer_settings")
+    if (gfpganModelField.value === "codeformer" && !gfpganModelField.disabled) {
+        codeformerSettings.classList.remove("displayNone")
+    } else {
+        codeformerSettings.classList.add("displayNone")
+    }
+}
+gfpganModelField.addEventListener("change", onFixFaceModelChange)
+onFixFaceModelChange()
 
 upscaleModelField.disabled = !useUpscalingField.checked
 upscaleAmountField.disabled = !useUpscalingField.checked
 useUpscalingField.addEventListener("change", function(e) {
     upscaleModelField.disabled = !this.checked
     upscaleAmountField.disabled = !this.checked
+
+    onUpscaleModelChange()
 })
 
 function onUpscaleModelChange() {
     let upscale4x = document.querySelector("#upscale_amount_4x")
-    if (upscaleModelField.value === "latent_upscaler") {
+    if (upscaleModelField.value === "latent_upscaler" && !upscaleModelField.disabled) {
         upscale4x.disabled = true
         upscaleAmountField.value = "2"
         latentUpscalerSettings.classList.remove("displayNone")

@@ -90,7 +90,12 @@ class ModelDropdown {
 
         if (modelsOptions !== undefined) {
             // reuse models from cache (only useful for plugins, which are loaded after models)
-            this.inputModels = modelsOptions[this.modelKey]
+            this.inputModels = []
+            let modelKeys = Array.isArray(this.modelKey) ? this.modelKey : [this.modelKey]
+            for (let i = 0; i < modelKeys.length; i++) {
+                let key = modelKeys[i]
+                this.inputModels.push(...modelsOptions[key])
+            }
             this.populateModels()
         }
         document.addEventListener(

@@ -98,6 +103,12 @@ class ModelDropdown {
             this.bind(function(e) {
                 // reload the models
                 this.inputModels = modelsOptions[this.modelKey]
+                this.inputModels = []
+                let modelKeys = Array.isArray(this.modelKey) ? this.modelKey : [this.modelKey]
+                for (let i = 0; i < modelKeys.length; i++) {
+                    let key = modelKeys[i]
+                    this.inputModels.push(...modelsOptions[key])
+                }
                 this.populateModels()
             }, this)
         )

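The ModelDropdown changes normalize a string-or-array modelKey into a list and merge the options of every listed type, which is how the face-fix dropdown now shows both codeformer and gfpgan models. The same pattern, sketched in Python with hypothetical data:

def gather_models(models_options: dict, model_key) -> list:
    # accept either a single key or a list of keys, then merge their entries
    keys = model_key if isinstance(model_key, list) else [model_key]
    merged = []
    for key in keys:
        merged.extend(models_options.get(key, []))
    return merged

options = {"codeformer": ["codeformer"], "gfpgan": ["GFPGANv1.4"]}
print(gather_models(options, ["codeformer", "gfpgan"]))  # ['codeformer', 'GFPGANv1.4']
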