diff --git a/.gitignore b/.gitignore
index b5157e17..90bf0a44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@ installer
installer.tar
dist
.idea/*
+node_modules/*
\ No newline at end of file
diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 00000000..b0f8227f
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,9 @@
+*.min.*
+*.py
+*.json
+*.html
+/*
+!/ui
+/ui/easydiffusion
+!/ui/plugins
+!/ui/media
\ No newline at end of file
diff --git a/.prettierrc.json b/.prettierrc.json
new file mode 100644
index 00000000..a42b3fd7
--- /dev/null
+++ b/.prettierrc.json
@@ -0,0 +1,7 @@
+{
+ "printWidth": 120,
+ "tabWidth": 4,
+ "semi": false,
+ "arrowParens": "always",
+ "trailingComma": "es5"
+}
diff --git a/CHANGES.md b/CHANGES.md
index 7e61b5aa..9b2b72c1 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,8 +2,9 @@
## v2.5
### Major Changes
-- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
+- **Nearly twice as fast** - significantly faster speed of image generation. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Mac M1/M2 support** - Experimental support for Mac M1/M2. Thanks @michaelgallacher, @JeLuf and vishae.
+- **AMD support for Linux** - Experimental support for AMD GPUs on Linux. Thanks @DianaNites and @JeLuf.
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use Stable Diffusion 2.1 models, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, the SD 2.0 and 2.1 models require more GPU and System RAM, as compared to the SD 1.4 and 1.5 models.
- **11 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps! We've added the Karras and UniPC samplers. Thanks @Schorny for the UniPC samplers.
@@ -21,7 +22,18 @@
Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
### Detailed changelog
+* 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
+* 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
+* 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal to or less than the non-diffusers version. Performs softmax in half precision, like sdkit does.
+* 2.5.36 - 16 May 2023 - (beta-only) More VRAM optimizations for "balanced" VRAM usage mode.
+* 2.5.36 - 11 May 2023 - (beta-only) More VRAM optimizations for "low" VRAM usage mode.
+* 2.5.36 - 10 May 2023 - (beta-only) Bug fix for "meta" error when using a LoRA in 'low' VRAM usage mode.
+* 2.5.35 - 8 May 2023 - Allow dragging a zoomed-in image (after opening an image with the "expand" button). Thanks @ogmaresca.
+* 2.5.35 - 3 May 2023 - (beta-only) First round of VRAM Optimizations for the "Test Diffusers" version. This change significantly reduces the amount of VRAM used by the diffusers version during image generation. The VRAM usage is still not equal to the "non-diffusers" version, but more optimizations are coming soon.
+* 2.5.34 - 22 Apr 2023 - Don't start the browser in an incognito new profile (on Windows). Thanks @JeLuf.
+* 2.5.33 - 21 Apr 2023 - Install PyTorch 2.0 on new installations (on Windows and Linux).
* 2.5.32 - 19 Apr 2023 - Automatically check for black images, and set full-precision if necessary (for attn). This means custom models based on Stable Diffusion v2.1 will just work, without needing special command-line arguments or editing of yaml config files.
+* 2.5.32 - 18 Apr 2023 - Automatic support for AMD graphics cards on Linux. Thanks @DianaNites and @JeLuf.
* 2.5.31 - 10 Apr 2023 - Reduce VRAM usage while upscaling.
* 2.5.31 - 6 Apr 2023 - Allow seeds upto `4,294,967,295`. Thanks @ogmaresca.
* 2.5.31 - 6 Apr 2023 - Buttons to show the previous/next image in the image popup. Thanks @ogmaresca.
diff --git a/README.md b/README.md
index 51ba812a..2d7fcf5a 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
# Easy Diffusion 2.5
-### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer.
+### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your computer.
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
@@ -16,6 +16,11 @@ Click the download button for your operating system:
+**Hardware requirements:**
+- **Windows:** NVIDIA graphics card, or run on your CPU
+- **Linux:** NVIDIA or AMD graphics card, or run on your CPU
+- **Mac:** M1 or M2, or run on your CPU
+
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
## On Windows:
@@ -53,7 +58,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
### Image generation
- **Supports**: "*Text to Image*" and "*Image to Image*".
-- **19 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
+- **21 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `ddpm`, `deis`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
- **In-Painting**: Specify areas of your image to paint into.
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
- **Face Correction (GFPGAN)**
diff --git a/package.json b/package.json
new file mode 100644
index 00000000..fbf1dadb
--- /dev/null
+++ b/package.json
@@ -0,0 +1,9 @@
+{
+ "scripts": {
+ "prettier-fix": "npx prettier --write \"./**/*.js\"",
+ "prettier-check": "npx prettier --check \"./**/*.js\""
+ },
+ "devDependencies": {
+ "prettier": "^1.19.1"
+ }
+}
diff --git a/scripts/check_modules.py b/scripts/check_modules.py
index 8ef43b09..1590f569 100644
--- a/scripts/check_modules.py
+++ b/scripts/check_modules.py
@@ -18,7 +18,7 @@ os_name = platform.system()
modules_to_check = {
"torch": ("1.11.0", "1.13.1", "2.0.0"),
"torchvision": ("0.12.0", "0.14.1", "0.15.1"),
- "sdkit": "1.0.80",
+ "sdkit": "1.0.96",
"stable-diffusion-sdkit": "2.1.4",
"rich": "12.6.0",
"uvicorn": "0.19.0",
@@ -47,6 +47,11 @@ def install(module_name: str, module_version: str):
module_version = "1.13.1+rocm5.2"
elif module_name == "torchvision":
module_version = "0.14.1+rocm5.2"
+ elif os_name == "Darwin":
+ if module_name == "torch":
+ module_version = "1.13.1"
+ elif module_name == "torchvision":
+ module_version = "0.14.1"
install_cmd = f"python -m pip install --upgrade {module_name}=={module_version}"
if index_url:
@@ -70,6 +75,10 @@ def init():
if module_name in ("torch", "torchvision"):
if version(module_name) is None: # allow any torch version
requires_install = True
+ elif os_name == "Darwin" and ( # force mac to downgrade from torch 2.0
+ version("torch").startswith("2.") or version("torchvision").startswith("0.15.")
+ ):
+ requires_install = True
elif version(module_name) not in allowed_versions:
requires_install = True
@@ -121,10 +130,13 @@ def include_cuda_versions(module_versions: tuple) -> tuple:
def is_amd_on_linux():
if os_name == "Linux":
- with open("/proc/bus/pci/devices", "r") as f:
- device_info = f.read()
- if "amdgpu" in device_info and "nvidia" not in device_info:
- return True
+ try:
+ with open("/proc/bus/pci/devices", "r") as f:
+ device_info = f.read()
+ if "amdgpu" in device_info and "nvidia" not in device_info:
+ return True
+ except:
+ return False
return False
diff --git a/scripts/get_config.py b/scripts/get_config.py
new file mode 100644
index 00000000..02523364
--- /dev/null
+++ b/scripts/get_config.py
@@ -0,0 +1,45 @@
+import os
+import argparse
+
+# The config file is in the same directory as this script
+config_directory = os.path.dirname(__file__)
+config_yaml = os.path.join(config_directory, "config.yaml")
+config_json = os.path.join(config_directory, "config.json")
+
+parser = argparse.ArgumentParser(description='Get values from config file')
+parser.add_argument('--default', dest='default', action='store',
+ help='default value, to be used if the setting is not defined in the config file')
+parser.add_argument('key', metavar='key', nargs='+',
+ help='config key to return')
+
+args = parser.parse_args()
+
+
+if os.path.isfile(config_yaml):
+ import yaml
+ with open(config_yaml, 'r') as configfile:
+ try:
+ config = yaml.safe_load(configfile)
+ except Exception as e:
+ print(e)
+ exit()
+elif os.path.isfile(config_json):
+ import json
+ with open(config_json, 'r') as configfile:
+ try:
+ config = json.load(configfile)
+ except Exception as e:
+ print(e)
+ exit()
+else:
+ config = {}
+
+for k in args.key:
+ if k in config:
+ config = config[k]
+ else:
+ if args.default != None:
+ print(args.default)
+ exit()
+
+print(config)
diff --git a/scripts/on_env_start.bat b/scripts/on_env_start.bat
index ee702bb5..44144cfa 100644
--- a/scripts/on_env_start.bat
+++ b/scripts/on_env_start.bat
@@ -12,6 +12,16 @@ if exist "scripts\user_config.bat" (
@call scripts\user_config.bat
)
+if exist "stable-diffusion\env" (
+ @set PYTHONPATH=%PYTHONPATH%;%cd%\stable-diffusion\env\lib\site-packages
+)
+
+if exist "scripts\get_config.py" (
+ @FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=main update_branch`) DO (
+ @SET update_branch=%%F
+ )
+)
+
if "%update_branch%"=="" (
set update_branch=main
)
@@ -58,6 +68,7 @@ if "%update_branch%"=="" (
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\check_models.py scripts\ /Y
+@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
diff --git a/scripts/on_env_start.sh b/scripts/on_env_start.sh
index 4e73ca4e..30465975 100755
--- a/scripts/on_env_start.sh
+++ b/scripts/on_env_start.sh
@@ -4,6 +4,8 @@ source ./scripts/functions.sh
printf "\n\nEasy Diffusion\n\n"
+export PYTHONNOUSERSITE=y
+
if [ -f "scripts/config.sh" ]; then
source scripts/config.sh
fi
@@ -12,6 +14,11 @@ if [ -f "scripts/user_config.sh" ]; then
source scripts/user_config.sh
fi
+export PYTHONPATH=$(pwd)/installer_files/env/lib/python3.8/site-packages:$(pwd)/stable-diffusion/env/lib/python3.8/site-packages
+
+if [ -f "scripts/get_config.py" ]; then
+ export update_branch="$( python scripts/get_config.py --default=main update_branch )"
+fi
if [ "$update_branch" == "" ]; then
export update_branch="main"
@@ -44,6 +51,7 @@ cp sd-ui-files/scripts/on_sd_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/check_models.py scripts/
+cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/start.sh .
cp sd-ui-files/scripts/developer_console.sh .
cp sd-ui-files/scripts/functions.sh scripts/
diff --git a/scripts/on_sd_start.bat b/scripts/on_sd_start.bat
index d8c6f763..ba205c9e 100644
--- a/scripts/on_sd_start.bat
+++ b/scripts/on_sd_start.bat
@@ -6,9 +6,10 @@
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\check_models.py scripts\ /Y
+@copy sd-ui-files\scripts\get_config.py scripts\ /Y
if exist "%cd%\profile" (
- set USERPROFILE=%cd%\profile
+ set HF_HOME=%cd%\profile\.cache\huggingface
)
@rem set the correct installer path (current vs legacy)
@@ -103,14 +104,25 @@ call python --version
@cd ..
@set SD_UI_PATH=%cd%\ui
+
+@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=9000 net listen_port`) DO (
+ @SET ED_BIND_PORT=%%F
+)
+
+@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
+ if "%%F" EQU "True" (
+ @SET ED_BIND_IP=0.0.0.0
+ ) else (
+ @SET ED_BIND_IP=127.0.0.1
+ )
+)
+
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
-@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
-@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
-@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error
+@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@pause
diff --git a/scripts/on_sd_start.sh b/scripts/on_sd_start.sh
index 858fa768..820c36ed 100755
--- a/scripts/on_sd_start.sh
+++ b/scripts/on_sd_start.sh
@@ -5,6 +5,7 @@ cp sd-ui-files/scripts/on_env_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/check_models.py scripts/
+cp sd-ui-files/scripts/get_config.py scripts/
source ./scripts/functions.sh
@@ -74,8 +75,17 @@ python --version
cd ..
export SD_UI_PATH=`pwd`/ui
+export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
+case "$( python scripts/get_config.py --default=False net listen_to_network )" in
+ "True")
+ export ED_BIND_IP=0.0.0.0
+ ;;
+ "False")
+ export ED_BIND_IP=127.0.0.1
+ ;;
+esac
cd stable-diffusion
-uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error
+uvicorn main:server_api --app-dir "$SD_UI_PATH" --port "$ED_BIND_PORT" --host "$ED_BIND_IP" --log-level error
read -p "Press any key to continue"
diff --git a/ui/easydiffusion/app.py b/ui/easydiffusion/app.py
index 83bb08c1..b6318f01 100644
--- a/ui/easydiffusion/app.py
+++ b/ui/easydiffusion/app.py
@@ -1,17 +1,16 @@
+import json
+import logging
import os
import socket
import sys
-import json
import traceback
-import logging
-import shlex
import urllib
-from rich.logging import RichHandler
-
-from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
+import warnings
from easydiffusion import task_manager
from easydiffusion.utils import log
+from rich.logging import RichHandler
+from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
@@ -55,15 +54,42 @@ APP_CONFIG_DEFAULTS = {
},
}
-IMAGE_EXTENSIONS = [".png", ".apng", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp", ".jxl", ".gif", ".webp", ".avif", ".svg"]
+IMAGE_EXTENSIONS = [
+ ".png",
+ ".apng",
+ ".jpg",
+ ".jpeg",
+ ".jfif",
+ ".pjpeg",
+ ".pjp",
+ ".jxl",
+ ".gif",
+ ".webp",
+ ".avif",
+ ".svg",
+]
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
-CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS=[".portrait", "_portrait", " portrait", "-portrait"]
-CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS=[".landscape", "_landscape", " landscape", "-landscape"]
+CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [
+ ".portrait",
+ "_portrait",
+ " portrait",
+ "-portrait",
+]
+CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [
+ ".landscape",
+ "_landscape",
+ " landscape",
+ "-landscape",
+]
+
def init():
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
+ # https://pytorch.org/docs/stable/storage.html
+ warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
+
load_server_plugins()
update_render_threads()
@@ -81,14 +107,10 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
config["net"] = {}
if os.getenv("SD_UI_BIND_PORT") is not None:
config["net"]["listen_port"] = int(os.getenv("SD_UI_BIND_PORT"))
- else:
- config["net"]["listen_port"] = 9000
if os.getenv("SD_UI_BIND_IP") is not None:
config["net"]["listen_to_network"] = os.getenv("SD_UI_BIND_IP") == "0.0.0.0"
- else:
- config["net"]["listen_to_network"] = True
return config
- except Exception as e:
+ except Exception:
log.warn(traceback.format_exc())
return default_val
@@ -101,50 +123,6 @@ def setConfig(config):
except:
log.error(traceback.format_exc())
- try: # config.bat
- config_bat_path = os.path.join(CONFIG_DIR, "config.bat")
- config_bat = []
-
- if "update_branch" in config:
- config_bat.append(f"@set update_branch={config['update_branch']}")
-
- config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
- bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
- config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
-
- # Preserve these variables if they are set
- for var in PRESERVE_CONFIG_VARS:
- if os.getenv(var) is not None:
- config_bat.append(f"@set {var}={os.getenv(var)}")
-
- if len(config_bat) > 0:
- with open(config_bat_path, "w", encoding="utf-8") as f:
- f.write("\n".join(config_bat))
- except:
- log.error(traceback.format_exc())
-
- try: # config.sh
- config_sh_path = os.path.join(CONFIG_DIR, "config.sh")
- config_sh = ["#!/bin/bash"]
-
- if "update_branch" in config:
- config_sh.append(f"export update_branch={config['update_branch']}")
-
- config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
- bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
- config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
-
- # Preserve these variables if they are set
- for var in PRESERVE_CONFIG_VARS:
- if os.getenv(var) is not None:
- config_bat.append(f'export {var}="{shlex.quote(os.getenv(var))}"')
-
- if len(config_sh) > 1:
- with open(config_sh_path, "w", encoding="utf-8") as f:
- f.write("\n".join(config_sh))
- except:
- log.error(traceback.format_exc())
-
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
config = getConfig()
@@ -233,18 +211,19 @@ def getIPConfig():
def open_browser():
config = getConfig()
ui = config.get("ui", {})
- net = config.get("net", {"listen_port": 9000})
+ net = config.get("net", {})
port = net.get("listen_port", 9000)
if ui.get("open_browser_on_start", True):
import webbrowser
webbrowser.open(f"http://localhost:{port}")
+
def get_image_modifiers():
modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")
modifier_categories = {}
- original_category_order=[]
+ original_category_order = []
with open(modifiers_json_path, "r", encoding="utf-8") as f:
modifiers_file = json.load(f)
@@ -254,14 +233,14 @@ def get_image_modifiers():
# convert modifiers from a list of objects to a dict of dicts
for category_item in modifiers_file:
- category_name = category_item['category']
+ category_name = category_item["category"]
original_category_order.append(category_name)
category = {}
- for modifier_item in category_item['modifiers']:
+ for modifier_item in category_item["modifiers"]:
modifier = {}
- for preview_item in modifier_item['previews']:
- modifier[preview_item['name']] = preview_item['path']
- category[modifier_item['modifier']] = modifier
+ for preview_item in modifier_item["previews"]:
+ modifier[preview_item["name"]] = preview_item["path"]
+ category[modifier_item["modifier"]] = modifier
modifier_categories[category_name] = category
def scan_directory(directory_path: str, category_name="Modifiers"):
@@ -274,12 +253,27 @@ def get_image_modifiers():
modifier_name = entry.name[: -len(file_extension[0])]
modifier_path = f"custom/{entry.path[len(CUSTOM_MODIFIERS_DIR) + 1:]}"
# URL encode path segments
- modifier_path = "/".join(map(lambda segment: urllib.parse.quote(segment), modifier_path.split("/")))
+ modifier_path = "/".join(
+ map(
+ lambda segment: urllib.parse.quote(segment),
+ modifier_path.split("/"),
+ )
+ )
is_portrait = True
is_landscape = True
- portrait_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS))
- landscape_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS))
+ portrait_extension = list(
+ filter(
+ lambda e: modifier_name.lower().endswith(e),
+ CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS,
+ )
+ )
+ landscape_extension = list(
+ filter(
+ lambda e: modifier_name.lower().endswith(e),
+ CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS,
+ )
+ )
if len(portrait_extension) > 0:
is_landscape = False
@@ -287,24 +281,24 @@ def get_image_modifiers():
elif len(landscape_extension) > 0:
is_portrait = False
modifier_name = modifier_name[: -len(landscape_extension[0])]
-
- if (category_name not in modifier_categories):
+
+ if category_name not in modifier_categories:
modifier_categories[category_name] = {}
-
+
category = modifier_categories[category_name]
- if (modifier_name not in category):
+ if modifier_name not in category:
category[modifier_name] = {}
- if (is_portrait or "portrait" not in category[modifier_name]):
+ if is_portrait or "portrait" not in category[modifier_name]:
category[modifier_name]["portrait"] = modifier_path
-
- if (is_landscape or "landscape" not in category[modifier_name]):
+
+ if is_landscape or "landscape" not in category[modifier_name]:
category[modifier_name]["landscape"] = modifier_path
elif entry.is_dir():
scan_directory(
entry.path,
- entry.name if directory_path==CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
+ entry.name if directory_path == CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
)
scan_directory(CUSTOM_MODIFIERS_DIR)
@@ -317,12 +311,12 @@ def get_image_modifiers():
# convert the modifiers back into a list of objects
modifier_categories_list = []
for category_name in [*original_category_order, *custom_categories]:
- category = { 'category': category_name, 'modifiers': [] }
+ category = {"category": category_name, "modifiers": []}
for modifier_name in sorted(modifier_categories[category_name].keys(), key=str.casefold):
- modifier = { 'modifier': modifier_name, 'previews': [] }
+ modifier = {"modifier": modifier_name, "previews": []}
for preview_name, preview_path in modifier_categories[category_name][modifier_name].items():
- modifier['previews'].append({ 'name': preview_name, 'path': preview_path })
- category['modifiers'].append(modifier)
+ modifier["previews"].append({"name": preview_name, "path": preview_path})
+ category["modifiers"].append(modifier)
modifier_categories_list.append(category)
return modifier_categories_list
diff --git a/ui/easydiffusion/device_manager.py b/ui/easydiffusion/device_manager.py
index 18069a82..dc705927 100644
--- a/ui/easydiffusion/device_manager.py
+++ b/ui/easydiffusion/device_manager.py
@@ -1,9 +1,9 @@
import os
import platform
-import torch
-import traceback
import re
+import traceback
+import torch
from easydiffusion.utils import log
"""
@@ -118,7 +118,10 @@ def auto_pick_devices(currently_active_devices):
# These already-running devices probably aren't terrible, since they were picked in the past.
# Worst case, the user can restart the program and that'll get rid of them.
devices = list(
- filter((lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices), devices)
+ filter(
+ (lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices),
+ devices,
+ )
)
devices = list(map(lambda x: x["device"], devices))
return devices
@@ -162,6 +165,7 @@ def needs_to_force_full_precision(context):
and (
" 1660" in device_name
or " 1650" in device_name
+ or " 1630" in device_name
or " t400" in device_name
or " t550" in device_name
or " t600" in device_name
@@ -221,9 +225,9 @@ def is_device_compatible(device):
try:
_, mem_total = torch.cuda.mem_get_info(device)
mem_total /= float(10**9)
- if mem_total < 3.0:
+ if mem_total < 1.9:
if is_device_compatible.history.get(device) == None:
- log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
+ log.warn(f"GPU {device} with less than 2 GB of VRAM is not compatible with Stable Diffusion")
is_device_compatible.history[device] = 1
return False
except RuntimeError as e:
diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index dc727eeb..324dcec9 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -3,11 +3,17 @@ import os
from easydiffusion import app
from easydiffusion.types import TaskData
from easydiffusion.utils import log
-
from sdkit import Context
-from sdkit.models import load_model, unload_model, scan_model
+from sdkit.models import load_model, scan_model, unload_model
-KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan", "lora"]
+KNOWN_MODEL_TYPES = [
+ "stable-diffusion",
+ "vae",
+ "hypernetwork",
+ "gfpgan",
+ "realesrgan",
+ "lora",
+]
MODEL_EXTENSIONS = {
"stable-diffusion": [".ckpt", ".safetensors"],
"vae": [".vae.pt", ".ckpt", ".safetensors"],
@@ -44,13 +50,15 @@ def load_default_models(context: Context):
load_model(
context,
model_type,
- scan_model = context.model_paths[model_type] != None and not context.model_paths[model_type].endswith('.safetensors')
+ scan_model=context.model_paths[model_type] != None
+ and not context.model_paths[model_type].endswith(".safetensors"),
)
except Exception as e:
log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
log.exception(e)
del context.model_paths[model_type]
+
def unload_all(context: Context):
for model_type in KNOWN_MODEL_TYPES:
unload_model(context, model_type)
@@ -114,7 +122,7 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
if context.model_paths.get(model_type) != path
}
- if set_vram_optimizations(context): # reload SD
+ if set_vram_optimizations(context) or set_clip_skip(context, task_data): # reload SD
models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]
for model_type, model_path_in_req in models_to_reload.items():
@@ -149,6 +157,16 @@ def set_vram_optimizations(context: Context):
return False
+def set_clip_skip(context: Context, task_data: TaskData):
+ clip_skip = task_data.clip_skip
+
+ if clip_skip != context.clip_skip:
+ context.clip_skip = clip_skip
+ return True
+
+ return False
+
+
def make_model_folders():
for model_type in KNOWN_MODEL_TYPES:
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
@@ -170,13 +188,23 @@ def is_malicious_model(file_path):
if scan_result.issues_count > 0 or scan_result.infected_files > 0:
log.warn(
":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]"
- % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
+ % (
+ file_path,
+ scan_result.scanned_files,
+ scan_result.issues_count,
+ scan_result.infected_files,
+ )
)
return True
else:
log.debug(
"Scan %s: [green]%d scanned, %d issue, %d infected.[/green]"
- % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
+ % (
+ file_path,
+ scan_result.scanned_files,
+ scan_result.issues_count,
+ scan_result.infected_files,
+ )
)
return False
except Exception as e:
@@ -204,13 +232,13 @@ def getModels():
class MaliciousModelException(Exception):
"Raised when picklescan reports a problem with a model"
- pass
def scan_directory(directory, suffixes, directoriesFirst: bool = True):
nonlocal models_scanned
tree = []
for entry in sorted(
- os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower())
+ os.scandir(directory),
+ key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()),
):
if entry.is_file():
matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
diff --git a/ui/easydiffusion/renderer.py b/ui/easydiffusion/renderer.py
index 8a155f82..e26b4389 100644
--- a/ui/easydiffusion/renderer.py
+++ b/ui/easydiffusion/renderer.py
@@ -1,21 +1,23 @@
-import queue
-import time
import json
import pprint
+import queue
+import time
from easydiffusion import device_manager
-from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
-from easydiffusion.utils import get_printable_request, save_images_to_disk, log
-
+from easydiffusion.types import GenerateImageRequest
+from easydiffusion.types import Image as ResponseImage
+from easydiffusion.types import Response, TaskData, UserInitiatedStop
+from easydiffusion.utils import get_printable_request, log, save_images_to_disk
from sdkit import Context
-from sdkit.generate import generate_images
from sdkit.filter import apply_filters
+from sdkit.generate import generate_images
from sdkit.utils import (
- img_to_buffer,
- img_to_base64_str,
- latent_samples_to_images,
diffusers_latent_samples_to_images,
gc,
+ img_to_base64_str,
+ img_to_buffer,
+ latent_samples_to_images,
+ get_device_usage,
)
context = Context() # thread-local
@@ -39,18 +41,29 @@ def init(device):
app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main"
)
+ log.info("Device usage during initialization:")
+ get_device_usage(device, log_info=True, process_usage_only=False)
+
device_manager.device_init(context, device)
def make_images(
- req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ data_queue: queue.Queue,
+ task_temp_images: list,
+ step_callback,
):
context.stop_processing = False
print_task_info(req, task_data)
images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
- res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
+ res = Response(
+ req,
+ task_data,
+ images=construct_response(images, seeds, task_data, base_seed=req.seed),
+ )
res = res.json()
data_queue.put(json.dumps(res))
log.info("Task completed")
@@ -59,14 +72,18 @@ def make_images(
def print_task_info(req: GenerateImageRequest, task_data: TaskData):
- req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
+ req_str = pprint.pformat(get_printable_request(req, task_data)).replace("[", "\[")
task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
log.info(f"request: {req_str}")
log.info(f"task data: {task_str}")
def make_images_internal(
- req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ data_queue: queue.Queue,
+ task_temp_images: list,
+ step_callback,
):
images, user_stopped = generate_images_internal(
req,
@@ -155,7 +172,12 @@ def filter_images(task_data: TaskData, images: list, user_stopped):
def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
return [
ResponseImage(
- data=img_to_base64_str(img, task_data.output_format, task_data.output_quality, task_data.output_lossless),
+ data=img_to_base64_str(
+ img,
+ task_data.output_format,
+ task_data.output_quality,
+ task_data.output_lossless,
+ ),
seed=seed,
)
for img, seed in zip(images, seeds)
diff --git a/ui/easydiffusion/server.py b/ui/easydiffusion/server.py
index 92453917..a1aab6c0 100644
--- a/ui/easydiffusion/server.py
+++ b/ui/easydiffusion/server.py
@@ -2,28 +2,30 @@
Notes:
async endpoints always run on the main thread. Without they run on the thread pool.
"""
+import datetime
+import mimetypes
import os
import traceback
-import datetime
from typing import List, Union
+from easydiffusion import app, model_manager, task_manager
+from easydiffusion.types import GenerateImageRequest, MergeRequest, TaskData
+from easydiffusion.utils import log
from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
-from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel, Extra
-
-from easydiffusion import app, model_manager, task_manager
-from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
-from easydiffusion.utils import log
-
-import mimetypes
+from starlette.responses import FileResponse, JSONResponse, StreamingResponse
log.info(f"started in {app.SD_DIR}")
log.info(f"started at {datetime.datetime.now():%x %X}")
server_api = FastAPI()
-NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
+NOCACHE_HEADERS = {
+ "Cache-Control": "no-cache, no-store, must-revalidate",
+ "Pragma": "no-cache",
+ "Expires": "0",
+}
class NoCacheStaticFiles(StaticFiles):
@@ -65,11 +67,17 @@ def init():
name="custom-thumbnails",
)
- server_api.mount("/media", NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")), name="media")
+ server_api.mount(
+ "/media",
+ NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")),
+ name="media",
+ )
for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
server_api.mount(
- f"/plugins/{dir_prefix}", NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}"
+ f"/plugins/{dir_prefix}",
+ NoCacheStaticFiles(directory=plugins_dir),
+ name=f"plugins-{dir_prefix}",
)
@server_api.post("/app_config")
@@ -246,8 +254,8 @@ def render_internal(req: dict):
def model_merge_internal(req: dict):
try:
- from sdkit.train import merge_models
from easydiffusion.utils.save_utils import filename_regex
+ from sdkit.train import merge_models
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
@@ -255,7 +263,11 @@ def model_merge_internal(req: dict):
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
mergeReq.ratio,
- os.path.join(app.MODELS_DIR, "stable-diffusion", filename_regex.sub("_", mergeReq.out_path)),
+ os.path.join(
+ app.MODELS_DIR,
+ "stable-diffusion",
+ filename_regex.sub("_", mergeReq.out_path),
+ ),
mergeReq.use_fp16,
)
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
diff --git a/ui/easydiffusion/task_manager.py b/ui/easydiffusion/task_manager.py
index 91adc04b..c11acbec 100644
--- a/ui/easydiffusion/task_manager.py
+++ b/ui/easydiffusion/task_manager.py
@@ -7,16 +7,18 @@ Notes:
import json
import traceback
-TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
+TASK_TTL = 30 * 60 # seconds, Discard last session's task timeout
-import torch
-import queue, threading, time, weakref
+import queue
+import threading
+import time
+import weakref
from typing import Any, Hashable
+import torch
from easydiffusion import device_manager
-from easydiffusion.types import TaskData, GenerateImageRequest
+from easydiffusion.types import GenerateImageRequest, TaskData
from easydiffusion.utils import log
-
from sdkit.utils import gc
THREAD_NAME_PREFIX = ""
@@ -167,7 +169,7 @@ class DataCache:
raise Exception("DataCache.put" + ERR_LOCK_FAILED)
try:
self._base[key] = (self._get_ttl_time(ttl), value)
- except Exception as e:
+ except Exception:
log.error(traceback.format_exc())
return False
else:
@@ -264,7 +266,7 @@ def thread_get_next_task():
def thread_render(device):
global current_state, current_state_error
- from easydiffusion import renderer, model_manager
+ from easydiffusion import model_manager, renderer
try:
renderer.init(device)
@@ -337,7 +339,11 @@ def thread_render(device):
current_state = ServerStates.Rendering
task.response = renderer.make_images(
- task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback
+ task.render_request,
+ task.task_data,
+ task.buffer_queue,
+ task.temp_images,
+ step_callback,
)
# Before looping back to the generator, mark cache as still alive.
task_cache.keep(id(task), TASK_TTL)
diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py
index bbec0afa..7a5201ab 100644
--- a/ui/easydiffusion/types.py
+++ b/ui/easydiffusion/types.py
@@ -1,6 +1,7 @@
-from pydantic import BaseModel
from typing import Any
+from pydantic import BaseModel
+
class GenerateImageRequest(BaseModel):
prompt: str = ""
@@ -47,6 +48,7 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5
+ clip_skip: bool = False
class MergeRequest(BaseModel):
diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py
index 384794d1..24b2198c 100644
--- a/ui/easydiffusion/utils/save_utils.py
+++ b/ui/easydiffusion/utils/save_utils.py
@@ -1,14 +1,13 @@
import os
-import time
import re
+import time
+from datetime import datetime
+from functools import reduce
from easydiffusion import app
-from easydiffusion.types import TaskData, GenerateImageRequest
-from functools import reduce
-from datetime import datetime
-
-from sdkit.utils import save_images, save_dicts
+from easydiffusion.types import GenerateImageRequest, TaskData
from numpy import base_repr
+from sdkit.utils import save_dicts, save_images
filename_regex = re.compile("[^a-zA-Z0-9._-]")
img_number_regex = re.compile("([0-9]{5,})")
@@ -16,23 +15,24 @@ img_number_regex = re.compile("([0-9]{5,})")
# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
"prompt": "Prompt",
+ "negative_prompt": "Negative Prompt",
+ "seed": "Seed",
+ "use_stable_diffusion_model": "Stable Diffusion model",
+ "clip_skip": "Clip Skip",
+ "use_vae_model": "VAE model",
+ "sampler_name": "Sampler",
"width": "Width",
"height": "Height",
- "seed": "Seed",
"num_inference_steps": "Steps",
"guidance_scale": "Guidance Scale",
"prompt_strength": "Prompt Strength",
+ "use_lora_model": "LoRA model",
+ "lora_alpha": "LoRA Strength",
+ "use_hypernetwork_model": "Hypernetwork model",
+ "hypernetwork_strength": "Hypernetwork Strength",
"use_face_correction": "Use Face Correction",
"use_upscale": "Use Upscaling",
"upscale_amount": "Upscale By",
- "sampler_name": "Sampler",
- "negative_prompt": "Negative Prompt",
- "use_stable_diffusion_model": "Stable Diffusion model",
- "use_vae_model": "VAE model",
- "use_hypernetwork_model": "Hypernetwork model",
- "hypernetwork_strength": "Hypernetwork Strength",
- "use_lora_model": "LoRA model",
- "lora_alpha": "LoRA Strength",
}
time_placeholders = {
@@ -50,6 +50,7 @@ other_placeholders = {
"$s": lambda req, task_data: str(req.seed),
}
+
class ImageNumber:
_factory = None
_evaluated = False
@@ -57,12 +58,14 @@ class ImageNumber:
def __init__(self, factory):
self._factory = factory
self._evaluated = None
+
def __call__(self) -> int:
if self._evaluated is None:
self._evaluated = self._factory()
return self._evaluated
-def format_placeholders(format: str, req: GenerateImageRequest, task_data: TaskData, now = None):
+
+def format_placeholders(format: str, req: GenerateImageRequest, task_data: TaskData, now=None):
if now is None:
now = time.time()
@@ -75,10 +78,12 @@ def format_placeholders(format: str, req: GenerateImageRequest, task_data: TaskD
return format
+
def format_folder_name(format: str, req: GenerateImageRequest, task_data: TaskData):
format = format_placeholders(format, req, task_data)
return filename_regex.sub("_", format)
+
def format_file_name(
format: str,
req: GenerateImageRequest,
@@ -88,19 +93,22 @@ def format_file_name(
folder_img_number: ImageNumber,
):
format = format_placeholders(format, req, task_data, now)
-
+
if "$n" in format:
format = format.replace("$n", f"{folder_img_number():05}")
-
+
if "$tsb64" in format:
- img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(int(batch_file_number), 36) # Base 36 conversion, 0-9, A-Z
+ img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(
+ int(batch_file_number), 36
+ ) # Base 36 conversion, 0-9, A-Z
format = format.replace("$tsb64", img_id)
-
+
if "$ts" in format:
format = format.replace("$ts", str(int(now * 1000) + batch_file_number))
return filename_regex.sub("_", format)
+
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
now = time.time()
app_config = app.getConfig()
@@ -126,7 +134,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
output_lossless=task_data.output_lossless,
)
if task_data.metadata_output_format:
- for metadata_output_format in task_data.metadata_output_format.split(','):
+ for metadata_output_format in task_data.metadata_output_format.split(","):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
@@ -142,7 +150,8 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
task_data,
file_number,
now=now,
- suffix="filtered")
+ suffix="filtered",
+ )
save_images(
images,
@@ -171,27 +180,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
- metadata = get_printable_request(req)
- metadata.update(
- {
- "use_stable_diffusion_model": task_data.use_stable_diffusion_model,
- "use_vae_model": task_data.use_vae_model,
- "use_hypernetwork_model": task_data.use_hypernetwork_model,
- "use_lora_model": task_data.use_lora_model,
- "use_face_correction": task_data.use_face_correction,
- "use_upscale": task_data.use_upscale,
- }
- )
- if metadata["use_upscale"] is not None:
- metadata["upscale_amount"] = task_data.upscale_amount
- if task_data.use_hypernetwork_model is None:
- del metadata["hypernetwork_strength"]
- if task_data.use_lora_model is None:
- if "lora_alpha" in metadata:
- del metadata["lora_alpha"]
- app_config = app.getConfig()
- if not app_config.get("test_diffusers", False) and "use_lora_model" in metadata:
- del metadata["use_lora_model"]
+ metadata = get_printable_request(req, task_data)
# if text, format it in the text format expected by the UI
is_txt_format = task_data.metadata_output_format.lower() == "txt"
@@ -205,12 +194,33 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
return entries
-def get_printable_request(req: GenerateImageRequest):
- metadata = req.dict()
- del metadata["init_image"]
- del metadata["init_image_mask"]
- if req.init_image is None:
+def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
+ req_metadata = req.dict()
+ task_data_metadata = task_data.dict()
+
+ # Save the metadata in the order defined in TASK_TEXT_MAPPING
+ metadata = {}
+ for key in TASK_TEXT_MAPPING.keys():
+ if key in req_metadata:
+ metadata[key] = req_metadata[key]
+ elif key in task_data_metadata:
+ metadata[key] = task_data_metadata[key]
+
+ # Clean up the metadata
+ if req.init_image is None and "prompt_strength" in metadata:
del metadata["prompt_strength"]
+ if task_data.use_upscale is None and "upscale_amount" in metadata:
+ del metadata["upscale_amount"]
+ if task_data.use_hypernetwork_model is None and "hypernetwork_strength" in metadata:
+ del metadata["hypernetwork_strength"]
+ if task_data.use_lora_model is None and "lora_alpha" in metadata:
+ del metadata["lora_alpha"]
+
+ app_config = app.getConfig()
+ if not app_config.get("test_diffusers", False):
+ for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip"] if x in metadata):
+ del metadata[key]
+
return metadata
@@ -233,27 +243,28 @@ def make_filename_callback(
return make_filename
+
def _calculate_img_number(save_dir_path: str, task_data: TaskData):
def get_highest_img_number(accumulator: int, file: os.DirEntry) -> int:
if not file.is_file:
return accumulator
-
+
if len(list(filter(lambda e: file.name.endswith(e), app.IMAGE_EXTENSIONS))) == 0:
return accumulator
-
+
get_highest_img_number.number_of_images = get_highest_img_number.number_of_images + 1
-
+
number_match = img_number_regex.match(file.name)
if not number_match:
return accumulator
-
- file_number = number_match.group().lstrip('0')
-
+
+ file_number = number_match.group().lstrip("0")
+
# Handle 00000
return int(file_number) if file_number else 0
-
+
get_highest_img_number.number_of_images = 0
-
+
highest_file_number = -1
if os.path.isdir(save_dir_path):
@@ -267,13 +278,15 @@ def _calculate_img_number(save_dir_path: str, task_data: TaskData):
_calculate_img_number.session_img_numbers[task_data.session_id],
calculated_img_number,
)
-
+
calculated_img_number = calculated_img_number + 1
-
+
_calculate_img_number.session_img_numbers[task_data.session_id] = calculated_img_number
return calculated_img_number
+
_calculate_img_number.session_img_numbers = {}
+
def calculate_img_number(save_dir_path: str, task_data: TaskData):
return ImageNumber(lambda: _calculate_img_number(save_dir_path, task_data))
diff --git a/ui/index.html b/ui/index.html
index 0f163af0..296e9230 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -30,7 +30,7 @@
'
+ logError(
+ "Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed. Please check the error message in the command-line window.",
+ event,
+ outputMsg
+ )
+ } else if (typeof event?.response === "object") {
+ let msg = "Stable Diffusion had an error reading the response:
`
logError(msg, event, outputMsg)
}
break
}
}
- if ('update' in event) {
+ if ("update" in event) {
const stepUpdate = event.update
- if (!('step' in stepUpdate)) {
+ if (!("step" in stepUpdate)) {
return
}
// task.instances can be a mix of different tasks with uneven number of steps (Render Vs Filter Tasks)
- const overallStepCount = task.instances.reduce(
- (sum, instance) => sum + (instance.isPending ? Math.max(0, instance.step || stepUpdate.step) / (instance.total_steps || stepUpdate.total_steps) : 1),
- 0 // Initial value
- ) * stepUpdate.total_steps // Scale to current number of steps.
+ const overallStepCount =
+ task.instances.reduce(
+ (sum, instance) =>
+ sum +
+ (instance.isPending
+ ? Math.max(0, instance.step || stepUpdate.step) /
+ (instance.total_steps || stepUpdate.total_steps)
+ : 1),
+ 0 // Initial value
+ ) * stepUpdate.total_steps // Scale to current number of steps.
const totalSteps = task.instances.reduce(
(sum, instance) => sum + (instance.total_steps || stepUpdate.total_steps),
stepUpdate.total_steps * (batchCount - task.batchesDone) // Initial value at (unstarted task count * Nbr of steps)
@@ -759,9 +819,9 @@ function getTaskUpdater(task, reqBody, outputContainer) {
const timeTaken = stepUpdate.step_time // sec
const stepsRemaining = Math.max(0, totalSteps - overallStepCount)
- const timeRemaining = (timeTaken < 0 ? '' : millisecondsToStr(stepsRemaining * timeTaken * 1000))
+ const timeRemaining = timeTaken < 0 ? "" : millisecondsToStr(stepsRemaining * timeTaken * 1000)
outputMsg.innerHTML = `Batch ${task.batchesDone} of ${batchCount}. Generating image(s): ${percent}%. Time remaining (approx): ${timeRemaining}`
- outputMsg.style.display = 'block'
+ outputMsg.style.display = "block"
progressBarInner.style.width = `${percent}%`
if (stepUpdate.output) {
@@ -777,8 +837,8 @@ function abortTask(task) {
}
task.isProcessing = false
task.progressBar.classList.remove("active")
- task['taskStatusLabel'].style.display = 'none'
- task['stopTask'].innerHTML = ' Remove'
+ task["taskStatusLabel"].style.display = "none"
+ task["stopTask"].innerHTML = ' Remove'
if (!task.instances?.some((r) => r.isPending)) {
return
}
@@ -795,30 +855,49 @@ function onTaskErrorHandler(task, reqBody, instance, reason) {
if (!task.isProcessing) {
return
}
- console.log('Render request %o, Instance: %o, Error: %s', reqBody, instance, reason)
+ console.log("Render request %o, Instance: %o, Error: %s", reqBody, instance, reason)
abortTask(task)
- const outputMsg = task['outputMsg']
- logError('Stable Diffusion had an error. Please check the logs in the command-line window.
' + reason + '
' + reason.stack + '
', task, outputMsg)
- setStatus('request', 'error', 'error')
+ const outputMsg = task["outputMsg"]
+ logError(
+ "Stable Diffusion had an error. Please check the logs in the command-line window.
" +
+ reason +
+ "
" +
+ reason.stack +
+ "
",
+ task,
+ outputMsg
+ )
+ setStatus("request", "error", "error")
}
function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
- if (typeof stepUpdate === 'object') {
- if (stepUpdate.status === 'succeeded') {
+ if (typeof stepUpdate === "object") {
+ if (stepUpdate.status === "succeeded") {
showImages(reqBody, stepUpdate, outputContainer, false)
} else {
task.isProcessing = false
- const outputMsg = task['outputMsg']
- let msg = ''
- if ('detail' in stepUpdate && typeof stepUpdate.detail === 'string' && stepUpdate.detail.length > 0) {
+ const outputMsg = task["outputMsg"]
+ let msg = ""
+ if ("detail" in stepUpdate && typeof stepUpdate.detail === "string" && stepUpdate.detail.length > 0) {
msg = stepUpdate.detail
- if (msg.toLowerCase().includes('out of memory')) {
+ if (msg.toLowerCase().includes("out of memory")) {
msg += `
Suggestions:
1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.
2. Try picking a lower level in the 'GPU Memory Usage' setting (in the 'Settings' tab).
3. Try generating a smaller image. `
+ } else if (msg.toLowerCase().includes("DefaultCPUAllocator: not enough memory")) {
+ msg += `
+ Reason: Your computer is running out of system RAM!
+
+ Suggestions:
+
+ 1. Try closing unnecessary programs and browser tabs.
+ 2. If that doesn't help, please increase your computer's virtual memory by following these steps for
+ Windows, or
+ Linux.
+ 3. Try restarting your computer. `
}
} else {
msg = `Unexpected Read Error:
")
}
function createTask(task) {
- let taskConfig = ''
+ let taskConfig = ""
if (task.reqBody.init_image !== undefined) {
let h = 80
- let w = task.reqBody.width * h / task.reqBody.height >>0
+ let w = ((task.reqBody.width * h) / task.reqBody.height) >> 0
taskConfig += `
" +
- "Balanced: nearly as fast as High, much lower VRAM usage " +
- "High: fastest, maximum GPU memory usage" +
- "Low: slowest, recommended for GPUs with 3 to 4 GB memory",
+ note:
+ "Faster performance requires more GPU memory (VRAM)
" +
+ "Balanced: nearly as fast as High, much lower VRAM usage " +
+ "High: fastest, maximum GPU memory usage" +
+ "Low: slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [
- {value: "balanced", label: "Balanced"},
- {value: "high", label: "High"},
- {value: "low", label: "Low"}
+ { value: "balanced", label: "Balanced" },
+ { value: "high", label: "High" },
+ { value: "low", label: "Low" },
],
},
{
@@ -172,14 +173,15 @@ var PARAMETERS = [
id: "confirm_dangerous_actions",
type: ParameterType.checkbox,
label: "Confirm dangerous actions",
- note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
+ note:
+ "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
icon: "fa-check-double",
default: true,
},
{
id: "listen_to_network",
type: ParameterType.checkbox,
- label: "Make Stable Diffusion available on your network",
+ label: "Make Stable Diffusion available on your network. Please restart the program after changing this.",
note: "Other devices on your network can access this web page",
icon: "fa-network-wired",
default: true,
@@ -189,7 +191,7 @@ var PARAMETERS = [
id: "listen_port",
type: ParameterType.custom,
label: "Network port",
- note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
+ note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
icon: "fa-anchor",
render: (parameter) => {
return ``
@@ -200,7 +202,8 @@ var PARAMETERS = [
id: "use_beta_channel",
type: ParameterType.checkbox,
label: "Beta channel",
- note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
+ note:
+ "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
@@ -208,15 +211,16 @@ var PARAMETERS = [
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Test Diffusers",
- note: "Experimental! Can have bugs! Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
+ note:
+ "Experimental! Can have bugs! Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: false,
saveInAppConfig: true,
},
-];
+]
function getParameterSettingsEntry(id) {
- let parameter = PARAMETERS.filter(p => p.id === id)
+ let parameter = PARAMETERS.filter((p) => p.id === id)
if (parameter.length === 0) {
return
}
@@ -224,37 +228,39 @@ function getParameterSettingsEntry(id) {
}
function sliderUpdate(event) {
- if (event.srcElement.id.endsWith('-input')) {
- let slider = document.getElementById(event.srcElement.id.slice(0,-6))
+ if (event.srcElement.id.endsWith("-input")) {
+ let slider = document.getElementById(event.srcElement.id.slice(0, -6))
slider.value = event.srcElement.value
slider.dispatchEvent(new Event("change"))
} else {
- let field = document.getElementById(event.srcElement.id+'-input')
+ let field = document.getElementById(event.srcElement.id + "-input")
field.value = event.srcElement.value
field.dispatchEvent(new Event("change"))
}
}
/**
- * @param {Parameter} parameter
+ * @param {Parameter} parameter
* @returns {string | HTMLElement}
*/
function getParameterElement(parameter) {
switch (parameter.type) {
case ParameterType.checkbox:
- var is_checked = parameter.default ? " checked" : "";
+ var is_checked = parameter.default ? " checked" : ""
return ``
case ParameterType.select:
case ParameterType.select_multiple:
- var options = (parameter.options || []).map(option => ``).join("")
- var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
+ var options = (parameter.options || [])
+ .map((option) => ``)
+ .join("")
+ var multiple = parameter.type == ParameterType.select_multiple ? "multiple" : ""
return ``
case ParameterType.slider:
return ` ${parameter.slider_unit}`
case ParameterType.custom:
return parameter.render(parameter)
default:
- console.error(`Invalid type ${parameter.type} for parameter ${parameter.id}`);
+ console.error(`Invalid type ${parameter.type} for parameter ${parameter.id}`)
return "ERROR: Invalid Type"
}
}
@@ -265,31 +271,31 @@ let parametersTable = document.querySelector("#system-settings .parameters-table
* @param {Array | undefined} parameters
* */
function initParameters(parameters) {
- parameters.forEach(parameter => {
+ parameters.forEach((parameter) => {
const element = getParameterElement(parameter)
- const elementWrapper = createElement('div')
+ const elementWrapper = createElement("div")
if (element instanceof Node) {
elementWrapper.appendChild(element)
} else {
elementWrapper.innerHTML = element
}
- const note = typeof parameter.note === 'function' ? parameter.note(parameter) : parameter.note
+ const note = typeof parameter.note === "function" ? parameter.note(parameter) : parameter.note
const noteElements = []
if (note) {
- const noteElement = createElement('small')
+ const noteElement = createElement("small")
if (note instanceof Node) {
noteElement.appendChild(note)
} else {
- noteElement.innerHTML = note || ''
+ noteElement.innerHTML = note || ""
}
noteElements.push(noteElement)
}
- const icon = parameter.icon ? [createElement('i', undefined, ['fa', parameter.icon])] : []
+ const icon = parameter.icon ? [createElement("i", undefined, ["fa", parameter.icon])] : []
- const label = typeof parameter.label === 'function' ? parameter.label(parameter) : parameter.label
- const labelElement = createElement('label', { for: parameter.id })
+ const label = typeof parameter.label === "function" ? parameter.label(parameter) : parameter.label
+ const labelElement = createElement("label", { for: parameter.id })
if (label instanceof Node) {
labelElement.appendChild(label)
} else {
@@ -297,12 +303,12 @@ function initParameters(parameters) {
}
const newrow = createElement(
- 'div',
- { 'data-setting-id': parameter.id, 'data-save-in-app-config': parameter.saveInAppConfig },
+ "div",
+ { "data-setting-id": parameter.id, "data-save-in-app-config": parameter.saveInAppConfig },
undefined,
[
- createElement('div', undefined, undefined, icon),
- createElement('div', undefined, undefined, [labelElement, ...noteElements]),
+ createElement("div", undefined, undefined, icon),
+ createElement("div", undefined, undefined, [labelElement, ...noteElements]),
elementWrapper,
]
)
@@ -314,22 +320,25 @@ function initParameters(parameters) {
initParameters(PARAMETERS)
// listen to parameters from plugins
-PARAMETERS.addEventListener('push', (...items) => {
+PARAMETERS.addEventListener("push", (...items) => {
initParameters(items)
-
- if (items.find(item => item.saveInAppConfig)) {
- console.log('Reloading app config for new parameters', items.map(p => p.id))
+
+ if (items.find((item) => item.saveInAppConfig)) {
+ console.log(
+ "Reloading app config for new parameters",
+ items.map((p) => p.id)
+ )
getAppConfig()
}
})
-let vramUsageLevelField = document.querySelector('#vram_usage_level')
-let useCPUField = document.querySelector('#use_cpu')
-let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
-let useGPUsField = document.querySelector('#use_gpus')
-let saveToDiskField = document.querySelector('#save_to_disk')
-let diskPathField = document.querySelector('#diskPath')
-let metadataOutputFormatField = document.querySelector('#metadata_output_format')
+let vramUsageLevelField = document.querySelector("#vram_usage_level")
+let useCPUField = document.querySelector("#use_cpu")
+let autoPickGPUsField = document.querySelector("#auto_pick_gpus")
+let useGPUsField = document.querySelector("#use_gpus")
+let saveToDiskField = document.querySelector("#save_to_disk")
+let diskPathField = document.querySelector("#diskPath")
+let metadataOutputFormatField = document.querySelector("#metadata_output_format")
let listenToNetworkField = document.querySelector("#listen_to_network")
let listenPortField = document.querySelector("#listen_port")
let useBetaChannelField = document.querySelector("#use_beta_channel")
@@ -337,35 +346,34 @@ let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_star
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
let testDiffusers = document.querySelector("#test_diffusers")
-let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
-
+let saveSettingsBtn = document.querySelector("#save-system-settings-btn")
async function changeAppConfig(configDelta) {
try {
- let res = await fetch('/app_config', {
- method: 'POST',
+ let res = await fetch("/app_config", {
+ method: "POST",
headers: {
- 'Content-Type': 'application/json'
+ "Content-Type": "application/json",
},
- body: JSON.stringify(configDelta)
+ body: JSON.stringify(configDelta),
})
res = await res.json()
- console.log('set config status response', res)
+ console.log("set config status response", res)
} catch (e) {
- console.log('set config status error', e)
+ console.log("set config status error", e)
}
}
async function getAppConfig() {
try {
- let res = await fetch('/get/app_config')
+ let res = await fetch("/get/app_config")
const config = await res.json()
applySettingsFromConfig(config)
// custom overrides
- if (config.update_branch === 'beta') {
+ if (config.update_branch === "beta") {
useBetaChannelField.checked = true
document.querySelector("#updateBranchLabel").innerText = "(beta)"
} else {
@@ -380,45 +388,58 @@ async function getAppConfig() {
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
- if (config.test_diffusers === undefined || config.update_branch === 'main') {
- testDiffusers.checked = false
- document.querySelector("#lora_model_container").style.display = 'none'
- document.querySelector("#lora_alpha_container").style.display = 'none'
+
+ const testDiffusersEnabled = config.test_diffusers && config.update_branch !== "main"
+ testDiffusers.checked = testDiffusersEnabled
+
+ if (!testDiffusersEnabled) {
+ document.querySelector("#lora_model_container").style.display = "none"
+ document.querySelector("#lora_alpha_container").style.display = "none"
+
+ document.querySelectorAll("#sampler_name option.diffusers-only").forEach(option => {
+ option.style.display = "none"
+ })
} else {
- testDiffusers.checked = config.test_diffusers && config.update_branch !== 'main'
- document.querySelector("#lora_model_container").style.display = (testDiffusers.checked ? '' : 'none')
- document.querySelector("#lora_alpha_container").style.display = (testDiffusers.checked && loraModelField.value !== "" ? '' : 'none')
+ document.querySelector("#lora_model_container").style.display = ""
+ document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
+
+ document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach(option => {
+ option.disabled = true
+ })
+ document.querySelector("#clip_skip_config").classList.remove("displayNone")
}
- console.log('get config status response', config)
+ console.log("get config status response", config)
return config
} catch (e) {
- console.log('get config status error', e)
+ console.log("get config status error", e)
return {}
}
}
function applySettingsFromConfig(config) {
- Array.from(parametersTable.children).forEach(parameterRow => {
- if (parameterRow.dataset.settingId in config && parameterRow.dataset.saveInAppConfig === 'true') {
+ Array.from(parametersTable.children).forEach((parameterRow) => {
+ if (parameterRow.dataset.settingId in config && parameterRow.dataset.saveInAppConfig === "true") {
const configValue = config[parameterRow.dataset.settingId]
- const parameterElement = document.getElementById(parameterRow.dataset.settingId) ||
- parameterRow.querySelector('input') || parameterRow.querySelector('select')
+ const parameterElement =
+ document.getElementById(parameterRow.dataset.settingId) ||
+ parameterRow.querySelector("input") ||
+ parameterRow.querySelector("select")
switch (parameterElement?.tagName) {
- case 'INPUT':
- if (parameterElement.type === 'checkbox') {
+ case "INPUT":
+ if (parameterElement.type === "checkbox") {
parameterElement.checked = configValue
} else {
parameterElement.value = configValue
}
- parameterElement.dispatchEvent(new Event('change'))
+ parameterElement.dispatchEvent(new Event("change"))
break
- case 'SELECT':
+ case "SELECT":
if (Array.isArray(configValue)) {
- Array.from(parameterElement.options).forEach(option => {
+ Array.from(parameterElement.options).forEach((option) => {
if (configValue.includes(option.value || option.text)) {
option.selected = true
}
@@ -426,82 +447,85 @@ function applySettingsFromConfig(config) {
} else {
parameterElement.value = configValue
}
- parameterElement.dispatchEvent(new Event('change'))
+ parameterElement.dispatchEvent(new Event("change"))
break
}
}
})
}
-saveToDiskField.addEventListener('change', function(e) {
+saveToDiskField.addEventListener("change", function(e) {
diskPathField.disabled = !this.checked
metadataOutputFormatField.disabled = !this.checked
})
function getCurrentRenderDeviceSelection() {
- let selectedGPUs = $('#use_gpus').val()
+ let selectedGPUs = $("#use_gpus").val()
if (useCPUField.checked && !autoPickGPUsField.checked) {
- return 'cpu'
+ return "cpu"
}
if (autoPickGPUsField.checked || selectedGPUs.length == 0) {
- return 'auto'
+ return "auto"
}
- return selectedGPUs.join(',')
+ return selectedGPUs.join(",")
}
-useCPUField.addEventListener('click', function() {
- let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
- let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
+useCPUField.addEventListener("click", function() {
+ let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
+ let autoPickGPUSettingEntry = getParameterSettingsEntry("auto_pick_gpus")
if (this.checked) {
- gpuSettingEntry.style.display = 'none'
- autoPickGPUSettingEntry.style.display = 'none'
- autoPickGPUsField.setAttribute('data-old-value', autoPickGPUsField.checked)
+ gpuSettingEntry.style.display = "none"
+ autoPickGPUSettingEntry.style.display = "none"
+ autoPickGPUsField.setAttribute("data-old-value", autoPickGPUsField.checked)
autoPickGPUsField.checked = false
} else if (useGPUsField.options.length >= MIN_GPUS_TO_SHOW_SELECTION) {
- gpuSettingEntry.style.display = ''
- autoPickGPUSettingEntry.style.display = ''
- let oldVal = autoPickGPUsField.getAttribute('data-old-value')
- if (oldVal === null || oldVal === undefined) { // the UI started with CPU selected by default
+ gpuSettingEntry.style.display = ""
+ autoPickGPUSettingEntry.style.display = ""
+ let oldVal = autoPickGPUsField.getAttribute("data-old-value")
+ if (oldVal === null || oldVal === undefined) {
+ // the UI started with CPU selected by default
autoPickGPUsField.checked = true
} else {
- autoPickGPUsField.checked = (oldVal === 'true')
+ autoPickGPUsField.checked = oldVal === "true"
}
- gpuSettingEntry.style.display = (autoPickGPUsField.checked ? 'none' : '')
+ gpuSettingEntry.style.display = autoPickGPUsField.checked ? "none" : ""
}
})
-useGPUsField.addEventListener('click', function() {
- let selectedGPUs = $('#use_gpus').val()
- autoPickGPUsField.checked = (selectedGPUs.length === 0)
+useGPUsField.addEventListener("click", function() {
+ let selectedGPUs = $("#use_gpus").val()
+ autoPickGPUsField.checked = selectedGPUs.length === 0
})
-autoPickGPUsField.addEventListener('click', function() {
+autoPickGPUsField.addEventListener("click", function() {
if (this.checked) {
- $('#use_gpus').val([])
+ $("#use_gpus").val([])
}
- let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
- gpuSettingEntry.style.display = (this.checked ? 'none' : '')
+ let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
+ gpuSettingEntry.style.display = this.checked ? "none" : ""
})
-async function setDiskPath(defaultDiskPath, force=false) {
+async function setDiskPath(defaultDiskPath, force = false) {
var diskPath = getSetting("diskPath")
- if (force || diskPath == '' || diskPath == undefined || diskPath == "undefined") {
+ if (force || diskPath == "" || diskPath == undefined || diskPath == "undefined") {
setSetting("diskPath", defaultDiskPath)
}
}
function setDeviceInfo(devices) {
let cpu = devices.all.cpu.name
- let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
+ let allGPUs = Object.keys(devices.all).filter((d) => d != "cpu")
let activeGPUs = Object.keys(devices.active)
function ID_TO_TEXT(d) {
let info = devices.all[d]
if ("mem_free" in info && "mem_total" in info) {
- return `${info.name} (${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)`
+ return `${info.name} (${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(
+ 1
+ )} Gb total)`
} else {
return `${info.name} (${d}) (no memory info)`
}
@@ -510,35 +534,35 @@ function setDeviceInfo(devices) {
allGPUs = allGPUs.map(ID_TO_TEXT)
activeGPUs = activeGPUs.map(ID_TO_TEXT)
- let systemInfoEl = document.querySelector('#system-info')
- systemInfoEl.querySelector('#system-info-cpu').innerText = cpu
- systemInfoEl.querySelector('#system-info-gpus-all').innerHTML = allGPUs.join('')
- systemInfoEl.querySelector('#system-info-rendering-devices').innerHTML = activeGPUs.join('')
+ let systemInfoEl = document.querySelector("#system-info")
+ systemInfoEl.querySelector("#system-info-cpu").innerText = cpu
+ systemInfoEl.querySelector("#system-info-gpus-all").innerHTML = allGPUs.join("")
+ systemInfoEl.querySelector("#system-info-rendering-devices").innerHTML = activeGPUs.join("")
}
function setHostInfo(hosts) {
let port = listenPortField.value
- hosts = hosts.map(addr => `http://${addr}:${port}/`).map(url => `