diff --git a/CHANGES.md b/CHANGES.md index f1360942..df8a7b80 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -22,6 +22,7 @@ Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed. ### Detailed changelog +* 2.5.44 - 15 Jul 2023 - (beta-only) Support for multiple LoRA files. * 2.5.43 - 9 Jul 2023 - (beta-only) Support for loading Textual Inversion embeddings. You can find the option in the Image Settings panel. Thanks @JeLuf. * 2.5.43 - 9 Jul 2023 - Improve the startup time of the UI. * 2.5.42 - 4 Jul 2023 - Keyboard shortcuts for the Image Editor. Thanks @JeLuf. diff --git a/scripts/Developer Console.cmd b/scripts/Developer Console.cmd index 0efbda13..e60cf05b 100644 --- a/scripts/Developer Console.cmd +++ b/scripts/Developer Console.cmd @@ -41,6 +41,10 @@ call python --version echo PYTHONPATH=%PYTHONPATH% +if exist "%cd%\profile" ( + set HF_HOME=%cd%\profile\.cache\huggingface +) + @rem done echo. 
diff --git a/scripts/check_modules.py b/scripts/check_modules.py index 7b431fca..16b518c8 100644 --- a/scripts/check_modules.py +++ b/scripts/check_modules.py @@ -18,7 +18,7 @@ os_name = platform.system() modules_to_check = { "torch": ("1.11.0", "1.13.1", "2.0.0"), "torchvision": ("0.12.0", "0.14.1", "0.15.1"), - "sdkit": "1.0.116", + "sdkit": "1.0.125", "stable-diffusion-sdkit": "2.1.4", "rich": "12.6.0", "uvicorn": "0.19.0", diff --git a/scripts/on_sd_start.bat b/scripts/on_sd_start.bat index 3a8a2961..eddae6b8 100644 --- a/scripts/on_sd_start.bat +++ b/scripts/on_sd_start.bat @@ -104,18 +104,21 @@ call python --version @FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO ( if "%%F" EQU "True" ( - @SET ED_BIND_IP=0.0.0.0 + @FOR /F "tokens=* USEBACKQ" %%G IN (`python scripts\get_config.py --default=0.0.0.0 net bind_ip`) DO ( + @SET ED_BIND_IP=%%G + ) ) else ( @SET ED_BIND_IP=127.0.0.1 ) ) + @cd stable-diffusion @rem set any overrides set HF_HUB_DISABLE_SYMLINKS_WARNING=true -@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error +@python -m uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error @pause diff --git a/scripts/on_sd_start.sh b/scripts/on_sd_start.sh index e54c72bc..e366bd2a 100755 --- a/scripts/on_sd_start.sh +++ b/scripts/on_sd_start.sh @@ -72,7 +72,7 @@ export SD_UI_PATH=`pwd`/ui export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )" case "$( python scripts/get_config.py --default=False net listen_to_network )" in "True") - export ED_BIND_IP=0.0.0.0 + export ED_BIND_IP=$( python scripts/get_config.py --default=0.0.0.0 net bind_ip) ;; "False") export ED_BIND_IP=127.0.0.1 diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py index d75292c9..bdecc109 100644 --- a/ui/easydiffusion/model_manager.py +++ 
b/ui/easydiffusion/model_manager.py @@ -2,6 +2,7 @@ import os import shutil from glob import glob import traceback +from typing import Union from easydiffusion import app from easydiffusion.types import TaskData @@ -93,7 +94,14 @@ def unload_all(context: Context): del context.model_load_errors[model_type] -def resolve_model_to_use(model_name: str = None, model_type: str = None, fail_if_not_found: bool = True): +def resolve_model_to_use(model_name: Union[str, list] = None, model_type: str = None, fail_if_not_found: bool = True): + model_names = model_name if isinstance(model_name, list) else [model_name] + model_paths = [resolve_model_to_use_single(m, model_type, fail_if_not_found) for m in model_names] + + return model_paths[0] if len(model_paths) == 1 else model_paths + + +def resolve_model_to_use_single(model_name: str = None, model_type: str = None, fail_if_not_found: bool = True): model_extensions = MODEL_EXTENSIONS.get(model_type, []) default_models = DEFAULT_MODELS.get(model_type, []) config = app.getConfig() diff --git a/ui/easydiffusion/task_manager.py b/ui/easydiffusion/task_manager.py index 608da41d..a91cd9c6 100644 --- a/ui/easydiffusion/task_manager.py +++ b/ui/easydiffusion/task_manager.py @@ -473,15 +473,15 @@ def start_render_thread(device): render_threads.append(rthread) finally: manager_lock.release() - # timeout = DEVICE_START_TIMEOUT - # while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]: - # if rthread in weak_thread_data and "error" in weak_thread_data[rthread]: - # log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}") - # return False - # if timeout <= 0: - # return False - # timeout -= 1 - # time.sleep(1) + timeout = DEVICE_START_TIMEOUT + while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]: + if rthread in weak_thread_data and "error" in weak_thread_data[rthread]: + log.error(f"{rthread}, {device}, 
error: {weak_thread_data[rthread]['error']}") + return False + if timeout <= 0: + return False + timeout -= 1 + time.sleep(1) return True @@ -535,12 +535,12 @@ def update_render_threads(render_devices, active_devices): if not start_render_thread(device): log.warn(f"{device} failed to start.") - # if is_alive() <= 0: # No running devices, probably invalid user config. - # raise EnvironmentError( - # 'ERROR: No active render devices! Please verify the "render_devices" value in config.json' - # ) + if is_alive() <= 0: # No running devices, probably invalid user config. + raise EnvironmentError( + 'ERROR: No active render devices! Please verify the "render_devices" value in config.json' + ) - # log.debug(f"active devices: {get_devices()['active']}") + log.debug(f"active devices: {get_devices()['active']}") def shutdown_event(): # Signal render thread to close on shutdown diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py index abf8db29..a9e49a24 100644 --- a/ui/easydiffusion/types.py +++ b/ui/easydiffusion/types.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, List, Union from pydantic import BaseModel @@ -22,7 +22,7 @@ class GenerateImageRequest(BaseModel): sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms" hypernetwork_strength: float = 0 - lora_alpha: float = 0 + lora_alpha: Union[float, List[float]] = 0 tiling: str = "none" # "none", "x", "y", "xy" @@ -32,15 +32,14 @@ class TaskData(BaseModel): save_to_disk_path: str = None vram_usage_level: str = "balanced" # or "low" or "medium" - use_face_correction: str = None # or "GFPGANv1.3" - use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler" + use_face_correction: Union[str, List[str]] = None # or "GFPGANv1.3" + use_upscale: Union[str, List[str]] = None upscale_amount: int = 4 # or 2 latent_upscaler_steps: int = 10 - use_stable_diffusion_model: str = "sd-v1-4" - # use_stable_diffusion_config: 
str = "v1-inference" - use_vae_model: str = None - use_hypernetwork_model: str = None - use_lora_model: str = None + use_stable_diffusion_model: Union[str, List[str]] = "sd-v1-4" + use_vae_model: Union[str, List[str]] = None + use_hypernetwork_model: Union[str, List[str]] = None + use_lora_model: Union[str, List[str]] = None show_only_filtered_image: bool = False block_nsfw: bool = False diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py index ff2906a6..75d35dc8 100644 --- a/ui/easydiffusion/utils/save_utils.py +++ b/ui/easydiffusion/utils/save_utils.py @@ -1,6 +1,8 @@ import os import re import time +import regex + from datetime import datetime from functools import reduce @@ -30,11 +32,12 @@ TASK_TEXT_MAPPING = { "lora_alpha": "LoRA Strength", "use_hypernetwork_model": "Hypernetwork model", "hypernetwork_strength": "Hypernetwork Strength", + "use_embedding_models": "Embedding models", "tiling": "Seamless Tiling", "use_face_correction": "Use Face Correction", "use_upscale": "Use Upscaling", "upscale_amount": "Upscale By", - "latent_upscaler_steps": "Latent Upscaler Steps" + "latent_upscaler_steps": "Latent Upscaler Steps", } time_placeholders = { @@ -202,6 +205,9 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData): req_metadata = req.dict() task_data_metadata = task_data.dict() + app_config = app.getConfig() + using_diffusers = app_config.get("test_diffusers", False) + # Save the metadata in the order defined in TASK_TEXT_MAPPING metadata = {} for key in TASK_TEXT_MAPPING.keys(): @@ -209,6 +215,24 @@ if key in req_metadata: metadata[key] = req_metadata[key] elif key in task_data_metadata: metadata[key] = task_data_metadata[key] + elif key == "use_embedding_models" and using_diffusers: + embeddings_extensions = {".pt", ".bin", ".safetensors"} + def scan_directory(directory_path: str): + used_embeddings = [] + for entry in os.scandir(directory_path): + 
if entry.is_file(): + entry_extension = os.path.splitext(entry.name)[1] + if entry_extension not in embeddings_extensions: + continue + + embedding_name_regex = regex.compile(r"(^|[\s,])" + regex.escape(os.path.splitext(entry.name)[0]) + r"([+-]*$|[\s,]|[+-]+[\s,])") + if embedding_name_regex.search(req.prompt) or embedding_name_regex.search(req.negative_prompt): + used_embeddings.append(entry.path) + elif entry.is_dir(): + used_embeddings.extend(scan_directory(entry.path)) + return used_embeddings + used_embeddings = scan_directory(os.path.join(app.MODELS_DIR, "embeddings")) + metadata["use_embedding_models"] = ", ".join(used_embeddings) if len(used_embeddings) > 0 else None # Clean up the metadata if req.init_image is None and "prompt_strength" in metadata: @@ -222,8 +246,7 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData): if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata: del metadata["latent_upscaler_steps"] - app_config = app.getConfig() - if not app_config.get("test_diffusers", False): + if not using_diffusers: for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata): del metadata[key] diff --git a/ui/index.html b/ui/index.html index 1ebde256..b45fc396 100644 --- a/ui/index.html +++ b/ui/index.html @@ -31,7 +31,7 @@

Easy Diffusion - v2.5.43 + v2.5.44

@@ -162,9 +162,10 @@ - + - + + @@ -224,21 +225,14 @@
Small image sizes can cause bad image quality
- +
- - - - - - - - - -2 2   -
+ + + diff --git a/ui/main.py b/ui/main.py index c5a3d749..f5998622 100644 --- a/ui/main.py +++ b/ui/main.py @@ -1,12 +1,12 @@ from easydiffusion import model_manager, app, server from easydiffusion.server import server_api # required for uvicorn +server.init() + # Init the app model_manager.init() app.init() -server.init() +app.init_render_threads() # start the browser ui app.open_browser() - -app.init_render_threads() diff --git a/ui/media/css/main.css b/ui/media/css/main.css index 54fe9774..c897549a 100644 --- a/ui/media/css/main.css +++ b/ui/media/css/main.css @@ -5,6 +5,8 @@ html { position: relative; + overscroll-behavior-y: none; + color-scheme: dark !important; } body { @@ -1677,6 +1679,10 @@ body.wait-pause { background: var(--background-color3); } +.model_entry .model_name { + width: 70%; +} + .diffusers-disabled-on-startup .diffusers-restart-needed { font-size: 0; } diff --git a/ui/media/js/auto-save.js b/ui/media/js/auto-save.js index bbcbf9a5..1ff51e2e 100644 --- a/ui/media/js/auto-save.js +++ b/ui/media/js/auto-save.js @@ -16,7 +16,9 @@ const SETTINGS_IDS_LIST = [ "clip_skip", "vae_model", "hypernetwork_model", - "lora_model", + "lora_model_0", + "lora_model_1", + "lora_model_2", "sampler_name", "width", "height", @@ -24,7 +26,9 @@ const SETTINGS_IDS_LIST = [ "guidance_scale", "prompt_strength", "hypernetwork_strength", - "lora_alpha", + "lora_alpha_0", + "lora_alpha_1", + "lora_alpha_2", "tiling", "output_format", "output_quality", @@ -176,13 +180,14 @@ function loadSettings() { // So this is likely the first time Easy Diffusion is running. 
// Initialize vram_usage_level based on the available VRAM function initGPUProfile(event) { - if ( "detail" in event - && "active" in event.detail - && "cuda:0" in event.detail.active - && event.detail.active["cuda:0"].mem_total <4.5 ) - { - vramUsageLevelField.value = "low" - vramUsageLevelField.dispatchEvent(new Event("change")) + if ( + "detail" in event && + "active" in event.detail && + "cuda:0" in event.detail.active && + event.detail.active["cuda:0"].mem_total < 4.5 + ) { + vramUsageLevelField.value = "low" + vramUsageLevelField.dispatchEvent(new Event("change")) } document.removeEventListener("system_info_update", initGPUProfile) } diff --git a/ui/media/js/dnd.js b/ui/media/js/dnd.js index 4e50b638..7128bc69 100644 --- a/ui/media/js/dnd.js +++ b/ui/media/js/dnd.js @@ -292,29 +292,58 @@ const TASK_MAPPING = { use_lora_model: { name: "LoRA model", setUI: (use_lora_model) => { - const oldVal = loraModelField.value - use_lora_model = - use_lora_model === undefined || use_lora_model === null || use_lora_model === "None" - ? "" - : use_lora_model + use_lora_model.forEach((model_name, i) => { + let field = loraModels[i][0] + const oldVal = field.value - if (use_lora_model !== "") { - use_lora_model = getModelPath(use_lora_model, [".ckpt", ".safetensors"]) - use_lora_model = use_lora_model !== "" ? use_lora_model : oldVal + if (model_name !== "") { + model_name = getModelPath(model_name, [".ckpt", ".safetensors"]) + model_name = model_name !== "" ? model_name : oldVal + } + field.value = model_name + }) + + // clear the remaining entries + for (let i = use_lora_model.length; i < loraModels.length; i++) { + loraModels[i][0].value = "" } - loraModelField.value = use_lora_model }, - readUI: () => loraModelField.value, - parse: (val) => val, + readUI: () => { + let values = loraModels.map((e) => e[0].value) + values = values.filter((e) => e.trim() !== "") + values = values.length > 0 ? 
values : "None" + return values + }, + parse: (val) => { + val = !val || val === "None" ? "" : val + val = Array.isArray(val) ? val : [val] + return val + }, }, lora_alpha: { name: "LoRA Strength", setUI: (lora_alpha) => { - loraAlphaField.value = lora_alpha - updateLoraAlphaSlider() + lora_alpha.forEach((model_strength, i) => { + let field = loraModels[i][1] + field.value = model_strength + }) + + // clear the remaining entries + for (let i = lora_alpha.length; i < loraModels.length; i++) { + loraModels[i][1].value = 0 + } + }, + readUI: () => { + let models = loraModels.filter((e) => e[0].value.trim() !== "") + let values = models.map((e) => e[1].value) + values = values.length > 0 ? values : 0 + return values + }, + parse: (val) => { + val = Array.isArray(val) ? val : [val] + val = val.map((e) => parseFloat(e)) + return val }, - readUI: () => parseFloat(loraAlphaField.value), - parse: (val) => parseFloat(val), }, use_hypernetwork_model: { name: "Hypernetwork model", @@ -426,8 +455,11 @@ function restoreTaskToUI(task, fieldsToSkip) { } if (!("use_lora_model" in task.reqBody)) { - loraModelField.value = "" - loraModelField.dispatchEvent(new Event("change")) + loraModels.forEach((e) => { + e[0].value = "" + e[1].value = 0 + e[0].dispatchEvent(new Event("change")) + }) } // restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. 
copy/paste or d&d) diff --git a/ui/media/js/main.js b/ui/media/js/main.js index 83e80f4a..70d27fd5 100644 --- a/ui/media/js/main.js +++ b/ui/media/js/main.js @@ -103,9 +103,6 @@ let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None") let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider") let hypernetworkStrengthField = document.querySelector("#hypernetwork_strength") -let loraModelField = new ModelDropdown(document.querySelector("#lora_model"), "lora", "None") -let loraAlphaSlider = document.querySelector("#lora_alpha_slider") -let loraAlphaField = document.querySelector("#lora_alpha") let outputFormatField = document.querySelector("#output_format") let outputLosslessField = document.querySelector("#output_lossless") let outputLosslessContainer = document.querySelector("#output_lossless_container") @@ -159,6 +156,8 @@ let undoButton = document.querySelector("#undo") let undoBuffer = [] const UNDO_LIMIT = 20 +let loraModels = [] + imagePreview.addEventListener("drop", function(ev) { const data = ev.dataTransfer?.getData("text/plain") if (!data) { @@ -1292,13 +1291,31 @@ function getCurrentUserRequest() { newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value newTask.reqBody.hypernetwork_strength = parseFloat(hypernetworkStrengthField.value) } - if (testDiffusers.checked && loraModelField.value) { - newTask.reqBody.use_lora_model = loraModelField.value - newTask.reqBody.lora_alpha = parseFloat(loraAlphaField.value) + if (testDiffusers.checked) { + let [modelNames, modelStrengths] = getModelInfo(loraModels) + + if (modelNames.length > 0) { + modelNames = modelNames.length == 1 ? modelNames[0] : modelNames + modelStrengths = modelStrengths.length == 1 ? 
modelStrengths[0] : modelStrengths + + newTask.reqBody.use_lora_model = modelNames + newTask.reqBody.lora_alpha = modelStrengths + } } return newTask } +function getModelInfo(models) { + let modelInfo = models.map((e) => [e[0].value, e[1].value]) + modelInfo = modelInfo.filter((e) => e[0].trim() !== "") + modelInfo = modelInfo.map((e) => [e[0], parseFloat(e[1])]) + + let modelNames = modelInfo.map((e) => e[0]) + let modelStrengths = modelInfo.map((e) => e[1]) + + return [modelNames, modelStrengths] +} + function getPrompts(prompts) { if (typeof prompts === "undefined") { prompts = promptField.value @@ -1346,7 +1363,8 @@ function getPromptsNumber(prompts) { let promptsToMake = [] let numberOfPrompts = 0 - if (prompts.trim() !== "") { // this needs to stay sort of the same, as the prompts have to be passed through to the other functions + if (prompts.trim() !== "") { + // this needs to stay sort of the same, as the prompts have to be passed through to the other functions prompts = prompts.split("\n") prompts = prompts.map((prompt) => prompt.trim()) prompts = prompts.filter((prompt) => prompt !== "") @@ -1354,7 +1372,11 @@ function getPromptsNumber(prompts) { // estimate number of prompts let estimatedNumberOfPrompts = 0 prompts.forEach((prompt) => { - estimatedNumberOfPrompts += (prompt.match(/{[^}]*}/g) || []).map((e) => (e.match(/,/g) || []).length + 1).reduce( (p,a) => p*a, 1) * (2**(prompt.match(/\|/g) || []).length) + estimatedNumberOfPrompts += + (prompt.match(/{[^}]*}/g) || []) + .map((e) => (e.match(/,/g) || []).length + 1) + .reduce((p, a) => p * a, 1) * + 2 ** (prompt.match(/\|/g) || []).length }) if (estimatedNumberOfPrompts >= 10000) { @@ -1394,7 +1416,8 @@ function applySetOperator(prompts) { return promptsToMake } -function applyPermuteOperator(prompts) { // prompts is array of input, trimmed, filtered and split by \n +function applyPermuteOperator(prompts) { + // prompts is array of input, trimmed, filtered and split by \n let promptsToMake = [] 
prompts.forEach((prompt) => { let promptMatrix = prompt.split("|") @@ -1414,13 +1437,14 @@ function applyPermuteOperator(prompts) { // prompts is array of input, trimmed, } // returns how many prompts would have to be made with the given prompts -function applyPermuteOperatorNumber(prompts) { // prompts is array of input, trimmed, filtered and split by \n +function applyPermuteOperatorNumber(prompts) { + // prompts is array of input, trimmed, filtered and split by \n let numberOfPrompts = 0 prompts.forEach((prompt) => { let promptCounter = 1 let promptMatrix = prompt.split("|") promptMatrix.shift() - + promptMatrix = promptMatrix.map((p) => p.trim()) promptMatrix = promptMatrix.filter((p) => p !== "") @@ -1510,8 +1534,12 @@ clearAllPreviewsBtn.addEventListener("click", (e) => { }) /* Download images popup */ -showDownloadDialogBtn.addEventListener("click", (e) => { saveAllImagesDialog.showModal() }) -saveAllImagesCloseBtn.addEventListener("click", (e) => { saveAllImagesDialog.close() }) +showDownloadDialogBtn.addEventListener("click", (e) => { + saveAllImagesDialog.showModal() +}) +saveAllImagesCloseBtn.addEventListener("click", (e) => { + saveAllImagesDialog.close() +}) modalDialogCloseOnBackdropClick(saveAllImagesDialog) makeDialogDraggable(saveAllImagesDialog) @@ -1629,15 +1657,11 @@ function renameMakeImageButton() { imageLabel = totalImages + " Images" } if (SD.activeTasks.size == 0) { - if (totalImages >= 10000) - makeImageBtn.innerText = "Make 10000+ images" - else - makeImageBtn.innerText = "Make " + imageLabel + if (totalImages >= 10000) makeImageBtn.innerText = "Make 10000+ images" + else makeImageBtn.innerText = "Make " + imageLabel } else { - if (totalImages >= 10000) - makeImageBtn.innerText = "Enqueue 10000+ images" - else - makeImageBtn.innerText = "Enqueue Next " + imageLabel + if (totalImages >= 10000) makeImageBtn.innerText = "Enqueue 10000+ images" + else makeImageBtn.innerText = "Enqueue Next " + imageLabel } } 
numOutputsTotalField.addEventListener("change", renameMakeImageButton) @@ -1829,36 +1853,6 @@ function updateHypernetworkStrengthContainer() { hypernetworkModelField.addEventListener("change", updateHypernetworkStrengthContainer) updateHypernetworkStrengthContainer() -/********************* LoRA alpha **********************/ -function updateLoraAlpha() { - loraAlphaField.value = loraAlphaSlider.value / 100 - loraAlphaField.dispatchEvent(new Event("change")) -} - -function updateLoraAlphaSlider() { - if (loraAlphaField.value < -2) { - loraAlphaField.value = -2 - } else if (loraAlphaField.value > 2) { - loraAlphaField.value = 2 - } - - loraAlphaSlider.value = loraAlphaField.value * 100 - loraAlphaSlider.dispatchEvent(new Event("change")) -} - -loraAlphaSlider.addEventListener("input", updateLoraAlpha) -loraAlphaField.addEventListener("input", updateLoraAlphaSlider) -updateLoraAlpha() - -function updateLoraAlphaContainer() { - const loraModelContainer = document.querySelector("#lora_model_container") - if (loraModelContainer && window.getComputedStyle(loraModelContainer).display !== "none") { - document.querySelector("#lora_alpha_container").style.display = loraModelField.value === "" ? 
"none" : "" - } -} -loraModelField.addEventListener("change", updateLoraAlphaContainer) -updateLoraAlphaContainer() - /********************* JPEG/WEBP Quality **********************/ function updateOutputQuality() { outputQualityField.value = 0 | outputQualitySlider.value @@ -2076,9 +2070,8 @@ function resumeClient() { }) } - function splashScreen(force = false) { - const splashVersion = splashScreenPopup.dataset['version'] + const splashVersion = splashScreenPopup.dataset["version"] const lastSplash = localStorage.getItem("lastSplashScreenVersion") || 0 if (testDiffusers.checked) { if (force || lastSplash < splashVersion) { @@ -2088,8 +2081,9 @@ function splashScreen(force = false) { } } - -document.getElementById("logo_img").addEventListener("click", (e) => { splashScreen(true) }) +document.getElementById("logo_img").addEventListener("click", (e) => { + splashScreen(true) +}) promptField.addEventListener("input", debounce(renameMakeImageButton, 1000)) @@ -2142,21 +2136,21 @@ document.getElementById("toggle-cloudflare-tunnel").addEventListener("click", as /* Embeddings */ -function updateEmbeddingsList(filter="") { - function html(model, prefix="", filter="") { +function updateEmbeddingsList(filter = "") { + function html(model, prefix = "", filter = "") { filter = filter.toLowerCase() - let toplevel="" - let folders="" - - model?.forEach( m => { - if (typeof(m) == "string") { - if (m.toLowerCase().search(filter)!=-1) { + let toplevel = "" + let folders = "" + + model?.forEach((m) => { + if (typeof m == "string") { + if (m.toLowerCase().search(filter) != -1) { toplevel += ` ` } } else { - let subdir = html(m[1], prefix+m[0]+"/", filter) + let subdir = html(m[1], prefix + m[0] + "/", filter) if (subdir != "") { - folders += `

${prefix}${m[0]}

` + subdir + folders += `

${prefix}${m[0]}

` + subdir } } }) @@ -2174,7 +2168,7 @@ function updateEmbeddingsList(filter="") { insertAtCursor(promptField, text) } } else { - let pad="" + let pad = "" if (e.shiftKey) { if (!negativePromptField.value.endsWith(" ")) { pad = " " @@ -2189,14 +2183,26 @@ function updateEmbeddingsList(filter="") { } } - embeddingsList.innerHTML = html(modelsOptions.embeddings, "", filter) - embeddingsList.querySelectorAll("button").forEach( (b) => { b.addEventListener("click", onButtonClick)}) + // Remove after fixing https://github.com/huggingface/diffusers/issues/3922 + let warning = "" + if (vramUsageLevelField.value == "low") { + warning = ` +
+ Warning: Your GPU memory profile is set to "Low". Embeddings currently only work in "Balanced" mode! +
` + } + // END of remove block + + embeddingsList.innerHTML = warning + html(modelsOptions.embeddings, "", filter) + embeddingsList.querySelectorAll("button").forEach((b) => { + b.addEventListener("click", onButtonClick) + }) } -embeddingsButton.addEventListener("click", () => { +embeddingsButton.addEventListener("click", () => { updateEmbeddingsList() - embeddingsSearchBox.value="" - embeddingsDialog.showModal() + embeddingsSearchBox.value = "" + embeddingsDialog.showModal() }) embeddingsDialogCloseBtn.addEventListener("click", (e) => { embeddingsDialog.close() @@ -2208,7 +2214,6 @@ embeddingsSearchBox.addEventListener("input", (e) => { modalDialogCloseOnBackdropClick(embeddingsDialog) makeDialogDraggable(embeddingsDialog) - if (testDiffusers.checked) { document.getElementById("embeddings-container").classList.remove("displayNone") } @@ -2235,3 +2240,43 @@ prettifyInputs(document) // set the textbox as focused on start promptField.focus() promptField.selectionStart = promptField.value.length + +// multi-models +function addModelEntry(i, modelContainer, modelsList, modelType, defaultValue, strengthStep) { + let nameId = modelType + "_model_" + i + let strengthId = modelType + "_alpha_" + i + + const modelEntry = document.createElement("div") + modelEntry.className = "model_entry" + modelEntry.innerHTML = ` + +
+ ` + + let modelName = new ModelDropdown(modelEntry.querySelector(".model_name"), modelType, "None") + let modelStrength = modelEntry.querySelector(".model_strength") + + modelContainer.appendChild(modelEntry) + modelsList.push([modelName, modelStrength]) +} + +function createLoRAEntries() { + let container = document.querySelector("#lora_model_container .model_entries") + for (let i = 0; i < 3; i++) { + addModelEntry(i, container, loraModels, "lora", 0.5, 0.02) + } +} +createLoRAEntries() + +// chrome-like spinners only on hover +function showSpinnerOnlyOnHover(e) { + e.addEventListener("mouseenter", () => { + e.setAttribute("type", "number") + }) + e.addEventListener("mouseleave", () => { + e.removeAttribute("type") + }) + e.removeAttribute("type") +} + +document.querySelectorAll("input[type=number]").forEach(showSpinnerOnlyOnHover) diff --git a/ui/media/js/parameters.js b/ui/media/js/parameters.js index b6f48d96..475992a0 100644 --- a/ui/media/js/parameters.js +++ b/ui/media/js/parameters.js @@ -436,7 +436,6 @@ async function getAppConfig() { if (!testDiffusersEnabled) { document.querySelector("#lora_model_container").style.display = "none" - document.querySelector("#lora_alpha_container").style.display = "none" document.querySelector("#tiling_container").style.display = "none" document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => { @@ -444,7 +443,6 @@ async function getAppConfig() { }) } else { document.querySelector("#lora_model_container").style.display = "" - document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? 
"" : "none" document.querySelector("#tiling_container").style.display = "" document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => { diff --git a/ui/media/js/utils.js b/ui/media/js/utils.js index 6ab24712..bbacbb47 100644 --- a/ui/media/js/utils.js +++ b/ui/media/js/utils.js @@ -1074,6 +1074,12 @@ async function deleteKeys(keyToDelete) { function modalDialogCloseOnBackdropClick(dialog) { dialog.addEventListener('mousedown', function (event) { + // Firefox creates an event with clientX|Y = 0|0 when choosing an