From 0127714929c9eb5567a4dd4a0360df2d771db48f Mon Sep 17 00:00:00 2001 From: JeLuF Date: Mon, 22 May 2023 21:19:31 +0200 Subject: [PATCH 1/9] Add 'ED is ready, go to localhost:9000' msg to log Sometimes the browser window does not open (esp. on Linux and Mac). Show a prominent message to the log so that users don't wait for hours. --- ui/easydiffusion/app.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ui/easydiffusion/app.py b/ui/easydiffusion/app.py index b6318f01..3064e151 100644 --- a/ui/easydiffusion/app.py +++ b/ui/easydiffusion/app.py @@ -10,6 +10,8 @@ import warnings from easydiffusion import task_manager from easydiffusion.utils import log from rich.logging import RichHandler +from rich.console import Console +from rich.panel import Panel from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config # Remove all handlers associated with the root logger object. @@ -213,11 +215,19 @@ def open_browser(): ui = config.get("ui", {}) net = config.get("net", {}) port = net.get("listen_port", 9000) + if ui.get("open_browser_on_start", True): import webbrowser webbrowser.open(f"http://localhost:{port}") + Console().print(Panel( + "\n" + + "[white]Easy Diffusion is ready to serve requests.\n\n" + + "A new browser tab should have been opened by now.\n" + + f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n", + title="Easy Diffusion is ready", style="bold yellow on blue")) + def get_image_modifiers(): modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json") From 2bab4341a3d658e1d4516b68f66449467265329e Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Tue, 23 May 2023 16:53:53 +0530 Subject: [PATCH 2/9] Add 'Latent Upscaler' as an option in the upscaling dropdown --- ui/easydiffusion/model_manager.py | 7 +++-- ui/easydiffusion/renderer.py | 24 +++++++++++++---- ui/easydiffusion/types.py | 3 ++- ui/index.html | 8 ++++-- ui/media/css/main.css | 6 +++++ ui/media/js/main.js | 44 ++++++++++++++++++++++++++++++- 6 files changed, 81 insertions(+), 11 deletions(-) diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py index 324dcec9..d6a227be 100644 --- a/ui/easydiffusion/model_manager.py +++ b/ui/easydiffusion/model_manager.py @@ -107,12 +107,15 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None): def reload_models_if_necessary(context: Context, task_data: TaskData): + use_upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else "" + model_paths_in_req = { "stable-diffusion": task_data.use_stable_diffusion_model, "vae": task_data.use_vae_model, "hypernetwork": task_data.use_hypernetwork_model, "gfpgan": task_data.use_face_correction, - "realesrgan": task_data.use_upscale, + "realesrgan": task_data.use_upscale if "realesrgan" in use_upscale_lower else None, + "latent_upscaler": True if task_data.use_upscale == "latent_upscaler" else None, "nsfw_checker": True if task_data.block_nsfw else None, "lora": task_data.use_lora_model, } @@ -142,7 +145,7 @@ def resolve_model_paths(task_data: TaskData): if task_data.use_face_correction: task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan") - if task_data.use_upscale: + if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower(): task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan") diff --git a/ui/easydiffusion/renderer.py b/ui/easydiffusion/renderer.py index e26b4389..c60c42df 100644 --- a/ui/easydiffusion/renderer.py 
+++ b/ui/easydiffusion/renderer.py @@ -95,7 +95,7 @@ def make_images_internal( task_data.stream_image_progress_interval, ) gc(context) - filtered_images = filter_images(task_data, images, user_stopped) + filtered_images = filter_images(req, task_data, images, user_stopped) if task_data.save_to_disk_path is not None: save_images_to_disk(images, filtered_images, req, task_data) @@ -151,22 +151,36 @@ def generate_images_internal( return images, user_stopped -def filter_images(task_data: TaskData, images: list, user_stopped): +def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list, user_stopped): if user_stopped: return images filters_to_apply = [] + filter_params = {} if task_data.block_nsfw: filters_to_apply.append("nsfw_checker") if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower(): filters_to_apply.append("gfpgan") - if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower(): - filters_to_apply.append("realesrgan") + if task_data.use_upscale: + if "realesrgan" in task_data.use_upscale.lower(): + filters_to_apply.append("realesrgan") + elif task_data.use_upscale == "latent_upscaler": + filters_to_apply.append("latent_upscaler") + + filter_params["latent_upscaler_options"] = { + "prompt": req.prompt, + "negative_prompt": req.negative_prompt, + "seed": req.seed, + "num_inference_steps": task_data.latent_upscaler_steps, + "guidance_scale": 0, + } + + filter_params["scale"] = task_data.upscale_amount if len(filters_to_apply) == 0: return images - return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount) + return apply_filters(context, filters_to_apply, images, **filter_params) def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int): diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py index 7a5201ab..a76f489a 100644 --- a/ui/easydiffusion/types.py +++ b/ui/easydiffusion/types.py @@ -32,8 +32,9 @@ class TaskData(BaseModel): vram_usage_level: str = "balanced" # or "low" or "medium" use_face_correction: str = None # or "GFPGANv1.3" - use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" + use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler" upscale_amount: int = 4 # or 2 + latent_upscaler_steps: int = 10 use_stable_diffusion_model: str = "sd-v1-4" # use_stable_diffusion_config: str = "v1-inference" use_vae_model: str = None diff --git a/ui/index.html b/ui/index.html index 99087eec..5097d84a 100644 --- a/ui/index.html +++ b/ui/index.html @@ -258,14 +258,18 @@
  • with +
    + +
  • diff --git a/ui/media/css/main.css b/ui/media/css/main.css index ba513237..8f4f49fa 100644 --- a/ui/media/css/main.css +++ b/ui/media/css/main.css @@ -1303,6 +1303,12 @@ body.wait-pause { display:none !important; } +#latent_upscaler_settings { + padding-top: 3pt; + padding-bottom: 3pt; + padding-left: 5pt; +} + /* TOAST NOTIFICATIONS */ .toast-notification { position: fixed; diff --git a/ui/media/js/main.js b/ui/media/js/main.js index 0ce32f2b..23ed5f46 100644 --- a/ui/media/js/main.js +++ b/ui/media/js/main.js @@ -86,6 +86,9 @@ let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model") let useUpscalingField = document.querySelector("#use_upscale") let upscaleModelField = document.querySelector("#upscale_model") let upscaleAmountField = document.querySelector("#upscale_amount") +let latentUpscalerSettings = document.querySelector("#latent_upscaler_settings") +let latentUpscalerStepsSlider = document.querySelector("#latent_upscaler_steps_slider") +let latentUpscalerStepsField = document.querySelector("#latent_upscaler_steps") let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion") let clipSkipField = document.querySelector("#clip_skip") let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None") @@ -239,7 +242,7 @@ function setServerStatus(event) { break } if (SD.serverState.devices) { - document.dispatchEvent(new CustomEvent("system_info_update", { detail: SD.serverState.devices})) + document.dispatchEvent(new CustomEvent("system_info_update", { detail: SD.serverState.devices })) } } @@ -1268,6 +1271,10 @@ function getCurrentUserRequest() { if (useUpscalingField.checked) { newTask.reqBody.use_upscale = upscaleModelField.value newTask.reqBody.upscale_amount = upscaleAmountField.value + if (upscaleModelField.value === "latent_upscaler") { + newTask.reqBody.upscale_amount = "2" + newTask.reqBody.latent_upscaler_steps = latentUpscalerStepsField.value + } } if (hypernetworkModelField.value) { newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value @@ -1582,6 +1589,20 @@ useUpscalingField.addEventListener("change", function(e) { upscaleAmountField.disabled = !this.checked }) +function onUpscaleModelChange() { + let upscale4x = document.querySelector("#upscale_amount_4x") + if (upscaleModelField.value === "latent_upscaler") { + upscale4x.disabled = true + upscaleAmountField.value = "2" + latentUpscalerSettings.classList.remove("displayNone") + } else { + upscale4x.disabled = false + latentUpscalerSettings.classList.add("displayNone") + } +} +upscaleModelField.addEventListener("change", onUpscaleModelChange) +onUpscaleModelChange() + makeImageBtn.addEventListener("click", makeImage) document.onkeydown = function(e) { @@ -1591,6 +1612,27 @@ document.onkeydown = function(e) { } } +/********************* Latent Upscaler Steps **************************/ +function updateLatentUpscalerSteps() { + latentUpscalerStepsField.value = latentUpscalerStepsSlider.value + latentUpscalerStepsField.dispatchEvent(new Event("change")) +} + +function updateLatentUpscalerStepsSlider() { + if (latentUpscalerStepsField.value < 1) { + latentUpscalerStepsField.value = 1 + } else if (latentUpscalerStepsField.value > 50) { + latentUpscalerStepsField.value = 50 + } + + latentUpscalerStepsSlider.value = latentUpscalerStepsField.value + latentUpscalerStepsSlider.dispatchEvent(new Event("change")) +} + +latentUpscalerStepsSlider.addEventListener("input", updateLatentUpscalerSteps) 
+latentUpscalerStepsField.addEventListener("input", updateLatentUpscalerStepsSlider) +updateLatentUpscalerSteps() + /********************* Guidance **************************/ function updateGuidanceScale() { guidanceScaleField.value = guidanceScaleSlider.value / 10 From a87dca1ef4b0c2bd6a0380866b1ca76acbc26c89 Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Tue, 23 May 2023 16:55:42 +0530 Subject: [PATCH 3/9] changelog --- CHANGES.md | 1 + ui/index.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9b2b72c1..d8e5c3f0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -22,6 +22,7 @@ Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed. ### Detailed changelog +* 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model. * 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca. * 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf. * 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal or less than the non-diffusers version. Performs softmax in half precision, like sdkit does. diff --git a/ui/index.html b/ui/index.html index 5097d84a..dc4eb7f0 100644 --- a/ui/index.html +++ b/ui/index.html @@ -30,7 +30,7 @@

    Easy Diffusion
-							v2.5.37
+							v2.5.38

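Note on the Latent Upscaler request flow (illustration only, assembled from the diffs above — not part of any patch): the sketch below shows the fields that getCurrentUserRequest() in ui/media/js/main.js adds to the request body when the upscale dropdown is set to "latent_upscaler", per PATCH 2/9. The prompt and seed values are placeholders; the field names and the forced 2x scale come from the diff.

    // Minimal sketch of the relevant reqBody fields (placeholder prompt/seed).
    const reqBody = {
        prompt: "an example prompt",        // placeholder
        seed: 42,                           // placeholder
        use_upscale: "latent_upscaler",     // value of the #upscale_model dropdown
        upscale_amount: "2",                // forced to 2x when the latent upscaler is selected
        latent_upscaler_steps: 10,          // value of #latent_upscaler_steps (clamped to 1..50 in the UI)
    }
    // Server side (PATCH 2/9, renderer.py): filter_images() maps these onto
    // filter_params["latent_upscaler_options"] = { prompt, negative_prompt, seed,
    //     num_inference_steps: latent_upscaler_steps, guidance_scale: 0 }
    // and filter_params["scale"] = upscale_amount, before calling apply_filters().
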
    From eba83386c1e69ac2d02c8e11c7b59251a518664b Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Wed, 24 May 2023 10:08:00 +0530 Subject: [PATCH 4/9] make a note about a flood fill library --- ui/media/js/image-editor.js | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/media/js/image-editor.js b/ui/media/js/image-editor.js index af19daeb..e7de9f2b 100644 --- a/ui/media/js/image-editor.js +++ b/ui/media/js/image-editor.js @@ -834,6 +834,7 @@ function pixelCompare(int1, int2) { } // adapted from https://ben.akrin.com/canvas_fill/fill_04.html +// May 2023 - look at using a library instead of custom code: https://github.com/shaneosullivan/example-canvas-fill function flood_fill(editor, the_canvas_context, x, y, color) { pixel_stack = [{ x: x, y: y }] pixels = the_canvas_context.getImageData(0, 0, editor.width, editor.height) From 30c07eab6b5fd131fa0858d7582d690fbdb7b76a Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Wed, 24 May 2023 15:30:55 +0530 Subject: [PATCH 5/9] Cleaner reporting of errors in the UI; Suggest increasing the page size if that's the error --- ui/media/js/main.js | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/ui/media/js/main.js b/ui/media/js/main.js index 23ed5f46..473ed780 100644 --- a/ui/media/js/main.js +++ b/ui/media/js/main.js @@ -296,6 +296,7 @@ function logError(msg, res, outputMsg) { logMsg(msg, "error", outputMsg) console.log("request error", res) + console.trace() setStatus("request", "error", "error") } @@ -787,11 +788,6 @@ function getTaskUpdater(task, reqBody, outputContainer) { } msg += "" logError(msg, event, outputMsg) - } else { - let msg = `Unexpected Read Error:
    Error:${
    -                            this.exception
    -                        }
    EventInfo: ${JSON.stringify(event, undefined, 4)}
    ` - logError(msg, event, outputMsg) } break } @@ -888,15 +884,15 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) { 1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.
    2. Try picking a lower level in the 'GPU Memory Usage' setting (in the 'Settings' tab).
    3. Try generating a smaller image.
    `
-                } else if (msg.toLowerCase().includes("DefaultCPUAllocator: not enough memory")) {
+                } else if (msg.includes("DefaultCPUAllocator: not enough memory")) {
                    msg += `

    Reason: Your computer is running out of system RAM!
-
    +

    Suggestions:
    1. Try closing unnecessary programs and browser tabs.
    2. If that doesn't help, please increase your computer's virtual memory by following these steps for
-                            Windows, or
+                            Windows or
                             Linux.
    3. Try restarting your computer.
    ` } From 8554b0eab2a355c5472c42ccad039b01c7f18d67 Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Wed, 24 May 2023 16:02:53 +0530 Subject: [PATCH 6/9] Better reporting of model load errors - sends the report to the browser UI during the next image rendering task --- ui/easydiffusion/model_manager.py | 23 ++++++++++++++++++++++- ui/easydiffusion/renderer.py | 1 + ui/easydiffusion/task_manager.py | 1 + 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py index d6a227be..0a1f1b5c 100644 --- a/ui/easydiffusion/model_manager.py +++ b/ui/easydiffusion/model_manager.py @@ -53,15 +53,21 @@ def load_default_models(context: Context): scan_model=context.model_paths[model_type] != None and not context.model_paths[model_type].endswith(".safetensors"), ) + if model_type in context.model_load_errors: + del context.model_load_errors[model_type] except Exception as e: log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]") log.exception(e) del context.model_paths[model_type] + context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks + def unload_all(context: Context): for model_type in KNOWN_MODEL_TYPES: unload_model(context, model_type) + if model_type in context.model_load_errors: + del context.model_load_errors[model_type] def resolve_model_to_use(model_name: str = None, model_type: str = None): @@ -132,7 +138,14 @@ def reload_models_if_necessary(context: Context, task_data: TaskData): context.model_paths[model_type] = model_path_in_req action_fn = unload_model if context.model_paths[model_type] is None else load_model - action_fn(context, model_type, scan_model=False) # we've scanned them already + try: + action_fn(context, model_type, scan_model=False) # we've scanned them already + if model_type in context.model_load_errors: + del context.model_load_errors[model_type] + except Exception as e: + log.exception(e) + if action_fn == load_model: + context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks def resolve_model_paths(task_data: TaskData): @@ -149,6 +162,14 @@ def resolve_model_paths(task_data: TaskData): task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan") +def fail_if_models_did_not_load(context: Context): + for model_type in KNOWN_MODEL_TYPES: + if model_type in context.model_load_errors: + e = context.model_load_errors[model_type] + raise Exception(f"Could not load the {model_type} model! 
Reason: " + e) + # concat 'e', don't use in format string (injection attack) + + def set_vram_optimizations(context: Context): config = app.getConfig() vram_usage_level = config.get("vram_usage_level", "balanced") diff --git a/ui/easydiffusion/renderer.py b/ui/easydiffusion/renderer.py index c60c42df..e2dae34f 100644 --- a/ui/easydiffusion/renderer.py +++ b/ui/easydiffusion/renderer.py @@ -33,6 +33,7 @@ def init(device): context.stop_processing = False context.temp_images = {} context.partial_x_samples = None + context.model_load_errors = {} from easydiffusion import app diff --git a/ui/easydiffusion/task_manager.py b/ui/easydiffusion/task_manager.py index c11acbec..a91cd9c6 100644 --- a/ui/easydiffusion/task_manager.py +++ b/ui/easydiffusion/task_manager.py @@ -336,6 +336,7 @@ def thread_render(device): current_state = ServerStates.LoadingModel model_manager.resolve_model_paths(task.task_data) model_manager.reload_models_if_necessary(renderer.context, task.task_data) + model_manager.fail_if_models_did_not_load(renderer.context) current_state = ServerStates.Rendering task.response = renderer.make_images( From db265309a57d576f2ec08433c56c33b8f8bb27a4 Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Wed, 24 May 2023 16:24:29 +0530 Subject: [PATCH 7/9] Show an explanation for why the CPU toggle is disabled; utility class for alert() and confirm() that matches the ED theme; code formatting --- ui/media/js/main.js | 19 +++------ ui/media/js/parameters.js | 19 +++++++-- ui/media/js/utils.js | 90 +++++++++++++++++++++++++-------------- 3 files changed, 79 insertions(+), 49 deletions(-) diff --git a/ui/media/js/main.js b/ui/media/js/main.js index 473ed780..ecd8ad73 100644 --- a/ui/media/js/main.js +++ b/ui/media/js/main.js @@ -261,20 +261,11 @@ function shiftOrConfirm(e, prompt, fn) { if (e.shiftKey || !confirmDangerousActionsField.checked) { fn(e) } else { - $.confirm({ - theme: "modern", - title: prompt, - useBootstrap: false, - animateFromElement: false, - content: - 'Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.', - buttons: { - yes: () => { - fn(e) - }, - cancel: () => {}, - }, - }) + confirm( + 'Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.', + prompt, + fn + ) } } diff --git a/ui/media/js/parameters.js b/ui/media/js/parameters.js index f51b5290..4c7240eb 100644 --- a/ui/media/js/parameters.js +++ b/ui/media/js/parameters.js @@ -191,7 +191,8 @@ var PARAMETERS = [ id: "listen_port", type: ParameterType.custom, label: "Network port", - note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.", + note: + "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.", icon: "fa-anchor", render: (parameter) => { return `` @@ -396,14 +397,14 @@ async function getAppConfig() { document.querySelector("#lora_model_container").style.display = "none" document.querySelector("#lora_alpha_container").style.display = "none" - document.querySelectorAll("#sampler_name option.diffusers-only").forEach(option => { + document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => { option.style.display = "none" }) } else { document.querySelector("#lora_model_container").style.display = "" document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? 
"" : "none" - document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach(option => { + document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => { option.disabled = true }) document.querySelector("#clip_skip_config").classList.remove("displayNone") @@ -568,6 +569,16 @@ async function getSystemInfo() { if (allDeviceIds.length === 0) { useCPUField.checked = true useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory + + getParameterSettingsEntry("use_cpu").addEventListener("click", function() { + alert( + "Sorry, we could not find a compatible graphics card! Easy Diffusion supports graphics cards with minimum 2 GB of RAM. " + + "Only NVIDIA cards are supported on Windows. NVIDIA and AMD cards are supported on Linux.

    " + + "If you have a compatible graphics card, please try updating to the latest drivers.

    " + + "Only the CPU can be used for generating images, without a compatible graphics card.", + "No compatible graphics card found!" + ) + }) } autoPickGPUsField.checked = devices["config"] === "auto" @@ -586,7 +597,7 @@ async function getSystemInfo() { $("#use_gpus").val(activeDeviceIds) } - document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices})) + document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices })) setHostInfo(res["hosts"]) let force = false if (res["enforce_output_dir"] !== undefined) { diff --git a/ui/media/js/utils.js b/ui/media/js/utils.js index d1578d8e..16778b2d 100644 --- a/ui/media/js/utils.js +++ b/ui/media/js/utils.js @@ -843,57 +843,85 @@ function createTab(request) { /* TOAST NOTIFICATIONS */ function showToast(message, duration = 5000, error = false) { - const toast = document.createElement("div"); - toast.classList.add("toast-notification"); + const toast = document.createElement("div") + toast.classList.add("toast-notification") if (error === true) { - toast.classList.add("toast-notification-error"); + toast.classList.add("toast-notification-error") } - toast.innerHTML = message; - document.body.appendChild(toast); + toast.innerHTML = message + document.body.appendChild(toast) // Set the position of the toast on the screen - const toastCount = document.querySelectorAll(".toast-notification").length; - const toastHeight = toast.offsetHeight; + const toastCount = document.querySelectorAll(".toast-notification").length + const toastHeight = toast.offsetHeight const previousToastsHeight = Array.from(document.querySelectorAll(".toast-notification")) .slice(0, -1) // exclude current toast - .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0); // add 10 pixels for spacing - toast.style.bottom = `${10 + previousToastsHeight}px`; - toast.style.right = "10px"; + .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0) // add 10 pixels for spacing + toast.style.bottom = `${10 + previousToastsHeight}px` + toast.style.right = "10px" // Delay the removal of the toast until animation has completed const removeToast = () => { - toast.classList.add("hide"); + toast.classList.add("hide") const removeTimeoutId = setTimeout(() => { - toast.remove(); + toast.remove() // Adjust the position of remaining toasts - const remainingToasts = document.querySelectorAll(".toast-notification"); - const removedToastBottom = toast.getBoundingClientRect().bottom; - + const remainingToasts = document.querySelectorAll(".toast-notification") + const removedToastBottom = toast.getBoundingClientRect().bottom + remainingToasts.forEach((toast) => { if (toast.getBoundingClientRect().bottom < removedToastBottom) { - toast.classList.add("slide-down"); + toast.classList.add("slide-down") } - }); - + }) + // Wait for the slide-down animation to complete setTimeout(() => { // Remove the slide-down class after the animation has completed - const slidingToasts = document.querySelectorAll(".slide-down"); + const slidingToasts = document.querySelectorAll(".slide-down") slidingToasts.forEach((toast) => { - toast.classList.remove("slide-down"); - }); - + toast.classList.remove("slide-down") + }) + // Adjust the position of remaining toasts again, in case there are multiple toasts being removed at once - const remainingToastsDown = document.querySelectorAll(".toast-notification"); - let heightSoFar = 0; + const remainingToastsDown = document.querySelectorAll(".toast-notification") + let heightSoFar = 0 
remainingToastsDown.forEach((toast) => { - toast.style.bottom = `${10 + heightSoFar}px`; - heightSoFar += toast.offsetHeight + 10; // add 10 pixels for spacing - }); - }, 0); // The duration of the slide-down animation (in milliseconds) - }, 500); - }; + toast.style.bottom = `${10 + heightSoFar}px` + heightSoFar += toast.offsetHeight + 10 // add 10 pixels for spacing + }) + }, 0) // The duration of the slide-down animation (in milliseconds) + }, 500) + } // Remove the toast after specified duration - setTimeout(removeToast, duration); + setTimeout(removeToast, duration) +} + +function alert(msg, title) { + title = title || "" + $.alert({ + theme: "modern", + title: title, + useBootstrap: false, + animateFromElement: false, + content: msg, + }) +} + +function confirm(msg, title, fn) { + title = title || "" + $.confirm({ + theme: "modern", + title: title, + useBootstrap: false, + animateFromElement: false, + content: msg, + buttons: { + yes: () => { + fn(e) + }, + cancel: () => {}, + }, + }) } From 3d7e16cfd944540a0e5b9a0f0c8c10b760f6cf7d Mon Sep 17 00:00:00 2001 From: cmdr2 Date: Wed, 24 May 2023 16:29:58 +0530 Subject: [PATCH 8/9] changelog --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index d8e5c3f0..a1e49336 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -22,6 +22,7 @@ Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed. ### Detailed changelog +* 2.5.38 - 24 May 2023 - Better reporting of errors, and show an explanation if the user cannot disable the "Use CPU" setting. * 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model. * 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca. * 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf. From 3ea74af76d32f005eaed7df6bc9245e99f99ffd6 Mon Sep 17 00:00:00 2001 From: JeLuF Date: Wed, 24 May 2023 19:29:54 +0200 Subject: [PATCH 9/9] Fix confirmation dialog By splitting the confirmation function into two halves, the closure was lost --- ui/media/js/main.js | 2 +- ui/media/js/utils.js | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/ui/media/js/main.js b/ui/media/js/main.js index ecd8ad73..fa37600a 100644 --- a/ui/media/js/main.js +++ b/ui/media/js/main.js @@ -264,7 +264,7 @@ function shiftOrConfirm(e, prompt, fn) { confirm( 'Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.', prompt, - fn + () => { fn(e) } ) } } diff --git a/ui/media/js/utils.js b/ui/media/js/utils.js index 16778b2d..6ddb0ae6 100644 --- a/ui/media/js/utils.js +++ b/ui/media/js/utils.js @@ -918,9 +918,7 @@ function confirm(msg, title, fn) { animateFromElement: false, content: msg, buttons: { - yes: () => { - fn(e) - }, + yes: fn, cancel: () => {}, }, })
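Note on the closure fix in PATCH 9/9 (illustration only, assembled from the diffs above): in PATCH 7/9, utils.js's confirm() invoked `yes: () => { fn(e) }`, but `e` only exists inside shiftOrConfirm() in main.js, so the yes-handler could no longer see the original click event — the closure was lost. The sketch below shows the repaired call path, assuming the same function names as the diffs.

    // main.js — the caller closes over the click event `e`:
    function shiftOrConfirm(e, prompt, fn) {
        if (e.shiftKey || !confirmDangerousActionsField.checked) {
            fn(e)
        } else {
            confirm(
                'Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.',
                prompt,
                () => { fn(e) }   // the arrow function captures `e` from this scope
            )
        }
    }

    // utils.js — confirm() no longer references `e`; it just wires the callback through:
    function confirm(msg, title, fn) {
        $.confirm({
            theme: "modern",
            title: title || "",
            useBootstrap: false,
            animateFromElement: false,
            content: msg,
            buttons: { yes: fn, cancel: () => {} },
        })
    }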