Merge branch 'beta' into splash

commit 17e731dfe3
JeLuF, 2023-05-25 00:30:47 +02:00 (committed by GitHub)
15 changed files with 228 additions and 77 deletions

@@ -22,6 +22,8 @@
 Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
 ### Detailed changelog
+* 2.5.38 - 24 May 2023 - Better reporting of errors, and show an explanation if the user cannot disable the "Use CPU" setting.
+* 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model.
 * 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
 * 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
 * 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal or less than the non-diffusers version. Performs softmax in half precision, like sdkit does.

@@ -18,7 +18,7 @@ os_name = platform.system()
 modules_to_check = {
     "torch": ("1.11.0", "1.13.1", "2.0.0"),
     "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
-    "sdkit": "1.0.96",
+    "sdkit": "1.0.97",
     "stable-diffusion-sdkit": "2.1.4",
     "rich": "12.6.0",
     "uvicorn": "0.19.0",

@@ -1,5 +1,6 @@
 import os
 import argparse
+import sys
 
 # The config file is in the same directory as this script
 config_directory = os.path.dirname(__file__)
@@ -21,16 +22,16 @@ if os.path.isfile(config_yaml):
         try:
             config = yaml.safe_load(configfile)
         except Exception as e:
-            print(e)
-            exit()
+            print(e, file=sys.stderr)
+            config = {}
 elif os.path.isfile(config_json):
     import json
 
     with open(config_json, 'r') as configfile:
         try:
             config = json.load(configfile)
         except Exception as e:
-            print(e)
-            exit()
+            print(e, file=sys.stderr)
+            config = {}
 else:
     config = {}
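The point of this change: a malformed config file used to call exit() and abort startup; now the error is reported on stderr and the app proceeds with an empty config. A minimal standalone sketch of the resulting fallback behavior (assuming PyYAML is installed, as in the script above; paths are illustrative):

    import json
    import os
    import sys

    import yaml  # PyYAML, assumed available as in the script above

    config_directory = os.path.dirname(__file__)
    config_yaml = os.path.join(config_directory, "config.yaml")
    config_json = os.path.join(config_directory, "config.json")

    config = {}
    if os.path.isfile(config_yaml):
        with open(config_yaml, "r") as configfile:
            try:
                # safe_load returns None for an empty file, hence the "or {}"
                config = yaml.safe_load(configfile) or {}
            except Exception as e:
                # report the parse error, but keep starting up with defaults
                # instead of killing the process like the old exit() did
                print(e, file=sys.stderr)
    elif os.path.isfile(config_json):
        with open(config_json, "r") as configfile:
            try:
                config = json.load(configfile)
            except Exception as e:
                print(e, file=sys.stderr)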

@@ -10,6 +10,8 @@ import warnings
 from easydiffusion import task_manager
 from easydiffusion.utils import log
 from rich.logging import RichHandler
+from rich.console import Console
+from rich.panel import Panel
 from sdkit.utils import log as sdkit_log  # hack, so we can overwrite the log config
 
 # Remove all handlers associated with the root logger object.
@@ -213,11 +215,19 @@ def open_browser():
     ui = config.get("ui", {})
     net = config.get("net", {})
     port = net.get("listen_port", 9000)
+
     if ui.get("open_browser_on_start", True):
         import webbrowser
 
         webbrowser.open(f"http://localhost:{port}")
 
+    Console().print(Panel(
+        "\n" +
+        "[white]Easy Diffusion is ready to serve requests.\n\n" +
+        "A new browser tab should have been opened by now.\n" +
+        f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
+        title="Easy Diffusion is ready", style="bold yellow on blue"))
+
 
 def get_image_modifiers():
     modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")

@@ -53,15 +53,21 @@ def load_default_models(context: Context):
             scan_model=context.model_paths[model_type] != None
             and not context.model_paths[model_type].endswith(".safetensors"),
         )
+        if model_type in context.model_load_errors:
+            del context.model_load_errors[model_type]
     except Exception as e:
         log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
         log.exception(e)
         del context.model_paths[model_type]
 
+        context.model_load_errors[model_type] = str(e)  # storing the entire Exception can lead to memory leaks
+
 
 def unload_all(context: Context):
     for model_type in KNOWN_MODEL_TYPES:
         unload_model(context, model_type)
+        if model_type in context.model_load_errors:
+            del context.model_load_errors[model_type]
 
 
 def resolve_model_to_use(model_name: str = None, model_type: str = None):
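One detail worth noting in the hunk above: the handler stores str(e), not the Exception object itself. A live Exception keeps its traceback reachable, and the traceback keeps every frame's locals (which can include large tensors) alive, so stashing exceptions in a long-lived dict would be a memory leak. A minimal sketch of the pattern, with hypothetical names:

    import traceback

    model_load_errors = {}  # model_type -> plain error message

    def try_load(model_type, loader):
        try:
            loader()
            # a successful load clears any stale error for this model type
            model_load_errors.pop(model_type, None)
        except Exception as e:
            traceback.print_exc()
            # store only the message: keeping the Exception would pin its
            # traceback (and all frame locals) in memory
            model_load_errors[model_type] = str(e)

    try_load("stable-diffusion", lambda: open("missing-model.ckpt"))  # records the failure
    print(model_load_errors)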
@@ -107,12 +113,15 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
 
 def reload_models_if_necessary(context: Context, task_data: TaskData):
+    use_upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
+
     model_paths_in_req = {
         "stable-diffusion": task_data.use_stable_diffusion_model,
         "vae": task_data.use_vae_model,
         "hypernetwork": task_data.use_hypernetwork_model,
         "gfpgan": task_data.use_face_correction,
-        "realesrgan": task_data.use_upscale,
+        "realesrgan": task_data.use_upscale if "realesrgan" in use_upscale_lower else None,
+        "latent_upscaler": True if task_data.use_upscale == "latent_upscaler" else None,
         "nsfw_checker": True if task_data.block_nsfw else None,
         "lora": task_data.use_lora_model,
     }
@@ -129,7 +138,14 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
             context.model_paths[model_type] = model_path_in_req
 
         action_fn = unload_model if context.model_paths[model_type] is None else load_model
-        action_fn(context, model_type, scan_model=False)  # we've scanned them already
+        try:
+            action_fn(context, model_type, scan_model=False)  # we've scanned them already
+            if model_type in context.model_load_errors:
+                del context.model_load_errors[model_type]
+        except Exception as e:
+            log.exception(e)
+            if action_fn == load_model:
+                context.model_load_errors[model_type] = str(e)  # storing the entire Exception can lead to memory leaks
 
 
 def resolve_model_paths(task_data: TaskData):
@@ -142,10 +158,18 @@ def resolve_model_paths(task_data: TaskData):
     if task_data.use_face_correction:
         task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
-    if task_data.use_upscale:
+    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
         task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")
 
 
+def fail_if_models_did_not_load(context: Context):
+    for model_type in KNOWN_MODEL_TYPES:
+        if model_type in context.model_load_errors:
+            e = context.model_load_errors[model_type]
+            raise Exception(f"Could not load the {model_type} model! Reason: " + e)
+            # concat 'e', don't use in format string (injection attack)
+
+
 def set_vram_optimizations(context: Context):
     config = app.getConfig()
     vram_usage_level = config.get("vram_usage_level", "balanced")

@@ -33,6 +33,7 @@ def init(device):
     context.stop_processing = False
     context.temp_images = {}
     context.partial_x_samples = None
+    context.model_load_errors = {}
 
     from easydiffusion import app
@@ -95,7 +96,7 @@ def make_images_internal(
         task_data.stream_image_progress_interval,
     )
     gc(context)
-    filtered_images = filter_images(task_data, images, user_stopped)
+    filtered_images = filter_images(req, task_data, images, user_stopped)
 
     if task_data.save_to_disk_path is not None:
         save_images_to_disk(images, filtered_images, req, task_data)
@@ -151,22 +152,36 @@ def generate_images_internal(
     return images, user_stopped
 
 
-def filter_images(task_data: TaskData, images: list, user_stopped):
+def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list, user_stopped):
     if user_stopped:
         return images
 
     filters_to_apply = []
+    filter_params = {}
     if task_data.block_nsfw:
         filters_to_apply.append("nsfw_checker")
     if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
         filters_to_apply.append("gfpgan")
-    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
-        filters_to_apply.append("realesrgan")
+    if task_data.use_upscale:
+        if "realesrgan" in task_data.use_upscale.lower():
+            filters_to_apply.append("realesrgan")
+        elif task_data.use_upscale == "latent_upscaler":
+            filters_to_apply.append("latent_upscaler")
+            filter_params["latent_upscaler_options"] = {
+                "prompt": req.prompt,
+                "negative_prompt": req.negative_prompt,
+                "seed": req.seed,
+                "num_inference_steps": task_data.latent_upscaler_steps,
+                "guidance_scale": 0,
+            }
+
+        filter_params["scale"] = task_data.upscale_amount
 
     if len(filters_to_apply) == 0:
         return images
 
-    return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
+    return apply_filters(context, filters_to_apply, images, **filter_params)
 
 
 def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
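For reference, a sketch of the call shape when the Latent Upscaler branch is taken (values are illustrative; the names mirror the hunk above, and apply_filters is the helper this function already calls):

    # illustrative values, mirroring the latent_upscaler branch above
    filters_to_apply = ["latent_upscaler"]
    filter_params = {
        "latent_upscaler_options": {
            "prompt": "a photograph of an astronaut riding a horse",  # req.prompt
            "negative_prompt": "",      # req.negative_prompt
            "seed": 42,                 # req.seed
            "num_inference_steps": 10,  # task_data.latent_upscaler_steps, default 10
            "guidance_scale": 0,
        },
        "scale": 2,  # task_data.upscale_amount; the UI pins this to 2 for this model
    }
    # images = apply_filters(context, filters_to_apply, images, **filter_params)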

@@ -336,6 +336,7 @@ def thread_render(device):
                 current_state = ServerStates.LoadingModel
                 model_manager.resolve_model_paths(task.task_data)
                 model_manager.reload_models_if_necessary(renderer.context, task.task_data)
+                model_manager.fail_if_models_did_not_load(renderer.context)
 
                 current_state = ServerStates.Rendering
                 task.response = renderer.make_images(
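The ordering here is the point: reload_models_if_necessary now records load failures instead of raising, so this explicit guard is what turns a stored error into a failed task before any rendering starts. A condensed, self-contained restatement of the contract (names and the error message are hypothetical):

    model_load_errors = {"stable-diffusion": "file not found: sd-v1-4.ckpt"}

    def fail_if_models_did_not_load(errors):
        for model_type, reason in errors.items():
            # concatenate the stored reason rather than interpolating it
            raise Exception(f"Could not load the {model_type} model! Reason: " + reason)

    try:
        fail_if_models_did_not_load(model_load_errors)
    except Exception as e:
        print(e)  # the task fails fast, before rendering work is queued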

@@ -32,8 +32,9 @@ class TaskData(BaseModel):
     vram_usage_level: str = "balanced"  # or "low" or "medium"
     use_face_correction: str = None  # or "GFPGANv1.3"
-    use_upscale: str = None  # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
+    use_upscale: str = None  # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler"
     upscale_amount: int = 4  # or 2
+    latent_upscaler_steps: int = 10
     use_stable_diffusion_model: str = "sd-v1-4"
     # use_stable_diffusion_config: str = "v1-inference"
     use_vae_model: str = None
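Putting the new fields together, a sketch of the request fragment a client would send to use the Latent Upscaler (values illustrative; field names are exactly those defined in TaskData above):

    # illustrative reqBody fragment for the new upscaler option
    req_body = {
        "use_upscale": "latent_upscaler",  # instead of e.g. "RealESRGAN_x4plus"
        "upscale_amount": 2,               # the UI pins this model to 2x
        "latent_upscaler_steps": 10,       # new field, default 10, UI range 1-50
    }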

@@ -30,7 +30,7 @@
 <h1>
     <img id="logo_img" src="/media/images/icon-512x512.png" >
     Easy Diffusion
-    <small><span id="version">v2.5.37</span> <span id="updateBranchLabel"></span></small>
+    <small><span id="version">v2.5.38</span> <span id="updateBranchLabel"></span></small>
 </h1>
 </div>
 <div id="server-status">
@@ -258,14 +258,18 @@
 <li class="pl-5">
     <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
     <select id="upscale_amount" name="upscale_amount">
-        <option value="2">2x</option>
-        <option value="4" selected>4x</option>
+        <option id="upscale_amount_2x" value="2">2x</option>
+        <option id="upscale_amount_4x" value="4" selected>4x</option>
     </select>
     with
     <select id="upscale_model" name="upscale_model">
         <option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
         <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
+        <option value="latent_upscaler">Latent Upscaler 2x</option>
     </select>
+    <div id="latent_upscaler_settings" class="displayNone">
+        <label for="latent_upscaler_steps_slider">Upscaling Steps:</label> <input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
+    </div>
 </li>
 <li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
 </ul></div>

@@ -1346,6 +1346,12 @@ body.wait-pause {
     display:none !important;
 }
 
+#latent_upscaler_settings {
+    padding-top: 3pt;
+    padding-bottom: 3pt;
+    padding-left: 5pt;
+}
+
 /* TOAST NOTIFICATIONS */
 .toast-notification {
     position: fixed;

@@ -169,6 +169,22 @@ function loadSettings() {
             }
         })
         CURRENTLY_LOADING_SETTINGS = false
+    } else if (localStorage.length < 2) {
+        // localStorage is too short for OldSettings
+        // So this is likely the first time Easy Diffusion is running.
+        // Initialize vram_usage_level based on the available VRAM
+        function initGPUProfile(event) {
+            if ( "detail" in event
+                && "active" in event.detail
+                && "cuda:0" in event.detail.active
+                && event.detail.active["cuda:0"].mem_total < 4.5 )
+            {
+                vramUsageLevelField.value = "low"
+                vramUsageLevelField.dispatchEvent(new Event("change"))
+            }
+            document.removeEventListener("system_info_update", initGPUProfile)
+        }
+        document.addEventListener("system_info_update", initGPUProfile)
     } else {
         CURRENTLY_LOADING_SETTINGS = true
         tryLoadOldSettings()

@@ -834,6 +834,7 @@ function pixelCompare(int1, int2) {
 }
 
 // adapted from https://ben.akrin.com/canvas_fill/fill_04.html
+// May 2023 - look at using a library instead of custom code: https://github.com/shaneosullivan/example-canvas-fill
 function flood_fill(editor, the_canvas_context, x, y, color) {
     pixel_stack = [{ x: x, y: y }]
     pixels = the_canvas_context.getImageData(0, 0, editor.width, editor.height)

@@ -86,6 +86,9 @@ let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model")
 let useUpscalingField = document.querySelector("#use_upscale")
 let upscaleModelField = document.querySelector("#upscale_model")
 let upscaleAmountField = document.querySelector("#upscale_amount")
+let latentUpscalerSettings = document.querySelector("#latent_upscaler_settings")
+let latentUpscalerStepsSlider = document.querySelector("#latent_upscaler_steps_slider")
+let latentUpscalerStepsField = document.querySelector("#latent_upscaler_steps")
 let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion")
 let clipSkipField = document.querySelector("#clip_skip")
 let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None")
@@ -241,7 +244,7 @@ function setServerStatus(event) {
             break
     }
     if (SD.serverState.devices) {
-        setDeviceInfo(SD.serverState.devices)
+        document.dispatchEvent(new CustomEvent("system_info_update", { detail: SD.serverState.devices }))
     }
 }
@@ -260,20 +263,11 @@ function shiftOrConfirm(e, prompt, fn) {
     if (e.shiftKey || !confirmDangerousActionsField.checked) {
         fn(e)
     } else {
-        $.confirm({
-            theme: "modern",
-            title: prompt,
-            useBootstrap: false,
-            animateFromElement: false,
-            content:
-                '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
-            buttons: {
-                yes: () => {
-                    fn(e)
-                },
-                cancel: () => {},
-            },
-        })
+        confirm(
+            '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
+            prompt,
+            fn
+        )
     }
 }
@@ -295,6 +289,7 @@ function logError(msg, res, outputMsg) {
     logMsg(msg, "error", outputMsg)
 
     console.log("request error", res)
+    console.trace()
     setStatus("request", "error", "error")
 }
@@ -786,11 +781,6 @@ function getTaskUpdater(task, reqBody, outputContainer) {
                 }
                 msg += "</pre>"
                 logError(msg, event, outputMsg)
-            } else {
-                let msg = `Unexpected Read Error:<br/><pre>Error:${
-                    this.exception
-                }<br/>EventInfo: ${JSON.stringify(event, undefined, 4)}</pre>`
-                logError(msg, event, outputMsg)
             }
             break
     }
@@ -887,15 +877,15 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
                 1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.<br/>
                 2. Try picking a lower level in the '<em>GPU Memory Usage</em>' setting (in the '<em>Settings</em>' tab).<br/>
                 3. Try generating a smaller image.<br/>`
-        } else if (msg.toLowerCase().includes("DefaultCPUAllocator: not enough memory")) {
+        } else if (msg.includes("DefaultCPUAllocator: not enough memory")) {
             msg += `<br/><br/>
                 Reason: Your computer is running out of system RAM!
-                <br/>
+                <br/><br/>
                 <b>Suggestions</b>:
                 <br/>
                 1. Try closing unnecessary programs and browser tabs.<br/>
                 2. If that doesn't help, please increase your computer's virtual memory by following these steps for
-                <a href="https://www.ibm.com/docs/en/opw/8.2.0?topic=tuning-optional-increasing-paging-file-size-windows-computers" target="_blank">Windows</a>, or
+                <a href="https://www.ibm.com/docs/en/opw/8.2.0?topic=tuning-optional-increasing-paging-file-size-windows-computers" target="_blank">Windows</a> or
                 <a href="https://linuxhint.com/increase-swap-space-linux/" target="_blank">Linux</a>.<br/>
                 3. Try restarting your computer.<br/>`
@@ -1270,6 +1260,10 @@ function getCurrentUserRequest() {
         if (useUpscalingField.checked) {
             newTask.reqBody.use_upscale = upscaleModelField.value
             newTask.reqBody.upscale_amount = upscaleAmountField.value
+            if (upscaleModelField.value === "latent_upscaler") {
+                newTask.reqBody.upscale_amount = "2"
+                newTask.reqBody.latent_upscaler_steps = latentUpscalerStepsField.value
+            }
         }
         if (hypernetworkModelField.value) {
             newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
@@ -1584,6 +1578,20 @@ useUpscalingField.addEventListener("change", function(e) {
     upscaleAmountField.disabled = !this.checked
 })
 
+function onUpscaleModelChange() {
+    let upscale4x = document.querySelector("#upscale_amount_4x")
+    if (upscaleModelField.value === "latent_upscaler") {
+        upscale4x.disabled = true
+        upscaleAmountField.value = "2"
+        latentUpscalerSettings.classList.remove("displayNone")
+    } else {
+        upscale4x.disabled = false
+        latentUpscalerSettings.classList.add("displayNone")
+    }
+}
+upscaleModelField.addEventListener("change", onUpscaleModelChange)
+onUpscaleModelChange()
+
 makeImageBtn.addEventListener("click", makeImage)
 
 document.onkeydown = function(e) {
@@ -1593,6 +1601,27 @@ document.onkeydown = function(e) {
     }
 }
 
+/********************* Latent Upscaler Steps **************************/
+function updateLatentUpscalerSteps() {
+    latentUpscalerStepsField.value = latentUpscalerStepsSlider.value
+    latentUpscalerStepsField.dispatchEvent(new Event("change"))
+}
+
+function updateLatentUpscalerStepsSlider() {
+    if (latentUpscalerStepsField.value < 1) {
+        latentUpscalerStepsField.value = 1
+    } else if (latentUpscalerStepsField.value > 50) {
+        latentUpscalerStepsField.value = 50
+    }
+
+    latentUpscalerStepsSlider.value = latentUpscalerStepsField.value
+    latentUpscalerStepsSlider.dispatchEvent(new Event("change"))
+}
+
+latentUpscalerStepsSlider.addEventListener("input", updateLatentUpscalerSteps)
+latentUpscalerStepsField.addEventListener("input", updateLatentUpscalerStepsSlider)
+updateLatentUpscalerSteps()
+
 /********************* Guidance **************************/
 function updateGuidanceScale() {
     guidanceScaleField.value = guidanceScaleSlider.value / 10

@@ -181,8 +181,8 @@ var PARAMETERS = [
     {
         id: "listen_to_network",
        type: ParameterType.checkbox,
-        label: "Make Stable Diffusion available on your network. Please restart the program after changing this.",
-        note: "Other devices on your network can access this web page",
+        label: "Make Stable Diffusion available on your network",
+        note: "Other devices on your network can access this web page. Please restart the program after changing this.",
         icon: "fa-network-wired",
         default: true,
         saveInAppConfig: true,
@@ -191,7 +191,8 @@ var PARAMETERS = [
         id: "listen_port",
         type: ParameterType.custom,
         label: "Network port",
-        note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
+        note:
+            "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
         icon: "fa-anchor",
         render: (parameter) => {
             return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
@@ -396,14 +397,14 @@ async function getAppConfig() {
         document.querySelector("#lora_model_container").style.display = "none"
         document.querySelector("#lora_alpha_container").style.display = "none"
 
-        document.querySelectorAll("#sampler_name option.diffusers-only").forEach(option => {
+        document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
             option.style.display = "none"
         })
     } else {
         document.querySelector("#lora_model_container").style.display = ""
         document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
 
-        document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach(option => {
+        document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
             option.disabled = true
         })
         document.querySelector("#clip_skip_config").classList.remove("displayNone")
@@ -568,6 +569,16 @@ async function getSystemInfo() {
         if (allDeviceIds.length === 0) {
             useCPUField.checked = true
             useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
+
+            getParameterSettingsEntry("use_cpu").addEventListener("click", function() {
+                alert(
+                    "Sorry, we could not find a compatible graphics card! Easy Diffusion supports graphics cards with minimum 2 GB of RAM. " +
+                        "Only NVIDIA cards are supported on Windows. NVIDIA and AMD cards are supported on Linux.<br/><br/>" +
+                        "If you have a compatible graphics card, please try updating to the latest drivers.<br/><br/>" +
+                        "Only the CPU can be used for generating images, without a compatible graphics card.",
+                    "No compatible graphics card found!"
+                )
+            })
         }
 
         autoPickGPUsField.checked = devices["config"] === "auto"
@@ -586,7 +597,7 @@ async function getSystemInfo() {
             $("#use_gpus").val(activeDeviceIds)
         }
 
-        setDeviceInfo(devices)
+        document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices }))
         setHostInfo(res["hosts"])
         let force = false
         if (res["enforce_output_dir"] !== undefined) {
@@ -657,3 +668,5 @@ saveSettingsBtn.addEventListener("click", function() {
     saveSettingsBtn.classList.add("active")
     Promise.all([savePromise, asyncDelay(300)]).then(() => saveSettingsBtn.classList.remove("active"))
 })
+
+document.addEventListener("system_info_update", (e) => setDeviceInfo(e.detail))

@@ -843,57 +843,85 @@ function createTab(request) {
 
 /* TOAST NOTIFICATIONS */
 function showToast(message, duration = 5000, error = false) {
-    const toast = document.createElement("div");
-    toast.classList.add("toast-notification");
+    const toast = document.createElement("div")
+    toast.classList.add("toast-notification")
     if (error === true) {
-        toast.classList.add("toast-notification-error");
+        toast.classList.add("toast-notification-error")
     }
-    toast.innerHTML = message;
-    document.body.appendChild(toast);
+    toast.innerHTML = message
+    document.body.appendChild(toast)
 
     // Set the position of the toast on the screen
-    const toastCount = document.querySelectorAll(".toast-notification").length;
-    const toastHeight = toast.offsetHeight;
+    const toastCount = document.querySelectorAll(".toast-notification").length
+    const toastHeight = toast.offsetHeight
     const previousToastsHeight = Array.from(document.querySelectorAll(".toast-notification"))
         .slice(0, -1) // exclude current toast
-        .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0); // add 10 pixels for spacing
-    toast.style.bottom = `${10 + previousToastsHeight}px`;
-    toast.style.right = "10px";
+        .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0) // add 10 pixels for spacing
+    toast.style.bottom = `${10 + previousToastsHeight}px`
+    toast.style.right = "10px"
 
     // Delay the removal of the toast until animation has completed
     const removeToast = () => {
-        toast.classList.add("hide");
+        toast.classList.add("hide")
         const removeTimeoutId = setTimeout(() => {
-            toast.remove();
+            toast.remove()
             // Adjust the position of remaining toasts
-            const remainingToasts = document.querySelectorAll(".toast-notification");
-            const removedToastBottom = toast.getBoundingClientRect().bottom;
+            const remainingToasts = document.querySelectorAll(".toast-notification")
+            const removedToastBottom = toast.getBoundingClientRect().bottom
 
             remainingToasts.forEach((toast) => {
                 if (toast.getBoundingClientRect().bottom < removedToastBottom) {
-                    toast.classList.add("slide-down");
+                    toast.classList.add("slide-down")
                 }
-            });
+            })
 
             // Wait for the slide-down animation to complete
             setTimeout(() => {
                 // Remove the slide-down class after the animation has completed
-                const slidingToasts = document.querySelectorAll(".slide-down");
+                const slidingToasts = document.querySelectorAll(".slide-down")
                 slidingToasts.forEach((toast) => {
-                    toast.classList.remove("slide-down");
-                });
+                    toast.classList.remove("slide-down")
+                })
 
                 // Adjust the position of remaining toasts again, in case there are multiple toasts being removed at once
-                const remainingToastsDown = document.querySelectorAll(".toast-notification");
-                let heightSoFar = 0;
+                const remainingToastsDown = document.querySelectorAll(".toast-notification")
+                let heightSoFar = 0
                 remainingToastsDown.forEach((toast) => {
-                    toast.style.bottom = `${10 + heightSoFar}px`;
-                    heightSoFar += toast.offsetHeight + 10; // add 10 pixels for spacing
-                });
-            }, 0); // The duration of the slide-down animation (in milliseconds)
-        }, 500);
-    };
+                    toast.style.bottom = `${10 + heightSoFar}px`
+                    heightSoFar += toast.offsetHeight + 10 // add 10 pixels for spacing
+                })
+            }, 0) // The duration of the slide-down animation (in milliseconds)
+        }, 500)
+    }
 
     // Remove the toast after specified duration
-    setTimeout(removeToast, duration);
+    setTimeout(removeToast, duration)
 }
+
+function alert(msg, title) {
+    title = title || ""
+    $.alert({
+        theme: "modern",
+        title: title,
+        useBootstrap: false,
+        animateFromElement: false,
+        content: msg,
+    })
+}
+
+function confirm(msg, title, fn) {
+    title = title || ""
+    $.confirm({
+        theme: "modern",
+        title: title,
+        useBootstrap: false,
+        animateFromElement: false,
+        content: msg,
+        buttons: {
+            yes: () => {
+                fn(e)
+            },
+            cancel: () => {},
+        },
+    })
+}