Merge remote-tracking branch 'origin/beta' into restart-needed

Olivia Godone-Maresca 2023-07-15 13:22:41 -04:00
commit 3461bb669d
17 changed files with 272 additions and 148 deletions

View File

@@ -22,6 +22,7 @@
Our focus remains on an easy installation experience and a simple user interface, while still being quite powerful in terms of features and speed.
### Detailed changelog
* 2.5.44 - 15 Jul 2023 - (beta-only) Support for multiple LoRA files.
* 2.5.43 - 9 Jul 2023 - (beta-only) Support for loading Textual Inversion embeddings. You can find the option in the Image Settings panel. Thanks @JeLuf.
* 2.5.43 - 9 Jul 2023 - Improve the startup time of the UI.
* 2.5.42 - 4 Jul 2023 - Keyboard shortcuts for the Image Editor. Thanks @JeLuf.

View File

@@ -41,6 +41,10 @@ call python --version
echo PYTHONPATH=%PYTHONPATH%
if exist "%cd%\profile" (
set HF_HOME=%cd%\profile\.cache\huggingface
)
@rem done
echo.

View File

@@ -18,7 +18,7 @@ os_name = platform.system()
modules_to_check = {
"torch": ("1.11.0", "1.13.1", "2.0.0"),
"torchvision": ("0.12.0", "0.14.1", "0.15.1"),
"sdkit": "1.0.116",
"sdkit": "1.0.125",
"stable-diffusion-sdkit": "2.1.4",
"rich": "12.6.0",
"uvicorn": "0.19.0",

View File

@@ -104,18 +104,21 @@ call python --version
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
if "%%F" EQU "True" (
@SET ED_BIND_IP=0.0.0.0
@FOR /F "tokens=* USEBACKQ" %%G IN (`python scripts\get_config.py --default=0.0.0.0 net bind_ip`) DO (
@SET ED_BIND_IP=%%G
)
) else (
@SET ED_BIND_IP=127.0.0.1
)
)
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@python -m uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@pause

View File

@@ -72,7 +72,7 @@ export SD_UI_PATH=`pwd`/ui
export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
case "$( python scripts/get_config.py --default=False net listen_to_network )" in
"True")
export ED_BIND_IP=0.0.0.0
export ED_BIND_IP=$( python scripts/get_config.py --default=0.0.0.0 net bind_ip)
;;
"False")
export ED_BIND_IP=127.0.0.1
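
For reference, a minimal Python sketch of what a lookup like "python scripts/get_config.py --default=0.0.0.0 net bind_ip" conceptually does: read config.json, walk the given keys, and fall back to the default. The argument handling and config location here are assumptions; the real scripts/get_config.py may differ.

# Hypothetical sketch of the config lookup used by the startup scripts above;
# the real scripts/get_config.py may parse arguments and locate config.json differently.
import argparse
import json
import os

parser = argparse.ArgumentParser()
parser.add_argument("--default", default=None)
parser.add_argument("keys", nargs="+")  # e.g. net bind_ip
args = parser.parse_args()

config = {}
if os.path.exists("config.json"):
    with open("config.json") as f:
        config = json.load(f)

value = config
for key in args.keys:
    if isinstance(value, dict) and key in value:
        value = value[key]
    else:
        value = args.default
        break

print(value)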

View File

@@ -2,6 +2,7 @@ import os
import shutil
from glob import glob
import traceback
from typing import Union
from easydiffusion import app
from easydiffusion.types import TaskData
@@ -93,7 +94,14 @@ def unload_all(context: Context):
del context.model_load_errors[model_type]
def resolve_model_to_use(model_name: str = None, model_type: str = None, fail_if_not_found: bool = True):
def resolve_model_to_use(model_name: Union[str, list] = None, model_type: str = None, fail_if_not_found: bool = True):
model_names = model_name if isinstance(model_name, list) else [model_name]
model_paths = [resolve_model_to_use_single(m, model_type, fail_if_not_found) for m in model_names]
return model_paths[0] if len(model_paths) == 1 else model_paths
def resolve_model_to_use_single(model_name: str = None, model_type: str = None, fail_if_not_found: bool = True):
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
default_models = DEFAULT_MODELS.get(model_type, [])
config = app.getConfig()
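
The model_manager change above turns resolve_model_to_use into a thin wrapper around a per-model resolver, so callers can pass either one model name or a list of names. A self-contained sketch of that behavior follows; the stand-in resolver is hypothetical (the real resolve_model_to_use_single searches the models folder for known extensions).

from typing import Union

def resolve_model_to_use_single(model_name, model_type, fail_if_not_found=True):
    # Stand-in for the real resolver, which searches the models directory.
    return f"models/{model_type}/{model_name}.safetensors"

def resolve_model_to_use(model_name: Union[str, list] = None, model_type: str = None, fail_if_not_found: bool = True):
    # Wrap a single name in a list, resolve each entry, then unwrap single
    # results so callers passing a plain string still get a plain path back.
    model_names = model_name if isinstance(model_name, list) else [model_name]
    model_paths = [resolve_model_to_use_single(m, model_type, fail_if_not_found) for m in model_names]
    return model_paths[0] if len(model_paths) == 1 else model_paths

print(resolve_model_to_use("lora-a", "lora"))              # a single path
print(resolve_model_to_use(["lora-a", "lora-b"], "lora"))  # a list of paths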

View File

@@ -473,15 +473,15 @@ def start_render_thread(device):
render_threads.append(rthread)
finally:
manager_lock.release()
# timeout = DEVICE_START_TIMEOUT
# while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]:
# if rthread in weak_thread_data and "error" in weak_thread_data[rthread]:
# log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
# return False
# if timeout <= 0:
# return False
# timeout -= 1
# time.sleep(1)
timeout = DEVICE_START_TIMEOUT
while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]:
if rthread in weak_thread_data and "error" in weak_thread_data[rthread]:
log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
return False
if timeout <= 0:
return False
timeout -= 1
time.sleep(1)
return True
@@ -535,12 +535,12 @@ def update_render_threads(render_devices, active_devices):
if not start_render_thread(device):
log.warn(f"{device} failed to start.")
# if is_alive() <= 0: # No running devices, probably invalid user config.
# raise EnvironmentError(
# 'ERROR: No active render devices! Please verify the "render_devices" value in config.json'
# )
if is_alive() <= 0: # No running devices, probably invalid user config.
raise EnvironmentError(
'ERROR: No active render devices! Please verify the "render_devices" value in config.json'
)
# log.debug(f"active devices: {get_devices()['active']}")
log.debug(f"active devices: {get_devices()['active']}")
def shutdown_event(): # Signal render thread to close on shutdown

View File

@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, List, Union
from pydantic import BaseModel
@@ -22,7 +22,7 @@ class GenerateImageRequest(BaseModel):
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
hypernetwork_strength: float = 0
lora_alpha: float = 0
lora_alpha: Union[float, List[float]] = 0
tiling: str = "none" # "none", "x", "y", "xy"
@@ -32,15 +32,14 @@ class TaskData(BaseModel):
save_to_disk_path: str = None
vram_usage_level: str = "balanced" # or "low" or "medium"
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler"
use_face_correction: Union[str, List[str]] = None # or "GFPGANv1.3"
use_upscale: Union[str, List[str]] = None
upscale_amount: int = 4 # or 2
latent_upscaler_steps: int = 10
use_stable_diffusion_model: str = "sd-v1-4"
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None
use_hypernetwork_model: str = None
use_lora_model: str = None
use_stable_diffusion_model: Union[str, List[str]] = "sd-v1-4"
use_vae_model: Union[str, List[str]] = None
use_hypernetwork_model: Union[str, List[str]] = None
use_lora_model: Union[str, List[str]] = None
show_only_filtered_image: bool = False
block_nsfw: bool = False
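
These request fields now accept either a single value or a parallel list (one entry per LoRA file). A trimmed-down sketch of how the widened types validate, assuming pydantic as already imported by this module; the real classes carry many more fields.

from typing import List, Union
from pydantic import BaseModel

class GenerateImageRequest(BaseModel):
    # Only the widened fields are shown here.
    lora_alpha: Union[float, List[float]] = 0

class TaskData(BaseModel):
    use_lora_model: Union[str, List[str]] = None

TaskData(use_lora_model="some-lora")             # a single LoRA still validates as before
TaskData(use_lora_model=["lora-a", "lora-b"])    # multiple LoRA files as a list
GenerateImageRequest(lora_alpha=[0.5, 0.8])      # with one strength per LoRA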

View File

@ -1,6 +1,8 @@
import os
import re
import time
import regex
from datetime import datetime
from functools import reduce
@@ -30,11 +32,12 @@ TASK_TEXT_MAPPING = {
"lora_alpha": "LoRA Strength",
"use_hypernetwork_model": "Hypernetwork model",
"hypernetwork_strength": "Hypernetwork Strength",
"use_embedding_models": "Embedding models",
"tiling": "Seamless Tiling",
"use_face_correction": "Use Face Correction",
"use_upscale": "Use Upscaling",
"upscale_amount": "Upscale By",
"latent_upscaler_steps": "Latent Upscaler Steps"
"latent_upscaler_steps": "Latent Upscaler Steps",
}
time_placeholders = {
@@ -202,6 +205,9 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
req_metadata = req.dict()
task_data_metadata = task_data.dict()
app_config = app.getConfig()
using_diffusers = app_config.get("test_diffusers", False)
# Save the metadata in the order defined in TASK_TEXT_MAPPING
metadata = {}
for key in TASK_TEXT_MAPPING.keys():
@@ -209,6 +215,24 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
metadata[key] = req_metadata[key]
elif key in task_data_metadata:
metadata[key] = task_data_metadata[key]
elif key == "use_embedding_models" and using_diffusers:
embeddings_extensions = {".pt", ".bin", ".safetensors"}
def scan_directory(directory_path: str):
used_embeddings = []
for entry in os.scandir(directory_path):
if entry.is_file():
entry_extension = os.path.splitext(entry.name)[1]
if entry_extension not in embeddings_extensions:
continue
embedding_name_regex = regex.compile(r"(^|[\s,])" + regex.escape(os.path.splitext(entry.name)[0]) + r"([+-]*$|[\s,]|[+-]+[\s,])")
if embedding_name_regex.search(req.prompt) or embedding_name_regex.search(req.negative_prompt):
used_embeddings.append(entry.path)
elif entry.is_dir():
used_embeddings.extend(scan_directory(entry.path))
return used_embeddings
used_embeddings = scan_directory(os.path.join(app.MODELS_DIR, "embeddings"))
metadata["use_embedding_models"] = ", ".join(used_embeddings) if len(used_embeddings) > 0 else None
# Clean up the metadata
if req.init_image is None and "prompt_strength" in metadata:
@@ -222,8 +246,7 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
del metadata["latent_upscaler_steps"]
app_config = app.getConfig()
if not app_config.get("test_diffusers", False):
if not using_diffusers:
for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata):
del metadata[key]
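
The embedding scan above treats an embedding as used when its file name appears in the prompt or negative prompt as a standalone token, optionally followed by +/- emphasis markers. A small illustration with a hypothetical embedding name, using the third-party regex module that this file imports:

import regex

embedding_name = "bad-hands-5"  # hypothetical file bad-hands-5.pt
pattern = regex.compile(
    r"(^|[\s,])" + regex.escape(embedding_name) + r"([+-]*$|[\s,]|[+-]+[\s,])"
)

print(bool(pattern.search("a portrait, bad-hands-5, sharp focus")))   # True: whole token
print(bool(pattern.search("a portrait, bad-hands-5++ and flowers")))  # True: trailing emphasis markers
print(bool(pattern.search("a portrait of bad-hands-5000")))           # False: only part of a longer token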

View File

@@ -31,7 +31,7 @@
<h1>
<img id="logo_img" src="/media/images/icon-512x512.png" >
Easy Diffusion
<small><span id="version">v2.5.43</span> <span id="updateBranchLabel"></span></small>
<small><span id="version">v2.5.44</span> <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
@@ -162,9 +162,10 @@
<option value="dpm2_a">DPM2 Ancestral</option>
<option value="lms">LMS</option>
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
<option value="dpmpp_2s_a" class="k_diffusion-only">DPM++ 2s Ancestral (Karras)</option>
<option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
<option value="dpmpp_2m">DPM++ 2m (Karras)</option>
<option value="dpmpp_sde" class="k_diffusion-only">DPM++ SDE (Karras)</option>
<option value="dpmpp_2m_sde" class="diffusers-only">DPM++ 2m SDE (Karras)</option>
<option value="dpmpp_sde">DPM++ SDE (Karras)</option>
<option value="dpm_fast" class="k_diffusion-only">DPM Fast (Karras)</option>
<option value="dpm_adaptive" class="k_diffusion-only">DPM Adaptive (Karras)</option>
<option value="ddpm" class="diffusers-only">DDPM</option>
@@ -224,21 +225,14 @@
<label for="height"><small>(height)</small></label>
<div id="small_image_warning" class="displayNone">Small image sizes can cause bad image quality</div>
</td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
<tr id="lora_model_container" class="pl-5">
<td><label for="lora_model">LoRA:</label></td>
<td class="diffusers-restart-needed">
<input id="lora_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td>
</tr>
<tr id="lora_alpha_container" class="pl-5">
<td><label for="lora_alpha_slider">LoRA Strength:</label></td>
<td class="diffusers-restart-needed">
<small>-2</small> <input id="lora_alpha_slider" name="lora_alpha_slider" class="editor-slider" value="50" type="range" min="-200" max="200"> <small>2</small> &nbsp;
<input id="lora_alpha" name="lora_alpha" size="4" pattern="^-?[0-9]*\.?[0-9]*$" onkeypress="preventNonNumericalInput(event)"><br/>
<td>
<label for="lora_model">LoRA:</label>
</td>
<td class="model_entries diffusers-restart-needed"></td>
</tr>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />

View File

@ -1,12 +1,12 @@
from easydiffusion import model_manager, app, server
from easydiffusion.server import server_api # required for uvicorn
server.init()
# Init the app
model_manager.init()
app.init()
server.init()
app.init_render_threads()
# start the browser ui
app.open_browser()
app.init_render_threads()

View File

@@ -5,6 +5,8 @@
html {
position: relative;
overscroll-behavior-y: none;
color-scheme: dark !important;
}
body {
@@ -1677,6 +1679,10 @@ body.wait-pause {
background: var(--background-color3);
}
.model_entry .model_name {
width: 70%;
}
.diffusers-disabled-on-startup .diffusers-restart-needed {
font-size: 0;
}

View File

@@ -16,7 +16,9 @@ const SETTINGS_IDS_LIST = [
"clip_skip",
"vae_model",
"hypernetwork_model",
"lora_model",
"lora_model_0",
"lora_model_1",
"lora_model_2",
"sampler_name",
"width",
"height",
@@ -24,7 +26,9 @@ const SETTINGS_IDS_LIST = [
"guidance_scale",
"prompt_strength",
"hypernetwork_strength",
"lora_alpha",
"lora_alpha_0",
"lora_alpha_1",
"lora_alpha_2",
"tiling",
"output_format",
"output_quality",
@@ -176,13 +180,14 @@ function loadSettings() {
// So this is likely the first time Easy Diffusion is running.
// Initialize vram_usage_level based on the available VRAM
function initGPUProfile(event) {
if ( "detail" in event
&& "active" in event.detail
&& "cuda:0" in event.detail.active
&& event.detail.active["cuda:0"].mem_total <4.5 )
{
vramUsageLevelField.value = "low"
vramUsageLevelField.dispatchEvent(new Event("change"))
if (
"detail" in event &&
"active" in event.detail &&
"cuda:0" in event.detail.active &&
event.detail.active["cuda:0"].mem_total < 4.5
) {
vramUsageLevelField.value = "low"
vramUsageLevelField.dispatchEvent(new Event("change"))
}
document.removeEventListener("system_info_update", initGPUProfile)
}

View File

@@ -292,29 +292,58 @@ const TASK_MAPPING = {
use_lora_model: {
name: "LoRA model",
setUI: (use_lora_model) => {
const oldVal = loraModelField.value
use_lora_model =
use_lora_model === undefined || use_lora_model === null || use_lora_model === "None"
? ""
: use_lora_model
use_lora_model.forEach((model_name, i) => {
let field = loraModels[i][0]
const oldVal = field.value
if (use_lora_model !== "") {
use_lora_model = getModelPath(use_lora_model, [".ckpt", ".safetensors"])
use_lora_model = use_lora_model !== "" ? use_lora_model : oldVal
if (model_name !== "") {
model_name = getModelPath(model_name, [".ckpt", ".safetensors"])
model_name = model_name !== "" ? model_name : oldVal
}
field.value = model_name
})
// clear the remaining entries
for (let i = use_lora_model.length; i < loraModels.length; i++) {
loraModels[i][0].value = ""
}
loraModelField.value = use_lora_model
},
readUI: () => loraModelField.value,
parse: (val) => val,
readUI: () => {
let values = loraModels.map((e) => e[0].value)
values = values.filter((e) => e.trim() !== "")
values = values.length > 0 ? values : "None"
return values
},
parse: (val) => {
val = !val || val === "None" ? "" : val
val = Array.isArray(val) ? val : [val]
return val
},
},
lora_alpha: {
name: "LoRA Strength",
setUI: (lora_alpha) => {
loraAlphaField.value = lora_alpha
updateLoraAlphaSlider()
lora_alpha.forEach((model_strength, i) => {
let field = loraModels[i][1]
field.value = model_strength
})
// clear the remaining entries
for (let i = lora_alpha.length; i < loraModels.length; i++) {
loraModels[i][1].value = 0
}
},
readUI: () => {
let models = loraModels.filter((e) => e[0].value.trim() !== "")
let values = models.map((e) => e[1].value)
values = values.length > 0 ? values : 0
return values
},
parse: (val) => {
val = Array.isArray(val) ? val : [val]
val = val.map((e) => parseFloat(e))
return val
},
readUI: () => parseFloat(loraAlphaField.value),
parse: (val) => parseFloat(val),
},
use_hypernetwork_model: {
name: "Hypernetwork model",
@@ -426,8 +455,11 @@ function restoreTaskToUI(task, fieldsToSkip) {
}
if (!("use_lora_model" in task.reqBody)) {
loraModelField.value = ""
loraModelField.dispatchEvent(new Event("change"))
loraModels.forEach((e) => {
e[0].value = ""
e[1].value = 0
e[0].dispatchEvent(new Event("change"))
})
}
// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)

View File

@@ -103,9 +103,6 @@ let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae
let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None")
let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider")
let hypernetworkStrengthField = document.querySelector("#hypernetwork_strength")
let loraModelField = new ModelDropdown(document.querySelector("#lora_model"), "lora", "None")
let loraAlphaSlider = document.querySelector("#lora_alpha_slider")
let loraAlphaField = document.querySelector("#lora_alpha")
let outputFormatField = document.querySelector("#output_format")
let outputLosslessField = document.querySelector("#output_lossless")
let outputLosslessContainer = document.querySelector("#output_lossless_container")
@@ -159,6 +156,8 @@ let undoButton = document.querySelector("#undo")
let undoBuffer = []
const UNDO_LIMIT = 20
let loraModels = []
imagePreview.addEventListener("drop", function(ev) {
const data = ev.dataTransfer?.getData("text/plain")
if (!data) {
@@ -1292,13 +1291,31 @@ function getCurrentUserRequest() {
newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
newTask.reqBody.hypernetwork_strength = parseFloat(hypernetworkStrengthField.value)
}
if (testDiffusers.checked && loraModelField.value) {
newTask.reqBody.use_lora_model = loraModelField.value
newTask.reqBody.lora_alpha = parseFloat(loraAlphaField.value)
if (testDiffusers.checked) {
let [modelNames, modelStrengths] = getModelInfo(loraModels)
if (modelNames.length > 0) {
modelNames = modelNames.length == 1 ? modelNames[0] : modelNames
modelStrengths = modelStrengths.length == 1 ? modelStrengths[0] : modelStrengths
newTask.reqBody.use_lora_model = modelNames
newTask.reqBody.lora_alpha = modelStrengths
}
}
return newTask
}
function getModelInfo(models) {
let modelInfo = models.map((e) => [e[0].value, e[1].value])
modelInfo = modelInfo.filter((e) => e[0].trim() !== "")
modelInfo = modelInfo.map((e) => [e[0], parseFloat(e[1])])
let modelNames = modelInfo.map((e) => e[0])
let modelStrengths = modelInfo.map((e) => e[1])
return [modelNames, modelStrengths]
}
function getPrompts(prompts) {
if (typeof prompts === "undefined") {
prompts = promptField.value
@@ -1346,7 +1363,8 @@ function getPromptsNumber(prompts) {
let promptsToMake = []
let numberOfPrompts = 0
if (prompts.trim() !== "") { // this needs to stay sort of the same, as the prompts have to be passed through to the other functions
if (prompts.trim() !== "") {
// this needs to stay sort of the same, as the prompts have to be passed through to the other functions
prompts = prompts.split("\n")
prompts = prompts.map((prompt) => prompt.trim())
prompts = prompts.filter((prompt) => prompt !== "")
@@ -1354,7 +1372,11 @@ function getPromptsNumber(prompts) {
// estimate number of prompts
let estimatedNumberOfPrompts = 0
prompts.forEach((prompt) => {
estimatedNumberOfPrompts += (prompt.match(/{[^}]*}/g) || []).map((e) => (e.match(/,/g) || []).length + 1).reduce( (p,a) => p*a, 1) * (2**(prompt.match(/\|/g) || []).length)
estimatedNumberOfPrompts +=
(prompt.match(/{[^}]*}/g) || [])
.map((e) => (e.match(/,/g) || []).length + 1)
.reduce((p, a) => p * a, 1) *
2 ** (prompt.match(/\|/g) || []).length
})
if (estimatedNumberOfPrompts >= 10000) {
@@ -1394,7 +1416,8 @@ function applySetOperator(prompts) {
return promptsToMake
}
function applyPermuteOperator(prompts) { // prompts is array of input, trimmed, filtered and split by \n
function applyPermuteOperator(prompts) {
// prompts is array of input, trimmed, filtered and split by \n
let promptsToMake = []
prompts.forEach((prompt) => {
let promptMatrix = prompt.split("|")
@@ -1414,7 +1437,8 @@ function applyPermuteOperator(prompts) { // prompts is array of input, trimmed,
}
// returns how many prompts would have to be made with the given prompts
function applyPermuteOperatorNumber(prompts) { // prompts is array of input, trimmed, filtered and split by \n
function applyPermuteOperatorNumber(prompts) {
// prompts is array of input, trimmed, filtered and split by \n
let numberOfPrompts = 0
prompts.forEach((prompt) => {
let promptCounter = 1
@@ -1510,8 +1534,12 @@ clearAllPreviewsBtn.addEventListener("click", (e) => {
})
/* Download images popup */
showDownloadDialogBtn.addEventListener("click", (e) => { saveAllImagesDialog.showModal() })
saveAllImagesCloseBtn.addEventListener("click", (e) => { saveAllImagesDialog.close() })
showDownloadDialogBtn.addEventListener("click", (e) => {
saveAllImagesDialog.showModal()
})
saveAllImagesCloseBtn.addEventListener("click", (e) => {
saveAllImagesDialog.close()
})
modalDialogCloseOnBackdropClick(saveAllImagesDialog)
makeDialogDraggable(saveAllImagesDialog)
@@ -1629,15 +1657,11 @@ function renameMakeImageButton() {
imageLabel = totalImages + " Images"
}
if (SD.activeTasks.size == 0) {
if (totalImages >= 10000)
makeImageBtn.innerText = "Make 10000+ images"
else
makeImageBtn.innerText = "Make " + imageLabel
if (totalImages >= 10000) makeImageBtn.innerText = "Make 10000+ images"
else makeImageBtn.innerText = "Make " + imageLabel
} else {
if (totalImages >= 10000)
makeImageBtn.innerText = "Enqueue 10000+ images"
else
makeImageBtn.innerText = "Enqueue Next " + imageLabel
if (totalImages >= 10000) makeImageBtn.innerText = "Enqueue 10000+ images"
else makeImageBtn.innerText = "Enqueue Next " + imageLabel
}
}
numOutputsTotalField.addEventListener("change", renameMakeImageButton)
@@ -1829,36 +1853,6 @@ function updateHypernetworkStrengthContainer() {
hypernetworkModelField.addEventListener("change", updateHypernetworkStrengthContainer)
updateHypernetworkStrengthContainer()
/********************* LoRA alpha **********************/
function updateLoraAlpha() {
loraAlphaField.value = loraAlphaSlider.value / 100
loraAlphaField.dispatchEvent(new Event("change"))
}
function updateLoraAlphaSlider() {
if (loraAlphaField.value < -2) {
loraAlphaField.value = -2
} else if (loraAlphaField.value > 2) {
loraAlphaField.value = 2
}
loraAlphaSlider.value = loraAlphaField.value * 100
loraAlphaSlider.dispatchEvent(new Event("change"))
}
loraAlphaSlider.addEventListener("input", updateLoraAlpha)
loraAlphaField.addEventListener("input", updateLoraAlphaSlider)
updateLoraAlpha()
function updateLoraAlphaContainer() {
const loraModelContainer = document.querySelector("#lora_model_container")
if (loraModelContainer && window.getComputedStyle(loraModelContainer).display !== "none") {
document.querySelector("#lora_alpha_container").style.display = loraModelField.value === "" ? "none" : ""
}
}
loraModelField.addEventListener("change", updateLoraAlphaContainer)
updateLoraAlphaContainer()
/********************* JPEG/WEBP Quality **********************/
function updateOutputQuality() {
outputQualityField.value = 0 | outputQualitySlider.value
@@ -2076,9 +2070,8 @@ function resumeClient() {
})
}
function splashScreen(force = false) {
const splashVersion = splashScreenPopup.dataset['version']
const splashVersion = splashScreenPopup.dataset["version"]
const lastSplash = localStorage.getItem("lastSplashScreenVersion") || 0
if (testDiffusers.checked) {
if (force || lastSplash < splashVersion) {
@@ -2088,8 +2081,9 @@ function splashScreen(force = false) {
}
}
document.getElementById("logo_img").addEventListener("click", (e) => { splashScreen(true) })
document.getElementById("logo_img").addEventListener("click", (e) => {
splashScreen(true)
})
promptField.addEventListener("input", debounce(renameMakeImageButton, 1000))
@@ -2142,21 +2136,21 @@ document.getElementById("toggle-cloudflare-tunnel").addEventListener("click", as
/* Embeddings */
function updateEmbeddingsList(filter="") {
function html(model, prefix="", filter="") {
function updateEmbeddingsList(filter = "") {
function html(model, prefix = "", filter = "") {
filter = filter.toLowerCase()
let toplevel=""
let folders=""
let toplevel = ""
let folders = ""
model?.forEach( m => {
if (typeof(m) == "string") {
if (m.toLowerCase().search(filter)!=-1) {
model?.forEach((m) => {
if (typeof m == "string") {
if (m.toLowerCase().search(filter) != -1) {
toplevel += `<button data-embedding="${m}">${m}</button> `
}
} else {
let subdir = html(m[1], prefix+m[0]+"/", filter)
let subdir = html(m[1], prefix + m[0] + "/", filter)
if (subdir != "") {
folders += `<h4>${prefix}${m[0]}</h4>` + subdir
folders += `<h4>${prefix}${m[0]}</h4>` + subdir
}
}
})
@@ -2174,7 +2168,7 @@ function updateEmbeddingsList(filter="") {
insertAtCursor(promptField, text)
}
} else {
let pad=""
let pad = ""
if (e.shiftKey) {
if (!negativePromptField.value.endsWith(" ")) {
pad = " "
@@ -2189,13 +2183,25 @@ function updateEmbeddingsList(filter="") {
}
}
embeddingsList.innerHTML = html(modelsOptions.embeddings, "", filter)
embeddingsList.querySelectorAll("button").forEach( (b) => { b.addEventListener("click", onButtonClick)})
// Remove after fixing https://github.com/huggingface/diffusers/issues/3922
let warning = ""
if (vramUsageLevelField.value == "low") {
warning = `
<div style="border-color: var(--accent-color); border-width: 4px; border-radius: 1em; border-style: solid; background: black; text-align: center; padding: 1em; margin: 1em; ">
<i class="fa fa-fire" style="color:#f7630c;"></i> Warning: Your GPU memory profile is set to "Low". Embeddings currently only work in "Balanced" mode!
</div>`
}
// END of remove block
embeddingsList.innerHTML = warning + html(modelsOptions.embeddings, "", filter)
embeddingsList.querySelectorAll("button").forEach((b) => {
b.addEventListener("click", onButtonClick)
})
}
embeddingsButton.addEventListener("click", () => {
updateEmbeddingsList()
embeddingsSearchBox.value=""
embeddingsSearchBox.value = ""
embeddingsDialog.showModal()
})
embeddingsDialogCloseBtn.addEventListener("click", (e) => {
@@ -2208,7 +2214,6 @@ embeddingsSearchBox.addEventListener("input", (e) => {
modalDialogCloseOnBackdropClick(embeddingsDialog)
makeDialogDraggable(embeddingsDialog)
if (testDiffusers.checked) {
document.getElementById("embeddings-container").classList.remove("displayNone")
}
@@ -2235,3 +2240,43 @@ prettifyInputs(document)
// set the textbox as focused on start
promptField.focus()
promptField.selectionStart = promptField.value.length
// multi-models
function addModelEntry(i, modelContainer, modelsList, modelType, defaultValue, strengthStep) {
let nameId = modelType + "_model_" + i
let strengthId = modelType + "_alpha_" + i
const modelEntry = document.createElement("div")
modelEntry.className = "model_entry"
modelEntry.innerHTML = `
<input id="${nameId}" class="model_name" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<input id="${strengthId}" class="model_strength" type="number" step="${strengthStep}" style="width: 50pt" value="${defaultValue}" pattern="^-?[0-9]*\.?[0-9]*$" onkeypress="preventNonNumericalInput(event)"><br/>
`
let modelName = new ModelDropdown(modelEntry.querySelector(".model_name"), modelType, "None")
let modelStrength = modelEntry.querySelector(".model_strength")
modelContainer.appendChild(modelEntry)
modelsList.push([modelName, modelStrength])
}
function createLoRAEntries() {
let container = document.querySelector("#lora_model_container .model_entries")
for (let i = 0; i < 3; i++) {
addModelEntry(i, container, loraModels, "lora", 0.5, 0.02)
}
}
createLoRAEntries()
// chrome-like spinners only on hover
function showSpinnerOnlyOnHover(e) {
e.addEventListener("mouseenter", () => {
e.setAttribute("type", "number")
})
e.addEventListener("mouseleave", () => {
e.removeAttribute("type")
})
e.removeAttribute("type")
}
document.querySelectorAll("input[type=number]").forEach(showSpinnerOnlyOnHover)

View File

@@ -436,7 +436,6 @@ async function getAppConfig() {
if (!testDiffusersEnabled) {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#lora_alpha_container").style.display = "none"
document.querySelector("#tiling_container").style.display = "none"
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
@@ -444,7 +443,6 @@ async function getAppConfig() {
})
} else {
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
document.querySelector("#tiling_container").style.display = ""
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {

View File

@@ -1074,6 +1074,12 @@ async function deleteKeys(keyToDelete) {
function modalDialogCloseOnBackdropClick(dialog) {
dialog.addEventListener('mousedown', function (event) {
// Firefox creates an event with clientX|Y = 0|0 when choosing an <option>.
// Test whether the element interacted with is a child of the dialog, but not the
// dialog itself (the backdrop would be a part of the dialog)
if (dialog.contains(event.target) && dialog != event.target) {
return
}
var rect = dialog.getBoundingClientRect()
var isInDialog=(rect.top <= event.clientY && event.clientY <= rect.top + rect.height
&& rect.left <= event.clientX && event.clientX <= rect.left + rect.width)