Merge pull request #1119 from cmdr2/beta

Beta
This commit is contained in:
cmdr2 2023-04-04 16:16:25 +05:30 committed by GitHub
commit 730424ebc4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 450 additions and 162 deletions

View File

@ -21,6 +21,16 @@
Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
### Detailed changelog
* 2.5.30 - 1 Apr 2023 - Slider to control the strength of the LoRA model.
* 2.5.30 - 28 Mar 2023 - Refactor task entry config to use a generating method. Added ability for plugins to easily add to this. Removed confusing sentence from `contributing.md`
* 2.5.30 - 28 Mar 2023 - Allow the user to undo the deletion of tasks or images, instead of showing a pop-up each time. The new `Undo` button will be present at the top of the UI. Thanks @JeLuf.
* 2.5.30 - 28 Mar 2023 - Support saving lossless WEBP images. Thanks @ogmaresca.
* 2.5.30 - 28 Mar 2023 - Lots of bug fixes for the UI (Read LoRA flag in metadata files, new prompt weight format with scrollwheel, fix overflow with lots of tabs, clear button in image editor, shorter filenames in download). Thanks @patriceac, @JeLuf and @ogmaresca.
* 2.5.29 - 27 Mar 2023 - Fix a bug where some non-square images would fail while inpainting with a `The size of tensor a must match size of tensor b` error.
* 2.5.29 - 27 Mar 2023 - Fix the `incorrect number of channels` error, when given a PNG image with an alpha channel in `Test Diffusers`.
* 2.5.29 - 27 Mar 2023 - Fix broken inpainting in `Test Diffusers` (beta).
* 2.5.28 - 24 Mar 2023 - Support for weighted prompts and long prompt lengths (not limited to 77 tokens). This change requires enabling the `Test Diffusers` setting in beta (in the Settings tab), and restarting the program.
* 2.5.27 - 21 Mar 2023 - LoRA support, accessible by enabling the `Test Diffusers` setting (in the Settings tab in the UI). This change switches the internal engine to diffusers (if the `Test Diffusers` setting is enabled). If the `Test Diffusers` flag is disabled, it'll have no impact for the user.
* 2.5.26 - 15 Mar 2023 - Allow styling the buttons displayed on an image. Update the API to allow multiple buttons and text labels in a single row. Thanks @ogmaresca.
* 2.5.26 - 15 Mar 2023 - View images in full-screen, by either clicking on the image, or clicking the "Full screen" icon next to the Seed number on the image. Thanks @ogmaresca for the internal API.
* 2.5.25 - 14 Mar 2023 - Button to download all the images, and all the metadata as a zip file. This is available at the top of the UI, as well as on each image. Thanks @JeLuf.

View File

@ -42,8 +42,6 @@ or for Windows
10) Congrats, now any changes you make in your repo `ui` folder are linked to this running archive of the app and can be previewed in the browser.
11) Please update CHANGES.md in your pull requests.
Check the `ui/frontend/build/README.md` for instructions on running and building the React code.
## Development environment for Installer changes
Build the Windows installer using Windows, and the Linux installer using Linux. Don't mix the two, and don't use WSL. An Ubuntu VM is fine for building the Linux installer on a Windows host.

View File

@ -95,7 +95,7 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install --upgrade sdkit==1.0.48 -q || (
call python -m pip install --upgrade sdkit==1.0.63 -q || (
echo "Error updating sdkit"
)
)
@ -106,7 +106,7 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install sdkit==1.0.48 || (
call python -m pip install sdkit==1.0.63 || (
echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b

View File

@ -103,7 +103,7 @@ if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
python -m pip install --upgrade sdkit==1.0.48 -q
python -m pip install --upgrade sdkit==1.0.63 -q
fi
else
echo "Installing sdkit: https://pypi.org/project/sdkit/"
@ -111,7 +111,7 @@ else
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if python -m pip install sdkit==1.0.48 ; then
if python -m pip install sdkit==1.0.63 ; then
echo "Installed."
else
fail "sdkit install failed"

View File

@ -7,13 +7,14 @@ from easydiffusion.utils import log
from sdkit import Context
from sdkit.models import load_model, unload_model, scan_model
KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan"]
KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan", "lora"]
MODEL_EXTENSIONS = {
"stable-diffusion": [".ckpt", ".safetensors"],
"vae": [".vae.pt", ".ckpt", ".safetensors"],
"hypernetwork": [".pt", ".safetensors"],
"gfpgan": [".pth"],
"realesrgan": [".pth"],
"lora": [".ckpt", ".safetensors"],
}
DEFAULT_MODELS = {
"stable-diffusion": [ # needed to support the legacy installations
@ -23,7 +24,7 @@ DEFAULT_MODELS = {
"gfpgan": ["GFPGANv1.3"],
"realesrgan": ["RealESRGAN_x4plus"],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork"]
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
known_models = {}
@ -102,6 +103,7 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
"gfpgan": task_data.use_face_correction,
"realesrgan": task_data.use_upscale,
"nsfw_checker": True if task_data.block_nsfw else None,
"lora": task_data.use_lora_model,
}
models_to_reload = {
model_type: path
@ -125,6 +127,7 @@ def resolve_model_paths(task_data: TaskData):
)
task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type="vae")
task_data.use_hypernetwork_model = resolve_model_to_use(task_data.use_hypernetwork_model, model_type="hypernetwork")
task_data.use_lora_model = resolve_model_to_use(task_data.use_lora_model, model_type="lora")
if task_data.use_face_correction:
task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
@ -184,11 +187,13 @@ def getModels():
"stable-diffusion": "sd-v1-4",
"vae": "",
"hypernetwork": "",
"lora": "",
},
"options": {
"stable-diffusion": ["sd-v1-4"],
"vae": [],
"hypernetwork": [],
"lora": [],
},
}
@ -243,6 +248,7 @@ def getModels():
listModels(model_type="vae")
listModels(model_type="hypernetwork")
listModels(model_type="gfpgan")
listModels(model_type="lora")
if models_scanned > 0:
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")

View File

@ -10,7 +10,7 @@ from easydiffusion.utils import get_printable_request, save_images_to_disk, log
from sdkit import Context
from sdkit.generate import generate_images
from sdkit.filter import apply_filters
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, gc
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, diffusers_latent_samples_to_images
context = Context() # thread-local
"""
@ -26,6 +26,13 @@ def init(device):
context.temp_images = {}
context.partial_x_samples = None
from easydiffusion import app
app_config = app.getConfig()
context.test_diffusers = (
app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main"
)
device_manager.device_init(context, device)
@ -57,7 +64,13 @@ def make_images_internal(
):
images, user_stopped = generate_images_internal(
req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress, task_data.stream_image_progress_interval
req,
task_data,
data_queue,
task_temp_images,
step_callback,
task_data.stream_image_progress,
task_data.stream_image_progress_interval,
)
filtered_images = filter_images(task_data, images, user_stopped)
@ -82,10 +95,18 @@ def generate_images_internal(
):
context.temp_images.clear()
callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress, stream_image_progress_interval)
callback = make_step_callback(
req,
task_data,
data_queue,
task_temp_images,
step_callback,
stream_image_progress,
stream_image_progress_interval,
)
try:
if req.init_image is not None:
if req.init_image is not None and not context.test_diffusers:
req.sampler_name = "ddim"
images = generate_images(context, callback=callback, **req.dict())
@ -94,10 +115,14 @@ def generate_images_internal(
images = []
user_stopped = True
if context.partial_x_samples is not None:
images = latent_samples_to_images(context, context.partial_x_samples)
if context.test_diffusers:
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
else:
images = latent_samples_to_images(context, context.partial_x_samples)
finally:
if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
del context.partial_x_samples
if not context.test_diffusers:
del context.partial_x_samples
context.partial_x_samples = None
return images, user_stopped
@ -124,7 +149,7 @@ def filter_images(task_data: TaskData, images: list, user_stopped):
def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
return [
ResponseImage(
data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
data=img_to_base64_str(img, task_data.output_format, task_data.output_quality, task_data.output_lossless),
seed=seed,
)
for img, seed in zip(images, seeds)
@ -145,7 +170,12 @@ def make_step_callback(
def update_temp_img(x_samples, task_temp_images: list):
partial_images = []
images = latent_samples_to_images(context, x_samples)
if context.test_diffusers:
images = diffusers_latent_samples_to_images(context, x_samples)
else:
images = latent_samples_to_images(context, x_samples)
if task_data.block_nsfw:
images = apply_filters(context, "nsfw_checker", images)
@ -158,17 +188,21 @@ def make_step_callback(
del images
return partial_images
def on_image_step(x_samples, i):
def on_image_step(x_samples, i, *args):
nonlocal last_callback_time
context.partial_x_samples = x_samples
if context.test_diffusers:
context.partial_x_samples = (x_samples, args[0])
else:
context.partial_x_samples = x_samples
step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
last_callback_time = time.time()
progress = {"step": i, "step_time": step_time, "total_steps": n_steps}
if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
progress["output"] = update_temp_img(x_samples, task_temp_images)
progress["output"] = update_temp_img(context.partial_x_samples, task_temp_images)
data_queue.put(json.dumps(progress))

View File

@ -29,10 +29,10 @@ NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Prag
class NoCacheStaticFiles(StaticFiles):
def __init__(self, directory: str):
# follow_symlink is only available on fastapi >= 0.92.0
if (os.path.islink(directory)):
super().__init__(directory = os.path.realpath(directory))
if os.path.islink(directory):
super().__init__(directory=os.path.realpath(directory))
else:
super().__init__(directory = directory)
super().__init__(directory=directory)
def is_not_modified(self, response_headers, request_headers) -> bool:
if "content-type" in response_headers and (
@ -51,11 +51,12 @@ class SetAppConfigRequest(BaseModel):
ui_open_browser_on_start: bool = None
listen_to_network: bool = None
listen_port: int = None
test_diffusers: bool = False
def init():
mimetypes.init()
mimetypes.add_type('text/css', '.css')
mimetypes.add_type("text/css", ".css")
if os.path.isdir(app.CUSTOM_MODIFIERS_DIR):
server_api.mount(
@ -132,6 +133,9 @@ def set_app_config_internal(req: SetAppConfigRequest):
if "net" not in config:
config["net"] = {}
config["net"]["listen_port"] = int(req.listen_port)
config["test_diffusers"] = req.test_diffusers
try:
app.setConfig(config)

View File

@ -21,6 +21,7 @@ class GenerateImageRequest(BaseModel):
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
hypernetwork_strength: float = 0
lora_alpha: float = 0
class TaskData(BaseModel):
@ -36,11 +37,13 @@ class TaskData(BaseModel):
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None
use_hypernetwork_model: str = None
use_lora_model: str = None
show_only_filtered_image: bool = False
block_nsfw: bool = False
output_format: str = "jpeg" # or "png" or "webp"
output_quality: int = 75
output_lossless: bool = False
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5

View File

@ -27,6 +27,8 @@ TASK_TEXT_MAPPING = {
"use_vae_model": "VAE model",
"use_hypernetwork_model": "Hypernetwork model",
"hypernetwork_strength": "Hypernetwork Strength",
"use_lora_model": "LoRA model",
# "lora_alpha": "LoRA Strength",
}
@ -43,15 +45,18 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
file_name=make_filename,
output_format=task_data.output_format,
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
save_dir_path,
file_name=make_filename,
output_format=task_data.metadata_output_format,
file_format=task_data.output_format,
)
if task_data.metadata_output_format:
for metadata_output_format in task_data.metadata_output_format.split(','):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
save_dir_path,
file_name=make_filename,
output_format=metadata_output_format,
file_format=task_data.output_format,
)
else:
make_filter_filename = make_filename_callback(req, now=now, suffix="filtered")
@ -61,6 +66,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
file_name=make_filename,
output_format=task_data.output_format,
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
save_images(
filtered_images,
@ -68,6 +74,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
file_name=make_filter_filename,
output_format=task_data.output_format,
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
@ -86,6 +93,7 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
"use_stable_diffusion_model": task_data.use_stable_diffusion_model,
"use_vae_model": task_data.use_vae_model,
"use_hypernetwork_model": task_data.use_hypernetwork_model,
"use_lora_model": task_data.use_lora_model,
"use_face_correction": task_data.use_face_correction,
"use_upscale": task_data.use_upscale,
}
@ -94,6 +102,15 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
metadata["upscale_amount"] = task_data.upscale_amount
if task_data.use_hypernetwork_model is None:
del metadata["hypernetwork_strength"]
if task_data.use_lora_model is None:
if "lora_alpha" in metadata:
del metadata["lora_alpha"]
from easydiffusion import app
app_config = app.getConfig()
if not app_config.get("test_diffusers", False) and "use_lora_model" in metadata:
del metadata["use_lora_model"]
# if text, format it in the text format expected by the UI
is_txt_format = task_data.metadata_output_format.lower() == "txt"

View File

@ -30,7 +30,7 @@
<h1>
<img id="logo_img" src="/media/images/icon-512x512.png" >
Easy Diffusion
<small>v2.5.26 <span id="updateBranchLabel"></span></small>
<small>v2.5.30 <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
@ -162,7 +162,7 @@
<option value="unipc_snr">UniPC SNR</option>
<option value="unipc_tu">UniPC TU</option>
<option value="unipc_snr_2">UniPC SNR 2</option>
<option value="unipc_tu_2">UniPC TC 2</option>
<option value="unipc_tu_2">UniPC TU 2</option>
<option value="unipc_tq">UniPC TQ</option>
</select>
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
@ -217,6 +217,13 @@
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
<tr id="lora_model_container" class="pl-5"><td><label for="lora_model">LoRA:</i></label></td><td>
<input id="lora_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="lora_alpha_container" class="pl-5">
<td><label for="lora_alpha_slider">LoRA strength:</label></td>
<td> <input id="lora_alpha_slider" name="lora_alpha_slider" class="editor-slider" value="50" type="range" min="0" max="100"> <input id="lora_alpha" name="lora_alpha" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
</tr>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
@ -230,6 +237,9 @@
<option value="png">png</option>
<option value="webp">webp</option>
</select>
<span id="output_lossless_container" class="displayNone">
<input id="output_lossless" name="output_lossless" type="checkbox"><label for="output_lossless">Lossless</label></td></tr>
</span>
</td></tr>
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
@ -282,18 +292,16 @@
</div>
<div id="preview" class="col-free">
<div id="initial-text">
Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>
You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
and selecting the desired modifiers.<br/><br/>
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
</div>
<div id="preview-content">
<div id="preview-tools">
<div id="preview-tools" class="displayNone">
<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can icon"></i> Clear All</button>
<button class="tertiaryButton" id="show-download-popup"><i class="fa-solid fa-download"></i> Download images</button>
<div class="display-settings">
<button id="undo" class="displayNone primaryButton">
Undo <i class="fa-solid fa-rotate-left icon"></i>
<span class="simple-tooltip left">Undo last remove</span>
</button>
<span class="auto-scroll"></span> <!-- hack for Rabbit Hole update -->
<button id="auto_scroll_btn" class="tertiaryButton">
<i class="fa-solid fa-arrows-up-to-line icon"></i>
@ -318,6 +326,12 @@
<div class="clearfix" style="clear: both;"></div>
</div>
</div>
<div id="initial-text">
Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>
You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
and selecting the desired modifiers.<br/><br/>
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
</div>
</div>
</div>

View File

@ -13,6 +13,10 @@
z-index: 1001;
}
#viewFullSizeImgModal:not(.active) {
display: none;
}
#viewFullSizeImgModal > * {
pointer-events: auto;
margin: 0;

View File

@ -309,8 +309,7 @@ div.img-preview img {
#server-status {
position: absolute;
right: 16px;
top: 50%;
transform: translateY(-50%);
top: 4px;
text-align: right;
}
#server-status-color {
@ -336,6 +335,7 @@ div.img-preview img {
position: relative;
background: var(--background-color4);
display: flex;
padding: 12px 0 0;
}
.tab .icon {
padding-right: 4pt;
@ -344,8 +344,7 @@ div.img-preview img {
}
#logo {
display: inline;
padding: 12px;
padding-top: 8px;
padding: 0 12px 12px;
white-space: nowrap;
}
#logo h1 {
@ -559,7 +558,6 @@ div.img-preview img {
float: right;
}
#preview-tools {
display: none;
padding: 4pt;
}
#preview-tools .display-settings .dropdown-content {
@ -574,6 +572,11 @@ div.img-preview img {
margin-bottom: 5pt;
margin-top: 5pt;
}
.taskConfigContainer {
display: inline;
}
.img-batch {
display: inline;
}
@ -881,9 +884,6 @@ input::file-selector-button {
.tab .icon {
padding-right: 0px;
}
#server-status {
top: 75%;
}
.popup > div {
padding-left: 5px !important;
padding-right: 5px !important;
@ -1126,6 +1126,8 @@ input::file-selector-button {
.tab-container {
display: flex;
align-items: flex-end;
overflow-x: auto;
overflow-y: hidden;
}
.tab {

View File

@ -15,6 +15,7 @@ const SETTINGS_IDS_LIST = [
"stable_diffusion_model",
"vae_model",
"hypernetwork_model",
"lora_model",
"sampler_name",
"width",
"height",
@ -22,8 +23,10 @@ const SETTINGS_IDS_LIST = [
"guidance_scale",
"prompt_strength",
"hypernetwork_strength",
"lora_alpha",
"output_format",
"output_quality",
"output_lossless",
"negative_prompt",
"stream_image_progress",
"use_face_correction",

View File

@ -97,6 +97,7 @@ const TASK_MAPPING = {
return
}
randomSeedField.checked = false
randomSeedField.dispatchEvent(new Event('change')) // let plugins know that the state of the random seed toggle changed
seedField.disabled = false
seedField.value = seed
},
@ -230,6 +231,20 @@ const TASK_MAPPING = {
readUI: () => vaeModelField.value,
parse: (val) => val
},
use_lora_model: { name: 'LoRA model',
setUI: (use_lora_model) => {
const oldVal = loraModelField.value
use_lora_model = (use_lora_model === undefined || use_lora_model === null || use_lora_model === 'None' ? '' : use_lora_model)
if (use_lora_model !== '') {
use_lora_model = getModelPath(use_lora_model, ['.ckpt', '.safetensors'])
use_lora_model = use_lora_model !== '' ? use_lora_model : oldVal
}
loraModelField.value = use_lora_model
},
readUI: () => loraModelField.value,
parse: (val) => val
},
use_hypernetwork_model: { name: 'Hypernetwork model',
setUI: (use_hypernetwork_model) => {
const oldVal = hypernetworkModelField.value

View File

@ -744,6 +744,7 @@
"block_nsfw": false,
"output_format": "png",
"output_quality": 75,
"output_lossless": false,
}
const TASK_OPTIONAL = {
"device": 'string',
@ -755,6 +756,7 @@
"use_vae_model": 'string',
"use_hypernetwork_model": 'string',
"hypernetwork_strength": 'number',
"output_lossless": 'boolean',
}
// Higer values will result in...

View File

@ -171,6 +171,7 @@ const IMAGE_EDITOR_ACTIONS = [
icon: "fa-solid fa-xmark",
handler: (editor) => {
editor.ctx_current.clearRect(0, 0, editor.width, editor.height)
imageEditor.setImage(null, editor.width, editor.height) // properly reset the drawing canvas
},
trackHistory: true
},
@ -511,13 +512,13 @@ class ImageEditor {
}
show() {
this.popup.classList.add("active")
document.addEventListener("keydown", this.keyHandlerBound)
document.addEventListener("keyup", this.keyHandlerBound)
document.addEventListener("keydown", this.keyHandlerBound, true)
document.addEventListener("keyup", this.keyHandlerBound, true)
}
hide() {
this.popup.classList.remove("active")
document.removeEventListener("keydown", this.keyHandlerBound)
document.removeEventListener("keyup", this.keyHandlerBound)
document.removeEventListener("keydown", this.keyHandlerBound, true)
document.removeEventListener("keyup", this.keyHandlerBound, true)
}
setSize(width, height) {
if (width == this.width && height == this.height) {
@ -671,12 +672,18 @@ class ImageEditor {
else {
this.history.redo()
}
event.stopPropagation();
event.preventDefault();
}
if (event.key == "y" && event.ctrlKey) {
this.history.redo()
event.stopPropagation();
event.preventDefault();
}
if (event.key === "Escape") {
this.hide()
event.stopPropagation();
event.preventDefault();
}
}

View File

@ -129,7 +129,10 @@ function createModifierGroup(modifierGroup, initiallyExpanded, removeBy) {
}
function trimModifiers(tag) {
return tag.replace(/^\(+|\)+$/g, '').replace(/^\[+|\]+$/g, '')
// Remove trailing '-' and/or '+'
tag = tag.replace(/[-+]+$/, '');
// Remove parentheses at beginning and end
return tag.replace(/^[(]+|[\s)]+$/g, '');
}
async function loadModifiers() {
@ -157,7 +160,7 @@ async function loadModifiers() {
document.dispatchEvent(new Event('loadImageModifiers'))
}
function refreshModifiersState(newTags) {
function refreshModifiersState(newTags, inactiveTags) {
// clear existing modifiers
document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(modifierCard => {
const modifierName = modifierCard.querySelector('.modifier-card-label p').dataset.fullName // pick the full modifier name
@ -211,7 +214,7 @@ function refreshModifiersState(newTags) {
})
}
})
refreshTagsList()
refreshTagsList(inactiveTags)
}
function refreshInactiveTags(inactiveTags) {
@ -228,13 +231,13 @@ function refreshInactiveTags(inactiveTags) {
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
if (inactiveTags.find(element => element === modifierName) !== undefined) {
if (inactiveTags?.find(element => element === modifierName) !== undefined) {
i.parentElement.classList.add('modifier-toggle-inactive')
}
})
}
function refreshTagsList() {
function refreshTagsList(inactiveTags) {
editorModifierTagsList.innerHTML = ''
if (activeTags.length == 0) {
@ -266,6 +269,7 @@ function refreshTagsList() {
let brk = document.createElement('br')
brk.style.clear = 'both'
editorModifierTagsList.appendChild(brk)
refreshInactiveTags(inactiveTags)
document.dispatchEvent(new Event('refreshImageModifiers')) // notify plugins that the image tags have been refreshed
}

View File

@ -5,6 +5,29 @@ const MIN_GPUS_TO_SHOW_SELECTION = 2
const IMAGE_REGEX = new RegExp('data:image/[A-Za-z]+;base64')
const htmlTaskMap = new WeakMap()
const taskConfigSetup = {
taskConfig: {
seed: { value: ({ seed }) => seed, label: 'Seed' },
dimensions: { value: ({ reqBody }) => `${reqBody?.width}x${reqBody?.height}`, label: 'Dimensions' },
sampler_name: 'Sampler',
num_inference_steps: 'Inference Steps',
guidance_scale: 'Guidance Scale',
use_stable_diffusion_model: 'Model',
use_vae_model: { label: 'VAE', visible: ({ reqBody }) => reqBody?.use_vae_model !== undefined && reqBody?.use_vae_model.trim() !== ''},
negative_prompt: { label: 'Negative Prompt', visible: ({ reqBody }) => reqBody?.negative_prompt !== undefined && reqBody?.negative_prompt.trim() !== ''},
prompt_strength: 'Prompt Strength',
use_face_correction: 'Fix Faces',
upscale: { value: ({ reqBody }) => `${reqBody?.use_upscale} (${reqBody?.upscale_amount || 4}x)`, label: 'Upscale', visible: ({ reqBody }) => !!reqBody?.use_upscale },
use_hypernetwork_model: 'Hypernetwork',
hypernetwork_strength: { label: 'Hypernetwork Strength', visible: ({ reqBody }) => !!reqBody?.use_hypernetwork_model },
use_lora_model: { label: 'Lora Model', visible: ({ reqBody }) => !!reqBody?.use_lora_model },
lora_alpha: { label: 'Lora Strength', visible: ({ reqBody }) => !!reqBody?.use_lora_model },
preserve_init_image_color_profile: 'Preserve Color Profile',
},
pluginTaskConfig: {},
getCSSKey: (key) => key.split('_').map((s) => s.charAt(0).toUpperCase() + s.slice(1)).join('')
}
let imageCounter = 0
let imageRequest = []
@ -46,7 +69,12 @@ let vaeModelField = new ModelDropdown(document.querySelector('#vae_model'), 'vae
let hypernetworkModelField = new ModelDropdown(document.querySelector('#hypernetwork_model'), 'hypernetwork', 'None')
let hypernetworkStrengthSlider = document.querySelector('#hypernetwork_strength_slider')
let hypernetworkStrengthField = document.querySelector('#hypernetwork_strength')
let loraModelField = new ModelDropdown(document.querySelector('#lora_model'), 'lora', 'None')
let loraAlphaSlider = document.querySelector('#lora_alpha_slider')
let loraAlphaField = document.querySelector('#lora_alpha')
let outputFormatField = document.querySelector('#output_format')
let outputLosslessField = document.querySelector('#output_lossless')
let outputLosslessContainer = document.querySelector('#output_lossless_container')
let blockNSFWField = document.querySelector('#block_nsfw')
let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
let updateBranchLabel = document.querySelector("#updateBranchLabel")
@ -83,6 +111,11 @@ const processOrder = document.querySelector('#process_order_toggle')
let imagePreview = document.querySelector("#preview")
let imagePreviewContent = document.querySelector("#preview-content")
let undoButton = document.querySelector("#undo")
let undoBuffer = []
const UNDO_LIMIT = 20
imagePreview.addEventListener('drop', function(ev) {
const data = ev.dataTransfer?.getData("text/plain");
if (!data) {
@ -251,6 +284,48 @@ function playSound() {
}
}
function undoableRemove(element, doubleUndo=false) {
let data = { 'element': element, 'parent': element.parentNode, 'prev': element.previousSibling, 'next': element.nextSibling, 'doubleUndo': doubleUndo }
undoBuffer.push(data)
if (undoBuffer.length > UNDO_LIMIT) {
// Remove item from memory and also remove it from the data structures
let item = undoBuffer.shift()
htmlTaskMap.delete(item.element)
item.element.querySelectorAll('[data-imagecounter]').forEach( (img) => { delete imageRequest[img.dataset['imagecounter']] })
}
element.remove()
if (undoBuffer.length != 0) {
undoButton.classList.remove('displayNone')
}
}
function undoRemove() {
let data = undoBuffer.pop()
if (!data) {
return
}
if (data.next == null) {
data.parent.appendChild(data.element)
} else {
data.parent.insertBefore(data.element, data.next)
}
if (data.doubleUndo) {
undoRemove()
}
if (undoBuffer.length == 0) {
undoButton.classList.add('displayNone')
}
updateInitialText()
}
undoButton.addEventListener('click', () => { undoRemove() })
document.addEventListener('keydown', function(e) {
if ((e.ctrlKey || e.metaKey) && e.key === 'z' && e.target == document.body) {
undoRemove()
}
})
function showImages(reqBody, res, outputContainer, livePreview) {
let imageItemElements = outputContainer.querySelectorAll('.imgItem')
if(typeof res != 'object') return
@ -290,21 +365,19 @@ function showImages(reqBody, res, outputContainer, livePreview) {
const imageRemoveBtn = imageItemElem.querySelector('.imgPreviewItemClearBtn')
let parentTaskContainer = imageRemoveBtn.closest('.imageTaskContainer')
imageRemoveBtn.addEventListener('click', (e) => {
shiftOrConfirm(e, "Remove the image from the results?", () => {
imageItemElem.style.display = 'none'
let allHidden = true;
let children = parentTaskContainer.querySelectorAll('.imgItem');
for(let x = 0; x < children.length; x++) {
let child = children[x];
if(child.style.display != "none") {
allHidden = false;
}
undoableRemove(imageItemElem)
let allHidden = true;
let children = parentTaskContainer.querySelectorAll('.imgItem');
for(let x = 0; x < children.length; x++) {
let child = children[x];
if(child.style.display != "none") {
allHidden = false;
}
if(allHidden === true) {
const req = htmlTaskMap.get(parentTaskContainer)
if(!req.isProcessing || req.batchesDone == req.batchCount) {parentTaskContainer.parentNode.removeChild(parentTaskContainer)}
}
})
}
if(allHidden === true) {
const req = htmlTaskMap.get(parentTaskContainer)
if(!req.isProcessing || req.batchesDone == req.batchCount) { undoableRemove(parentTaskContainer, true) }
}
})
}
const imageElem = imageItemElem.querySelector('img')
@ -566,7 +639,7 @@ function makeImage() {
}))
newTaskRequests.forEach(createTask)
initialText.style.display = 'none'
updateInitialText()
}
async function onIdle() {
@ -864,7 +937,7 @@ async function onTaskStart(task) {
setStatus('request', 'fetching..')
renderButtons.style.display = 'flex'
renameMakeImageButton()
previewTools.style.display = 'block'
updateInitialText()
}
/* Hover effect for the init image in the task list */
@ -902,6 +975,29 @@ function onTaskEntryDragOver(event) {
}
}
/**
 * Renders a single task-config entry as an HTML fragment.
 * NOTE: the <span> and <div> are intentionally left open — the closing
 * tags are appended when the fragments are joined by the caller.
 * @returns {?string} the fragment, or null when the entry is not visible
 */
function generateConfig({ label, value, visible, cssKey }) {
    if (visible) {
        return `<div class="taskConfigContainer task${cssKey}Container"><b>${label}:</b> <span class="task${cssKey}">${value}`
    }
    return null
}
/**
 * Builds the list of rendered config fragments for a task, merging the
 * built-in task config with plugin-contributed entries (plugins win on
 * key collision). Each entry may supply value()/visible()/label overrides;
 * otherwise the value is read from task.reqBody and the entry is shown
 * whenever that value is defined.
 * @returns {string[]} non-empty HTML fragments, in entry order
 */
function getVisibleConfig(config, task) {
    const entries = { ...config.taskConfig, ...config.pluginTaskConfig }
    const fragments = []
    for (const key of Object.keys(entries)) {
        const entry = entries[key]
        const value = entry?.value?.(task) ?? task.reqBody[key]
        const visible = entry?.visible?.(task) ?? value !== undefined
        // Plain-string entries act as their own label.
        const label = entry?.label ?? entry
        const html = generateConfig({ label, value, visible, cssKey: config.getCSSKey(key) })
        if (html) {
            fragments.push(html)
        }
    }
    return fragments
}
/**
 * Renders the comma-separated task-config summary HTML for a task entry.
 * The fragments come with open tags, so joining with the closing sequence
 * completes all but the last one (the caller appends the final close).
 */
function createTaskConfig(task) {
    const fragments = getVisibleConfig(taskConfigSetup, task)
    return fragments.join('</span>,&nbsp;</div>')
}
function createTask(task) {
let taskConfig = ''
@ -910,30 +1006,8 @@ function createTask(task) {
let w = task.reqBody.width * h / task.reqBody.height >>0
taskConfig += `<div class="task-initimg" style="float:left;"><img style="width:${w}px;height:${h}px;" src="${task.reqBody.init_image}"><div class="task-fs-initimage"></div></div>`
}
taskConfig += `<b>Seed:</b> ${task.seed}, <b>Sampler:</b> ${task.reqBody.sampler_name}, <b>Inference Steps:</b> ${task.reqBody.num_inference_steps}, <b>Guidance Scale:</b> ${task.reqBody.guidance_scale}, <b>Model:</b> ${task.reqBody.use_stable_diffusion_model}`
if (task.reqBody.use_vae_model.trim() !== '') {
taskConfig += `, <b>VAE:</b> ${task.reqBody.use_vae_model}`
}
if (task.reqBody.negative_prompt.trim() !== '') {
taskConfig += `, <b>Negative Prompt:</b> ${task.reqBody.negative_prompt}`
}
if (task.reqBody.init_image !== undefined) {
taskConfig += `, <b>Prompt Strength:</b> ${task.reqBody.prompt_strength}`
}
if (task.reqBody.use_face_correction) {
taskConfig += `, <b>Fix Faces:</b> ${task.reqBody.use_face_correction}`
}
if (task.reqBody.use_upscale) {
taskConfig += `, <b>Upscale:</b> ${task.reqBody.use_upscale} (${task.reqBody.upscale_amount || 4}x)`
}
if (task.reqBody.use_hypernetwork_model) {
taskConfig += `, <b>Hypernetwork:</b> ${task.reqBody.use_hypernetwork_model}`
taskConfig += `, <b>Hypernetwork Strength:</b> ${task.reqBody.hypernetwork_strength}`
}
if (task.reqBody.preserve_init_image_color_profile) {
taskConfig += `, <b>Preserve Color Profile:</b> true`
}
taskConfig += `<div class="taskConfigData">${createTaskConfig(task)}</span></div></div>`;
let taskEntry = document.createElement('div')
taskEntry.id = `imageTaskContainer-${Date.now()}`
@ -993,13 +1067,16 @@ function createTask(task) {
task['stopTask'].addEventListener('click', (e) => {
e.stopPropagation()
let question = (task['isProcessing'] ? "Stop this task?" : "Remove this task?")
shiftOrConfirm(e, question, async function(e) {
if (task.batchesDone <= 0 || !task.isProcessing) {
removeTask(taskEntry)
}
abortTask(task)
})
if (task['isProcessing']) {
shiftOrConfirm(e, "Stop this task?", async function(e) {
if (task.batchesDone <= 0 || !task.isProcessing) {
removeTask(taskEntry)
}
abortTask(task)
})
} else {
removeTask(taskEntry)
}
})
task['useSettings'] = taskEntry.querySelector('.useSettings')
@ -1029,7 +1106,6 @@ function getCurrentUserRequest() {
numOutputsTotal: numOutputsTotal,
batchCount: Math.ceil(numOutputsTotal / numOutputsParallel),
seed,
reqBody: {
seed,
used_random_seed: randomSeedField.checked,
@ -1041,6 +1117,7 @@ function getCurrentUserRequest() {
height: parseInt(heightField.value),
// allow_nsfw: allowNSFWField.checked,
vram_usage_level: vramUsageLevelField.value,
sampler_name: samplerField.value,
//render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
use_stable_diffusion_model: stableDiffusionModelField.value,
use_vae_model: vaeModelField.value,
@ -1050,6 +1127,7 @@ function getCurrentUserRequest() {
block_nsfw: blockNSFWField.checked,
output_format: outputFormatField.value,
output_quality: parseInt(outputQualityField.value),
output_lossless: outputLosslessField.checked,
metadata_output_format: metadataOutputFormatField.value,
original_prompt: promptField.value,
active_tags: (activeTags.map(x => x.name)),
@ -1059,7 +1137,6 @@ function getCurrentUserRequest() {
if (IMAGE_REGEX.test(initImagePreview.src)) {
newTask.reqBody.init_image = initImagePreview.src
newTask.reqBody.prompt_strength = parseFloat(promptStrengthField.value)
// if (IMAGE_REGEX.test(maskImagePreview.src)) {
// newTask.reqBody.mask = maskImagePreview.src
// }
@ -1067,9 +1144,9 @@ function getCurrentUserRequest() {
newTask.reqBody.mask = imageInpainter.getImg()
}
newTask.reqBody.preserve_init_image_color_profile = applyColorCorrectionField.checked
newTask.reqBody.sampler_name = 'ddim'
} else {
newTask.reqBody.sampler_name = samplerField.value
if (!testDiffusers.checked) {
newTask.reqBody.sampler_name = 'ddim'
}
}
if (saveToDiskField.checked && diskPathField.value.trim() !== '') {
newTask.reqBody.save_to_disk_path = diskPathField.value.trim()
@ -1085,6 +1162,10 @@ function getCurrentUserRequest() {
newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
newTask.reqBody.hypernetwork_strength = parseFloat(hypernetworkStrengthField.value)
}
if (testDiffusers.checked && loraModelField.value) {
newTask.reqBody.use_lora_model = loraModelField.value
newTask.reqBody.lora_alpha = parseFloat(loraAlphaField.value)
}
return newTask
}
@ -1182,31 +1263,10 @@ function createFileName(prompt, seed, steps, guidance, outputFormat) {
// Most important information is the prompt
let underscoreName = prompt.replace(/[^a-zA-Z0-9]/g, '_')
underscoreName = underscoreName.substring(0, 100)
//const steps = numInferenceStepsField.value
//const guidance = guidanceScaleField.value
underscoreName = underscoreName.substring(0, 70)
// name and the top level metadata
let fileName = `${underscoreName}_Seed-${seed}_Steps-${steps}_Guidance-${guidance}`
// add the tags
// let tags = []
// let tagString = ''
// document.querySelectorAll(modifyTagsSelector).forEach(function(tag) {
// tags.push(tag.innerHTML)
// })
// join the tags with a pipe
// if (activeTags.length > 0) {
// tagString = '_Tags-'
// tagString += tags.join('|')
// }
// // append empty or populated tags
// fileName += `${tagString}`
// add the file extension
fileName += '.' + outputFormat
let fileName = `${underscoreName}_S${seed}_St${steps}_G${guidance}.${outputFormat}`
return fileName
}
@ -1225,15 +1285,23 @@ async function stopAllTasks() {
})
}
function removeTask(taskToRemove) {
taskToRemove.remove()
/**
 * Toggles the "no images yet" placeholder and the preview toolbar based on
 * whether any task containers exist. While the undo buffer still holds
 * removed tasks, the toolbar stays visible so the Undo button is reachable.
 */
function updateInitialText() {
    const hasTasks = document.querySelector('.imageTaskContainer') !== null
    if (hasTasks) {
        initialText.classList.add('displayNone')
        previewTools.classList.remove('displayNone')
    } else {
        if (undoBuffer.length == 0) {
            previewTools.classList.add('displayNone')
        }
        initialText.classList.remove('displayNone')
    }
}
// Undoably removes a task entry from the results pane, then refreshes the
// empty-state placeholder / preview-toolbar visibility.
function removeTask(taskToRemove) {
undoableRemove(taskToRemove)
updateInitialText()
}
clearAllPreviewsBtn.addEventListener('click', (e) => { shiftOrConfirm(e, "Clear all the results and tasks in this window?", async function() {
await stopAllTasks()
@ -1286,7 +1354,7 @@ function downloadAllImages() {
document.querySelectorAll(".imageTaskContainer").forEach(container => {
if (optTree) {
let name = ++i + '-' + container.querySelector('.preview-prompt').textContent.replace(/[^a-zA-Z0-9]/g, '_')
let name = ++i + '-' + container.querySelector('.preview-prompt').textContent.replace(/[^a-zA-Z0-9]/g, '_').substring(0,25)
folder = zip.folder(name)
}
container.querySelectorAll(".imgContainer img").forEach(img => {
@ -1315,9 +1383,9 @@ function downloadAllImages() {
})
})
if (optZIP) {
let now = new Date()
let now = Date.now().toString(36).toUpperCase()
zip.generateAsync({type:"blob"}).then(function (blob) {
saveAs(blob, `EasyDiffusion-Images-${now.toISOString()}.zip`);
saveAs(blob, `EasyDiffusion-Images-${now}.zip`);
})
}
@ -1458,6 +1526,33 @@ function updateHypernetworkStrengthContainer() {
hypernetworkModelField.addEventListener('change', updateHypernetworkStrengthContainer)
updateHypernetworkStrengthContainer()
/********************* LoRA alpha **********************/
/**
 * Syncs the numeric LoRA strength field from the 0-100 slider position
 * and fires a synthetic "change" event so dependent listeners update.
 */
function updateLoraAlpha() {
    const alpha = loraAlphaSlider.value / 100
    loraAlphaField.value = alpha
    loraAlphaField.dispatchEvent(new Event("change"))
}
/**
 * Syncs the slider from the numeric LoRA strength field: clamps the field
 * into the valid [0, 1] range first, then mirrors it onto the 0-100 slider
 * and fires a synthetic "change" event on the slider.
 */
function updateLoraAlphaSlider() {
    const raw = loraAlphaField.value
    if (raw < 0) {
        loraAlphaField.value = 0
    } else if (raw > 1) {
        loraAlphaField.value = 1
    }
    // Re-read the (possibly clamped) field value for the slider position.
    loraAlphaSlider.value = loraAlphaField.value * 100
    loraAlphaSlider.dispatchEvent(new Event("change"))
}
// Keep the slider and the numeric field bidirectionally in sync, and
// initialize the field from the slider's starting position.
loraAlphaSlider.addEventListener('input', updateLoraAlpha)
loraAlphaField.addEventListener('input', updateLoraAlphaSlider)
updateLoraAlpha()
// Show the LoRA strength row only while a LoRA model is selected.
function updateLoraAlphaContainer() {
    const hasLora = loraModelField.value !== ""
    document.querySelector("#lora_alpha_container").style.display = hasLora ? '' : 'none'
}
loraModelField.addEventListener('change', updateLoraAlphaContainer)
updateLoraAlphaContainer()
/********************* JPEG/WEBP Quality **********************/
function updateOutputQuality() {
outputQualityField.value = 0 | outputQualitySlider.value
@ -1479,13 +1574,26 @@ outputQualitySlider.addEventListener('input', updateOutputQuality)
outputQualityField.addEventListener('input', debounce(updateOutputQualitySlider, 1500))
updateOutputQuality()
outputFormatField.addEventListener('change', e => {
if (outputFormatField.value === 'png') {
outputQualityRow.style.display='none'
} else {
outputQualityRow.style.display='table-row'
/**
 * Shows/hides the quality-slider row and the lossless checkbox based on
 * the selected output format:
 *  - webp: lossless toggle shown; quality row hidden only while lossless is on
 *  - png:  both hidden (no quality setting applies)
 *  - any other format: quality row shown, lossless toggle hidden
 */
function updateOutputQualityVisibility() {
    const format = outputFormatField.value
    if (format === 'png') {
        outputQualityRow.classList.add('displayNone')
        outputLosslessContainer.classList.add('displayNone')
    } else if (format === 'webp') {
        outputLosslessContainer.classList.remove('displayNone')
        // Quality only matters for lossy WEBP.
        outputQualityRow.classList.toggle('displayNone', outputLosslessField.checked)
    } else {
        outputQualityRow.classList.remove('displayNone')
        outputLosslessContainer.classList.add('displayNone')
    }
}
outputFormatField.addEventListener('change', updateOutputQualityVisibility)
outputLosslessField.addEventListener('change', updateOutputQualityVisibility)
/********************* Zoom Slider **********************/
thumbnailSizeField.addEventListener('change', () => {
(function (s) {
@ -1550,7 +1658,9 @@ loadImg2ImgFromFile()
function img2imgLoad() {
promptStrengthContainer.style.display = 'table-row'
samplerSelectionContainer.style.display = "none"
if (!testDiffusers.checked) {
samplerSelectionContainer.style.display = "none"
}
initImagePreviewContainer.classList.add("has-image")
colorCorrectionSetting.style.display = ''
@ -1565,7 +1675,9 @@ function img2imgUnload() {
maskSetting.checked = false
promptStrengthContainer.style.display = "none"
samplerSelectionContainer.style.display = ""
if (!testDiffusers.checked) {
samplerSelectionContainer.style.display = ""
}
initImagePreviewContainer.classList.remove("has-image")
colorCorrectionSetting.style.display = 'none'
imageEditor.setImage(null, parseInt(widthField.value), parseInt(heightField.value))

View File

@ -76,7 +76,15 @@ var PARAMETERS = [
{
value: "embed",
label: "embed"
}
},
{
value: "embed,txt",
label: "embed & txt",
},
{
value: "embed,json",
label: "embed & json",
},
],
},
{
@ -190,6 +198,14 @@ var PARAMETERS = [
icon: "fa-fire",
default: false,
},
{
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Test Diffusers",
note: "<b>Experimental! Can have bugs!</b> Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: false,
},
];
function getParameterSettingsEntry(id) {
@ -263,6 +279,7 @@ let listenPortField = document.querySelector("#listen_port")
let useBetaChannelField = document.querySelector("#use_beta_channel")
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
let testDiffusers = document.querySelector("#test_diffusers")
let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
@ -292,6 +309,8 @@ async function getAppConfig() {
if (config.update_branch === 'beta') {
useBetaChannelField.checked = true
document.querySelector("#updateBranchLabel").innerText = "(beta)"
} else {
getParameterSettingsEntry("test_diffusers").style.display = "none"
}
if (config.ui && config.ui.open_browser_on_start === false) {
uiOpenBrowserOnStartField.checked = false
@ -302,6 +321,14 @@ async function getAppConfig() {
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
if (config.test_diffusers === undefined || config.update_branch === 'main') {
document.querySelector("#lora_model_container").style.display = 'none'
document.querySelector("#lora_alpha_container").style.display = 'none'
} else {
testDiffusers.checked = config.test_diffusers && config.update_branch !== 'main'
document.querySelector("#lora_model_container").style.display = (testDiffusers.checked ? '' : 'none')
document.querySelector("#lora_alpha_container").style.display = (testDiffusers.checked && loraModelField.value !== "" ? '' : 'none')
}
console.log('get config status response', config)
} catch (e) {
@ -471,7 +498,8 @@ saveSettingsBtn.addEventListener('click', function() {
'update_branch': updateBranch,
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
'listen_to_network': listenToNetworkField.checked,
'listen_port': listenPortField.value
'listen_port': listenPortField.value,
'test_diffusers': testDiffusers.checked
})
saveSettingsBtn.classList.add('active')
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

View File

@ -2428,6 +2428,19 @@
"path": "artist/by_yoshitaka_amano/landscape-0.jpg"
}
]
},
{
"modifier": "by Zdzislaw Beksinski",
"previews": [
{
"name": "portrait",
"path": "artist/by_zdzislaw_beksinski/portrait-0.jpg"
},
{
"name": "landscape",
"path": "artist/by_zdzislaw_beksinski/landscape-0.jpg"
}
]
}
]
},

View File

@ -1,4 +1,8 @@
(function () { "use strict"
(function () {
"use strict"
const MAX_WEIGHT = 5
if (typeof editorModifierTagsList !== 'object') {
console.error('editorModifierTagsList missing...')
return
@ -34,34 +38,42 @@
break
}
}
if (s.charAt(0) !== '(' && s.charAt(s.length - 1) !== ')' && s.trim().includes(' ')) {
s = '(' + s + ')'
t = '(' + t + ')'
}
if (delta < 0) {
// wheel scrolling up
if (s.substring(0, 1) == '[' && s.substring(s.length-1) == ']') {
s = s.substring(1, s.length - 1)
t = t.substring(1, t.length - 1)
if (s.substring(s.length - 1) == '-') {
s = s.substring(0, s.length - 1)
t = t.substring(0, t.length - 1)
}
else
{
if (s.substring(0, 10) !== '('.repeat(10) && s.substring(s.length-10) !== ')'.repeat(10)) {
s = '(' + s + ')'
t = '(' + t + ')'
if (s.substring(s.length - MAX_WEIGHT) !== '+'.repeat(MAX_WEIGHT)) {
s = s + '+'
t = t + '+'
}
}
}
else{
// wheel scrolling down
if (s.substring(0, 1) == '(' && s.substring(s.length-1) == ')') {
s = s.substring(1, s.length - 1)
t = t.substring(1, t.length - 1)
if (s.substring(s.length - 1) == '+') {
s = s.substring(0, s.length - 1)
t = t.substring(0, t.length - 1)
}
else
{
if (s.substring(0, 10) !== '['.repeat(10) && s.substring(s.length-10) !== ']'.repeat(10)) {
s = '[' + s + ']'
t = '[' + t + ']'
if (s.substring(s.length - MAX_WEIGHT) !== '-'.repeat(MAX_WEIGHT)) {
s = s + '-'
t = t + '-'
}
}
}
if (s.charAt(0) === '(' && s.charAt(s.length - 1) === ')') {
s = s.substring(1, s.length - 1)
t = t.substring(1, t.length - 1)
}
i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText = s
// update activeTags
for (let it = 0; it < overlays.length; it++) {