sdkit 1.0.49; Use a test_diffusers flag to gate access to the new renderer and LoRA model selection

cmdr2 2023-03-21 17:59:20 +05:30
parent 8aead029a8
commit f8bc50871a
10 changed files with 134 additions and 27 deletions

View File

@ -95,7 +95,7 @@ if "%ERRORLEVEL%" EQU "0" (
     set PYTHONNOUSERSITE=1
     set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-    call python -m pip install --upgrade sdkit==1.0.48 -q || (
+    call python -m pip install --upgrade sdkit==1.0.49 -q || (
         echo "Error updating sdkit"
     )
 )
@ -106,7 +106,7 @@ if "%ERRORLEVEL%" EQU "0" (
     set PYTHONNOUSERSITE=1
     set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-    call python -m pip install sdkit==1.0.48 || (
+    call python -m pip install sdkit==1.0.49 || (
         echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
         pause
         exit /b

View File

@ -103,7 +103,7 @@ if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy
         export PYTHONNOUSERSITE=1
         export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-        python -m pip install --upgrade sdkit==1.0.48 -q
+        python -m pip install --upgrade sdkit==1.0.49 -q
     fi
 else
     echo "Installing sdkit: https://pypi.org/project/sdkit/"
@ -111,7 +111,7 @@ else
     export PYTHONNOUSERSITE=1
     export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-    if python -m pip install sdkit==1.0.48 ; then
+    if python -m pip install sdkit==1.0.49 ; then
         echo "Installed."
     else
         fail "sdkit install failed"

View File

@ -7,13 +7,14 @@ from easydiffusion.utils import log
 from sdkit import Context
 from sdkit.models import load_model, unload_model, scan_model

-KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan"]
+KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan", "lora"]
 MODEL_EXTENSIONS = {
     "stable-diffusion": [".ckpt", ".safetensors"],
     "vae": [".vae.pt", ".ckpt", ".safetensors"],
     "hypernetwork": [".pt", ".safetensors"],
     "gfpgan": [".pth"],
     "realesrgan": [".pth"],
+    "lora": [".ckpt", ".safetensors"],
 }
 DEFAULT_MODELS = {
     "stable-diffusion": [ # needed to support the legacy installations
@ -23,7 +24,7 @@ DEFAULT_MODELS = {
"gfpgan": ["GFPGANv1.3"], "gfpgan": ["GFPGANv1.3"],
"realesrgan": ["RealESRGAN_x4plus"], "realesrgan": ["RealESRGAN_x4plus"],
} }
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork"] MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
known_models = {} known_models = {}
@ -102,6 +103,7 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
"gfpgan": task_data.use_face_correction, "gfpgan": task_data.use_face_correction,
"realesrgan": task_data.use_upscale, "realesrgan": task_data.use_upscale,
"nsfw_checker": True if task_data.block_nsfw else None, "nsfw_checker": True if task_data.block_nsfw else None,
"lora": task_data.use_lora_model,
} }
models_to_reload = { models_to_reload = {
model_type: path model_type: path
@ -125,6 +127,7 @@ def resolve_model_paths(task_data: TaskData):
     )
     task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type="vae")
     task_data.use_hypernetwork_model = resolve_model_to_use(task_data.use_hypernetwork_model, model_type="hypernetwork")
+    task_data.use_lora_model = resolve_model_to_use(task_data.use_lora_model, model_type="lora")

     if task_data.use_face_correction:
         task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
@ -184,11 +187,13 @@ def getModels():
"stable-diffusion": "sd-v1-4", "stable-diffusion": "sd-v1-4",
"vae": "", "vae": "",
"hypernetwork": "", "hypernetwork": "",
"lora": "",
}, },
"options": { "options": {
"stable-diffusion": ["sd-v1-4"], "stable-diffusion": ["sd-v1-4"],
"vae": [], "vae": [],
"hypernetwork": [], "hypernetwork": [],
"lora": [],
}, },
} }
@ -243,6 +248,7 @@ def getModels():
listModels(model_type="vae") listModels(model_type="vae")
listModels(model_type="hypernetwork") listModels(model_type="hypernetwork")
listModels(model_type="gfpgan") listModels(model_type="gfpgan")
listModels(model_type="lora")
if models_scanned > 0: if models_scanned > 0:
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]") log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")

View File

@ -10,7 +10,7 @@ from easydiffusion.utils import get_printable_request, save_images_to_disk, log
 from sdkit import Context
 from sdkit.generate import generate_images
 from sdkit.filter import apply_filters
-from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, gc
+from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, diffusers_latent_samples_to_images

 context = Context() # thread-local
 """
@ -26,6 +26,11 @@ def init(device):
     context.temp_images = {}
     context.partial_x_samples = None

+    from easydiffusion import app
+
+    app_config = app.getConfig()
+    context.test_diffusers = app_config.get("test_diffusers", False)
+
     device_manager.device_init(context, device)
@ -57,7 +62,13 @@ def make_images_internal(
 ):
     images, user_stopped = generate_images_internal(
-        req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress, task_data.stream_image_progress_interval
+        req,
+        task_data,
+        data_queue,
+        task_temp_images,
+        step_callback,
+        task_data.stream_image_progress,
+        task_data.stream_image_progress_interval,
     )
     filtered_images = filter_images(task_data, images, user_stopped)
@ -82,10 +93,18 @@ def generate_images_internal(
 ):
     context.temp_images.clear()

-    callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress, stream_image_progress_interval)
+    callback = make_step_callback(
+        req,
+        task_data,
+        data_queue,
+        task_temp_images,
+        step_callback,
+        stream_image_progress,
+        stream_image_progress_interval,
+    )

     try:
-        if req.init_image is not None:
+        if req.init_image is not None and not context.test_diffusers:
             req.sampler_name = "ddim"

         images = generate_images(context, callback=callback, **req.dict())
@ -94,9 +113,13 @@ def generate_images_internal(
         images = []
         user_stopped = True
         if context.partial_x_samples is not None:
-            images = latent_samples_to_images(context, context.partial_x_samples)
+            if context.test_diffusers:
+                images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
+            else:
+                images = latent_samples_to_images(context, context.partial_x_samples)
     finally:
         if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
-            del context.partial_x_samples
+            if not context.test_diffusers:
+                del context.partial_x_samples
             context.partial_x_samples = None
@ -145,7 +168,12 @@ def make_step_callback(
     def update_temp_img(x_samples, task_temp_images: list):
         partial_images = []
-        images = latent_samples_to_images(context, x_samples)
+        if context.test_diffusers:
+            images = diffusers_latent_samples_to_images(context, x_samples)
+        else:
+            images = latent_samples_to_images(context, x_samples)

         if task_data.block_nsfw:
             images = apply_filters(context, "nsfw_checker", images)
@ -158,17 +186,21 @@ def make_step_callback(
         del images
         return partial_images

-    def on_image_step(x_samples, i):
+    def on_image_step(x_samples, i, *args):
         nonlocal last_callback_time

-        context.partial_x_samples = x_samples
+        if context.test_diffusers:
+            context.partial_x_samples = (x_samples, args[0])
+        else:
+            context.partial_x_samples = x_samples

         step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
         last_callback_time = time.time()

         progress = {"step": i, "step_time": step_time, "total_steps": n_steps}

         if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
-            progress["output"] = update_temp_img(x_samples, task_temp_images)
+            progress["output"] = update_temp_img(context.partial_x_samples, task_temp_images)

         data_queue.put(json.dumps(progress))
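
Note: the renderer now branches on context.test_diffusers in three places: img2img only forces the "ddim" sampler when the flag is off, partial previews are decoded with diffusers_latent_samples_to_images, and on_image_step accepts *args because the diffusers callback passes an extra value that is stored alongside the latents. An illustrative, standalone sketch of that last pattern (names mirror the diff, but this is not sdkit code):

    class Ctx:
        test_diffusers = True
        partial_x_samples = None

    context = Ctx()

    def on_image_step(x_samples, i, *args):
        if context.test_diffusers:
            # diffusers path: keep (latents, extra) so the preview decoder gets both
            context.partial_x_samples = (x_samples, args[0])
        else:
            context.partial_x_samples = x_samples

    on_image_step([0.1, 0.2], 0, "extra-state")
    print(context.partial_x_samples)  # ([0.1, 0.2], 'extra-state') when the flag is on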

View File

@ -29,7 +29,7 @@ NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Prag
 class NoCacheStaticFiles(StaticFiles):
     def __init__(self, directory: str):
         # follow_symlink is only available on fastapi >= 0.92.0
-        if (os.path.islink(directory)):
+        if os.path.islink(directory):
             super().__init__(directory=os.path.realpath(directory))
         else:
             super().__init__(directory=directory)
@ -51,11 +51,12 @@ class SetAppConfigRequest(BaseModel):
     ui_open_browser_on_start: bool = None
     listen_to_network: bool = None
     listen_port: int = None
+    test_diffusers: bool = False


 def init():
     mimetypes.init()
-    mimetypes.add_type('text/css', '.css')
+    mimetypes.add_type("text/css", ".css")

     if os.path.isdir(app.CUSTOM_MODIFIERS_DIR):
         server_api.mount(
@ -132,6 +133,9 @@ def set_app_config_internal(req: SetAppConfigRequest):
if "net" not in config: if "net" not in config:
config["net"] = {} config["net"] = {}
config["net"]["listen_port"] = int(req.listen_port) config["net"]["listen_port"] = int(req.listen_port)
config["test_diffusers"] = req.test_diffusers
try: try:
app.setConfig(config) app.setConfig(config)
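
Note: the flag is persisted by set_app_config_internal and read back in renderer.init() via app.getConfig(), defaulting to False. A sketch of that round-trip, assuming the saved config is serialized as JSON (the storage format and location are not shown in this diff):

    import json

    config = {"net": {"listen_port": 9000}, "test_diffusers": True}  # what the settings endpoint would save
    loaded = json.loads(json.dumps(config))

    test_diffusers = loaded.get("test_diffusers", False)  # same default the renderer uses
    print(test_diffusers)  # True -> diffusers code paths and LoRA selection are enabled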

View File

@ -21,6 +21,7 @@ class GenerateImageRequest(BaseModel):
     sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
     hypernetwork_strength: float = 0
+    lora_alpha: float = 0


 class TaskData(BaseModel):
@ -36,6 +37,7 @@ class TaskData(BaseModel):
     # use_stable_diffusion_config: str = "v1-inference"
     use_vae_model: str = None
     use_hypernetwork_model: str = None
+    use_lora_model: str = None
     show_only_filtered_image: bool = False
     block_nsfw: bool = False
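
Note: lora_alpha rides along with the sampler and hypernetwork fields on GenerateImageRequest, while use_lora_model sits next to the other model selections on TaskData. An illustrative request payload (the values below are examples, not defaults from the code):

    payload = {
        "prompt": "a watercolor fox",
        "sampler_name": "euler_a",
        "hypernetwork_strength": 0,
        "lora_alpha": 0.5,            # new GenerateImageRequest field
        "use_vae_model": None,
        "use_hypernetwork_model": None,
        "use_lora_model": "my_lora",  # new TaskData field, resolved to a file path by the model manager
    }
    print(payload["use_lora_model"], payload["lora_alpha"])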

View File

@ -162,7 +162,7 @@
                         <option value="unipc_snr">UniPC SNR</option>
                         <option value="unipc_tu">UniPC TU</option>
                         <option value="unipc_snr_2">UniPC SNR 2</option>
-                        <option value="unipc_tu_2">UniPC TC 2</option>
+                        <option value="unipc_tu_2">UniPC TU 2</option>
                         <option value="unipc_tq">UniPC TQ</option>
                     </select>
                     <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
@ -217,6 +217,13 @@
                     <tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
                     <tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
                     <tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
+                    <tr id="lora_model_container" class="pl-5"><td><label for="lora_model">LoRA:</i></label></td><td>
+                        <input id="lora_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
+                    </td></tr>
+                    <tr id="lora_alpha_container" class="pl-5">
+                        <td><label for="lora_alpha_slider">LoRA strength:</label></td>
+                        <td> <input id="lora_alpha_slider" name="lora_alpha_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="lora_alpha" name="lora_alpha" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
+                    </tr>
                     <tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
                         <input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
                     </td></tr>

View File

@ -15,6 +15,7 @@ const SETTINGS_IDS_LIST = [
"stable_diffusion_model", "stable_diffusion_model",
"vae_model", "vae_model",
"hypernetwork_model", "hypernetwork_model",
"lora_model",
"sampler_name", "sampler_name",
"width", "width",
"height", "height",
@ -22,6 +23,7 @@ const SETTINGS_IDS_LIST = [
"guidance_scale", "guidance_scale",
"prompt_strength", "prompt_strength",
"hypernetwork_strength", "hypernetwork_strength",
"lora_alpha",
"output_format", "output_format",
"output_quality", "output_quality",
"negative_prompt", "negative_prompt",

View File

@ -46,6 +46,9 @@ let vaeModelField = new ModelDropdown(document.querySelector('#vae_model'), 'vae
 let hypernetworkModelField = new ModelDropdown(document.querySelector('#hypernetwork_model'), 'hypernetwork', 'None')
 let hypernetworkStrengthSlider = document.querySelector('#hypernetwork_strength_slider')
 let hypernetworkStrengthField = document.querySelector('#hypernetwork_strength')
+let loraModelField = new ModelDropdown(document.querySelector('#lora_model'), 'lora', 'None')
+let loraAlphaSlider = document.querySelector('#lora_alpha_slider')
+let loraAlphaField = document.querySelector('#lora_alpha')
 let outputFormatField = document.querySelector('#output_format')
 let blockNSFWField = document.querySelector('#block_nsfw')
 let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
@ -931,6 +934,9 @@ function createTask(task) {
         taskConfig += `, <b>Hypernetwork:</b> ${task.reqBody.use_hypernetwork_model}`
         taskConfig += `, <b>Hypernetwork Strength:</b> ${task.reqBody.hypernetwork_strength}`
     }
+    if (task.reqBody.use_lora_model) {
+        taskConfig += `, <b>LoRA:</b> ${task.reqBody.use_lora_model}`
+    }
     if (task.reqBody.preserve_init_image_color_profile) {
         taskConfig += `, <b>Preserve Color Profile:</b> true`
     }
@ -1041,9 +1047,11 @@ function getCurrentUserRequest() {
             height: parseInt(heightField.value),
             // allow_nsfw: allowNSFWField.checked,
             vram_usage_level: vramUsageLevelField.value,
+            sampler_name: samplerField.value,
             //render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
             use_stable_diffusion_model: stableDiffusionModelField.value,
             use_vae_model: vaeModelField.value,
+            use_lora_model: loraModelField.value,
             stream_progress_updates: true,
             stream_image_progress: (numOutputsTotal > 50 ? false : streamImageProgressField.checked),
             show_only_filtered_image: showOnlyFilteredImageField.checked,
@ -1067,9 +1075,9 @@ function getCurrentUserRequest() {
             newTask.reqBody.mask = imageInpainter.getImg()
         }
         newTask.reqBody.preserve_init_image_color_profile = applyColorCorrectionField.checked
-        newTask.reqBody.sampler_name = 'ddim'
-    } else {
-        newTask.reqBody.sampler_name = samplerField.value
+        if (!testDiffusers.checked) {
+            newTask.reqBody.sampler_name = 'ddim'
+        }
     }
     if (saveToDiskField.checked && diskPathField.value.trim() !== '') {
         newTask.reqBody.save_to_disk_path = diskPathField.value.trim()
@ -1458,6 +1466,34 @@ function updateHypernetworkStrengthContainer() {
 hypernetworkModelField.addEventListener('change', updateHypernetworkStrengthContainer)
 updateHypernetworkStrengthContainer()

+/********************* LoRA alpha **********************/
+function updateLoraAlpha() {
+    loraAlphaField.value = loraAlphaSlider.value / 100
+    loraAlphaField.dispatchEvent(new Event("change"))
+}
+
+function updateLoraAlphaSlider() {
+    if (loraAlphaField.value < 0) {
+        loraAlphaField.value = 0
+    } else if (loraAlphaField.value > 0.99) {
+        loraAlphaField.value = 0.99
+    }
+
+    loraAlphaSlider.value = loraAlphaField.value * 100
+    loraAlphaSlider.dispatchEvent(new Event("change"))
+}
+
+loraAlphaSlider.addEventListener('input', updateLoraAlpha)
+loraAlphaField.addEventListener('input', updateLoraAlphaSlider)
+updateLoraAlpha()
+
+// function updateLoraAlphaContainer() {
+//     document.querySelector("#lora_alpha_container").style.display = (loraModelField.value === "" ? 'none' : '')
+// }
+// loraModelField.addEventListener('change', updateLoraAlphaContainer)
+// updateLoraAlphaContainer()
+document.querySelector("#lora_alpha_container").style.display = 'none'
+
 /********************* JPEG/WEBP Quality **********************/
 function updateOutputQuality() {
     outputQualityField.value = 0 | outputQualitySlider.value
@ -1550,7 +1586,9 @@ loadImg2ImgFromFile()
 function img2imgLoad() {
     promptStrengthContainer.style.display = 'table-row'
-    samplerSelectionContainer.style.display = "none"
+    if (!testDiffusers.checked) {
+        samplerSelectionContainer.style.display = "none"
+    }
     initImagePreviewContainer.classList.add("has-image")
     colorCorrectionSetting.style.display = ''
@ -1565,7 +1603,9 @@ function img2imgUnload() {
     maskSetting.checked = false
     promptStrengthContainer.style.display = "none"
-    samplerSelectionContainer.style.display = ""
+    if (!testDiffusers.checked) {
+        samplerSelectionContainer.style.display = ""
+    }
     initImagePreviewContainer.classList.remove("has-image")
     colorCorrectionSetting.style.display = 'none'
     imageEditor.setImage(null, parseInt(widthField.value), parseInt(heightField.value))

View File

@ -190,6 +190,14 @@ var PARAMETERS = [
icon: "fa-fire", icon: "fa-fire",
default: false, default: false,
}, },
{
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Test Diffusers",
note: "<b>Experimental! Can have bugs!</b> Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: false,
},
]; ];
function getParameterSettingsEntry(id) { function getParameterSettingsEntry(id) {
@ -263,6 +271,7 @@ let listenPortField = document.querySelector("#listen_port")
 let useBetaChannelField = document.querySelector("#use_beta_channel")
 let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
 let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
+let testDiffusers = document.querySelector("#test_diffusers")

 let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
@ -302,6 +311,10 @@ async function getAppConfig() {
         if (config.net && config.net.listen_port !== undefined) {
             listenPortField.value = config.net.listen_port
         }
+        if (config.test_diffusers !== undefined) {
+            testDiffusers.checked = config.test_diffusers
+            document.querySelector("#lora_model_container").style.display = (testDiffusers.checked ? '' : 'none')
+        }

         console.log('get config status response', config)
     } catch (e) {
@ -471,7 +484,8 @@ saveSettingsBtn.addEventListener('click', function() {
         'update_branch': updateBranch,
         'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
         'listen_to_network': listenToNetworkField.checked,
-        'listen_port': listenPortField.value
+        'listen_port': listenPortField.value,
+        'test_diffusers': testDiffusers.checked
     })

     saveSettingsBtn.classList.add('active')
     asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))