Merge branch 'beta' into bucketlite

JeLuF 2023-08-03 20:31:08 +02:00
commit c1bb7dc119
14 changed files with 324 additions and 114 deletions

View File

@ -5,10 +5,10 @@ If you haven't downloaded Stable Diffusion UI yet, please download from https://
After downloading, to install please follow these instructions:
For Windows:
- Please double-click the "Start Stable Diffusion UI.cmd" file inside the "stable-diffusion-ui" folder.
- Please double-click the "Easy-Diffusion-Windows.exe" file and follow the instructions.
For Linux:
-- Please open a terminal, and go to the "stable-diffusion-ui" directory. Then run ./start.sh
+- Please open a terminal, unzip the Easy-Diffusion-Linux.zip file and go to the "easy-diffusion" directory. Then run ./start.sh
That file will automatically install everything. After that it will start the Stable Diffusion interface in a web browser.
@ -21,4 +21,4 @@ If you have any problems, please:
3. Or, file an issue at https://github.com/easydiffusion/easydiffusion/issues
Thanks
-cmdr2 (and contributors to the project)
+cmdr2 (and contributors to the project)

View File

@ -11,9 +11,9 @@ Does not require technical knowledge, does not require pre-installed software. 1
Click the download button for your operating system:
<p float="left">
<a href="https://github.com/easydiffusion/easydiffusion/releases/download/v2.5.24/Easy-Diffusion-Windows.exe"><img src="https://github.com/easydiffusion/easydiffusion/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/easydiffusion/easydiffusion/releases/download/v2.5.24/Easy-Diffusion-Linux.zip"><img src="https://github.com/easydiffusion/easydiffusion/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/easydiffusion/easydiffusion/releases/download/v2.5.24/Easy-Diffusion-Mac.zip"><img src="https://github.com/easydiffusion/easydiffusion/raw/main/media/download-mac.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Windows.exe"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Mac.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-mac.png" width="200" /></a>
</p>
**Hardware requirements:**
@ -23,6 +23,7 @@ Click the download button for your operating system:
- Minimum 8 GB of system RAM.
- At least 25 GB of space on the hard disk.
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
## On Windows:
@ -132,6 +133,15 @@ We could really use help on these aspects (click to view tasks that need your he
If you have any code contributions in mind, please feel free to say Hi to us on the [discord server](https://discord.com/invite/u9yhsFmEkB). We use the Discord server for development-related discussions, and for helping users.
# Credits
* Stable Diffusion: https://github.com/Stability-AI/stablediffusion
* CodeFormer: https://github.com/sczhou/CodeFormer (license: https://github.com/sczhou/CodeFormer/blob/master/LICENSE)
* GFPGAN: https://github.com/TencentARC/GFPGAN
* RealESRGAN: https://github.com/xinntao/Real-ESRGAN
* k-diffusion: https://github.com/crowsonkb/k-diffusion
* Code contributors and artists on the cmdr2 UI: https://github.com/cmdr2/stable-diffusion-ui and Discord (https://discord.com/invite/u9yhsFmEkB)
* Lots of contributors on the internet
# Disclaimer
The authors of this project are not responsible for any content generated using this interface.

View File

@ -18,7 +18,7 @@ os_name = platform.system()
modules_to_check = {
"torch": ("1.11.0", "1.13.1", "2.0.0"),
"torchvision": ("0.12.0", "0.14.1", "0.15.1"),
"sdkit": "1.0.156",
"sdkit": "1.0.165",
"stable-diffusion-sdkit": "2.1.4",
"rich": "12.6.0",
"uvicorn": "0.19.0",

View File

@ -148,7 +148,7 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
models_to_reload = {
model_type: path
for model_type, path in models_data.model_paths.items()
-if context.model_paths.get(model_type) != path
+if context.model_paths.get(model_type) != path or (path is not None and context.models.get(model_type) is None)
}
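Read as a predicate: a model type is now queued for reload when its requested path differs from the loaded one, or when a path is requested but no model object is currently loaded for that type (for example, after it was unloaded). A minimal sketch with illustrative names, not part of the commit:

function needsReload(loadedPath, loadedModel, requestedPath) {
    // reload when the path changed, or when a model is wanted but nothing is in memory
    return loadedPath !== requestedPath || (requestedPath != null && loadedModel == null)
}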
if models_data.model_paths.get("codeformer"):

View File

@ -15,6 +15,7 @@ from sdkit.utils import (
img_to_base64_str,
img_to_buffer,
latent_samples_to_images,
+log,
)
from .task import Task
@ -93,15 +94,27 @@ class RenderTask(Task):
return model["params"].get(param_name) != new_val
def trt_needs_reload(self, context):
-if not self.has_param_changed(context, "convert_to_tensorrt"):
+if not context.test_diffusers:
return False
if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
return True
model = context.models["stable-diffusion"]
pipe = model["default"]
if hasattr(pipe.unet, "_allocate_trt_buffers"): # TRT already loaded
return False
return True
# curr_convert_to_trt = model["params"].get("convert_to_tensorrt")
new_convert_to_trt = self.models_data.model_params.get("stable-diffusion", {}).get("convert_to_tensorrt", False)
pipe = model["default"]
is_trt_loaded = hasattr(pipe.unet, "_allocate_trt_buffers") or hasattr(
pipe.unet, "_allocate_trt_buffers_backup"
)
if new_convert_to_trt and not is_trt_loaded:
return True
curr_build_config = model["params"].get("trt_build_config")
new_build_config = self.models_data.model_params.get("stable-diffusion", {}).get("trt_build_config", {})
return new_convert_to_trt and curr_build_config != new_build_config
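Condensed, the new decision reads roughly as follows (an illustrative JavaScript sketch; JSON.stringify stands in for the Python dict comparison, and all names are assumed):

function trtNeedsReload(wantTrt, trtLoaded, currentBuildConfig, requestedBuildConfig) {
    // engine requested but no TRT buffers allocated (or backed up) yet
    if (wantTrt && !trtLoaded) return true
    // otherwise rebuild only when TRT is requested and the build profile changed
    return wantTrt && JSON.stringify(currentBuildConfig) !== JSON.stringify(requestedBuildConfig)
}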
def make_images(
@ -210,17 +223,29 @@ def generate_images_internal(
if req.init_image is not None and not context.test_diffusers:
req.sampler_name = "ddim"
req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8
+if req.control_image and task_data.control_filter_to_apply:
+req.control_image = filter_images(context, req.control_image, task_data.control_filter_to_apply)[0]
if context.test_diffusers:
pipe = context.models["stable-diffusion"]["default"]
if hasattr(pipe.unet, "_allocate_trt_buffers_backup"):
setattr(pipe.unet, "_allocate_trt_buffers", pipe.unet._allocate_trt_buffers_backup)
delattr(pipe.unet, "_allocate_trt_buffers_backup")
if hasattr(pipe.unet, "_allocate_trt_buffers"):
convert_to_trt = models_data.model_params["stable-diffusion"].get("convert_to_tensorrt", False)
-pipe.unet.forward = pipe.unet._trt_forward if convert_to_trt else pipe.unet._non_trt_forward
-# pipe.vae.decoder.forward = (
-# pipe.vae.decoder._trt_forward if convert_to_trt else pipe.vae.decoder._non_trt_forward
-# )
+if convert_to_trt:
+pipe.unet.forward = pipe.unet._trt_forward
+# pipe.vae.decoder.forward = pipe.vae.decoder._trt_forward
+log.info(f"Setting unet.forward to TensorRT")
+else:
+log.info(f"Not using TensorRT for unet.forward")
+pipe.unet.forward = pipe.unet._non_trt_forward
+# pipe.vae.decoder.forward = pipe.vae.decoder._non_trt_forward
+setattr(pipe.unet, "_allocate_trt_buffers_backup", pipe.unet._allocate_trt_buffers)
+delattr(pipe.unet, "_allocate_trt_buffers")
images = generate_images(context, callback=callback, **req.dict())
user_stopped = False

View File

@ -226,6 +226,9 @@ def convert_legacy_render_req_to_new(old_req: dict):
model_params["stable-diffusion"] = {
"clip_skip": bool(old_req.get("clip_skip", False)),
"convert_to_tensorrt": bool(old_req.get("convert_to_tensorrt", False)),
"trt_build_config": old_req.get(
"trt_build_config", {"batch_size_range": (1, 1), "dimensions_range": [(768, 1024)]}
),
}
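A legacy request that carries no trt_build_config therefore picks up the default profile above. The converted entry would look roughly like this (an illustrative object literal, not part of the commit; the Python tuples become arrays on the JSON side):

const modelParams = {
    "stable-diffusion": {
        clip_skip: false,
        convert_to_tensorrt: false,
        trt_build_config: { batch_size_range: [1, 1], dimensions_range: [[768, 1024]] },
    },
}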
# move the filter params

View File

@ -21,6 +21,8 @@ TASK_TEXT_MAPPING = {
"seed": "Seed",
"use_stable_diffusion_model": "Stable Diffusion model",
"clip_skip": "Clip Skip",
"use_controlnet_model": "ControlNet model",
"control_filter_to_apply": "ControlNet Filter",
"use_vae_model": "VAE model",
"sampler_name": "Sampler",
"width": "Width",
@ -260,10 +262,12 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
del metadata["lora_alpha"]
if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
del metadata["latent_upscaler_steps"]
+if task_data.use_controlnet_model is None and "control_filter_to_apply" in metadata:
+del metadata["control_filter_to_apply"]
if not using_diffusers:
for key in (
x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata
x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps", "use_controlnet_model", "control_filter_to_apply"] if x in metadata
):
del metadata[key]

View File

@ -163,60 +163,63 @@
<a href="https://github.com/easydiffusion/easydiffusion/wiki/Clip-Skip" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Clip Skip</span></i></a>
</td>
</tr>
<tr id="controlnet_model_container" class="pl-5"><td><label for="controlnet_model">ControlNet Image:</label></td><td>
<div id="control_image_wrapper" class="preview_image_wrapper">
<img id="control_image_preview" class="image_preview" src="" crossorigin="anonymous" />
<span id="control_image_size_box" class="img_bottom_label"></span>
<button class="control_image_clear image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
</div>
<input id="control_image" name="control_image" type="file" />
<a href="https://github.com/easydiffusion/easydiffusion/wiki/ControlNet" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about ControlNets</span></i></a>
<div id="controlnet_config" class="displayNone">
<label><small>Filter to apply:</small></label>
<select id="control_image_filter">
<option value="">None</option>
<optgroup label="Pose">
<option value="openpose">OpenPose (*)</option>
<option value="openpose_face">OpenPose face</option>
<option value="openpose_faceonly">OpenPose face-only</option>
<option value="openpose_hand">OpenPose hand</option>
<option value="openpose_full">OpenPose full</option>
</optgroup>
<optgroup label="Outline">
<option value="canny">Canny (*)</option>
<option value="mlsd">Straight lines</option>
<option value="scribble_hed">Scribble hed (*)</option>
<option value="scribble_hedsafe">Scribble hedsafe</option>
<option value="scribble_pidinet">Scribble pidinet</option>
<option value="scribble_pidsafe">Scribble pidsafe</option>
<option value="softedge_hed">Softedge hed</option>
<option value="softedge_hedsafe">Softedge hedsafe</option>
<option value="softedge_pidinet">Softedge pidinet</option>
<option value="softedge_pidsafe">Softedge pidsafe</option>
</optgroup>
<optgroup label="Depth">
<option value="normal_bae">Normal bae (*)</option>
<option value="depth_midas">Depth midas</option>
<option value="depth_zoe">Depth zoe</option>
<option value="depth_leres">Depth leres</option>
<option value="depth_leres++">Depth leres++</option>
</optgroup>
<optgroup label="Line art">
<option value="lineart_coarse">Lineart coarse</option>
<option value="lineart_realistic">Lineart realistic</option>
<option value="lineart_anime">Lineart anime</option>
</optgroup>
<optgroup label="Misc">
<option value="shuffle">Shuffle</option>
<option value="segment">Segment</option>
</optgroup>
</select>
<br/>
<label for="controlnet_model"><small>Model:</small></label> <input id="controlnet_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/>
<label><small>Will download the necessary models, the first time.</small></label>
</div>
</td></tr>
<tr id="controlnet_model_container" class="pl-5">
<td><label for="controlnet_model">ControlNet Image:</label></td>
<td class="diffusers-restart-needed">
<div id="control_image_wrapper" class="preview_image_wrapper">
<img id="control_image_preview" class="image_preview" src="" crossorigin="anonymous" />
<span id="control_image_size_box" class="img_bottom_label"></span>
<button class="control_image_clear image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
</div>
<input id="control_image" name="control_image" type="file" />
<a href="https://github.com/easydiffusion/easydiffusion/wiki/ControlNet" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about ControlNets</span></i></a>
<div id="controlnet_config" class="displayNone">
<label><small>Filter to apply:</small></label>
<select id="control_image_filter">
<option value="">None</option>
<optgroup label="Pose">
<option value="openpose">OpenPose (*)</option>
<option value="openpose_face">OpenPose face</option>
<option value="openpose_faceonly">OpenPose face-only</option>
<option value="openpose_hand">OpenPose hand</option>
<option value="openpose_full">OpenPose full</option>
</optgroup>
<optgroup label="Outline">
<option value="canny">Canny (*)</option>
<option value="mlsd">Straight lines</option>
<option value="scribble_hed">Scribble hed (*)</option>
<option value="scribble_hedsafe">Scribble hedsafe</option>
<option value="scribble_pidinet">Scribble pidinet</option>
<option value="scribble_pidsafe">Scribble pidsafe</option>
<option value="softedge_hed">Softedge hed</option>
<option value="softedge_hedsafe">Softedge hedsafe</option>
<option value="softedge_pidinet">Softedge pidinet</option>
<option value="softedge_pidsafe">Softedge pidsafe</option>
</optgroup>
<optgroup label="Depth">
<option value="normal_bae">Normal bae (*)</option>
<option value="depth_midas">Depth midas</option>
<option value="depth_zoe">Depth zoe</option>
<option value="depth_leres">Depth leres</option>
<option value="depth_leres++">Depth leres++</option>
</optgroup>
<optgroup label="Line art">
<option value="lineart_coarse">Lineart coarse</option>
<option value="lineart_realistic">Lineart realistic</option>
<option value="lineart_anime">Lineart anime</option>
</optgroup>
<optgroup label="Misc">
<option value="shuffle">Shuffle</option>
<option value="segment">Segment</option>
</optgroup>
</select>
<br/>
<label for="controlnet_model"><small>Model:</small></label> <input id="controlnet_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/>
<label><small>Will download the necessary models, the first time.</small></label>
</div>
</td>
</tr>
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</label></td><td>
<input id="vae_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<a href="https://github.com/easydiffusion/easydiffusion/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
@ -323,7 +326,7 @@
<button class="add_model_entry"><i class="fa-solid fa-plus"></i> add another LoRA</button>
</td>
</tr>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<tr id="hypernetwork_model_container" class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="hypernetwork_strength_container" class="pl-5">

View File

@ -1852,4 +1852,9 @@ div#enlarge-buttons {
}
#controlnet_model {
width: 77%;
}
+/* hack for fixing Image Modifier Improvements plugin */
+#imageTagPopupContainer {
+position: absolute;
+}

View File

@ -55,6 +55,7 @@ const SETTINGS_IDS_LIST = [
"zip_toggle",
"tree_toggle",
"json_toggle",
"extract_lora_from_prompt",
]
const IGNORE_BY_DEFAULT = ["prompt"]

View File

@ -186,6 +186,8 @@ let undoBuffer = []
const UNDO_LIMIT = 20
const MAX_IMG_UNDO_ENTRIES = 5
+let IMAGE_STEP_SIZE = 64
+let loraModels = []
imagePreview.addEventListener("drop", function(ev) {
@ -1453,15 +1455,21 @@ function getCurrentUserRequest() {
let numOutputsParallel = parseInt(numOutputsParallelField.value)
const seed = randomSeedField.checked ? Math.floor(Math.random() * (2 ** 32 - 1)) : parseInt(seedField.value)
-if (
-testDiffusers.checked &&
-document.getElementById("toggle-tensorrt-install").innerHTML == "Uninstall" &&
-document.querySelector("#convert_to_tensorrt").checked
-) {
-// TRT enabled
+// if (
+// testDiffusers.checked &&
+// document.getElementById("toggle-tensorrt-install").innerHTML == "Uninstall" &&
+// document.querySelector("#convert_to_tensorrt").checked
+// ) {
+// // TRT enabled
-numOutputsParallel = 1 // force 1 parallel
-}
+// numOutputsParallel = 1 // force 1 parallel
+// }
+// clamp to multiple of 8
+let width = parseInt(widthField.value)
+let height = parseInt(heightField.value)
+width = width - (width % IMAGE_STEP_SIZE)
+height = height - (height % IMAGE_STEP_SIZE)
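A quick worked example of the clamping, with assumed values:

// IMAGE_STEP_SIZE = 64: a requested 1000×700 clamps to 960×640
// IMAGE_STEP_SIZE = 8 (diffusers): the same request clamps to 1000×696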
const newTask = {
batchesDone: 0,
@ -1475,8 +1483,8 @@ function getCurrentUserRequest() {
num_outputs: numOutputsParallel,
num_inference_steps: parseInt(numInferenceStepsField.value),
guidance_scale: parseFloat(guidanceScaleField.value),
-width: parseInt(widthField.value),
-height: parseInt(heightField.value),
+width: width,
+height: height,
// allow_nsfw: allowNSFWField.checked,
vram_usage_level: vramUsageLevelField.value,
sampler_name: samplerField.value,
@ -1550,6 +1558,22 @@ function getCurrentUserRequest() {
if (testDiffusers.checked && document.getElementById("toggle-tensorrt-install").innerHTML == "Uninstall") {
// TRT is installed
newTask.reqBody.convert_to_tensorrt = document.querySelector("#convert_to_tensorrt").checked
+let trtBuildConfig = {
+batch_size_range: [
+parseInt(document.querySelector("#trt-build-min-batch").value),
+parseInt(document.querySelector("#trt-build-max-batch").value),
+],
+dimensions_range: [],
+}
+let sizes = [512, 768, 1024, 1280, 1536]
+sizes.forEach((i) => {
+let el = document.querySelector("#trt-build-res-" + i)
+if (el.checked) {
+trtBuildConfig["dimensions_range"].push([i, i + 256])
+}
+})
+newTask.reqBody.trt_build_config = trtBuildConfig
}
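With the defaults shown in the build-config panel (min and max batch of 1, only the 768 resolution checked), the request would carry roughly these fields — a sketch derived from the code above, values assumed:

newTask.reqBody.convert_to_tensorrt = true
newTask.reqBody.trt_build_config = {
    batch_size_range: [1, 1],
    dimensions_range: [[768, 1024]], // each checked size i contributes [i, i + 256]
}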
if (controlnetModelField.value !== "" && IMAGE_REGEX.test(controlImagePreview.src)) {
newTask.reqBody.use_controlnet_model = controlnetModelField.value
@ -2238,6 +2262,7 @@ function checkRandomSeed() {
randomSeedField.addEventListener("input", checkRandomSeed)
checkRandomSeed()
+// warning: the core plugin `image-editor-improvements.js:172` replaces loadImg2ImgFromFile() with a custom version
function loadImg2ImgFromFile() {
if (initImageSelector.files.length === 0) {
return
@ -2320,6 +2345,9 @@ controlImageSelector.addEventListener("change", loadControlnetImageFromFile)
function controlImageLoad() {
let w = controlImagePreview.naturalWidth
let h = controlImagePreview.naturalHeight
+w = w - (w % IMAGE_STEP_SIZE)
+h = h - (h % IMAGE_STEP_SIZE)
addImageSizeOption(w)
addImageSizeOption(h)
@ -2481,6 +2509,7 @@ function packagesUpdate(event) {
if (document.getElementById("toggle-tensorrt-install").innerHTML == "Uninstall") {
document.querySelector("#enable_trt_config").classList.remove("displayNone")
document.querySelector("#trt-build-config").classList.remove("displayNone")
if (!trtSettingsForced) {
// settings for demo
@ -2492,8 +2521,8 @@ function packagesUpdate(event) {
seedField.disabled = false
stableDiffusionModelField.value = "sd-v1-4"
-numOutputsParallelField.classList.add("displayNone")
-document.querySelector("#num_outputs_parallel_label").classList.add("displayNone")
+// numOutputsParallelField.classList.add("displayNone")
+// document.querySelector("#num_outputs_parallel_label").classList.add("displayNone")
trtSettingsForced = true
}

View File

@ -121,6 +121,15 @@ var PARAMETERS = [
icon: "fa-arrow-down-short-wide",
default: false,
},
+{
+id: "extract_lora_from_prompt",
+type: ParameterType.checkbox,
+label: "Extract LoRA tags from the prompt",
+note:
+"Automatically extract lora tags like &lt;lora:name:0.4&gt; from the prompt, and apply the correct LoRA (if present)",
+icon: "fa-code",
+default: true,
+},
{
id: "ui_open_browser_on_start",
type: ParameterType.checkbox,
@ -258,7 +267,19 @@ var PARAMETERS = [
label: "NVIDIA TensorRT",
note: `Faster image generation by converting your Stable Diffusion models to the NVIDIA TensorRT format. You can choose the
models to convert. Download size: approximately 2 GB.<br/><br/>
-<b>Early access version:</b> support for LoRA is still under development.`,
+<b>Early access version:</b> support for LoRA is still under development.
+<div id="trt-build-config" class="displayNone">
+<h3>Build Config:</h3>
+Batch size range:
+<label>Min:</label> <input id="trt-build-min-batch" type="number" min="1" value="1" style="width: 40pt" />
+<label>Max:</label> <input id="trt-build-max-batch" type="number" min="1" value="1" style="width: 40pt" /><br/><br/>
+<b>Build for resolutions</b>:<br/>
+<input id="trt-build-res-512" type="checkbox" value="1" /> 512x512 to 768x768<br/>
+<input id="trt-build-res-768" type="checkbox" value="1" checked /> 768x768 to 1024x1024<br/>
+<input id="trt-build-res-1024" type="checkbox" value="1" /> 1024x1024 to 1280x1280<br/>
+<input id="trt-build-res-1280" type="checkbox" value="1" /> 1280x1280 to 1536x1536<br/>
+<input id="trt-build-res-1536" type="checkbox" value="1" /> 1536x1536 to 1792x1792<br/>
+</div>`,
icon: "fa-angles-up",
render: () => '<button id="toggle-tensorrt-install" class="primaryButton">Install</button>',
table: installExtrasTable,
@ -460,15 +481,22 @@ async function getAppConfig() {
if (!testDiffusersEnabled) {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#tiling_container").style.display = "none"
document.querySelector("#controlnet_model_container").style.display = "none"
document.querySelector("#hypernetwork_model_container").style.display = ""
document.querySelector("#hypernetwork_strength_container").style.display = ""
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
option.style.display = "none"
})
-customWidthField.step=64
-customHeightField.step=64
+IMAGE_STEP_SIZE = 64
+customWidthField.step = IMAGE_STEP_SIZE
+customHeightField.step = IMAGE_STEP_SIZE
} else {
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#tiling_container").style.display = ""
document.querySelector("#controlnet_model_container").style.display = ""
document.querySelector("#hypernetwork_model_container").style.display = "none"
document.querySelector("#hypernetwork_strength_container").style.display = "none"
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
option.style.display = "none"
@ -476,8 +504,9 @@ async function getAppConfig() {
document.querySelector("#clip_skip_config").classList.remove("displayNone")
document.querySelector("#embeddings-button").classList.remove("displayNone")
document.querySelector("#negative-embeddings-button").classList.remove("displayNone")
-customWidthField.step=8
-customHeightField.step=8
+IMAGE_STEP_SIZE = 8
+customWidthField.step = IMAGE_STEP_SIZE
+customHeightField.step = IMAGE_STEP_SIZE
}
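Summarizing the branch above (editorial note, not commit content):

// diffusers off: hide LoRA / tiling / ControlNet rows, show hypernetwork rows, snap sizes to 64 px
// diffusers on: show LoRA / tiling / ControlNet rows, hide hypernetwork rows, snap sizes to 8 px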
console.log("get config status response", config)

View File

@ -124,35 +124,17 @@
// Draw the image with centered coordinates
context.drawImage(imageObj, x, y, this.width, this.height);
+initImagePreview.src = canvas.toDataURL('image/png');
+let bestWidth = maxCroppedWidth - maxCroppedWidth % IMAGE_STEP_SIZE
+let bestHeight = maxCroppedHeight - maxCroppedHeight % IMAGE_STEP_SIZE
-// Get the options from widthField and heightField
-const widthOptions = Array.from(widthField.options).map(option => parseInt(option.value));
-const heightOptions = Array.from(heightField.options).map(option => parseInt(option.value));
-// Find the closest aspect ratio and closest to original dimensions
-let bestWidth = widthOptions[0];
-let bestHeight = heightOptions[0];
-let minDifference = Math.abs(maxCroppedWidth / maxCroppedHeight - bestWidth / bestHeight);
-let minDistance = Math.abs(maxCroppedWidth - bestWidth) + Math.abs(maxCroppedHeight - bestHeight);
-for (const width of widthOptions) {
-for (const height of heightOptions) {
-const difference = Math.abs(maxCroppedWidth / maxCroppedHeight - width / height);
-const distance = Math.abs(maxCroppedWidth - width) + Math.abs(maxCroppedHeight - height);
-if (difference < minDifference || (difference === minDifference && distance < minDistance)) {
-minDifference = difference;
-minDistance = distance;
-bestWidth = width;
-bestHeight = height;
-}
-}
-}
addImageSizeOption(bestWidth)
addImageSizeOption(bestHeight)
-// Set the width and height to the closest aspect ratio and closest to original dimensions
widthField.value = bestWidth;
heightField.value = bestHeight;
-initImagePreview.src = canvas.toDataURL('image/png');
};
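For contrast with the removed search (editorial note, not commit content):

// old: scan the widthField/heightField presets for the closest aspect-ratio and size match
// new: clamp the cropped size down to the IMAGE_STEP_SIZE grid and register it as a dropdown option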
function handlePaste(e) {

View File

@ -0,0 +1,119 @@
/*
LoRA Prompt Parser 1.0
by Patrice
Copying and pasting a prompt with a LoRA tag will automatically select the corresponding option in the Easy Diffusion dropdown and remove the LoRA tag from the prompt. The LoRA must already be available in the corresponding Easy Diffusion dropdown (this is not a LoRA downloader).
*/
(function() {
"use strict"
promptField.addEventListener('input', function(e) {
let loraExtractSetting = document.getElementById("extract_lora_from_prompt")
if (!loraExtractSetting.checked) {
return
}
const { LoRA, prompt } = extractLoraTags(e.target.value);
//console.log('e.target: ' + JSON.stringify(LoRA));
if (LoRA !== null && LoRA.length > 0) {
promptField.value = prompt.replace(/,+$/, ''); // remove any trailing ,
if (testDiffusers?.checked === false) {
showToast("LoRA's are only supported with diffusers. Just stripping the LoRA tag from the prompt.")
}
}
if (LoRA !== null && LoRA.length > 0 && testDiffusers?.checked) {
for (let i = 0; i < LoRA.length; i++) {
//if (loraModelField.value !== LoRA[0].lora_model) {
// Set the new LoRA value
//console.log("Loading info");
//console.log(LoRA[0].lora_model_0);
//console.log(JSON.stringify(LoRa));
let lora = `lora_model_${i}`;
let alpha = `lora_alpha_${i}`;
let loramodel = document.getElementById(lora);
let alphavalue = document.getElementById(alpha);
loramodel.setAttribute("data-path", LoRA[i].lora_model_0);
loramodel.value = LoRA[i].lora_model_0;
alphavalue.value = LoRA[i].lora_alpha_0;
if (i != LoRA.length - 1)
createLoraEntry();
}
//loraAlphaSlider.value = loraAlphaField.value * 100;
//TBD.value = LoRA[0].blockweights; // block weights not supported by ED at this time
//}
showToast("Prompt successfully processed", LoRA[0].lora_model_0);
//console.log('LoRa: ' + LoRA[0].lora_model_0);
//showToast("Prompt successfully processed", lora_model_0.value);
}
//promptField.dispatchEvent(new Event('change'));
});
function isModelAvailable(array, searchString) {
const foundItem = array.find(function(item) {
item = item.toString().toLowerCase();
return item === searchString.toLowerCase()
});
return foundItem || "";
}
// extract LoRA tags from strings
function extractLoraTags(prompt) {
// Define the regular expression for the tags
const regex = /<(?:lora|lyco):([^:>]+)(?::([^:>]*))?(?::([^:>]*))?>/gi
// Initialize an array to hold the matches
let matches = []
// Iterate over the string, finding matches
for (const match of prompt.matchAll(regex)) {
const modelFileName = isModelAvailable(modelsCache.options.lora, match[1].trim())
if (modelFileName !== "") {
// Initialize an object to hold a match
let loraTag = {
lora_model_0: modelFileName,
}
//console.log("Model:" + modelFileName);
// If weight is provided, add it to the loraTag object
if (match[2] !== undefined && match[2] !== '') {
loraTag.lora_alpha_0 = parseFloat(match[2].trim())
}
else
{
loraTag.lora_alpha_0 = 0.5
}
// If blockweights are provided, add them to the loraTag object
if (match[3] !== undefined && match[3] !== '') {
loraTag.blockweights = match[3].trim()
}
// Add the loraTag object to the array of matches
matches.push(loraTag);
//console.log(JSON.stringify(matches));
}
else
{
showToast("LoRA not found: " + match[1].trim(), 5000, true)
}
}
// Clean up the prompt string, e.g. from "apple, banana, <lora:...>, orange, <lora:...> , pear <lora:...>, <lora:...>" to "apple, banana, orange, pear"
let cleanedPrompt = prompt.replace(regex, '').replace(/(\s*,\s*(?=\s*,|$))|(^\s*,\s*)|\s+/g, ' ').trim();
//console.log('Matches: ' + JSON.stringify(matches));
// Return the array of matches and cleaned prompt string
return {
LoRA: matches,
prompt: cleanedPrompt
}
}
})()
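To make the parsing concrete: assuming a LoRA file named "detail-tweaker" is present in the dropdown's model list, the extraction above behaves roughly like this (illustrative input and output; extractLoraTags is private to the IIFE, so this is a sketch, not a callable example):

// input prompt: "a castle at dusk, <lora:detail-tweaker:0.8>, oil painting"
// extractLoraTags(prompt) → {
//     LoRA: [{ lora_model_0: "detail-tweaker", lora_alpha_0: 0.8 }],
//     prompt: roughly "a castle at dusk, oil painting" after comma cleanup
// }
// an omitted alpha, e.g. <lora:detail-tweaker>, defaults to 0.5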