Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-03-27 07:40:52 +01:00)
UI changes for multiple LoRA files
This commit is contained in:
parent 92ffbb5ed8
commit f5b8044bad
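
This commit replaces the single LoRA dropdown and strength slider with up to three paired model/strength entries. The change keeps single-LoRA requests backward compatible: with one entry filled in, the request fields stay scalars; with several, they become parallel arrays. A minimal sketch (not part of the commit; "lora-a" and "lora-b" are hypothetical model names) of the two shapes that getCurrentUserRequest() produces:

// one LoRA entry filled in -> scalar fields, as before
const singleLoraReqBody = {
    use_lora_model: "lora-a",
    lora_alpha: 0.5,
}

// two or more entries filled in -> parallel arrays of names and strengths
const multiLoraReqBody = {
    use_lora_model: ["lora-a", "lora-b"],
    lora_alpha: [0.5, -0.3],
}
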
@@ -225,18 +225,14 @@
<label for="height"><small>(height)</small></label>
<div id="small_image_warning" class="displayNone">Small image sizes can cause bad image quality</div>
</td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
<tr id="lora_model_container" class="pl-5"><td><label for="lora_model">LoRA:</label></td><td>
<input id="lora_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="lora_alpha_container" class="pl-5">
<td><label for="lora_alpha_slider">LoRA Strength:</label></td>
<tr id="lora_model_container" class="pl-5">
<td>
<small>-2</small> <input id="lora_alpha_slider" name="lora_alpha_slider" class="editor-slider" value="50" type="range" min="-200" max="200"> <small>2</small>
<input id="lora_alpha" name="lora_alpha" size="4" pattern="^-?[0-9]*\.?[0-9]*$" onkeypress="preventNonNumericalInput(event)"><br/>
<label for="lora_model">LoRA:</label>
</td>
<td class="model_entries"></td>
</tr>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
@@ -1678,3 +1678,7 @@ body.wait-pause {
#embeddings-list::-webkit-scrollbar-thumb {
background: var(--background-color3);
}

.model_entry .model_name {
width: 70%;
}
@@ -16,7 +16,9 @@ const SETTINGS_IDS_LIST = [
"clip_skip",
"vae_model",
"hypernetwork_model",
"lora_model",
"lora_model_0",
"lora_model_1",
"lora_model_2",
"sampler_name",
"width",
"height",
@@ -24,7 +26,9 @@ const SETTINGS_IDS_LIST = [
"guidance_scale",
"prompt_strength",
"hypernetwork_strength",
"lora_alpha",
"lora_alpha_0",
"lora_alpha_1",
"lora_alpha_2",
"tiling",
"output_format",
"output_quality",
@@ -176,13 +180,14 @@ function loadSettings() {
// So this is likely the first time Easy Diffusion is running.
// Initialize vram_usage_level based on the available VRAM
function initGPUProfile(event) {
if ( "detail" in event
&& "active" in event.detail
&& "cuda:0" in event.detail.active
&& event.detail.active["cuda:0"].mem_total <4.5 )
{
vramUsageLevelField.value = "low"
vramUsageLevelField.dispatchEvent(new Event("change"))
if (
"detail" in event &&
"active" in event.detail &&
"cuda:0" in event.detail.active &&
event.detail.active["cuda:0"].mem_total < 4.5
) {
vramUsageLevelField.value = "low"
vramUsageLevelField.dispatchEvent(new Event("change"))
}
document.removeEventListener("system_info_update", initGPUProfile)
}
@@ -292,29 +292,58 @@ const TASK_MAPPING = {
use_lora_model: {
name: "LoRA model",
setUI: (use_lora_model) => {
const oldVal = loraModelField.value
use_lora_model =
use_lora_model === undefined || use_lora_model === null || use_lora_model === "None"
? ""
: use_lora_model
use_lora_model.forEach((model_name, i) => {
let field = loraModels[i][0]
const oldVal = field.value

if (use_lora_model !== "") {
use_lora_model = getModelPath(use_lora_model, [".ckpt", ".safetensors"])
use_lora_model = use_lora_model !== "" ? use_lora_model : oldVal
if (model_name !== "") {
model_name = getModelPath(model_name, [".ckpt", ".safetensors"])
model_name = model_name !== "" ? model_name : oldVal
}
field.value = model_name
})

// clear the remaining entries
for (let i = use_lora_model.length; i < loraModels.length; i++) {
loraModels[i][0].value = ""
}
loraModelField.value = use_lora_model
},
readUI: () => loraModelField.value,
parse: (val) => val,
readUI: () => {
let values = loraModels.map((e) => e[0].value)
values = values.filter((e) => e.trim() !== "")
values = values.length > 0 ? values : "None"
return values
},
parse: (val) => {
val = !val || val === "None" ? "" : val
val = Array.isArray(val) ? val : [val]
return val
},
},
lora_alpha: {
name: "LoRA Strength",
setUI: (lora_alpha) => {
loraAlphaField.value = lora_alpha
updateLoraAlphaSlider()
lora_alpha.forEach((model_strength, i) => {
let field = loraModels[i][1]
field.value = model_strength
})

// clear the remaining entries
for (let i = lora_alpha.length; i < loraModels.length; i++) {
loraModels[i][1].value = 0
}
},
readUI: () => {
let models = loraModels.filter((e) => e[0].value.trim() !== "")
let values = models.map((e) => e[1].value)
values = values.length > 0 ? values : 0
return values
},
parse: (val) => {
val = Array.isArray(val) ? val : [val]
val = val.map((e) => parseFloat(e))
return val
},
readUI: () => parseFloat(loraAlphaField.value),
parse: (val) => parseFloat(val),
},
use_hypernetwork_model: {
name: "Hypernetwork model",
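
The reworked TASK_MAPPING entries above accept either a single model name, an array of names, or "None" when a saved task is restored; setUI() and readUI() then fan the values in and out of the loraModels pairs. A standalone sketch of the normalization the new parse() applies (extracted here only for illustration; the file names are hypothetical):

function parseLoraModelValue(val) {
    val = !val || val === "None" ? "" : val
    return Array.isArray(val) ? val : [val]
}

parseLoraModelValue("None")                             // [""] (setUI() then blanks the first entry and clears the rest)
parseLoraModelValue("detail-lora.safetensors")          // ["detail-lora.safetensors"]
parseLoraModelValue(["a.safetensors", "b.safetensors"]) // passed through unchanged
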
@@ -426,8 +455,11 @@ function restoreTaskToUI(task, fieldsToSkip) {
}

if (!("use_lora_model" in task.reqBody)) {
loraModelField.value = ""
loraModelField.dispatchEvent(new Event("change"))
loraModels.forEach((e) => {
e[0].value = ""
e[1].value = 0
e[0].dispatchEvent(new Event("change"))
})
}

// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)
@@ -103,9 +103,6 @@ let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae
let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None")
let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider")
let hypernetworkStrengthField = document.querySelector("#hypernetwork_strength")
let loraModelField = new ModelDropdown(document.querySelector("#lora_model"), "lora", "None")
let loraAlphaSlider = document.querySelector("#lora_alpha_slider")
let loraAlphaField = document.querySelector("#lora_alpha")
let outputFormatField = document.querySelector("#output_format")
let outputLosslessField = document.querySelector("#output_lossless")
let outputLosslessContainer = document.querySelector("#output_lossless_container")
@@ -159,6 +156,8 @@ let undoButton = document.querySelector("#undo")
let undoBuffer = []
const UNDO_LIMIT = 20

let loraModels = []

imagePreview.addEventListener("drop", function(ev) {
const data = ev.dataTransfer?.getData("text/plain")
if (!data) {
@@ -1292,13 +1291,31 @@ function getCurrentUserRequest() {
newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
newTask.reqBody.hypernetwork_strength = parseFloat(hypernetworkStrengthField.value)
}
if (testDiffusers.checked && loraModelField.value) {
newTask.reqBody.use_lora_model = loraModelField.value
newTask.reqBody.lora_alpha = parseFloat(loraAlphaField.value)
if (testDiffusers.checked) {
let [modelNames, modelStrengths] = getModelInfo(loraModels)

if (modelNames.length > 0) {
modelNames = modelNames.length == 1 ? modelNames[0] : modelNames
modelStrengths = modelStrengths.length == 1 ? modelStrengths[0] : modelStrengths

newTask.reqBody.use_lora_model = modelNames
newTask.reqBody.lora_alpha = modelStrengths
}
}
return newTask
}

function getModelInfo(models) {
let modelInfo = models.map((e) => [e[0].value, e[1].value])
modelInfo = modelInfo.filter((e) => e[0].trim() !== "")
modelInfo = modelInfo.map((e) => [e[0], parseFloat(e[1])])

let modelNames = modelInfo.map((e) => e[0])
let modelStrengths = modelInfo.map((e) => e[1])

return [modelNames, modelStrengths]
}

function getPrompts(prompts) {
if (typeof prompts === "undefined") {
prompts = promptField.value
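
getModelInfo() above skips any row whose name field is blank and parses each strength to a number; getCurrentUserRequest() then collapses one-element arrays back to scalars. A sketch of that behaviour with stand-in objects in place of the real ModelDropdown/input pairs held in loraModels (model names are hypothetical):

const entries = [
    [{ value: "lora-a" }, { value: "0.5" }],
    [{ value: "" }, { value: "0.3" }],        // blank name, row is skipped
    [{ value: "lora-b" }, { value: "-0.25" }],
]
getModelInfo(entries)    // [["lora-a", "lora-b"], [0.5, -0.25]]
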
@@ -1836,33 +1853,6 @@ function updateHypernetworkStrengthContainer() {
hypernetworkModelField.addEventListener("change", updateHypernetworkStrengthContainer)
updateHypernetworkStrengthContainer()

/********************* LoRA alpha **********************/
function updateLoraAlpha() {
loraAlphaField.value = loraAlphaSlider.value / 100
loraAlphaField.dispatchEvent(new Event("change"))
}

function updateLoraAlphaSlider() {
if (loraAlphaField.value < -2) {
loraAlphaField.value = -2
} else if (loraAlphaField.value > 2) {
loraAlphaField.value = 2
}

loraAlphaSlider.value = loraAlphaField.value * 100
loraAlphaSlider.dispatchEvent(new Event("change"))
}

loraAlphaSlider.addEventListener("input", updateLoraAlpha)
loraAlphaField.addEventListener("input", updateLoraAlphaSlider)
updateLoraAlpha()

function updateLoraAlphaContainer() {
document.querySelector("#lora_alpha_container").style.display = loraModelField.value === "" ? "none" : ""
}
loraModelField.addEventListener("change", updateLoraAlphaContainer)
updateLoraAlphaContainer()

/********************* JPEG/WEBP Quality **********************/
function updateOutputQuality() {
outputQualityField.value = 0 | outputQualitySlider.value
@@ -2250,3 +2240,43 @@ prettifyInputs(document)
// set the textbox as focused on start
promptField.focus()
promptField.selectionStart = promptField.value.length

// multi-models
function addModelEntry(i, modelContainer, modelsList, modelType, defaultValue, minStrength, maxStrength, strengthStep) {
let nameId = modelType + "_model_" + i
let strengthId = modelType + "_alpha_" + i

const modelEntry = document.createElement("div")
modelEntry.className = "model_entry"
modelEntry.innerHTML = `
<input id="${nameId}" class="model_name" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<input id="${strengthId}" class="model_strength" type="number" max="${maxStrength}" min="${minStrength}" step="${strengthStep}" style="width: 50pt" value="${defaultValue}" pattern="^-?[0-9]*\.?[0-9]*$" onkeypress="preventNonNumericalInput(event)"><br/>
`

let modelName = new ModelDropdown(modelEntry.querySelector(".model_name"), modelType, "None")
let modelStrength = modelEntry.querySelector(".model_strength")

modelContainer.appendChild(modelEntry)
modelsList.push([modelName, modelStrength])
}

function createLoRAEntries() {
let container = document.querySelector("#lora_model_container .model_entries")
for (let i = 0; i < 3; i++) {
addModelEntry(i, container, loraModels, "lora", 0.5, -2, 2, 0.02)
}
}
createLoRAEntries()

// chrome-like spinners only on hover
function showSpinnerOnlyOnHover(e) {
e.addEventListener("mouseenter", () => {
e.setAttribute("type", "number")
})
e.addEventListener("mouseleave", () => {
e.removeAttribute("type")
})
e.removeAttribute("type")
}

document.querySelectorAll("input[type=number]").forEach(showSpinnerOnlyOnHover)
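
The element ids that addModelEntry() generates for the three LoRA rows are exactly the lora_model_0..2 and lora_alpha_0..2 ids added to SETTINGS_IDS_LIST earlier in this commit, which is what lets the per-entry selections be saved and restored like the other settings. A small sketch of the id scheme:

// id scheme used by addModelEntry() for modelType "lora"
function loraEntryIds(i) {
    return ["lora_model_" + i, "lora_alpha_" + i]
}
loraEntryIds(0)    // ["lora_model_0", "lora_alpha_0"]
loraEntryIds(2)    // ["lora_model_2", "lora_alpha_2"]
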
@@ -426,7 +426,6 @@ async function getAppConfig() {

if (!testDiffusersEnabled) {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#lora_alpha_container").style.display = "none"
document.querySelector("#tiling_container").style.display = "none"

document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
@@ -434,7 +433,6 @@ async function getAppConfig() {
})
} else {
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
document.querySelector("#tiling_container").style.display = ""

document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {