Merge pull request #1302 from cmdr2/beta

Beta
cmdr2 2023-05-25 14:57:51 +05:30 committed by GitHub
commit b511000441
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 200 additions and 70 deletions

View File

@@ -22,6 +22,8 @@
Our focus remains on an easy installation experience and an easy user interface, while still being quite powerful in terms of features and speed.
### Detailed changelog
* 2.5.38 - 24 May 2023 - Better error reporting, and show an explanation if the user cannot disable the "Use CPU" setting.
* 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images (a request-body sketch follows this changelog). Thanks @JeLuf for the implementation of the Latent Upscaler model.
* 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
* 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
* 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal to or less than that of the non-diffusers version. Performs softmax in half precision, like sdkit does.
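For reference, here is a minimal sketch of how the new Latent Upscaler option surfaces in a render request. The field names come from the task_data.py and main.js changes further down in this diff; the surrounding request body (prompt, seed, etc.) is omitted and only illustrative.

// Sketch only - not a complete request body.
const reqBody = {
    use_upscale: "latent_upscaler", // instead of a RealESRGAN model name
    upscale_amount: "2", // the UI pins the latent upscaler to 2x
    latent_upscaler_steps: 10, // new field: 1-50 in the UI, default 10
    // ...prompt, seed and the other usual fields go here
}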

View File

@@ -10,6 +10,8 @@ import warnings
from easydiffusion import task_manager
from easydiffusion.utils import log
from rich.logging import RichHandler
from rich.console import Console
from rich.panel import Panel
from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
# Remove all handlers associated with the root logger object.
@@ -213,11 +215,19 @@ def open_browser():
    ui = config.get("ui", {})
    net = config.get("net", {})
    port = net.get("listen_port", 9000)
    if ui.get("open_browser_on_start", True):
        import webbrowser
        webbrowser.open(f"http://localhost:{port}")

    Console().print(Panel(
        "\n" +
        "[white]Easy Diffusion is ready to serve requests.\n\n" +
        "A new browser tab should have been opened by now.\n" +
        f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
        title="Easy Diffusion is ready", style="bold yellow on blue"))

def get_image_modifiers():
    modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")

View File

@@ -53,15 +53,21 @@ def load_default_models(context: Context):
                scan_model=context.model_paths[model_type] != None
                and not context.model_paths[model_type].endswith(".safetensors"),
            )
            if model_type in context.model_load_errors:
                del context.model_load_errors[model_type]
        except Exception as e:
            log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
            log.exception(e)
            del context.model_paths[model_type]
            context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks

def unload_all(context: Context):
    for model_type in KNOWN_MODEL_TYPES:
        unload_model(context, model_type)
        if model_type in context.model_load_errors:
            del context.model_load_errors[model_type]

def resolve_model_to_use(model_name: str = None, model_type: str = None):
@@ -107,12 +113,15 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
def reload_models_if_necessary(context: Context, task_data: TaskData):
    use_upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
    model_paths_in_req = {
        "stable-diffusion": task_data.use_stable_diffusion_model,
        "vae": task_data.use_vae_model,
        "hypernetwork": task_data.use_hypernetwork_model,
        "gfpgan": task_data.use_face_correction,
        "realesrgan": task_data.use_upscale,
        "realesrgan": task_data.use_upscale if "realesrgan" in use_upscale_lower else None,
        "latent_upscaler": True if task_data.use_upscale == "latent_upscaler" else None,
        "nsfw_checker": True if task_data.block_nsfw else None,
        "lora": task_data.use_lora_model,
    }
@@ -129,7 +138,14 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
        context.model_paths[model_type] = model_path_in_req

        action_fn = unload_model if context.model_paths[model_type] is None else load_model
        action_fn(context, model_type, scan_model=False) # we've scanned them already
        try:
            action_fn(context, model_type, scan_model=False) # we've scanned them already
            if model_type in context.model_load_errors:
                del context.model_load_errors[model_type]
        except Exception as e:
            log.exception(e)
            if action_fn == load_model:
                context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks

def resolve_model_paths(task_data: TaskData):
@@ -142,10 +158,18 @@ def resolve_model_paths(task_data: TaskData):
    if task_data.use_face_correction:
        task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")

    if task_data.use_upscale:
    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
        task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")

def fail_if_models_did_not_load(context: Context):
    for model_type in KNOWN_MODEL_TYPES:
        if model_type in context.model_load_errors:
            e = context.model_load_errors[model_type]
            raise Exception(f"Could not load the {model_type} model! Reason: " + e)
            # concat 'e', don't use in format string (injection attack)

def set_vram_optimizations(context: Context):
    config = app.getConfig()
    vram_usage_level = config.get("vram_usage_level", "balanced")

View File

@@ -33,6 +33,7 @@ def init(device):
    context.stop_processing = False
    context.temp_images = {}
    context.partial_x_samples = None
    context.model_load_errors = {}

    from easydiffusion import app
@@ -95,7 +96,7 @@ def make_images_internal(
        task_data.stream_image_progress_interval,
    )
    gc(context)
    filtered_images = filter_images(task_data, images, user_stopped)
    filtered_images = filter_images(req, task_data, images, user_stopped)
    if task_data.save_to_disk_path is not None:
        save_images_to_disk(images, filtered_images, req, task_data)
@@ -151,22 +152,36 @@ def generate_images_internal(
    return images, user_stopped

def filter_images(task_data: TaskData, images: list, user_stopped):
def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list, user_stopped):
    if user_stopped:
        return images

    filters_to_apply = []
    filter_params = {}
    if task_data.block_nsfw:
        filters_to_apply.append("nsfw_checker")
    if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
        filters_to_apply.append("gfpgan")
    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
        filters_to_apply.append("realesrgan")
    if task_data.use_upscale:
        if "realesrgan" in task_data.use_upscale.lower():
            filters_to_apply.append("realesrgan")
        elif task_data.use_upscale == "latent_upscaler":
            filters_to_apply.append("latent_upscaler")

            filter_params["latent_upscaler_options"] = {
                "prompt": req.prompt,
                "negative_prompt": req.negative_prompt,
                "seed": req.seed,
                "num_inference_steps": task_data.latent_upscaler_steps,
                "guidance_scale": 0,
            }

        filter_params["scale"] = task_data.upscale_amount

    if len(filters_to_apply) == 0:
        return images

    return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
    return apply_filters(context, filters_to_apply, images, **filter_params)

def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):

View File

@@ -336,6 +336,7 @@ def thread_render(device):
current_state = ServerStates.LoadingModel
model_manager.resolve_model_paths(task.task_data)
model_manager.reload_models_if_necessary(renderer.context, task.task_data)
model_manager.fail_if_models_did_not_load(renderer.context)
current_state = ServerStates.Rendering
task.response = renderer.make_images(

View File

@@ -32,8 +32,9 @@ class TaskData(BaseModel):
vram_usage_level: str = "balanced" # or "low" or "medium"
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler"
upscale_amount: int = 4 # or 2
latent_upscaler_steps: int = 10
use_stable_diffusion_model: str = "sd-v1-4"
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None

View File

@@ -30,7 +30,7 @@
    <h1>
        <img id="logo_img" src="/media/images/icon-512x512.png" >
        Easy Diffusion
        <small>v2.5.37 <span id="updateBranchLabel"></span></small>
        <small>v2.5.38 <span id="updateBranchLabel"></span></small>
    </h1>
</div>
<div id="server-status">
@@ -258,14 +258,18 @@
<li class="pl-5">
    <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
    <select id="upscale_amount" name="upscale_amount">
        <option value="2">2x</option>
        <option value="4" selected>4x</option>
        <option id="upscale_amount_2x" value="2">2x</option>
        <option id="upscale_amount_4x" value="4" selected>4x</option>
    </select>
    with
    <select id="upscale_model" name="upscale_model">
        <option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
        <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
        <option value="latent_upscaler">Latent Upscaler 2x</option>
    </select>
    <div id="latent_upscaler_settings" class="displayNone">
        <label for="latent_upscaler_steps_slider">Upscaling Steps:</label>
        <input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50">
        <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
    </div>
</li>
<li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
</ul></div>

View File

@@ -1303,6 +1303,12 @@ body.wait-pause {
    display:none !important;
}

#latent_upscaler_settings {
    padding-top: 3pt;
    padding-bottom: 3pt;
    padding-left: 5pt;
}

/* TOAST NOTIFICATIONS */
.toast-notification {
    position: fixed;

View File

@@ -834,6 +834,7 @@ function pixelCompare(int1, int2) {
}

// adapted from https://ben.akrin.com/canvas_fill/fill_04.html
// May 2023 - look at using a library instead of custom code: https://github.com/shaneosullivan/example-canvas-fill
function flood_fill(editor, the_canvas_context, x, y, color) {
    pixel_stack = [{ x: x, y: y }]
    pixels = the_canvas_context.getImageData(0, 0, editor.width, editor.height)

View File

@@ -86,6 +86,9 @@ let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model")
let useUpscalingField = document.querySelector("#use_upscale")
let upscaleModelField = document.querySelector("#upscale_model")
let upscaleAmountField = document.querySelector("#upscale_amount")
let latentUpscalerSettings = document.querySelector("#latent_upscaler_settings")
let latentUpscalerStepsSlider = document.querySelector("#latent_upscaler_steps_slider")
let latentUpscalerStepsField = document.querySelector("#latent_upscaler_steps")
let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion")
let clipSkipField = document.querySelector("#clip_skip")
let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None")
@@ -239,7 +242,7 @@ function setServerStatus(event) {
break
}
if (SD.serverState.devices) {
document.dispatchEvent(new CustomEvent("system_info_update", { detail: SD.serverState.devices}))
document.dispatchEvent(new CustomEvent("system_info_update", { detail: SD.serverState.devices }))
}
}
@@ -258,20 +261,11 @@ function shiftOrConfirm(e, prompt, fn) {
    if (e.shiftKey || !confirmDangerousActionsField.checked) {
        fn(e)
    } else {
        $.confirm({
            theme: "modern",
            title: prompt,
            useBootstrap: false,
            animateFromElement: false,
            content:
                '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
            buttons: {
                yes: () => {
                    fn(e)
                },
                cancel: () => {},
            },
        })
        confirm(
            '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
            prompt,
            () => { fn(e) }
        )
    }
}
@@ -293,6 +287,7 @@ function logError(msg, res, outputMsg) {
    logMsg(msg, "error", outputMsg)

    console.log("request error", res)
    console.trace()
    setStatus("request", "error", "error")
}
@@ -784,11 +779,6 @@ function getTaskUpdater(task, reqBody, outputContainer) {
}
msg += "</pre>"
logError(msg, event, outputMsg)
} else {
let msg = `Unexpected Read Error:<br/><pre>Error:${
this.exception
}<br/>EventInfo: ${JSON.stringify(event, undefined, 4)}</pre>`
logError(msg, event, outputMsg)
}
break
}
@@ -885,15 +875,15 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.<br/>
2. Try picking a lower level in the '<em>GPU Memory Usage</em>' setting (in the '<em>Settings</em>' tab).<br/>
3. Try generating a smaller image.<br/>`
} else if (msg.toLowerCase().includes("DefaultCPUAllocator: not enough memory")) {
} else if (msg.includes("DefaultCPUAllocator: not enough memory")) {
msg += `<br/><br/>
Reason: Your computer is running out of system RAM!
<br/>
<br/><br/>
<b>Suggestions</b>:
<br/>
1. Try closing unnecessary programs and browser tabs.<br/>
2. If that doesn't help, please increase your computer's virtual memory by following these steps for
<a href="https://www.ibm.com/docs/en/opw/8.2.0?topic=tuning-optional-increasing-paging-file-size-windows-computers" target="_blank">Windows</a>, or
<a href="https://www.ibm.com/docs/en/opw/8.2.0?topic=tuning-optional-increasing-paging-file-size-windows-computers" target="_blank">Windows</a> or
<a href="https://linuxhint.com/increase-swap-space-linux/" target="_blank">Linux</a>.<br/>
3. Try restarting your computer.<br/>`
}
@@ -1268,6 +1258,10 @@ function getCurrentUserRequest() {
    if (useUpscalingField.checked) {
        newTask.reqBody.use_upscale = upscaleModelField.value
        newTask.reqBody.upscale_amount = upscaleAmountField.value
        if (upscaleModelField.value === "latent_upscaler") {
            newTask.reqBody.upscale_amount = "2"
            newTask.reqBody.latent_upscaler_steps = latentUpscalerStepsField.value
        }
    }
    if (hypernetworkModelField.value) {
        newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
@@ -1582,6 +1576,20 @@ useUpscalingField.addEventListener("change", function(e) {
    upscaleAmountField.disabled = !this.checked
})

function onUpscaleModelChange() {
    let upscale4x = document.querySelector("#upscale_amount_4x")
    if (upscaleModelField.value === "latent_upscaler") {
        upscale4x.disabled = true
        upscaleAmountField.value = "2"
        latentUpscalerSettings.classList.remove("displayNone")
    } else {
        upscale4x.disabled = false
        latentUpscalerSettings.classList.add("displayNone")
    }
}
upscaleModelField.addEventListener("change", onUpscaleModelChange)
onUpscaleModelChange()
makeImageBtn.addEventListener("click", makeImage)
document.onkeydown = function(e) {
@@ -1591,6 +1599,27 @@ document.onkeydown = function(e) {
}
}
/********************* Latent Upscaler Steps **************************/
function updateLatentUpscalerSteps() {
    latentUpscalerStepsField.value = latentUpscalerStepsSlider.value
    latentUpscalerStepsField.dispatchEvent(new Event("change"))
}

function updateLatentUpscalerStepsSlider() {
    if (latentUpscalerStepsField.value < 1) {
        latentUpscalerStepsField.value = 1
    } else if (latentUpscalerStepsField.value > 50) {
        latentUpscalerStepsField.value = 50
    }

    latentUpscalerStepsSlider.value = latentUpscalerStepsField.value
    latentUpscalerStepsSlider.dispatchEvent(new Event("change"))
}

latentUpscalerStepsSlider.addEventListener("input", updateLatentUpscalerSteps)
latentUpscalerStepsField.addEventListener("input", updateLatentUpscalerStepsSlider)
updateLatentUpscalerSteps()
/********************* Guidance **************************/
function updateGuidanceScale() {
guidanceScaleField.value = guidanceScaleSlider.value / 10

View File

@@ -191,7 +191,8 @@ var PARAMETERS = [
        id: "listen_port",
        type: ParameterType.custom,
        label: "Network port",
        note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
        note:
            "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
        icon: "fa-anchor",
        render: (parameter) => {
            return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
@@ -396,14 +397,14 @@ async function getAppConfig() {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#lora_alpha_container").style.display = "none"
document.querySelectorAll("#sampler_name option.diffusers-only").forEach(option => {
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
option.style.display = "none"
})
} else {
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach(option => {
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
option.disabled = true
})
document.querySelector("#clip_skip_config").classList.remove("displayNone")
@@ -568,6 +569,16 @@ async function getSystemInfo() {
    if (allDeviceIds.length === 0) {
        useCPUField.checked = true
        useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory

        getParameterSettingsEntry("use_cpu").addEventListener("click", function() {
            alert(
                "Sorry, we could not find a compatible graphics card! Easy Diffusion supports graphics cards with minimum 2 GB of RAM. " +
                    "Only NVIDIA cards are supported on Windows. NVIDIA and AMD cards are supported on Linux.<br/><br/>" +
                    "If you have a compatible graphics card, please try updating to the latest drivers.<br/><br/>" +
                    "Only the CPU can be used for generating images, without a compatible graphics card.",
                "No compatible graphics card found!"
            )
        })
    }

    autoPickGPUsField.checked = devices["config"] === "auto"
@@ -586,7 +597,7 @@ async function getSystemInfo() {
$("#use_gpus").val(activeDeviceIds)
}
document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices}))
document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices }))
setHostInfo(res["hosts"])
let force = false
if (res["enforce_output_dir"] !== undefined) {

View File

@@ -843,57 +843,83 @@ function createTab(request) {
/* TOAST NOTIFICATIONS */
function showToast(message, duration = 5000, error = false) {
    const toast = document.createElement("div");
    toast.classList.add("toast-notification");
    const toast = document.createElement("div")
    toast.classList.add("toast-notification")
    if (error === true) {
        toast.classList.add("toast-notification-error");
        toast.classList.add("toast-notification-error")
    }
    toast.innerHTML = message;
    document.body.appendChild(toast);
    toast.innerHTML = message
    document.body.appendChild(toast)

    // Set the position of the toast on the screen
    const toastCount = document.querySelectorAll(".toast-notification").length;
    const toastHeight = toast.offsetHeight;
    const toastCount = document.querySelectorAll(".toast-notification").length
    const toastHeight = toast.offsetHeight
    const previousToastsHeight = Array.from(document.querySelectorAll(".toast-notification"))
        .slice(0, -1) // exclude current toast
        .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0); // add 10 pixels for spacing
    toast.style.bottom = `${10 + previousToastsHeight}px`;
    toast.style.right = "10px";
        .reduce((totalHeight, toast) => totalHeight + toast.offsetHeight + 10, 0) // add 10 pixels for spacing
    toast.style.bottom = `${10 + previousToastsHeight}px`
    toast.style.right = "10px"

    // Delay the removal of the toast until animation has completed
    const removeToast = () => {
        toast.classList.add("hide");
        toast.classList.add("hide")

        const removeTimeoutId = setTimeout(() => {
            toast.remove();
            toast.remove()

            // Adjust the position of remaining toasts
            const remainingToasts = document.querySelectorAll(".toast-notification");
            const removedToastBottom = toast.getBoundingClientRect().bottom;
            const remainingToasts = document.querySelectorAll(".toast-notification")
            const removedToastBottom = toast.getBoundingClientRect().bottom

            remainingToasts.forEach((toast) => {
                if (toast.getBoundingClientRect().bottom < removedToastBottom) {
                    toast.classList.add("slide-down");
                    toast.classList.add("slide-down")
                }
            });
            })

            // Wait for the slide-down animation to complete
            setTimeout(() => {
                // Remove the slide-down class after the animation has completed
                const slidingToasts = document.querySelectorAll(".slide-down");
                const slidingToasts = document.querySelectorAll(".slide-down")
                slidingToasts.forEach((toast) => {
                    toast.classList.remove("slide-down");
                });
                    toast.classList.remove("slide-down")
                })

                // Adjust the position of remaining toasts again, in case there are multiple toasts being removed at once
                const remainingToastsDown = document.querySelectorAll(".toast-notification");
                let heightSoFar = 0;
                const remainingToastsDown = document.querySelectorAll(".toast-notification")
                let heightSoFar = 0
                remainingToastsDown.forEach((toast) => {
                    toast.style.bottom = `${10 + heightSoFar}px`;
                    heightSoFar += toast.offsetHeight + 10; // add 10 pixels for spacing
                });
                }, 0); // The duration of the slide-down animation (in milliseconds)
            }, 500);
        };
                    toast.style.bottom = `${10 + heightSoFar}px`
                    heightSoFar += toast.offsetHeight + 10 // add 10 pixels for spacing
                })
            }, 0) // The duration of the slide-down animation (in milliseconds)
        }, 500)
    }

    // Remove the toast after specified duration
    setTimeout(removeToast, duration);
    setTimeout(removeToast, duration)
}
function alert(msg, title) {
    title = title || ""
    $.alert({
        theme: "modern",
        title: title,
        useBootstrap: false,
        animateFromElement: false,
        content: msg,
    })
}

function confirm(msg, title, fn) {
    title = title || ""
    $.confirm({
        theme: "modern",
        title: title,
        useBootstrap: false,
        animateFromElement: false,
        content: msg,
        buttons: {
            yes: fn,
            cancel: () => {},
        },
    })
}
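
For comparison, the shiftOrConfirm() change in main.js earlier in this diff now calls this confirm() wrapper instead of repeating the full $.confirm options. A minimal call would look like the following; the message, title and callback here are invented for illustration.

// Illustrative usage of the new helper (not taken from the codebase).
confirm(
    "This will clear all generated images. Continue?", // msg: rendered as the dialog body
    "Are you sure?", // title
    () => { console.log("user clicked yes") } // fn: runs when the user confirms
)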