diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index 7bf56575..324dcec9 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -122,7 +122,7 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
if context.model_paths.get(model_type) != path
}
- if set_vram_optimizations(context): # reload SD
+ if set_vram_optimizations(context) or set_clip_skip(context, task_data): # reload SD
models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]
for model_type, model_path_in_req in models_to_reload.items():
@@ -157,6 +157,16 @@ def set_vram_optimizations(context: Context):
return False
+def set_clip_skip(context: Context, task_data: TaskData):
+ clip_skip = task_data.clip_skip
+
+ if clip_skip != context.clip_skip:
+ context.clip_skip = clip_skip
+ return True
+
+ return False
+
+
def make_model_folders():
for model_type in KNOWN_MODEL_TYPES:
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
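A minimal sketch of the reload decision this hunk adds: `set_clip_skip()` reports a change only when the request's value differs from what the context last used, and only then is the "stable-diffusion" model queued for reload. The stand-in classes below are hypothetical; the real `Context` comes from sdkit and `TaskData` from `easydiffusion.types`.

```python
# Hypothetical stand-ins, just enough to exercise set_clip_skip()'s change detection.
class FakeContext:
    clip_skip = False  # assumed starting value on a freshly created context

class FakeTaskData:
    clip_skip = True   # mirrors the TaskData.clip_skip field added in types.py

context, task_data = FakeContext(), FakeTaskData()

assert set_clip_skip(context, task_data) is True    # value changed -> SD model gets reloaded
assert context.clip_skip is True                    # context now remembers the requested value
assert set_clip_skip(context, task_data) is False   # unchanged on the next request -> no reload
```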
diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py
index 7462355f..7a5201ab 100644
--- a/ui/easydiffusion/types.py
+++ b/ui/easydiffusion/types.py
@@ -48,6 +48,7 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5
+ clip_skip: bool = False
class MergeRequest(BaseModel):
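The new field is a plain pydantic boolean with a `False` default, so requests from older clients that never send `clip_skip` keep the current behaviour. A small sketch, assuming the remaining `TaskData` fields all carry defaults (as the ones visible in this hunk do):

```python
from easydiffusion.types import TaskData

# Omitting clip_skip falls back to False, matching TASK_DEFAULTS in engine.js.
print(TaskData().clip_skip)                # False
print(TaskData(clip_skip=True).clip_skip)  # True
```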
diff --git a/ui/index.html b/ui/index.html
index d4b8c7bd..62999c8e 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -135,10 +135,13 @@
Click to learn more about custom models
+                            <tr id="clip_skip_config" class="displayNone"><!-- assumed markup: ids match the #clip_skip / #clip_skip_config selectors used in main.js and parameters.js -->
+                                <td><label for="clip_skip">Clip Skip:</label></td>
+                                <td><input id="clip_skip" name="clip_skip" type="checkbox"></td></tr>
Click to learn more about VAEs
diff --git a/ui/media/js/auto-save.js b/ui/media/js/auto-save.js
index 1e536247..ee01ba98 100644
--- a/ui/media/js/auto-save.js
+++ b/ui/media/js/auto-save.js
@@ -13,6 +13,7 @@ const SETTINGS_IDS_LIST = [
"num_outputs_total",
"num_outputs_parallel",
"stable_diffusion_model",
+ "clip_skip",
"vae_model",
"hypernetwork_model",
"lora_model",
diff --git a/ui/media/js/dnd.js b/ui/media/js/dnd.js
index 548b06ad..02848266 100644
--- a/ui/media/js/dnd.js
+++ b/ui/media/js/dnd.js
@@ -240,6 +240,14 @@ const TASK_MAPPING = {
readUI: () => stableDiffusionModelField.value,
parse: (val) => val,
},
+ clip_skip: {
+ name: "Clip Skip",
+ setUI: (value) => {
+ clip_skip.checked = value
+ },
+ readUI: () => clip_skip.checked,
+ parse: (val) => Boolean(val),
+ },
use_vae_model: {
name: "VAE model",
setUI: (use_vae_model) => {
diff --git a/ui/media/js/engine.js b/ui/media/js/engine.js
index f396d951..eccae6ac 100644
--- a/ui/media/js/engine.js
+++ b/ui/media/js/engine.js
@@ -750,6 +750,7 @@
sampler_name: "string",
use_stable_diffusion_model: "string",
+ clip_skip: "boolean",
num_inference_steps: "number",
guidance_scale: "number",
@@ -763,6 +764,7 @@
const TASK_DEFAULTS = {
sampler_name: "plms",
use_stable_diffusion_model: "sd-v1-4",
+ clip_skip: false,
num_inference_steps: 50,
guidance_scale: 7.5,
negative_prompt: "",
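Taken together with the main.js change below, the browser's request body now carries a `clip_skip` boolean next to the model name, and the server maps it directly onto `TaskData.clip_skip`. A hedged sketch of the relevant fragment, using only field names and defaults that appear in this diff:

```python
# Fragment of a render request body (names and defaults from TASK_OPTIONAL /
# TASK_DEFAULTS above); all other request fields are omitted here.
req_body_fragment = {
    "sampler_name": "plms",
    "use_stable_diffusion_model": "sd-v1-4",
    "clip_skip": False,  # validated as "boolean" by engine.js, default False
}
```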
diff --git a/ui/media/js/main.js b/ui/media/js/main.js
index a54f6ecb..c69535df 100644
--- a/ui/media/js/main.js
+++ b/ui/media/js/main.js
@@ -13,6 +13,11 @@ const taskConfigSetup = {
num_inference_steps: "Inference Steps",
guidance_scale: "Guidance Scale",
use_stable_diffusion_model: "Model",
+ clip_skip: {
+ label: "Clip Skip",
+ visible: ({ reqBody }) => reqBody?.clip_skip,
+ value: ({ reqBody }) => "yes",
+ },
use_vae_model: {
label: "VAE",
visible: ({ reqBody }) => reqBody?.use_vae_model !== undefined && reqBody?.use_vae_model.trim() !== "",
@@ -82,6 +87,7 @@ let useUpscalingField = document.querySelector("#use_upscale")
let upscaleModelField = document.querySelector("#upscale_model")
let upscaleAmountField = document.querySelector("#upscale_amount")
let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion")
+let clipSkipField = document.querySelector("#clip_skip")
let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None")
let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None")
let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider")
@@ -1224,6 +1230,7 @@ function getCurrentUserRequest() {
sampler_name: samplerField.value,
//render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
use_stable_diffusion_model: stableDiffusionModelField.value,
+ clip_skip: clipSkipField.checked,
use_vae_model: vaeModelField.value,
stream_progress_updates: true,
stream_image_progress: numOutputsTotal > 50 ? false : streamImageProgressField.checked,
diff --git a/ui/media/js/parameters.js b/ui/media/js/parameters.js
index 75abecd7..746dc00e 100644
--- a/ui/media/js/parameters.js
+++ b/ui/media/js/parameters.js
@@ -397,6 +397,7 @@ async function getAppConfig() {
document.querySelector("#lora_model_container").style.display = testDiffusers.checked ? "" : "none"
document.querySelector("#lora_alpha_container").style.display =
testDiffusers.checked && loraModelField.value !== "" ? "" : "none"
+ document.querySelector("#clip_skip_config").classList.remove("displayNone")
}
console.log("get config status response", config)