Merge pull request #1289 from cmdr2/beta

Beta
Commit e48c73d277, by cmdr2 on 2023-05-22 16:02:22 +05:30 (committed via GitHub).
14 changed files with 120 additions and 67 deletions

View File

@@ -22,6 +22,9 @@
 Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.

 ### Detailed changelog
+* 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
+* 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
+* 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal or less than the non-diffusers version. Performs softmax in half precision, like sdkit does.
 * 2.5.36 - 16 May 2023 - (beta-only) More VRAM optimizations for "balanced" VRAM usage mode.
 * 2.5.36 - 11 May 2023 - (beta-only) More VRAM optimizations for "low" VRAM usage mode.
 * 2.5.36 - 10 May 2023 - (beta-only) Bug fix for "meta" error when using a LoRA in 'low' VRAM usage mode.
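
Note on the half-precision softmax change above: this is a common attention-memory trick, where the softmax over the attention scores is computed in fp16 rather than fp32, roughly halving the largest intermediate tensor. A minimal illustrative sketch in PyTorch (not the actual sdkit implementation):

```python
import torch

def attention_softmax_half(scores: torch.Tensor) -> torch.Tensor:
    # Illustrative only: compute the softmax in half precision to shrink
    # the intermediate, then cast back. Trades a little numerical headroom
    # for VRAM; sdkit's real implementation may differ in detail.
    if scores.dtype == torch.float32:
        return torch.softmax(scores, dim=-1, dtype=torch.float16).float()
    return torch.softmax(scores, dim=-1)
```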

View File

@@ -60,7 +60,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
 ### Image generation
 - **Supports**: "*Text to Image*" and "*Image to Image*".
-- **19 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
+- **21 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `ddpm`, `deis`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
 - **In-Painting**: Specify areas of your image to paint into.
 - **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
 - **Face Correction (GFPGAN)**

View File

@@ -18,7 +18,7 @@ os_name = platform.system()
 modules_to_check = {
     "torch": ("1.11.0", "1.13.1", "2.0.0"),
     "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
-    "sdkit": "1.0.93",
+    "sdkit": "1.0.96",
     "stable-diffusion-sdkit": "2.1.4",
     "rich": "12.6.0",
     "uvicorn": "0.19.0",
@@ -130,10 +130,13 @@ def include_cuda_versions(module_versions: tuple) -> tuple:
 def is_amd_on_linux():
     if os_name == "Linux":
-        with open("/proc/bus/pci/devices", "r") as f:
-            device_info = f.read()
-            if "amdgpu" in device_info and "nvidia" not in device_info:
-                return True
+        try:
+            with open("/proc/bus/pci/devices", "r") as f:
+                device_info = f.read()
+                if "amdgpu" in device_info and "nvidia" not in device_info:
+                    return True
+        except:
+            return False

     return False

View File

@@ -98,8 +98,8 @@ def auto_pick_devices(currently_active_devices):
             continue

         mem_free, mem_total = torch.cuda.mem_get_info(device)
-        mem_free /= float(10 ** 9)
-        mem_total /= float(10 ** 9)
+        mem_free /= float(10**9)
+        mem_total /= float(10**9)
         device_name = torch.cuda.get_device_name(device)
         log.debug(
             f"{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb"
@@ -182,7 +182,7 @@ def get_max_vram_usage_level(device):
     else:
         return "high"

-    mem_total /= float(10 ** 9)
+    mem_total /= float(10**9)
     if mem_total < 4.5:
         return "low"
     elif mem_total < 6.5:
@@ -224,10 +224,10 @@ def is_device_compatible(device):
     # Memory check
     try:
         _, mem_total = torch.cuda.mem_get_info(device)
-        mem_total /= float(10 ** 9)
-        if mem_total < 3.0:
+        mem_total /= float(10**9)
+        if mem_total < 1.9:
             if is_device_compatible.history.get(device) == None:
-                log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
+                log.warn(f"GPU {device} with less than 2 GB of VRAM is not compatible with Stable Diffusion")
                 is_device_compatible.history[device] = 1
             return False
     except RuntimeError as e:
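
A note on the relaxed VRAM check: `torch.cuda.mem_get_info()` reports bytes, which the code divides by 10**9 (decimal gigabytes). A nominal "2 GB" (2 GiB) card therefore reports about 2.15 before driver reservations, so a cutoff of exactly 2.0 could reject real 2 GB cards; 1.9 presumably leaves that headroom (an inference, not stated in the commit):

```python
# Rough arithmetic behind the 1.9 cutoff
nominal_bytes = 2 * 1024**3         # a "2 GB" (GiB) card, in bytes
print(nominal_bytes / 10**9)        # 2.147... decimal GB, before driver reservations
print(nominal_bytes / 10**9 > 1.9)  # True -> 2 GB cards now pass the check
```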

View File

@@ -122,7 +122,7 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
         if context.model_paths.get(model_type) != path
     }

-    if set_vram_optimizations(context):  # reload SD
+    if set_vram_optimizations(context) or set_clip_skip(context, task_data):  # reload SD
         models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]

     for model_type, model_path_in_req in models_to_reload.items():
@@ -157,6 +157,16 @@
     return False

+def set_clip_skip(context: Context, task_data: TaskData):
+    clip_skip = task_data.clip_skip
+
+    if clip_skip != context.clip_skip:
+        context.clip_skip = clip_skip
+        return True
+
+    return False
+
 def make_model_folders():
     for model_type in KNOWN_MODEL_TYPES:
         model_dir_path = os.path.join(app.MODELS_DIR, model_type)
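
The new `set_clip_skip()` mirrors `set_vram_optimizations()`: copy the requested value onto the shared context and return whether it changed, so `reload_models_if_necessary()` can OR the flags together and reload the Stable Diffusion model at most once. A self-contained sketch of the pattern (names hypothetical):

```python
class Context:
    # stand-in for easydiffusion's render context
    clip_skip: bool = False

def set_clip_skip(context: Context, requested: bool) -> bool:
    # True means the setting changed and the SD model needs a reload
    if requested != context.clip_skip:
        context.clip_skip = requested
        return True
    return False

ctx = Context()
assert set_clip_skip(ctx, True) is True   # changed -> reload
assert set_clip_skip(ctx, True) is False  # unchanged -> no reload
```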

View File

@@ -72,7 +72,7 @@ def make_images(
 def print_task_info(req: GenerateImageRequest, task_data: TaskData):
-    req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
+    req_str = pprint.pformat(get_printable_request(req, task_data)).replace("[", "\[")
     task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
     log.info(f"request: {req_str}")
     log.info(f"task data: {task_str}")

View File

@@ -48,6 +48,7 @@ class TaskData(BaseModel):
     metadata_output_format: str = "txt"  # or "json"
     stream_image_progress: bool = False
     stream_image_progress_interval: int = 5
+    clip_skip: bool = False

 class MergeRequest(BaseModel):
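
With `clip_skip` now part of `TaskData`, API clients can opt in per request. A hedged example of submitting it to a locally running server (assumes the default port 9000 and the `/render` endpoint; the exact payload shape may vary across versions):

```python
import json
from urllib.request import Request, urlopen

payload = {
    "prompt": "an astronaut riding a horse, detailed, 4k",
    "seed": 42,
    "num_inference_steps": 25,
    "guidance_scale": 7.5,
    "use_stable_diffusion_model": "sd-v1-4",
    "clip_skip": True,  # the new TaskData field; defaults to False
}
req = Request(
    "http://localhost:9000/render",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
print(urlopen(req).status)
```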

View File

@@ -15,23 +15,24 @@ img_number_regex = re.compile("([0-9]{5,})")
 # keep in sync with `ui/media/js/dnd.js`
 TASK_TEXT_MAPPING = {
     "prompt": "Prompt",
+    "negative_prompt": "Negative Prompt",
+    "seed": "Seed",
+    "use_stable_diffusion_model": "Stable Diffusion model",
+    "clip_skip": "Clip Skip",
+    "use_vae_model": "VAE model",
+    "sampler_name": "Sampler",
     "width": "Width",
     "height": "Height",
-    "seed": "Seed",
     "num_inference_steps": "Steps",
     "guidance_scale": "Guidance Scale",
     "prompt_strength": "Prompt Strength",
+    "use_lora_model": "LoRA model",
+    "lora_alpha": "LoRA Strength",
+    "use_hypernetwork_model": "Hypernetwork model",
+    "hypernetwork_strength": "Hypernetwork Strength",
     "use_face_correction": "Use Face Correction",
     "use_upscale": "Use Upscaling",
     "upscale_amount": "Upscale By",
-    "sampler_name": "Sampler",
-    "negative_prompt": "Negative Prompt",
-    "use_stable_diffusion_model": "Stable Diffusion model",
-    "use_vae_model": "VAE model",
-    "use_hypernetwork_model": "Hypernetwork model",
-    "hypernetwork_strength": "Hypernetwork Strength",
-    "use_lora_model": "LoRA model",
-    "lora_alpha": "LoRA Strength",
 }
time_placeholders = {
@@ -179,27 +180,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
 def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
-    metadata = get_printable_request(req)
-    metadata.update(
-        {
-            "use_stable_diffusion_model": task_data.use_stable_diffusion_model,
-            "use_vae_model": task_data.use_vae_model,
-            "use_hypernetwork_model": task_data.use_hypernetwork_model,
-            "use_lora_model": task_data.use_lora_model,
-            "use_face_correction": task_data.use_face_correction,
-            "use_upscale": task_data.use_upscale,
-        }
-    )
-    if metadata["use_upscale"] is not None:
-        metadata["upscale_amount"] = task_data.upscale_amount
-    if task_data.use_hypernetwork_model is None:
-        del metadata["hypernetwork_strength"]
-    if task_data.use_lora_model is None:
-        if "lora_alpha" in metadata:
-            del metadata["lora_alpha"]
-    app_config = app.getConfig()
-    if not app_config.get("test_diffusers", False) and "use_lora_model" in metadata:
-        del metadata["use_lora_model"]
+    metadata = get_printable_request(req, task_data)

     # if text, format it in the text format expected by the UI
     is_txt_format = task_data.metadata_output_format.lower() == "txt"
@@ -213,12 +194,33 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
     return entries

-def get_printable_request(req: GenerateImageRequest):
-    metadata = req.dict()
-    del metadata["init_image"]
-    del metadata["init_image_mask"]
-    if req.init_image is None:
+def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
+    req_metadata = req.dict()
+    task_data_metadata = task_data.dict()
+
+    # Save the metadata in the order defined in TASK_TEXT_MAPPING
+    metadata = {}
+    for key in TASK_TEXT_MAPPING.keys():
+        if key in req_metadata:
+            metadata[key] = req_metadata[key]
+        elif key in task_data_metadata:
+            metadata[key] = task_data_metadata[key]
+
+    # Clean up the metadata
+    if req.init_image is None and "prompt_strength" in metadata:
         del metadata["prompt_strength"]
+    if task_data.use_upscale is None and "upscale_amount" in metadata:
+        del metadata["upscale_amount"]
+    if task_data.use_hypernetwork_model is None and "hypernetwork_strength" in metadata:
+        del metadata["hypernetwork_strength"]
+    if task_data.use_lora_model is None and "lora_alpha" in metadata:
+        del metadata["lora_alpha"]
+
+    app_config = app.getConfig()
+    if not app_config.get("test_diffusers", False):
+        for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip"] if x in metadata):
+            del metadata[key]
+
     return metadata
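
The rewrite of `get_printable_request()` above also fixes metadata ordering: fields are emitted in the order defined by `TASK_TEXT_MAPPING` (which Python dicts preserve), with task-level fields like `clip_skip` merged in automatically. A small sketch of the ordering idea:

```python
# Abridged mapping; the real one lists the fields in display order
TASK_TEXT_MAPPING = {"prompt": "Prompt", "seed": "Seed", "clip_skip": "Clip Skip"}

def ordered_metadata(req_fields: dict, task_fields: dict) -> dict:
    metadata = {}
    for key in TASK_TEXT_MAPPING:  # insertion order is preserved
        if key in req_fields:
            metadata[key] = req_fields[key]
        elif key in task_fields:
            metadata[key] = task_fields[key]
    return metadata

print(ordered_metadata({"seed": 42, "prompt": "a cat"}, {"clip_skip": True}))
# {'prompt': 'a cat', 'seed': 42, 'clip_skip': True}
```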

View File

@@ -30,7 +30,7 @@
     <h1>
         <img id="logo_img" src="/media/images/icon-512x512.png" >
         Easy Diffusion
-        <small>v2.5.36 <span id="updateBranchLabel"></span></small>
+        <small>v2.5.37 <span id="updateBranchLabel"></span></small>
     </h1>
 </div>
 <div id="server-status">
@@ -135,10 +135,13 @@
     <button id="reload-models" class="secondaryButton reloadModels"><i class='fa-solid fa-rotate'></i></button>
     <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about custom models</span></i></a>
 </td></tr>
-<!-- <tr id="modelConfigSelection" class="pl-5"><td><label for="model_config">Model Config:</label></td><td>
-    <select id="model_config" name="model_config">
-    </select>
-</td></tr> -->
+<tr class="pl-5 displayNone" id="clip_skip_config">
+    <td><label for="clip_skip">Clip Skip:</label></td>
+    <td>
+        <input id="clip_skip" name="clip_skip" type="checkbox">
+        <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Clip-Skip" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Clip Skip</span></i></a>
+    </td>
+</tr>
 <tr class="pl-5"><td><label for="vae_model">Custom VAE:</label></td><td>
     <input id="vae_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
     <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
@@ -154,16 +157,18 @@
     <option value="dpm2_a">DPM2 Ancestral</option>
     <option value="lms">LMS</option>
     <option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
-    <option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
+    <option value="dpmpp_2s_a" class="k_diffusion-only">DPM++ 2s Ancestral (Karras)</option>
     <option value="dpmpp_2m">DPM++ 2m (Karras)</option>
-    <option value="dpmpp_sde">DPM++ SDE (Karras)</option>
-    <option value="dpm_fast">DPM Fast (Karras)</option>
-    <option value="dpm_adaptive">DPM Adaptive (Karras)</option>
-    <option value="unipc_snr">UniPC SNR</option>
+    <option value="dpmpp_sde" class="k_diffusion-only">DPM++ SDE (Karras)</option>
+    <option value="dpm_fast" class="k_diffusion-only">DPM Fast (Karras)</option>
+    <option value="dpm_adaptive" class="k_diffusion-only">DPM Adaptive (Karras)</option>
+    <option value="ddpm" class="diffusers-only">DDPM</option>
+    <option value="deis" class="diffusers-only">DEIS</option>
+    <option value="unipc_snr" class="k_diffusion-only">UniPC SNR</option>
     <option value="unipc_tu">UniPC TU</option>
-    <option value="unipc_snr_2">UniPC SNR 2</option>
+    <option value="unipc_snr_2" class="k_diffusion-only">UniPC SNR 2</option>
     <option value="unipc_tu_2">UniPC TU 2</option>
-    <option value="unipc_tq">UniPC TQ</option>
+    <option value="unipc_tq" class="k_diffusion-only">UniPC TQ</option>
 </select>
 <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
 </td></tr>

View File

@@ -13,6 +13,7 @@ const SETTINGS_IDS_LIST = [
     "num_outputs_total",
     "num_outputs_parallel",
     "stable_diffusion_model",
+    "clip_skip",
     "vae_model",
     "hypernetwork_model",
     "lora_model",

View File

@@ -37,6 +37,7 @@ function parseBoolean(stringValue) {
     }
 }

+// keep in sync with `ui/easydiffusion/utils/save_utils.py`
 const TASK_MAPPING = {
     prompt: {
         name: "Prompt",
@@ -240,6 +241,14 @@ const TASK_MAPPING = {
         readUI: () => stableDiffusionModelField.value,
         parse: (val) => val,
     },
+    clip_skip: {
+        name: "Clip Skip",
+        setUI: (value) => {
+            clip_skip.checked = value
+        },
+        readUI: () => clip_skip.checked,
+        parse: (val) => Boolean(val),
+    },
     use_vae_model: {
         name: "VAE model",
         setUI: (use_vae_model) => {

View File

@@ -750,6 +750,7 @@
     sampler_name: "string",
     use_stable_diffusion_model: "string",
+    clip_skip: "boolean",
     num_inference_steps: "number",
     guidance_scale: "number",
@@ -763,6 +764,7 @@
 const TASK_DEFAULTS = {
     sampler_name: "plms",
     use_stable_diffusion_model: "sd-v1-4",
+    clip_skip: false,
     num_inference_steps: 50,
     guidance_scale: 7.5,
     negative_prompt: "",

View File

@@ -13,6 +13,11 @@ const taskConfigSetup = {
     num_inference_steps: "Inference Steps",
     guidance_scale: "Guidance Scale",
     use_stable_diffusion_model: "Model",
+    clip_skip: {
+        label: "Clip Skip",
+        visible: ({ reqBody }) => reqBody?.clip_skip,
+        value: ({ reqBody }) => "yes",
+    },
     use_vae_model: {
         label: "VAE",
         visible: ({ reqBody }) => reqBody?.use_vae_model !== undefined && reqBody?.use_vae_model.trim() !== "",
@@ -82,6 +87,7 @@ let useUpscalingField = document.querySelector("#use_upscale")
 let upscaleModelField = document.querySelector("#upscale_model")
 let upscaleAmountField = document.querySelector("#upscale_amount")
 let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion")
+let clipSkipField = document.querySelector("#clip_skip")
 let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None")
 let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None")
 let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider")
@@ -1224,6 +1230,7 @@ function getCurrentUserRequest() {
     sampler_name: samplerField.value,
     //render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
     use_stable_diffusion_model: stableDiffusionModelField.value,
+    clip_skip: clipSkipField.checked,
     use_vae_model: vaeModelField.value,
     stream_progress_updates: true,
     stream_image_progress: numOutputsTotal > 50 ? false : streamImageProgressField.checked,

View File

@@ -388,15 +388,25 @@ async function getAppConfig() {
     if (config.net && config.net.listen_port !== undefined) {
         listenPortField.value = config.net.listen_port
     }

-    if (config.test_diffusers === undefined || config.update_branch === "main") {
-        testDiffusers.checked = false
+    const testDiffusersEnabled = config.test_diffusers && config.update_branch !== "main"
+    testDiffusers.checked = testDiffusersEnabled
+
+    if (!testDiffusersEnabled) {
         document.querySelector("#lora_model_container").style.display = "none"
         document.querySelector("#lora_alpha_container").style.display = "none"
+        document.querySelectorAll("#sampler_name option.diffusers-only").forEach(option => {
+            option.style.display = "none"
+        })
     } else {
-        testDiffusers.checked = config.test_diffusers && config.update_branch !== "main"
-        document.querySelector("#lora_model_container").style.display = testDiffusers.checked ? "" : "none"
-        document.querySelector("#lora_alpha_container").style.display =
-            testDiffusers.checked && loraModelField.value !== "" ? "" : "none"
+        document.querySelector("#lora_model_container").style.display = ""
+        document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
+        document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach(option => {
+            option.disabled = true
+        })
+        document.querySelector("#clip_skip_config").classList.remove("displayNone")
     }

     console.log("get config status response", config)