diff --git a/How to install and run.txt b/How to install and run.txt
index af783b64..8e83ab7c 100644
--- a/How to install and run.txt
+++ b/How to install and run.txt
@@ -5,10 +5,10 @@ If you haven't downloaded Stable Diffusion UI yet, please download from https://
After downloading, to install please follow these instructions:
For Windows:
-- Please double-click the "Start Stable Diffusion UI.cmd" file inside the "stable-diffusion-ui" folder.
+- Please double-click the "Easy-Diffusion-Windows.exe" file and follow the instructions.
For Linux:
-- Please open a terminal, and go to the "stable-diffusion-ui" directory. Then run ./start.sh
+- Please open a terminal, unzip the Easy-Diffusion-Linux.zip file and go to the "easy-diffusion" directory. Then run ./start.sh
That file will automatically install everything. After that it will start the Stable Diffusion interface in a web browser.
@@ -21,4 +21,4 @@ If you have any problems, please:
3. Or, file an issue at https://github.com/easydiffusion/easydiffusion/issues
Thanks
-cmdr2 (and contributors to the project)
\ No newline at end of file
+cmdr2 (and contributors to the project)
diff --git a/README.md b/README.md
index b97c35d1..8acafd76 100644
--- a/README.md
+++ b/README.md
@@ -11,9 +11,9 @@ Does not require technical knowledge, does not require pre-installed software. 1
Click the download button for your operating system:
-
-
-
+
+
+
**Hardware requirements:**
@@ -23,6 +23,7 @@ Click the download button for your operating system:
- Minimum 8 GB of system RAM.
- Atleast 25 GB of space on the hard disk.
+
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
## On Windows:
@@ -132,6 +133,15 @@ We could really use help on these aspects (click to view tasks that need your he
If you have any code contributions in mind, please feel free to say Hi to us on the [discord server](https://discord.com/invite/u9yhsFmEkB). We use the Discord server for development-related discussions, and for helping users.
+# Credits
+* Stable Diffusion: https://github.com/Stability-AI/stablediffusion
+* CodeFormer: https://github.com/sczhou/CodeFormer (license: https://github.com/sczhou/CodeFormer/blob/master/LICENSE)
+* GFPGAN: https://github.com/TencentARC/GFPGAN
+* RealESRGAN: https://github.com/xinntao/Real-ESRGAN
+* k-diffusion: https://github.com/crowsonkb/k-diffusion
+* Code contributors and artists on the cmdr2 UI: https://github.com/cmdr2/stable-diffusion-ui and Discord (https://discord.com/invite/u9yhsFmEkB)
+* Lots of contributors on the internet
+
# Disclaimer
The authors of this project are not responsible for any content generated using this interface.
diff --git a/scripts/check_modules.py b/scripts/check_modules.py
index bc043c7c..d549a16d 100644
--- a/scripts/check_modules.py
+++ b/scripts/check_modules.py
@@ -18,7 +18,7 @@ os_name = platform.system()
modules_to_check = {
"torch": ("1.11.0", "1.13.1", "2.0.0"),
"torchvision": ("0.12.0", "0.14.1", "0.15.1"),
- "sdkit": "1.0.156",
+ "sdkit": "1.0.165",
"stable-diffusion-sdkit": "2.1.4",
"rich": "12.6.0",
"uvicorn": "0.19.0",
diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index 63f79859..845e9126 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -148,7 +148,7 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
models_to_reload = {
model_type: path
for model_type, path in models_data.model_paths.items()
- if context.model_paths.get(model_type) != path
+ if context.model_paths.get(model_type) != path or (path is not None and context.models.get(model_type) is None)
}
if models_data.model_paths.get("codeformer"):
diff --git a/ui/easydiffusion/tasks/render_images.py b/ui/easydiffusion/tasks/render_images.py
index 8df208b6..bdf6e3ac 100644
--- a/ui/easydiffusion/tasks/render_images.py
+++ b/ui/easydiffusion/tasks/render_images.py
@@ -15,6 +15,7 @@ from sdkit.utils import (
img_to_base64_str,
img_to_buffer,
latent_samples_to_images,
+ log,
)
from .task import Task
@@ -93,15 +94,27 @@ class RenderTask(Task):
return model["params"].get(param_name) != new_val
def trt_needs_reload(self, context):
- if not self.has_param_changed(context, "convert_to_tensorrt"):
+ if not context.test_diffusers:
return False
+ if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
+ return True
model = context.models["stable-diffusion"]
- pipe = model["default"]
- if hasattr(pipe.unet, "_allocate_trt_buffers"): # TRT already loaded
- return False
- return True
+ # curr_convert_to_trt = model["params"].get("convert_to_tensorrt")
+ new_convert_to_trt = self.models_data.model_params.get("stable-diffusion", {}).get("convert_to_tensorrt", False)
+
+ pipe = model["default"]
+ is_trt_loaded = hasattr(pipe.unet, "_allocate_trt_buffers") or hasattr(
+ pipe.unet, "_allocate_trt_buffers_backup"
+ )
+ if new_convert_to_trt and not is_trt_loaded:
+ return True
+
+ curr_build_config = model["params"].get("trt_build_config")
+ new_build_config = self.models_data.model_params.get("stable-diffusion", {}).get("trt_build_config", {})
+
+ return new_convert_to_trt and curr_build_config != new_build_config
def make_images(
@@ -210,17 +223,29 @@ def generate_images_internal(
if req.init_image is not None and not context.test_diffusers:
req.sampler_name = "ddim"
+ req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # round down to the nearest multiple of 8
+
if req.control_image and task_data.control_filter_to_apply:
req.control_image = filter_images(context, req.control_image, task_data.control_filter_to_apply)[0]
if context.test_diffusers:
pipe = context.models["stable-diffusion"]["default"]
+ if hasattr(pipe.unet, "_allocate_trt_buffers_backup"):
+ setattr(pipe.unet, "_allocate_trt_buffers", pipe.unet._allocate_trt_buffers_backup)
+ delattr(pipe.unet, "_allocate_trt_buffers_backup")
+
if hasattr(pipe.unet, "_allocate_trt_buffers"):
convert_to_trt = models_data.model_params["stable-diffusion"].get("convert_to_tensorrt", False)
- pipe.unet.forward = pipe.unet._trt_forward if convert_to_trt else pipe.unet._non_trt_forward
- # pipe.vae.decoder.forward = (
- # pipe.vae.decoder._trt_forward if convert_to_trt else pipe.vae.decoder._non_trt_forward
- # )
+ if convert_to_trt:
+ pipe.unet.forward = pipe.unet._trt_forward
+ # pipe.vae.decoder.forward = pipe.vae.decoder._trt_forward
+ log.info(f"Setting unet.forward to TensorRT")
+ else:
+ log.info(f"Not using TensorRT for unet.forward")
+ pipe.unet.forward = pipe.unet._non_trt_forward
+ # pipe.vae.decoder.forward = pipe.vae.decoder._non_trt_forward
+ setattr(pipe.unet, "_allocate_trt_buffers_backup", pipe.unet._allocate_trt_buffers)
+ delattr(pipe.unet, "_allocate_trt_buffers")
images = generate_images(context, callback=callback, **req.dict())
user_stopped = False
diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py
index 181a9505..fe936ca2 100644
--- a/ui/easydiffusion/types.py
+++ b/ui/easydiffusion/types.py
@@ -226,6 +226,9 @@ def convert_legacy_render_req_to_new(old_req: dict):
model_params["stable-diffusion"] = {
"clip_skip": bool(old_req.get("clip_skip", False)),
"convert_to_tensorrt": bool(old_req.get("convert_to_tensorrt", False)),
+ "trt_build_config": old_req.get(
+ "trt_build_config", {"batch_size_range": (1, 1), "dimensions_range": [(768, 1024)]}
+ ),
}
# move the filter params
diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py
index 49743554..89dae991 100644
--- a/ui/easydiffusion/utils/save_utils.py
+++ b/ui/easydiffusion/utils/save_utils.py
@@ -21,6 +21,8 @@ TASK_TEXT_MAPPING = {
"seed": "Seed",
"use_stable_diffusion_model": "Stable Diffusion model",
"clip_skip": "Clip Skip",
+ "use_controlnet_model": "ControlNet model",
+ "control_filter_to_apply": "ControlNet Filter",
"use_vae_model": "VAE model",
"sampler_name": "Sampler",
"width": "Width",
@@ -260,10 +262,12 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
del metadata["lora_alpha"]
if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
del metadata["latent_upscaler_steps"]
+ if task_data.use_controlnet_model is None and "control_filter_to_apply" in metadata:
+ del metadata["control_filter_to_apply"]
if not using_diffusers:
for key in (
- x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata
+ x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps", "use_controlnet_model", "control_filter_to_apply"] if x in metadata
):
del metadata[key]
diff --git a/ui/index.html b/ui/index.html
index 2e158780..faee0ff2 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -163,60 +163,63 @@
Click to learn more about Clip Skip
-