diff --git a/3rd-PARTY-LICENSES b/3rd-PARTY-LICENSES
index bd29393a..78bfe3bb 100644
--- a/3rd-PARTY-LICENSES
+++ b/3rd-PARTY-LICENSES
@@ -712,3 +712,31 @@ FileSaver.js is licensed under the MIT license:
     SOFTWARE.
 
 [1]: http://eligrey.com
+
+croppr.js
+=========
+https://github.com/jamesssooi/Croppr.js
+
+croppr.js is licensed under the MIT license:
+
+    MIT License
+
+    Copyright (c) 2017 James Ooi
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE.
diff --git a/CHANGES.md b/CHANGES.md
index 1ac36410..9e72b44f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -22,6 +22,8 @@ Our focus continues to remain on an easy installation experience, and an
 easy user-interface. While still remaining pretty powerful, in terms of features and speed.
 
 ### Detailed changelog
+* 3.0.0 - 3 Aug 2023 - Enabled diffusers for everyone by default. The old v2 engine can be used by disabling the "Use v3 engine" option in the Settings tab.
+* 2.5.48 - 1 Aug 2023 - (beta-only) Full support for ControlNets. You can select a control image to guide the AI. You can pick a filter to pre-process the image, and one of the known (or custom) controlnet models. Supports `OpenPose`, `Canny`, `Straight Lines`, `Depth`, `Line Art`, `Scribble`, `Soft Edge`, `Shuffle` and `Segment`.
 * 2.5.47 - 30 Jul 2023 - An option to use `Strict Mask Border` while inpainting, to avoid touching areas outside the mask. But this might show a slight outline of the mask, which you will have to touch up separately.
 * 2.5.47 - 29 Jul 2023 - (beta-only) Fix long prompts with SDXL.
 * 2.5.47 - 29 Jul 2023 - (beta-only) Fix red dots in some SDXL images.
diff --git a/How to install and run.txt b/How to install and run.txt
index af783b64..8e83ab7c 100644
--- a/How to install and run.txt
+++ b/How to install and run.txt
@@ -5,10 +5,10 @@ If you haven't downloaded Stable Diffusion UI yet, please download from https://
 
 After downloading, to install please follow these instructions:
 
 For Windows:
-- Please double-click the "Start Stable Diffusion UI.cmd" file inside the "stable-diffusion-ui" folder.
+- Please double-click the "Easy-Diffusion-Windows.exe" file and follow the instructions.
 
 For Linux:
-- Please open a terminal, and go to the "stable-diffusion-ui" directory. Then run ./start.sh
+- Please open a terminal, unzip the Easy-Diffusion-Linux.zip file and go to the "easy-diffusion" directory. Then run ./start.sh
 
 That file will automatically install everything. After that it will start the Stable Diffusion interface in a web browser.
@@ -21,4 +21,4 @@ If you have any problems, please:
 3. Or, file an issue at https://github.com/easydiffusion/easydiffusion/issues
 
 Thanks
-cmdr2 (and contributors to the project)
\ No newline at end of file
+cmdr2 (and contributors to the project)
diff --git a/README.md b/README.md
index b97c35d1..c7848a7c 100644
--- a/README.md
+++ b/README.md
@@ -11,9 +11,9 @@ Does not require technical knowledge, does not require pre-installed software. 1
 
 Click the download button for your operating system:

[Three removed and three added lines here are the OS download-button links; their HTML markup was stripped during extraction and cannot be reconstructed.]

 **Hardware requirements:**
@@ -23,6 +23,7 @@ Click the download button for your operating system:
 - Minimum 8 GB of system RAM.
 - Atleast 25 GB of space on the hard disk.
 
+The installer will take care of whatever is needed.
 If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
 
 ## On Windows:
@@ -132,6 +133,15 @@ We could really use help on these aspects (click to view tasks that need your he
 If you have any code contributions in mind, please feel free to say Hi to us on the [discord server](https://discord.com/invite/u9yhsFmEkB).
 We use the Discord server for development-related discussions, and for helping users.
 
+# Credits
+* Stable Diffusion: https://github.com/Stability-AI/stablediffusion
+* CodeFormer: https://github.com/sczhou/CodeFormer (license: https://github.com/sczhou/CodeFormer/blob/master/LICENSE)
+* GFPGAN: https://github.com/TencentARC/GFPGAN
+* RealESRGAN: https://github.com/xinntao/Real-ESRGAN
+* k-diffusion: https://github.com/crowsonkb/k-diffusion
+* Code contributors and artists on the cmdr2 UI: https://github.com/cmdr2/stable-diffusion-ui and Discord (https://discord.com/invite/u9yhsFmEkB)
+* Lots of contributors on the internet
+
 # Disclaimer
 The authors of this project are not responsible for any content generated using this interface.
 
diff --git a/scripts/check_modules.py b/scripts/check_modules.py
index 301b6163..aecf7576 100644
--- a/scripts/check_modules.py
+++ b/scripts/check_modules.py
@@ -18,7 +18,7 @@ os_name = platform.system()
 modules_to_check = {
     "torch": ("1.11.0", "1.13.1", "2.0.0"),
     "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
-    "sdkit": "1.0.151",
+    "sdkit": "1.0.167",
     "stable-diffusion-sdkit": "2.1.4",
     "rich": "12.6.0",
     "uvicorn": "0.19.0",
diff --git a/ui/easydiffusion/app.py b/ui/easydiffusion/app.py
index 6f8d731e..e2c190f8 100644
--- a/ui/easydiffusion/app.py
+++ b/ui/easydiffusion/app.py
@@ -61,6 +61,7 @@ APP_CONFIG_DEFAULTS = {
     "ui": {
         "open_browser_on_start": True,
     },
+    "test_diffusers": True,
 }
 
 IMAGE_EXTENSIONS = [
@@ -116,7 +117,7 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
 
     def set_config_on_startup(config: dict):
         if getConfig.__test_diffusers_on_startup is None:
-            getConfig.__test_diffusers_on_startup = config.get("test_diffusers", False)
+            getConfig.__test_diffusers_on_startup = config.get("test_diffusers", True)
         config["config_on_startup"] = {"test_diffusers": getConfig.__test_diffusers_on_startup}
 
     if os.path.isfile(config_yaml_path):
diff --git a/ui/easydiffusion/bucket_manager.py b/ui/easydiffusion/bucket_manager.py
index 4400fd17..0d72ed06 100644
--- a/ui/easydiffusion/bucket_manager.py
+++ b/ui/easydiffusion/bucket_manager.py
@@ -71,9 +71,8 @@ def init():
         bucket = crud.get_bucket_by_path(db, path)
 
         if bucket == None:
-            bucket_id = crud.create_bucket(db=db, bucket=schemas.BucketCreate(path=path))
-        else:
-            bucket_id = bucket.id
+            bucket = crud.create_bucket(db=db, bucket=schemas.BucketCreate(path=path))
+        bucket_id = bucket.id
 
         bucketfile = schemas.BucketFileCreate(filename=filename, data=file)
         result = crud.create_bucketfile(db=db, bucketfile=bucketfile, bucket_id=bucket_id)
@@ -92,25 +91,19 @@ def init():
     @server_api.get("/image/{image_path:path}")
     def get_image(image_path: str, db: Session = Depends(get_db)):
-        from easydiffusion.easydb.mappings import Image
+        from easydiffusion.easydb.mappings import GalleryImage
         image_path = str(abspath(image_path))
-        amount = len(db.query(Image).filter(Image.path == image_path).all())
-        if amount > 0:
-            image
= db.query(Image).filter(Image.path == image_path).first() + try: + image = db.query(GalleryImage).filter(GalleryImage.path == image_path).first() return FileResponse(image.path) - else: + except Exception as e: raise HTTPException(status_code=404, detail="Image not found") @server_api.get("/all_images") def get_all_images(db: Session = Depends(get_db)): - from easydiffusion.easydb.mappings import Image - images = db.query(Image).all() - sum_string = "
" - for img in images: - options = f"Path: {img.path}\nPrompt: {img.prompt}\nNegative Prompt: {img.negative_prompt}\nSeed: {img.seed}\nModel: {img.use_stable_diffusion_model}\nSize: {img.height}x{img.width}\nSampler: {img.sampler_name}\nSteps: {img.num_inference_steps}\nGuidance Scale: {img.guidance_scale}\nLoRA: {img.lora}\nUpscaling: {img.use_upscale}\nFace Correction: {img.use_face_correction}\n" - sum_string += f"" - sum_string += "
" - return Response(content=sum_string, media_type="text/html") + from easydiffusion.easydb.mappings import GalleryImage + images = db.query(GalleryImage).all() + return images def get_filename_from_url(url): diff --git a/ui/easydiffusion/easydb/crud.py b/ui/easydiffusion/easydb/crud.py index 7550a52a..65bea255 100644 --- a/ui/easydiffusion/easydb/crud.py +++ b/ui/easydiffusion/easydb/crud.py @@ -19,7 +19,6 @@ def create_bucketfile(db: Session, bucketfile: schemas.BucketFileCreate, bucket_ db_bucketfile = models.BucketFile(**bucketfile.dict(), bucket_id=bucket_id) db.merge(db_bucketfile) db.commit() - from pprint import pprint db_bucketfile = db.query(models.BucketFile).filter(models.BucketFile.bucket_id==bucket_id, models.BucketFile.filename==bucketfile.filename).first() return db_bucketfile diff --git a/ui/easydiffusion/easydb/database.py b/ui/easydiffusion/easydb/database.py index e3c92845..6cb43ecb 100644 --- a/ui/easydiffusion/easydb/database.py +++ b/ui/easydiffusion/easydb/database.py @@ -7,7 +7,6 @@ from sqlalchemy.orm import sessionmaker os.makedirs(app.BUCKET_DIR, exist_ok=True) SQLALCHEMY_DATABASE_URL = "sqlite:///"+os.path.join(app.BUCKET_DIR, "bucket.db") -print("## SQLALCHEMY_DATABASE_URL = ", SQLALCHEMY_DATABASE_URL) engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}) SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) diff --git a/ui/easydiffusion/easydb/mappings.py b/ui/easydiffusion/easydb/mappings.py index ad68ecab..31549709 100644 --- a/ui/easydiffusion/easydb/mappings.py +++ b/ui/easydiffusion/easydb/mappings.py @@ -1,9 +1,10 @@ -from sqlalchemy import Column, Integer, String, Float, Boolean +from sqlalchemy import Column, Integer, String, Float, Boolean, DateTime from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.sql import func Base = declarative_base() -class Image(Base): +class GalleryImage(Base): __tablename__ = 'images' path = Column(String, primary_key=True) @@ -23,10 +24,11 @@ class Image(Base): use_upscale = Column(String) prompt = Column(String) negative_prompt = Column(String) + time_created = Column(DateTime(timezone=True), server_default=func.now()) def __repr__(self): - return "" % ( + return "" % ( self.path, self.seed, self.use_stable_diffusion_model, self.clip_skip, self.use_vae_model, self.sampler_name, self.width, self.height, self.num_inference_steps, self.guidance_scale, self.lora, self.use_hypernetwork_model, self.tiling, self.use_face_correction, self.use_upscale, self.prompt, self.negative_prompt) from easydiffusion.easydb.database import engine -Image.metadata.create_all(engine) \ No newline at end of file +GalleryImage.metadata.create_all(engine) diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py index 1ee5ce9d..845e9126 100644 --- a/ui/easydiffusion/model_manager.py +++ b/ui/easydiffusion/model_manager.py @@ -9,6 +9,7 @@ from easydiffusion.types import ModelsData from easydiffusion.utils import log from sdkit import Context from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db +from sdkit.models.model_loader.controlnet_filters import filters as cn_filters from sdkit.utils import hash_file_quick KNOWN_MODEL_TYPES = [ @@ -19,6 +20,8 @@ KNOWN_MODEL_TYPES = [ "realesrgan", "lora", "codeformer", + "embeddings", + "controlnet", ] MODEL_EXTENSIONS = { "stable-diffusion": [".ckpt", ".safetensors"], @@ -29,6 +32,7 @@ MODEL_EXTENSIONS = { "lora": [".ckpt", ".safetensors"], "codeformer": 
[".pth"], "embeddings": [".pt", ".bin", ".safetensors"], + "controlnet": [".pth", ".safetensors"], } DEFAULT_MODELS = { "stable-diffusion": [ @@ -144,7 +148,7 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models models_to_reload = { model_type: path for model_type, path in models_data.model_paths.items() - if context.model_paths.get(model_type) != path + if context.model_paths.get(model_type) != path or (path is not None and context.models.get(model_type) is None) } if models_data.model_paths.get("codeformer"): @@ -177,10 +181,17 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models def resolve_model_paths(models_data: ModelsData): model_paths = models_data.model_paths for model_type in model_paths: - if model_type in ("latent_upscaler", "nsfw_checker"): # doesn't use model paths + skip_models = cn_filters + ["latent_upscaler", "nsfw_checker"] + if model_type in skip_models: # doesn't use model paths continue if model_type == "codeformer": download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0") + elif model_type == "controlnet": + model_id = model_paths[model_type] + model_info = get_model_info_from_db(model_type=model_type, model_id=model_id) + if model_info: + filename = model_info.get("url", "").split("/")[-1] + download_if_necessary("controlnet", filename, model_id, skip_if_others_exist=False) model_paths[model_type] = resolve_model_to_use(model_paths[model_type], model_type=model_type) @@ -204,17 +215,17 @@ def download_default_models_if_necessary(): print(model_type, "model(s) found.") -def download_if_necessary(model_type: str, file_name: str, model_id: str): +def download_if_necessary(model_type: str, file_name: str, model_id: str, skip_if_others_exist=True): model_path = os.path.join(app.MODELS_DIR, model_type, file_name) expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"] - other_models_exist = any_model_exists(model_type) + other_models_exist = any_model_exists(model_type) and skip_if_others_exist known_model_exists = os.path.exists(model_path) known_model_is_corrupt = known_model_exists and hash_file_quick(model_path) != expected_hash if known_model_is_corrupt or (not other_models_exist and not known_model_exists): print("> download", model_type, model_id) - download_model(model_type, model_id, download_base_dir=app.MODELS_DIR) + download_model(model_type, model_id, download_base_dir=app.MODELS_DIR, download_config_if_available=False) def migrate_legacy_model_location(): @@ -285,12 +296,26 @@ def is_malicious_model(file_path): def getModels(scan_for_malicious: bool = True): models = { "options": { - "stable-diffusion": ["sd-v1-4"], + "stable-diffusion": [{"sd-v1-4": "SD 1.4"}], "vae": [], "hypernetwork": [], "lora": [], - "codeformer": ["codeformer"], + "codeformer": [{"codeformer": "CodeFormer"}], "embeddings": [], + "controlnet": [ + {"control_v11p_sd15_canny": "Canny (*)"}, + {"control_v11p_sd15_openpose": "OpenPose (*)"}, + {"control_v11p_sd15_normalbae": "Normal BAE (*)"}, + {"control_v11f1p_sd15_depth": "Depth (*)"}, + {"control_v11p_sd15_scribble": "Scribble"}, + {"control_v11p_sd15_softedge": "Soft Edge"}, + {"control_v11p_sd15_inpaint": "Inpaint"}, + {"control_v11p_sd15_lineart": "Line Art"}, + {"control_v11p_sd15s2_lineart_anime": "Line Art Anime"}, + {"control_v11p_sd15_mlsd": "Straight Lines"}, + {"control_v11p_sd15_seg": "Segment"}, + {"control_v11e_sd15_shuffle": "Shuffle"}, + ], }, } @@ -299,9 +324,9 @@ def getModels(scan_for_malicious: bool = 
True): class MaliciousModelException(Exception): "Raised when picklescan reports a problem with a model" - def scan_directory(directory, suffixes, directoriesFirst: bool = True): + def scan_directory(directory, suffixes, directoriesFirst: bool = True, default_entries=[]): + tree = list(default_entries) nonlocal models_scanned - tree = [] for entry in sorted( os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()), @@ -320,7 +345,14 @@ def getModels(scan_for_malicious: bool = True): raise MaliciousModelException(entry.path) if scan_for_malicious: known_models[entry.path] = mtime - tree.append(entry.name[: -len(matching_suffix)]) + model_id = entry.name[: -len(matching_suffix)] + model_exists = False + for m in tree: # allows default "named" models, like CodeFormer and known ControlNet models + if (isinstance(m, str) and model_id == m) or (isinstance(m, dict) and model_id in m): + model_exists = True + break + if not model_exists: + tree.append(model_id) elif entry.is_dir(): scan = scan_directory(entry.path, suffixes, directoriesFirst=False) @@ -337,7 +369,8 @@ def getModels(scan_for_malicious: bool = True): os.makedirs(models_dir) try: - models["options"][model_type] = scan_directory(models_dir, model_extensions) + default_tree = models["options"].get(model_type, []) + models["options"][model_type] = scan_directory(models_dir, model_extensions, default_entries=default_tree) except MaliciousModelException as e: models["scan-error"] = str(e) @@ -350,6 +383,7 @@ def getModels(scan_for_malicious: bool = True): listModels(model_type="gfpgan") listModels(model_type="lora") listModels(model_type="embeddings") + listModels(model_type="controlnet") if scan_for_malicious and models_scanned > 0: log.info(f"[green]Scanned {models_scanned} models. 
Nothing infected[/]") diff --git a/ui/easydiffusion/package_manager.py b/ui/easydiffusion/package_manager.py index c246c54d..de64b66c 100644 --- a/ui/easydiffusion/package_manager.py +++ b/ui/easydiffusion/package_manager.py @@ -12,9 +12,9 @@ from easydiffusion import app manifest = { "tensorrt": { "install": [ - "nvidia-cudnn --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", - "tensorrt-libs --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", - "tensorrt --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", + "nvidia-cudnn --pre --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", + "tensorrt-libs --pre --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", + "tensorrt --pre --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com", ], "uninstall": ["tensorrt"], # TODO also uninstall tensorrt-libs and nvidia-cudnn, but do it upon restarting (avoid 'file in use' error) @@ -25,7 +25,7 @@ installing = [] # remove this once TRT releases on pypi if platform.system() == "Windows": trt_dir = os.path.join(app.ROOT_DIR, "tensorrt") - if os.path.exists(trt_dir): + if os.path.exists(trt_dir) and os.path.isdir(trt_dir) and len(os.listdir(trt_dir)) > 0: files = os.listdir(trt_dir) packages = manifest["tensorrt"]["install"] @@ -61,6 +61,10 @@ def install(module_name): raise RuntimeError(f"Can't install unknown package: {module_name}!") commands = manifest[module_name]["install"] + if module_name == "tensorrt": + commands += [ + "protobuf==3.20.3 polygraphy==0.47.1 onnx==1.14.0 --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com" + ] commands = [f"python -m pip install --upgrade {cmd}" for cmd in commands] installing.append(module_name) diff --git a/ui/easydiffusion/runtime.py b/ui/easydiffusion/runtime.py index 4098ee8e..8d2f0186 100644 --- a/ui/easydiffusion/runtime.py +++ b/ui/easydiffusion/runtime.py @@ -31,7 +31,7 @@ def init(device): app_config = app.getConfig() context.test_diffusers = ( - app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main" + app_config.get("test_diffusers", True) and app_config.get("update_branch", "main") != "main" ) log.info("Device usage during initialization:") diff --git a/ui/easydiffusion/server.py b/ui/easydiffusion/server.py index a8f848fd..1ecbbbd3 100644 --- a/ui/easydiffusion/server.py +++ b/ui/easydiffusion/server.py @@ -63,7 +63,7 @@ class SetAppConfigRequest(BaseModel, extra=Extra.allow): ui_open_browser_on_start: bool = None listen_to_network: bool = None listen_port: int = None - test_diffusers: bool = False + test_diffusers: bool = True def init(): diff --git a/ui/easydiffusion/tasks/render_images.py b/ui/easydiffusion/tasks/render_images.py index bbc36aa5..bdf6e3ac 100644 --- a/ui/easydiffusion/tasks/render_images.py +++ b/ui/easydiffusion/tasks/render_images.py @@ -15,6 +15,7 @@ from sdkit.utils import ( img_to_base64_str, img_to_buffer, latent_samples_to_images, + log, ) from .task import Task @@ -63,7 +64,7 @@ class RenderTask(Task): if ( runtime.set_vram_optimizations(context) or self.has_param_changed(context, "clip_skip") - or self.has_param_changed(context, "convert_to_tensorrt") + or self.trt_needs_reload(context) ): models_to_force_reload.append("stable-diffusion") @@ -92,6 +93,29 @@ class RenderTask(Task): new_val = self.models_data.model_params.get("stable-diffusion", {}).get(param_name, False) return 
model["params"].get(param_name) != new_val + def trt_needs_reload(self, context): + if not context.test_diffusers: + return False + if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]: + return True + + model = context.models["stable-diffusion"] + + # curr_convert_to_trt = model["params"].get("convert_to_tensorrt") + new_convert_to_trt = self.models_data.model_params.get("stable-diffusion", {}).get("convert_to_tensorrt", False) + + pipe = model["default"] + is_trt_loaded = hasattr(pipe.unet, "_allocate_trt_buffers") or hasattr( + pipe.unet, "_allocate_trt_buffers_backup" + ) + if new_convert_to_trt and not is_trt_loaded: + return True + + curr_build_config = model["params"].get("trt_build_config") + new_build_config = self.models_data.model_params.get("stable-diffusion", {}).get("trt_build_config", {}) + + return new_convert_to_trt and curr_build_config != new_build_config + def make_images( context, @@ -148,6 +172,7 @@ def make_images_internal( context, req, task_data, + models_data, data_queue, task_temp_images, step_callback, @@ -174,6 +199,7 @@ def generate_images_internal( context, req: GenerateImageRequest, task_data: TaskData, + models_data: ModelsData, data_queue: queue.Queue, task_temp_images: list, step_callback, @@ -197,6 +223,30 @@ def generate_images_internal( if req.init_image is not None and not context.test_diffusers: req.sampler_name = "ddim" + req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8 + + if req.control_image and task_data.control_filter_to_apply: + req.control_image = filter_images(context, req.control_image, task_data.control_filter_to_apply)[0] + + if context.test_diffusers: + pipe = context.models["stable-diffusion"]["default"] + if hasattr(pipe.unet, "_allocate_trt_buffers_backup"): + setattr(pipe.unet, "_allocate_trt_buffers", pipe.unet._allocate_trt_buffers_backup) + delattr(pipe.unet, "_allocate_trt_buffers_backup") + + if hasattr(pipe.unet, "_allocate_trt_buffers"): + convert_to_trt = models_data.model_params["stable-diffusion"].get("convert_to_tensorrt", False) + if convert_to_trt: + pipe.unet.forward = pipe.unet._trt_forward + # pipe.vae.decoder.forward = pipe.vae.decoder._trt_forward + log.info(f"Setting unet.forward to TensorRT") + else: + log.info(f"Not using TensorRT for unet.forward") + pipe.unet.forward = pipe.unet._non_trt_forward + # pipe.vae.decoder.forward = pipe.vae.decoder._non_trt_forward + setattr(pipe.unet, "_allocate_trt_buffers_backup", pipe.unet._allocate_trt_buffers) + delattr(pipe.unet, "_allocate_trt_buffers") + images = generate_images(context, callback=callback, **req.dict()) user_stopped = False except UserInitiatedStop: diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py index 894867b8..fe936ca2 100644 --- a/ui/easydiffusion/types.py +++ b/ui/easydiffusion/types.py @@ -75,6 +75,7 @@ class TaskData(BaseModel): use_controlnet_model: Union[str, List[str]] = None filters: List[str] = [] filter_params: Dict[str, Dict[str, Any]] = {} + control_filter_to_apply: Union[str, List[str]] = None show_only_filtered_image: bool = False block_nsfw: bool = False @@ -135,6 +136,7 @@ class GenerateImageResponse: def json(self): del self.render_request.init_image del self.render_request.init_image_mask + del self.render_request.control_image task_data = self.task_data.dict() task_data.update(self.output_format.dict()) @@ -212,6 +214,9 @@ def convert_legacy_render_req_to_new(old_req: dict): model_paths["latent_upscaler"] = ( 
model_paths["latent_upscaler"] if "latent_upscaler" in model_paths["latent_upscaler"].lower() else None ) + if "control_filter_to_apply" in old_req: + filter_model = old_req["control_filter_to_apply"] + model_paths[filter_model] = filter_model if old_req.get("block_nsfw"): model_paths["nsfw_checker"] = "nsfw_checker" @@ -221,6 +226,9 @@ def convert_legacy_render_req_to_new(old_req: dict): model_params["stable-diffusion"] = { "clip_skip": bool(old_req.get("clip_skip", False)), "convert_to_tensorrt": bool(old_req.get("convert_to_tensorrt", False)), + "trt_build_config": old_req.get( + "trt_build_config", {"batch_size_range": (1, 1), "dimensions_range": [(768, 1024)]} + ), } # move the filter params diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py index bb508229..7f668280 100644 --- a/ui/easydiffusion/utils/save_utils.py +++ b/ui/easydiffusion/utils/save_utils.py @@ -21,6 +21,8 @@ TASK_TEXT_MAPPING = { "seed": "Seed", "use_stable_diffusion_model": "Stable Diffusion model", "clip_skip": "Clip Skip", + "use_controlnet_model": "ControlNet model", + "control_filter_to_apply": "ControlNet Filter", "use_vae_model": "VAE model", "sampler_name": "Sampler", "width": "Width", @@ -155,11 +157,11 @@ def save_images_to_disk( else: return metadata_entries[i]["use_lora_model"] + ":" + str(metadata_entries[i]["lora_alpha"]) - from easydiffusion.easydb.mappings import Image + from easydiffusion.easydb.mappings import GalleryImage from easydiffusion.easydb.database import SessionLocal session = SessionLocal() - session.add(Image( + session.add(GalleryImage( path = path_i, seed = metadata_entries[i]["seed"], use_stable_diffusion_model = metadata_entries[i]["use_stable_diffusion_model"], @@ -258,7 +260,7 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output task_data_metadata.update(output_format.dict()) app_config = app.getConfig() - using_diffusers = app_config.get("test_diffusers", False) + using_diffusers = app_config.get("test_diffusers", True) # Save the metadata in the order defined in TASK_TEXT_MAPPING metadata = {} @@ -301,10 +303,12 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output del metadata["lora_alpha"] if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata: del metadata["latent_upscaler_steps"] + if task_data.use_controlnet_model is None and "control_filter_to_apply" in metadata: + del metadata["control_filter_to_apply"] if not using_diffusers: for key in ( - x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata + x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps", "use_controlnet_model", "control_filter_to_apply"] if x in metadata ): del metadata[key] diff --git a/ui/index.html b/ui/index.html index 15385bda..d5803fcc 100644 --- a/ui/index.html +++ b/ui/index.html @@ -18,12 +18,14 @@ + +
[The remaining ui/index.html hunks lost their HTML markup during extraction and cannot be reconstructed verbatim. The two bare "+" markers above are two added lines in the page head whose markup was stripped. What can still be read from the residue: the header version label changes from "v2.5.47" to "v3.0.0"; new ControlNet controls are added to the settings panel, with a "Click to learn more about ControlNets" link alongside the existing Clip Skip, custom models and VAE help links; the "Click to learn more about TensorRT" line is modified; a "Use as thumbnail" dialog is added, captioned "Use a picture as a thumbnail for embeddings, LoRAs, etc."; and the "Recent sizes" dropdown label is renamed to "Advanced sizes". The affected hunks are @@ -32,7 +34,7 @@, @@ -86,8 +88,8 @@, @@ -144,18 +146,17 @@, @@ -165,6 +166,63 @@, @@ -242,7 +300,7 @@, @@ -271,7 +329,7 @@, @@ -460,7 +518,9 @@, @@ -630,6 +690,15 @@ and @@ -637,6 +706,34 @@; the diff is truncated here.]