diff --git a/.gitignore b/.gitignore
index b5157e17..90bf0a44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@ installer
installer.tar
dist
.idea/*
+node_modules/*
\ No newline at end of file
diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 00000000..b0f8227f
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,9 @@
+*.min.*
+*.py
+*.json
+*.html
+/*
+!/ui
+/ui/easydiffusion
+!/ui/plugins
+!/ui/media
\ No newline at end of file
diff --git a/.prettierrc.json b/.prettierrc.json
new file mode 100644
index 00000000..a42b3fd7
--- /dev/null
+++ b/.prettierrc.json
@@ -0,0 +1,7 @@
+{
+ "printWidth": 120,
+ "tabWidth": 4,
+ "semi": false,
+ "arrowParens": "always",
+ "trailingComma": "es5"
+}
diff --git a/CHANGES.md b/CHANGES.md
index 4b225b32..b13083dc 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,18 +2,19 @@
## v2.5
### Major Changes
-- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
+- **Nearly twice as fast** - significantly faster speed of image generation. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Mac M1/M2 support** - Experimental support for Mac M1/M2. Thanks @michaelgallacher, @JeLuf and vishae.
+- **AMD support for Linux** - Experimental support for AMD GPUs on Linux. Thanks @DianaNites and @JeLuf.
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use Stable Diffusion 2.1 models, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, the SD 2.0 and 2.1 models require more GPU and System RAM, as compared to the SD 1.4 and 1.5 models.
- **11 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps! We've added the Karras and UniPC samplers. Thanks @Schorny for the UniPC samplers.
-- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging . Thanks @JeLuf.
+- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/easydiffusion/easydiffusion/wiki/Model-Merging . Thanks @JeLuf.
- **Fast loading/unloading of VAEs** - No longer needs to reload the entire Stable Diffusion model, each time you change the VAE
- **Database of known models** - automatically picks the right configuration for known models. E.g. we automatically detect and apply "v" parameterization (required for some SD 2.0 models), and "fp32" attention precision (required for some SD 2.1 models).
- **Color correction for img2img** - an option to preserve the color profile (histogram) of the initial image. This is especially useful if you're getting red-tinted images after inpainting/masking.
- **Three GPU Memory Usage Settings** - `High` (fastest, maximum VRAM usage), `Balanced` (default - almost as fast, significantly lower VRAM usage), `Low` (slowest, very low VRAM usage). The `Low` setting is applied automatically for GPUs with less than 4 GB of VRAM.
- **Find models in sub-folders** - This allows you to organize your models into sub-folders inside `models/stable-diffusion`, instead of keeping them all in a single folder. Thanks @patriceac and @ogmaresca.
-- **Custom Modifier Categories** - Ability to create custom modifiers with thumbnails, and custom categories (and hierarchy of categories). Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Modifiers . Thanks @ogmaresca.
+- **Custom Modifier Categories** - Ability to create custom modifiers with thumbnails, and custom categories (and hierarchy of categories). Details: https://github.com/easydiffusion/easydiffusion/wiki/Custom-Modifiers . Thanks @ogmaresca.
- **Embed metadata, or save as TXT/JSON** - You can now embed the metadata directly into the images, or save them as text or json files (choose in the Settings tab). Thanks @patriceac.
- **Major rewrite of the code** - Most of the codebase has been reorganized and rewritten, to make it more manageable and easier for new developers to contribute features. We've separated our core engine into a new project called `sdkit`, which allows anyone to easily integrate Stable Diffusion (and related modules like GFPGAN etc) into their programming projects (via a simple `pip install sdkit`): https://github.com/easydiffusion/sdkit/
- **Name change** - Last, and probably the least, the UI is now called "Easy Diffusion". It indicates the focus of this project - an easy way for people to play with Stable Diffusion.
@@ -21,14 +22,49 @@
Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
### Detailed changelog
+* 2.5.41 - 24 Jun 2023 - (beta-only) Fix broken inpainting in low VRAM usage mode.
+* 2.5.41 - 24 Jun 2023 - (beta-only) Fix a recent regression where the LoRA would not get applied when changing SD models.
+* 2.5.41 - 23 Jun 2023 - Fix a regression where latent upscaler stopped working on PCs without a graphics card.
+* 2.5.41 - 20 Jun 2023 - Automatically fix black images if fp32 attention precision is required in diffusers.
+* 2.5.41 - 19 Jun 2023 - Another fix for multi-gpu rendering (in all VRAM usage modes).
+* 2.5.41 - 13 Jun 2023 - Fix multi-gpu bug with "low" VRAM usage mode while generating images.
+* 2.5.41 - 12 Jun 2023 - Fix multi-gpu bug with CodeFormer.
+* 2.5.41 - 6 Jun 2023 - Allow changing the strength of CodeFormer, and slightly improved styling of the CodeFormer options.
+* 2.5.41 - 5 Jun 2023 - Allow sharing an Easy Diffusion instance via https://try.cloudflare.com/ . You can find this option at the bottom of the Settings tab. Thanks @JeLuf.
+* 2.5.41 - 5 Jun 2023 - Show an option to download for tiled images. Shows a button on the generated image. Creates larger images by tiling them with the image generated by Easy Diffusion. Thanks @JeLuf.
+* 2.5.41 - 5 Jun 2023 - (beta-only) Allow LoRA strengths between -2 and 2. Thanks @ogmaresca.
+* 2.5.40 - 5 Jun 2023 - Reduce the VRAM usage of Latent Upscaling when using "balanced" VRAM usage mode.
+* 2.5.40 - 5 Jun 2023 - Fix the "realesrgan" key error when using CodeFormer with more than 1 image in a batch.
+* 2.5.40 - 3 Jun 2023 - Added CodeFormer as another option for fixing faces and eyes. CodeFormer tends to perform better than GFPGAN for many images. Thanks @patriceac for the implementation, and for contacting the CodeFormer team (who were supportive of it being integrated into Easy Diffusion).
+* 2.5.39 - 25 May 2023 - (beta-only) Seamless Tiling - make seamlessly tiled images, e.g. rock and grass textures. Thanks @JeLuf.
+* 2.5.38 - 24 May 2023 - Better reporting of errors, and show an explanation if the user cannot disable the "Use CPU" setting.
+* 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model.
+* 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
+* 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
+* 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal or less than the non-diffusers version. Performs softmax in half precision, like sdkit does.
+* 2.5.36 - 16 May 2023 - (beta-only) More VRAM optimizations for "balanced" VRAM usage mode.
+* 2.5.36 - 11 May 2023 - (beta-only) More VRAM optimizations for "low" VRAM usage mode.
+* 2.5.36 - 10 May 2023 - (beta-only) Bug fix for "meta" error when using a LoRA in 'low' VRAM usage mode.
+* 2.5.35 - 8 May 2023 - Allow dragging a zoomed-in image (after opening an image with the "expand" button). Thanks @ogmaresca.
+* 2.5.35 - 3 May 2023 - (beta-only) First round of VRAM Optimizations for the "Test Diffusers" version. This change significantly reduces the amount of VRAM used by the diffusers version during image generation. The VRAM usage is still not equal to the "non-diffusers" version, but more optimizations are coming soon.
+* 2.5.34 - 22 Apr 2023 - Don't start the browser in an incognito new profile (on Windows). Thanks @JeLuf.
+* 2.5.33 - 21 Apr 2023 - Install PyTorch 2.0 on new installations (on Windows and Linux).
+* 2.5.32 - 19 Apr 2023 - Automatically check for black images, and set full-precision if necessary (for attn). This means custom models based on Stable Diffusion v2.1 will just work, without needing special command-line arguments or editing of yaml config files.
+* 2.5.32 - 18 Apr 2023 - Automatic support for AMD graphics cards on Linux. Thanks @DianaNites and @JeLuf.
+* 2.5.31 - 10 Apr 2023 - Reduce VRAM usage while upscaling.
+* 2.5.31 - 6 Apr 2023 - Allow seeds up to `4,294,967,295`. Thanks @ogmaresca.
+* 2.5.31 - 6 Apr 2023 - Buttons to show the previous/next image in the image popup. Thanks @ogmaresca.
+* 2.5.30 - 5 Apr 2023 - Fix a bug where the JPEG image quality wasn't being respected when embedding the metadata into it. Thanks @JeLuf.
+* 2.5.30 - 1 Apr 2023 - (beta-only) Slider to control the strength of the LoRA model.
+* 2.5.30 - 28 Mar 2023 - Refactor task entry config to use a generating method. Added ability for plugins to easily add to this. Removed confusing sentence from `contributing.md`
* 2.5.30 - 28 Mar 2023 - Allow the user to undo the deletion of tasks or images, instead of showing a pop-up each time. The new `Undo` button will be present at the top of the UI. Thanks @JeLuf.
* 2.5.30 - 28 Mar 2023 - Support saving lossless WEBP images. Thanks @ogmaresca.
* 2.5.30 - 28 Mar 2023 - Lots of bug fixes for the UI (Read LoRA flag in metadata files, new prompt weight format with scrollwheel, fix overflow with lots of tabs, clear button in image editor, shorter filenames in download). Thanks @patriceac, @JeLuf and @ogmaresca.
-* 2.5.29 - 27 Mar 2023 - Fix a bug where some non-square images would fail while inpainting with a `The size of tensor a must match size of tensor b` error.
-* 2.5.29 - 27 Mar 2023 - Fix the `incorrect number of channels` error, when given a PNG image with an alpha channel in `Test Diffusers`.
-* 2.5.29 - 27 Mar 2023 - Fix broken inpainting in `Test Diffusers` (beta).
-* 2.5.28 - 24 Mar 2023 - Support for weighted prompts and long prompt lengths (not limited to 77 tokens). This change requires enabling the `Test Diffusers` setting in beta (in the Settings tab), and restarting the program.
-* 2.5.27 - 21 Mar 2023 - LoRA support, accessible by enabling the `Test Diffusers` setting (in the Settings tab in the UI). This change switches the internal engine to diffusers (if the `Test Diffusers` setting is enabled). If the `Test Diffusers` flag is disabled, it'll have no impact for the user.
+* 2.5.29 - 27 Mar 2023 - (beta-only) Fix a bug where some non-square images would fail while inpainting with a `The size of tensor a must match size of tensor b` error.
+* 2.5.29 - 27 Mar 2023 - (beta-only) Fix the `incorrect number of channels` error, when given a PNG image with an alpha channel in `Test Diffusers`.
+* 2.5.29 - 27 Mar 2023 - (beta-only) Fix broken inpainting in `Test Diffusers`.
+* 2.5.28 - 24 Mar 2023 - (beta-only) Support for weighted prompts and long prompt lengths (not limited to 77 tokens). This change requires enabling the `Test Diffusers` setting in beta (in the Settings tab), and restarting the program.
+* 2.5.27 - 21 Mar 2023 - (beta-only) LoRA support, accessible by enabling the `Test Diffusers` setting (in the Settings tab in the UI). This change switches the internal engine to diffusers (if the `Test Diffusers` setting is enabled). If the `Test Diffusers` flag is disabled, it'll have no impact for the user.
* 2.5.26 - 15 Mar 2023 - Allow styling the buttons displayed on an image. Update the API to allow multiple buttons and text labels in a single row. Thanks @ogmaresca.
* 2.5.26 - 15 Mar 2023 - View images in full-screen, by either clicking on the image, or clicking the "Full screen" icon next to the Seed number on the image. Thanks @ogmaresca for the internal API.
* 2.5.25 - 14 Mar 2023 - Button to download all the images, and all the metadata as a zip file. This is available at the top of the UI, as well as on each image. Thanks @JeLuf.
@@ -36,7 +72,7 @@ Our focus continues to remain on an easy installation experience, and an easy us
* 2.5.24 - 11 Mar 2023 - Button to load an image mask from a file.
* 2.5.24 - 10 Mar 2023 - Logo change. Image credit: @lazlo_vii.
* 2.5.23 - 8 Mar 2023 - Experimental support for Mac M1/M2. Thanks @michaelgallacher, @JeLuf and vishae!
-* 2.5.23 - 8 Mar 2023 - Ability to create custom modifiers with thumbnails, and custom categories (and hierarchy of categories). More details - https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Modifiers . Thanks @ogmaresca.
+* 2.5.23 - 8 Mar 2023 - Ability to create custom modifiers with thumbnails, and custom categories (and hierarchy of categories). More details - https://github.com/easydiffusion/easydiffusion/wiki/Custom-Modifiers . Thanks @ogmaresca.
* 2.5.22 - 28 Feb 2023 - Minor styling changes to UI buttons, and the models dropdown.
* 2.5.22 - 28 Feb 2023 - Lots of UI-related bug fixes. Thanks @patriceac.
* 2.5.21 - 22 Feb 2023 - An option to control the size of the image thumbnails. You can use the `Display options` in the top-right corner to change this. Thanks @JeLuf.
@@ -61,7 +97,7 @@ Our focus continues to remain on an easy installation experience, and an easy us
* 2.5.14 - 3 Feb 2023 - Fix the 'Make Similar Images' button, which was producing incorrect images (weren't very similar).
* 2.5.13 - 1 Feb 2023 - Fix the remaining GPU memory leaks, including a better fix (more comprehensive) for the change in 2.5.12 (27 Jan).
* 2.5.12 - 27 Jan 2023 - Fix a memory leak, which made the UI unresponsive after an out-of-memory error. The allocated memory is now freed-up after an error.
-* 2.5.11 - 25 Jan 2023 - UI for Merging Models. Thanks @JeLuf. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
+* 2.5.11 - 25 Jan 2023 - UI for Merging Models. Thanks @JeLuf. More info: https://github.com/easydiffusion/easydiffusion/wiki/Model-Merging
* 2.5.10 - 24 Jan 2023 - Reduce the VRAM usage for img2img in 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Fix a bug where img2img would produce poorer-quality images for the same settings, as compared to version 2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Reduce the VRAM usage for 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of the UI.
@@ -90,8 +126,8 @@ Our focus continues to remain on an easy installation experience, and an easy us
- **Automatic scanning for malicious model files** - using `picklescan`, and support for `safetensor` model format. Thanks @JeLuf
- **Image Editor** - for drawing simple images for guiding the AI. Thanks @mdiller
- **Use pre-trained hypernetworks** - for improving the quality of images. Thanks @C0bra5
-- **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder
-- **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang . More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs
+- **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/easydiffusion/easydiffusion/wiki/VAE-Variational-Auto-Encoder
+- **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang . More info: https://github.com/easydiffusion/easydiffusion/wiki/Run-on-Multiple-GPUs
- **Cleaner UI design** - Show settings and help in new tabs, instead of dropdown popups (which were buggy). Thanks @mdiller
- **Progress bar.** Thanks @mdiller
- **Custom Image Modifiers** - You can now save your custom image modifiers! Your saved modifiers can include special characters like `{}, (), [], |`
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 02ce6fc6..bb6408c8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
Hi there, these instructions are meant for the developers of this project.
-If you only want to use the Stable Diffusion UI, you've downloaded the wrong file. In that case, please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation
+If you only want to use the Stable Diffusion UI, you've downloaded the wrong file. In that case, please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation
Thanks
@@ -13,7 +13,7 @@ If you would like to contribute to this project, there is a discord for discussi
This is in-flux, but one way to get a development environment running for editing the UI of this project is:
(swap `.sh` or `.bat` in instructions depending on your environment, and be sure to adjust any paths to match where you're working)
-1) Install the project to a new location using the [usual installation process](https://github.com/cmdr2/stable-diffusion-ui#installation), e.g. to `/projects/stable-diffusion-ui-archive`
+1) Install the project to a new location using the [usual installation process](https://github.com/easydiffusion/easydiffusion#installation), e.g. to `/projects/stable-diffusion-ui-archive`
2) Start the newly installed project, and check that you can view and generate images on `localhost:9000`
3) Next, please clone the project repository using `git clone` (e.g. to `/projects/stable-diffusion-ui-repo`)
4) Close the server (started in step 2), and edit `/projects/stable-diffusion-ui-archive/scripts/on_env_start.sh` (or `on_env_start.bat`)
@@ -42,8 +42,6 @@ or for Windows
10) Congrats, now any changes you make in your repo `ui` folder are linked to this running archive of the app and can be previewed in the browser.
11) Please update CHANGES.md in your pull requests.
-Check the `ui/frontend/build/README.md` for instructions on running and building the React code.
-
## Development environment for Installer changes
Build the Windows installer using Windows, and the Linux installer using Linux. Don't mix the two, and don't use WSL. An Ubuntu VM is fine for building the Linux installer on a Windows host.
diff --git a/How to install and run.txt b/How to install and run.txt
index e48d217c..af783b64 100644
--- a/How to install and run.txt
+++ b/How to install and run.txt
@@ -1,6 +1,6 @@
Congrats on downloading Stable Diffusion UI, version 2!
-If you haven't downloaded Stable Diffusion UI yet, please download from https://github.com/cmdr2/stable-diffusion-ui#installation
+If you haven't downloaded Stable Diffusion UI yet, please download from https://github.com/easydiffusion/easydiffusion#installation
After downloading, to install please follow these instructions:
@@ -16,9 +16,9 @@ To start the UI in the future, please run the same command mentioned above.
If you have any problems, please:
-1. Try the troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
+1. Try the troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting
2. Or, seek help from the community at https://discord.com/invite/u9yhsFmEkB
-3. Or, file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
+3. Or, file an issue at https://github.com/easydiffusion/easydiffusion/issues
Thanks
cmdr2 (and contributors to the project)
\ No newline at end of file
diff --git a/NSIS/sdui.nsi b/NSIS/sdui.nsi
index 0a1a8f9d..aabe6775 100644
--- a/NSIS/sdui.nsi
+++ b/NSIS/sdui.nsi
@@ -235,7 +235,7 @@ Section "MainSection" SEC01
NScurl::http get "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt" "$INSTDIR\models\stable-diffusion\sd-v1-4.ckpt" /CANCEL /INSIST /END
DetailPrint 'Downloading the GFPGAN model...'
- NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth" "$INSTDIR\models\gfpgan\GFPGANv1.3.pth" /CANCEL /INSIST /END
+ NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" "$INSTDIR\models\gfpgan\GFPGANv1.4.pth" /CANCEL /INSIST /END
DetailPrint 'Downloading the RealESRGAN_x4plus model...'
NScurl::http get "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth" "$INSTDIR\models\realesrgan\RealESRGAN_x4plus.pth" /CANCEL /INSIST /END
diff --git a/PRIVACY.md b/PRIVACY.md
new file mode 100644
index 00000000..543a167d
--- /dev/null
+++ b/PRIVACY.md
@@ -0,0 +1,9 @@
+// placeholder until a more formal and legal-sounding privacy policy document is written. but the information below is true.
+
+This is a summary of whether Easy Diffusion uses your data or tracks you:
+* The short answer is - Easy Diffusion does *not* use your data, and does *not* track you.
+* Easy Diffusion does not send your prompts or usage or analytics to anyone. There is no tracking. We don't even know how many people use Easy Diffusion, let alone their prompts.
+* Easy Diffusion fetches updates to the code whenever it starts up. It does this by contacting GitHub directly, via SSL (secure connection). Only your computer and GitHub and [this repository](https://github.com/easydiffusion/easydiffusion) are involved, and no third party is involved. Some countries intercept SSL connections; that's not something we can do much about. GitHub does *not* share statistics (even with me) about how many people fetched code updates.
+* Easy Diffusion fetches the models from huggingface.co and github.com, if they don't exist on your PC. For e.g. if the safety checker (NSFW) model doesn't exist, it'll try to download it.
+* Easy Diffusion fetches code packages from pypi.org, which is the standard hosting service for all Python projects. That's where packages installed via `pip install` are stored.
+* Occasionally, antivirus software is known to *incorrectly* flag and delete some model files, which will result in Easy Diffusion re-downloading `pytorch_model.bin`. This *incorrect deletion* affects other Stable Diffusion UIs as well, like Invoke AI - https://itch.io/post/7509488
diff --git a/README BEFORE YOU RUN THIS.txt b/README BEFORE YOU RUN THIS.txt
index e9f81544..a989b835 100644
--- a/README BEFORE YOU RUN THIS.txt
+++ b/README BEFORE YOU RUN THIS.txt
@@ -3,6 +3,6 @@ Hi there,
What you have downloaded is meant for the developers of this project, not for users.
If you only want to use the Stable Diffusion UI, you've downloaded the wrong file.
-Please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation
+Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation
Thanks
\ No newline at end of file
diff --git a/README.md b/README.md
index 51ba812a..b97c35d1 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
# Easy Diffusion 2.5
-### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer.
+### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your computer.
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
-[Installation guide](#installation) | [Troubleshooting guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) | [](https://discord.com/invite/u9yhsFmEkB)(for support queries, and development discussions)
+[Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | [](https://discord.com/invite/u9yhsFmEkB)(for support queries, and development discussions)

@@ -11,11 +11,18 @@ Does not require technical knowledge, does not require pre-installed software. 1
Click the download button for your operating system:
-
-
-
+
+
+
+**Hardware requirements:**
+- **Windows:** NVIDIA graphics card (minimum 2 GB RAM), or run on your CPU.
+- **Linux:** NVIDIA or AMD graphics card (minimum 2 GB RAM), or run on your CPU.
+- **Mac:** M1 or M2, or run on your CPU.
+- Minimum 8 GB of system RAM.
+- At least 25 GB of space on the hard disk.
+
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
## On Windows:
@@ -53,7 +60,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
### Image generation
- **Supports**: "*Text to Image*" and "*Image to Image*".
-- **19 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
+- **21 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `ddpm`, `deis`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
- **In-Painting**: Specify areas of your image to paint into.
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
- **Face Correction (GFPGAN)**
@@ -63,6 +70,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
- **Attention/Emphasis**: () in the prompt increases the model's attention to enclosed words, and [] decreases it.
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`.
- **Prompt Matrix**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`.
+- **Prompt Set**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut on the {moon,earth}`
- **1-click Upscale/Face Correction**: Upscale or correct an image after it has been generated.
- **Make Similar Images**: Click to generate multiple variations of a generated image.
- **NSFW Setting**: A setting in the UI to control *NSFW content*.
@@ -75,11 +83,11 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
- **Use custom VAE models**
- **Use pre-trained Hypernetworks**
- **Use custom GFPGAN models**
-- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Plugins), or write your own plugin to add features to the project!
+- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins), or write your own plugin to add features to the project!
### Performance and security
- **Fast**: Creates a 512x512 image with euler_a in 5 seconds, on an NVIDIA 3060 12GB.
-- **Low Memory Usage**: Create 512x512 images with less than 3 GB of GPU RAM, and 768x768 images with less than 4 GB of GPU RAM!
+- **Low Memory Usage**: Create 512x512 images with less than 2 GB of GPU RAM, and 768x768 images with less than 3 GB of GPU RAM!
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
- **Multi-GPU support**: Automatically spreads your tasks across multiple GPUs (if available), for faster performance!
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
@@ -108,21 +116,13 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi

-
-# System Requirements
-1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
-2. An NVIDIA graphics card, preferably with 4GB or more of VRAM. If you don't have a compatible graphics card, it'll automatically run in the slower "CPU Mode".
-3. Minimum 8 GB of RAM and 25GB of disk space.
-
-You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.
-
----
# How to use?
-Please refer to our [guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use) to understand how to use the features in this UI.
+Please refer to our [guide](https://github.com/easydiffusion/easydiffusion/wiki/How-to-Use) to understand how to use the features in this UI.
# Bugs reports and code contributions welcome
-If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
+If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/easydiffusion/easydiffusion/issues).
We could really use help on these aspects (click to view tasks that need your help):
* [User Interface](https://github.com/users/cmdr2/projects/1/views/1)
diff --git a/build.bat b/build.bat
index b9c6b9ab..2c7890ee 100644
--- a/build.bat
+++ b/build.bat
@@ -2,7 +2,7 @@
@echo "Hi there, what you are running is meant for the developers of this project, not for users." & echo.
@echo "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file."
-@echo "Please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation" & echo.
+@echo "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation" & echo.
@echo "If you are actually a developer of this project, please type Y and press enter" & echo.
set /p answer=Are you a developer of this project (Y/N)?
diff --git a/build.sh b/build.sh
index a7ed152c..dfb8f420 100755
--- a/build.sh
+++ b/build.sh
@@ -2,7 +2,7 @@
printf "Hi there, what you are running is meant for the developers of this project, not for users.\n\n"
printf "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file.\n"
-printf "Please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation\n\n"
+printf "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation \n\n"
printf "If you are actually a developer of this project, please type Y and press enter\n\n"
read -p "Are you a developer of this project (Y/N) " yn
diff --git a/package.json b/package.json
new file mode 100644
index 00000000..fbf1dadb
--- /dev/null
+++ b/package.json
@@ -0,0 +1,9 @@
+{
+ "scripts": {
+ "prettier-fix": "npx prettier --write \"./**/*.js\"",
+ "prettier-check": "npx prettier --check \"./**/*.js\""
+ },
+ "devDependencies": {
+ "prettier": "^1.19.1"
+ }
+}
diff --git a/scripts/Developer Console.cmd b/scripts/Developer Console.cmd
index 921a9dca..0efbda13 100644
--- a/scripts/Developer Console.cmd
+++ b/scripts/Developer Console.cmd
@@ -2,6 +2,8 @@
echo "Opening Stable Diffusion UI - Developer Console.." & echo.
+cd /d %~dp0
+
set PATH=C:\Windows\System32;%PATH%
@rem set legacy and new installer's PATH, if they exist
@@ -21,6 +23,8 @@ call git --version
call where conda
call conda --version
+echo.
+echo COMSPEC=%COMSPEC%
echo.
@rem activate the legacy environment (if present) and set PYTHONPATH
diff --git a/scripts/Start Stable Diffusion UI.cmd b/scripts/Start Stable Diffusion UI.cmd
index 4f8555ea..9a4a6303 100644
--- a/scripts/Start Stable Diffusion UI.cmd
+++ b/scripts/Start Stable Diffusion UI.cmd
@@ -36,8 +36,9 @@ call git --version
call where conda
call conda --version
+echo.
+echo COMSPEC=%COMSPEC%
@rem Download the rest of the installer and UI
call scripts\on_env_start.bat
-
@pause
diff --git a/scripts/bootstrap.bat b/scripts/bootstrap.bat
index d3cdd19f..8c1069c8 100644
--- a/scripts/bootstrap.bat
+++ b/scripts/bootstrap.bat
@@ -11,7 +11,7 @@ setlocal enabledelayedexpansion
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set INSTALL_ENV_DIR=%cd%\installer_files\env
set LEGACY_INSTALL_ENV_DIR=%cd%\installer
-set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
+set MICROMAMBA_DOWNLOAD_URL=https://github.com/easydiffusion/easydiffusion/releases/download/v1.1/micromamba.exe
set umamba_exists=F
set OLD_APPDATA=%APPDATA%
diff --git a/scripts/check_modules.py b/scripts/check_modules.py
index 416ad851..4634adb3 100644
--- a/scripts/check_modules.py
+++ b/scripts/check_modules.py
@@ -1,13 +1,161 @@
-'''
-This script checks if the given modules exist
-'''
+"""
+This script checks and installs the required modules.
-import sys
-import pkgutil
+This script runs inside the legacy "stable-diffusion" folder
-modules = sys.argv[1:]
-missing_modules = []
-for m in modules:
- if pkgutil.find_loader(m) is None:
- print('module', m, 'not found')
- exit(1)
+TODO - Maybe replace the bulk of this script with a call to `pip install -r requirements.txt`, with
+a custom index URL depending on the platform.
+
+"""
+
+import os
+from importlib.metadata import version as pkg_version
+import platform
+import traceback
+
+os_name = platform.system()
+
+modules_to_check = {
+ "torch": ("1.11.0", "1.13.1", "2.0.0"),
+ "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
+ "sdkit": "1.0.112",
+ "stable-diffusion-sdkit": "2.1.4",
+ "rich": "12.6.0",
+ "uvicorn": "0.19.0",
+ "fastapi": "0.85.1",
+ "pycloudflared": "0.2.0",
+ # "xformers": "0.0.16",
+}
+modules_to_log = ["torch", "torchvision", "sdkit", "stable-diffusion-sdkit"]
+
+
+def version(module_name: str) -> str:
+ try:
+ return pkg_version(module_name)
+ except:
+ return None
+
+
+def install(module_name: str, module_version: str):
+ if module_name == "xformers" and (os_name == "Darwin" or is_amd_on_linux()):
+ return
+
+ index_url = None
+ if module_name in ("torch", "torchvision"):
+ module_version, index_url = apply_torch_install_overrides(module_version)
+
+ if is_amd_on_linux(): # hack until AMD works properly on torch 2.0 (avoids black images on some cards)
+ if module_name == "torch":
+ module_version = "1.13.1+rocm5.2"
+ elif module_name == "torchvision":
+ module_version = "0.14.1+rocm5.2"
+ elif os_name == "Darwin":
+ if module_name == "torch":
+ module_version = "1.13.1"
+ elif module_name == "torchvision":
+ module_version = "0.14.1"
+
+ install_cmd = f"python -m pip install --upgrade {module_name}=={module_version}"
+ if index_url:
+ install_cmd += f" --index-url {index_url}"
+ if module_name == "sdkit" and version("sdkit") is not None:
+ install_cmd += " -q"
+
+ print(">", install_cmd)
+ os.system(install_cmd)
+
+
+def init():
+ for module_name, allowed_versions in modules_to_check.items():
+ if os.path.exists(f"../src/{module_name}"):
+ print(f"Skipping {module_name} update, since it's in developer/editable mode")
+ continue
+
+ allowed_versions, latest_version = get_allowed_versions(module_name, allowed_versions)
+
+ requires_install = False
+ if module_name in ("torch", "torchvision"):
+ if version(module_name) is None: # allow any torch version
+ requires_install = True
+ elif os_name == "Darwin" and ( # force mac to downgrade from torch 2.0
+ version("torch").startswith("2.") or version("torchvision").startswith("0.15.")
+ ):
+ requires_install = True
+ elif version(module_name) not in allowed_versions:
+ requires_install = True
+
+ if requires_install:
+ try:
+ install(module_name, latest_version)
+ except:
+ traceback.print_exc()
+ fail(module_name)
+
+ if module_name in modules_to_log:
+ print(f"{module_name}: {version(module_name)}")
+
+
+### utilities
+
+
+def get_allowed_versions(module_name: str, allowed_versions: tuple):
+ allowed_versions = (allowed_versions,) if isinstance(allowed_versions, str) else allowed_versions
+ latest_version = allowed_versions[-1]
+
+ if module_name in ("torch", "torchvision"):
+ allowed_versions = include_cuda_versions(allowed_versions)
+
+ return allowed_versions, latest_version
+
+
+def apply_torch_install_overrides(module_version: str):
+ index_url = None
+ if os_name == "Windows":
+ module_version += "+cu117"
+ index_url = "https://download.pytorch.org/whl/cu117"
+ elif is_amd_on_linux():
+ index_url = "https://download.pytorch.org/whl/rocm5.2"
+
+ return module_version, index_url
+
+
+def include_cuda_versions(module_versions: tuple) -> tuple:
+ "Adds CUDA-specific versions to the list of allowed version numbers"
+
+ allowed_versions = tuple(module_versions)
+ allowed_versions += tuple(f"{v}+cu116" for v in module_versions)
+ allowed_versions += tuple(f"{v}+cu117" for v in module_versions)
+ allowed_versions += tuple(f"{v}+rocm5.2" for v in module_versions)
+ allowed_versions += tuple(f"{v}+rocm5.4.2" for v in module_versions)
+
+ return allowed_versions
+
+
+def is_amd_on_linux():
+ if os_name == "Linux":
+ try:
+ with open("/proc/bus/pci/devices", "r") as f:
+ device_info = f.read()
+ if "amdgpu" in device_info and "nvidia" not in device_info:
+ return True
+ except:
+ return False
+
+ return False
+
+
+def fail(module_name):
+ print(
+ f"""Error installing {module_name}. Sorry about that, please try to:
+1. Run this installer again.
+2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting
+3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
+4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues
+Thanks!"""
+ )
+ exit(1)
+
+
+### start
+
+init()
diff --git a/scripts/developer_console.sh b/scripts/developer_console.sh
index 73972568..57846eeb 100755
--- a/scripts/developer_console.sh
+++ b/scripts/developer_console.sh
@@ -39,6 +39,8 @@ if [ "$0" == "bash" ]; then
export PYTHONPATH="$(pwd)/stable-diffusion/env/lib/python3.8/site-packages"
fi
+ export PYTHONNOUSERSITE=y
+
which python
python --version
diff --git a/scripts/functions.sh b/scripts/functions.sh
index 5b1be7f4..477b7743 100644
--- a/scripts/functions.sh
+++ b/scripts/functions.sh
@@ -15,9 +15,9 @@ fail() {
Error downloading Stable Diffusion UI. Sorry about that, please try to:
1. Run this installer again.
- 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
+ 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting
3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
- 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
+ 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues
Thanks!
@@ -31,7 +31,7 @@ EOF
filesize() {
case "$(uname -s)" in
Linux*) stat -c "%s" $1;;
- Darwin*) stat -f "%z" $1;;
+ Darwin*) /usr/bin/stat -f "%z" $1;;
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac
}
diff --git a/scripts/get_config.py b/scripts/get_config.py
new file mode 100644
index 00000000..9cdfb2fe
--- /dev/null
+++ b/scripts/get_config.py
@@ -0,0 +1,46 @@
+import os
+import argparse
+import sys
+
+# The config file is in the same directory as this script
+config_directory = os.path.dirname(__file__)
+config_yaml = os.path.join(config_directory, "config.yaml")
+config_json = os.path.join(config_directory, "config.json")
+
+parser = argparse.ArgumentParser(description='Get values from config file')
+parser.add_argument('--default', dest='default', action='store',
+ help='default value, to be used if the setting is not defined in the config file')
+parser.add_argument('key', metavar='key', nargs='+',
+ help='config key to return')
+
+args = parser.parse_args()
+
+
+if os.path.isfile(config_yaml):
+ import yaml
+ with open(config_yaml, 'r') as configfile:
+ try:
+ config = yaml.safe_load(configfile)
+ except Exception as e:
+ print(e, file=sys.stderr)
+ config = {}
+elif os.path.isfile(config_json):
+ import json
+ with open(config_json, 'r') as configfile:
+ try:
+ config = json.load(configfile)
+ except Exception as e:
+ print(e, file=sys.stderr)
+ config = {}
+else:
+ config = {}
+
+for k in args.key:
+ if k in config:
+ config = config[k]
+ else:
+ if args.default != None:
+ print(args.default)
+ exit()
+
+print(config)
diff --git a/scripts/on_env_start.bat b/scripts/on_env_start.bat
index 57dc5da0..0871973f 100644
--- a/scripts/on_env_start.bat
+++ b/scripts/on_env_start.bat
@@ -8,6 +8,20 @@ if exist "scripts\config.bat" (
@call scripts\config.bat
)
+if exist "scripts\user_config.bat" (
+ @call scripts\user_config.bat
+)
+
+if exist "stable-diffusion\env" (
+ @set PYTHONPATH=%PYTHONPATH%;%cd%\stable-diffusion\env\lib\site-packages
+)
+
+if exist "scripts\get_config.py" (
+ @FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=main update_branch`) DO (
+ @SET update_branch=%%F
+ )
+)
+
if "%update_branch%"=="" (
set update_branch=main
)
@@ -41,10 +55,10 @@ if "%update_branch%"=="" (
@echo. & echo "Downloading Easy Diffusion..." & echo.
@echo "Using the %update_branch% channel" & echo.
- @call git clone -b "%update_branch%" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files && (
+ @call git clone -b "%update_branch%" https://github.com/easydiffusion/easydiffusion.git sd-ui-files && (
@echo sd_ui_git_cloned >> scripts\install_status.txt
) || (
- @echo "Error downloading Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
+ @echo "Error downloading Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues" & echo "Thanks!"
pause
@exit /b
)
@@ -53,6 +67,7 @@ if "%update_branch%"=="" (
@xcopy sd-ui-files\ui ui /s /i /Y /q
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
+@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
diff --git a/scripts/on_env_start.sh b/scripts/on_env_start.sh
index 7e180f02..d936924e 100755
--- a/scripts/on_env_start.sh
+++ b/scripts/on_env_start.sh
@@ -4,10 +4,22 @@ source ./scripts/functions.sh
printf "\n\nEasy Diffusion\n\n"
+export PYTHONNOUSERSITE=y
+
if [ -f "scripts/config.sh" ]; then
source scripts/config.sh
fi
+if [ -f "scripts/user_config.sh" ]; then
+ source scripts/user_config.sh
+fi
+
+export PYTHONPATH=$(pwd)/installer_files/env/lib/python3.8/site-packages:$(pwd)/stable-diffusion/env/lib/python3.8/site-packages
+
+if [ -f "scripts/get_config.py" ]; then
+ export update_branch="$( python scripts/get_config.py --default=main update_branch )"
+fi
+
if [ "$update_branch" == "" ]; then
export update_branch="main"
fi
@@ -26,7 +38,7 @@ else
printf "\n\nDownloading Easy Diffusion..\n\n"
printf "Using the $update_branch channel\n\n"
- if git clone -b "$update_branch" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files ; then
+ if git clone -b "$update_branch" https://github.com/easydiffusion/easydiffusion.git sd-ui-files ; then
echo sd_ui_git_cloned >> scripts/install_status.txt
else
fail "git clone failed"
@@ -38,6 +50,7 @@ cp -Rf sd-ui-files/ui .
cp sd-ui-files/scripts/on_sd_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
+cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/start.sh .
cp sd-ui-files/scripts/developer_console.sh .
cp sd-ui-files/scripts/functions.sh scripts/
diff --git a/scripts/on_sd_start.bat b/scripts/on_sd_start.bat
index 50925816..860361d4 100644
--- a/scripts/on_sd_start.bat
+++ b/scripts/on_sd_start.bat
@@ -4,11 +4,11 @@
@REM Note to self: Please rewrite this in Python. For the sake of your own sanity.
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
-@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
+@copy sd-ui-files\scripts\get_config.py scripts\ /Y
if exist "%cd%\profile" (
- set USERPROFILE=%cd%\profile
+ set HF_HOME=%cd%\profile\.cache\huggingface
)
@rem set the correct installer path (current vs legacy)
@@ -26,7 +26,7 @@ if exist "%cd%\stable-diffusion\env" (
@rem activate the installer env
call conda activate
@if "%ERRORLEVEL%" NEQ "0" (
- @echo. & echo "Error activating conda for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
+ @echo. & echo "Error activating conda for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues" & echo "Thanks!" & echo.
pause
exit /b
)
@@ -34,8 +34,6 @@ call conda activate
@REM remove the old version of the dev console script, if it's still present
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
-@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
-
@rem create the stable-diffusion folder, to work with legacy installations
if not exist "stable-diffusion" mkdir stable-diffusion
cd stable-diffusion
@@ -49,134 +47,28 @@ if exist "env" (
if exist src rename src src-old
if exist ldm rename ldm ldm-old
-if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
-if not exist "..\models\gfpgan" mkdir "..\models\gfpgan"
-if not exist "..\models\realesrgan" mkdir "..\models\realesrgan"
-if not exist "..\models\vae" mkdir "..\models\vae"
-
-@rem migrate the legacy models to the correct path (if already downloaded)
-if exist "sd-v1-4.ckpt" move sd-v1-4.ckpt ..\models\stable-diffusion\
-if exist "custom-model.ckpt" move custom-model.ckpt ..\models\stable-diffusion\
-if exist "GFPGANv1.3.pth" move GFPGANv1.3.pth ..\models\gfpgan\
-if exist "RealESRGAN_x4plus.pth" move RealESRGAN_x4plus.pth ..\models\realesrgan\
-if exist "RealESRGAN_x4plus_anime_6B.pth" move RealESRGAN_x4plus_anime_6B.pth ..\models\realesrgan\
if not exist "%INSTALL_ENV_DIR%\DLLs\libssl-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libssl-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
if not exist "%INSTALL_ENV_DIR%\DLLs\libcrypto-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libcrypto-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
-@rem install torch and torchvision
-call python ..\scripts\check_modules.py torch torchvision
-if "%ERRORLEVEL%" EQU "0" (
- echo "torch and torchvision have already been installed."
-) else (
- echo "Installing torch and torchvision.."
-
- @REM prevent from using packages from the user's home directory, to avoid conflicts
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- call python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 || (
- echo "Error installing torch. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
- pause
- exit /b
- )
-)
-
+@rem install or upgrade the required modules
set PATH=C:\Windows\System32;%PATH%
-@rem install/upgrade sdkit
-call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan
-if "%ERRORLEVEL%" EQU "0" (
- echo "sdkit is already installed."
+@REM prevent from using packages from the user's home directory, to avoid conflicts
+set PYTHONNOUSERSITE=1
+set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
- @rem skip sdkit upgrade if in developer-mode
- if not exist "..\src\sdkit" (
- @REM prevent from using packages from the user's home directory, to avoid conflicts
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- call python -m pip install --upgrade sdkit==1.0.60 -q || (
- echo "Error updating sdkit"
- )
- )
-) else (
- echo "Installing sdkit: https://pypi.org/project/sdkit/"
-
- @REM prevent from using packages from the user's home directory, to avoid conflicts
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- call python -m pip install sdkit==1.0.60 || (
- echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
- pause
- exit /b
- )
-)
-
-call python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
-
-@rem upgrade stable-diffusion-sdkit
-call python -m pip install --upgrade stable-diffusion-sdkit==2.1.4 -q || (
- echo "Error updating stable-diffusion-sdkit"
-)
-call python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
-
-@rem install rich
-call python ..\scripts\check_modules.py rich
-if "%ERRORLEVEL%" EQU "0" (
- echo "rich has already been installed."
-) else (
- echo "Installing rich.."
-
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- call python -m pip install rich || (
- echo "Error installing rich. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
- pause
- exit /b
- )
-)
-
-@rem install ruamel.yaml
-call python ..\scripts\check_modules.py ruamel.yaml
-if "%ERRORLEVEL%" EQU "0" (
- echo "ruamel.yaml has already been installed."
-) else (
- echo "Installing ruamel.yaml.."
-
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- call python -m pip install ruamel.yaml==0.17.21 || (
- echo "Error installing ruamel.yaml. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
- pause
- exit /b
- )
-)
-
-set PATH=C:\Windows\System32;%PATH%
-
-call python ..\scripts\check_modules.py uvicorn fastapi
-@if "%ERRORLEVEL%" EQU "0" (
- echo "Packages necessary for Easy Diffusion were already installed"
-) else (
- @echo. & echo "Downloading packages necessary for Easy Diffusion..." & echo.
-
- set PYTHONNOUSERSITE=1
- set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
-
- @call conda install -c conda-forge -y uvicorn fastapi || (
- echo "Error installing the packages necessary for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
- pause
- exit /b
- )
+@rem Download the required packages
+call python ..\scripts\check_modules.py
+if "%ERRORLEVEL%" NEQ "0" (
+ pause
+ exit /b
)
call WHERE uvicorn > .tmp
@>nul findstr /m "uvicorn" .tmp
@if "%ERRORLEVEL%" NEQ "0" (
- @echo. & echo "UI packages not found! Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
+ @echo. & echo "UI packages not found! Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues" & echo "Thanks!" & echo.
pause
exit /b
)
@@ -186,162 +78,6 @@ call WHERE uvicorn > .tmp
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
)
-@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
- for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
- echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
- ) else (
- for %%J in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
- echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 7 GB Model."
- ) else (
- for %%K in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
- echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the Waifu Model."
- ) else (
- echo. & echo "The model file present at models\stable-diffusion\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
- del "..\models\stable-diffusion\sd-v1-4.ckpt"
- )
- )
- )
-)
-
-@if not exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
- @echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
-
- @call curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ..\models\stable-diffusion\sd-v1-4.ckpt
-
- @if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
- for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
- echo. & echo "Error: The downloaded model file was invalid! Bytes downloaded: %%~zI" & echo.
- echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
- ) else (
- @echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
-)
-
-
-
-@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
- for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
- echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
- ) else (
- echo. & echo "The GFPGAN model file present at models\gfpgan\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
- del "..\models\gfpgan\GFPGANv1.3.pth"
- )
-)
-
-@if not exist "..\models\gfpgan\GFPGANv1.3.pth" (
- @echo. & echo "Downloading data files (weights) for GFPGAN (Face Correction).." & echo.
-
- @call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ..\models\gfpgan\GFPGANv1.3.pth
-
- @if exist "..\models\gfpgan\GFPGANv1.3.pth" (
- for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
- echo. & echo "Error: The downloaded GFPGAN model file was invalid! Bytes downloaded: %%~zI" & echo.
- echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
- ) else (
- @echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
-)
-
-
-
-@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
- for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
- echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
- ) else (
- echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
- del "..\models\realesrgan\RealESRGAN_x4plus.pth"
- )
-)
-
-@if not exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
- @echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.." & echo.
-
- @call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ..\models\realesrgan\RealESRGAN_x4plus.pth
-
- @if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
- for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
- echo. & echo "Error: The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: %%~zI" & echo.
- echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
- ) else (
- @echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
-)
-
-
-
-@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
- for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
- echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
- ) else (
- echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
- del "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth"
- )
-)
-
-@if not exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
- @echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.." & echo.
-
- @call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth
-
- @if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
- for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
- echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
- echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
- ) else (
- @echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
-)
-
-
-
-@if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
- for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" EQU "334695179" (
- echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
- ) else (
- echo. & echo "The default VAE (sd-vae-ft-mse-original) file present at models\vae\vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
- del "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt"
- )
-)
-
-@if not exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
- @echo. & echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).." & echo.
-
- @call curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt
-
- @if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
- for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" NEQ "334695179" (
- echo. & echo "Error: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: %%~zI" & echo.
- echo. & echo "Error downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
- ) else (
- @echo. & echo "Error downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
- pause
- exit /b
- )
-)
-
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" NEQ "0" (
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
@@ -360,14 +96,25 @@ call python --version
@cd ..
@set SD_UI_PATH=%cd%\ui
+
+@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=9000 net listen_port`) DO (
+ @SET ED_BIND_PORT=%%F
+)
+
+@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
+ if "%%F" EQU "True" (
+ @SET ED_BIND_IP=0.0.0.0
+ ) else (
+ @SET ED_BIND_IP=127.0.0.1
+ )
+)
+
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
-@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
-@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
-@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error
+@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@pause
diff --git a/scripts/on_sd_start.sh b/scripts/on_sd_start.sh
index 724d9a31..be5161d4 100755
--- a/scripts/on_sd_start.sh
+++ b/scripts/on_sd_start.sh
@@ -4,6 +4,7 @@ cp sd-ui-files/scripts/functions.sh scripts/
cp sd-ui-files/scripts/on_env_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
+cp sd-ui-files/scripts/get_config.py scripts/
source ./scripts/functions.sh
@@ -18,11 +19,6 @@ if [ -e "open_dev_console.sh" ]; then
rm "open_dev_console.sh"
fi
-python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
-
-# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
-# Note to self: Please rewrite this in Python. For the sake of your own sanity.
-
# set the correct installer path (current vs legacy)
if [ -e "installer_files/env" ]; then
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
@@ -44,274 +40,14 @@ fi
if [ -e "src" ]; then mv src src-old; fi
if [ -e "ldm" ]; then mv ldm ldm-old; fi
-mkdir -p "../models/stable-diffusion"
-mkdir -p "../models/gfpgan"
-mkdir -p "../models/realesrgan"
-mkdir -p "../models/vae"
-
-# migrate the legacy models to the correct path (if already downloaded)
-if [ -e "sd-v1-4.ckpt" ]; then mv sd-v1-4.ckpt ../models/stable-diffusion/; fi
-if [ -e "custom-model.ckpt" ]; then mv custom-model.ckpt ../models/stable-diffusion/; fi
-if [ -e "GFPGANv1.3.pth" ]; then mv GFPGANv1.3.pth ../models/gfpgan/; fi
-if [ -e "RealESRGAN_x4plus.pth" ]; then mv RealESRGAN_x4plus.pth ../models/realesrgan/; fi
-if [ -e "RealESRGAN_x4plus_anime_6B.pth" ]; then mv RealESRGAN_x4plus_anime_6B.pth ../models/realesrgan/; fi
-
-OS_NAME=$(uname -s)
-case "${OS_NAME}" in
- Linux*) OS_NAME="linux";;
- Darwin*) OS_NAME="macos";;
- *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
-esac
-
-# install torch and torchvision
-if python ../scripts/check_modules.py torch torchvision; then
- # temp fix for installations that installed torch 2.0 by mistake
- if [ "$OS_NAME" == "linux" ]; then
- python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 -q
- elif [ "$OS_NAME" == "macos" ]; then
- python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 -q
- fi
-
- echo "torch and torchvision have already been installed."
-else
- echo "Installing torch and torchvision.."
-
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- if [ "$OS_NAME" == "linux" ]; then
- if python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 ; then
- echo "Installed."
- else
- fail "torch install failed"
- fi
- elif [ "$OS_NAME" == "macos" ]; then
- if python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 ; then
- echo "Installed."
- else
- fail "torch install failed"
- fi
- fi
+# Download the required packages
+if ! python ../scripts/check_modules.py; then
+ read -p "Press any key to continue"
+ exit 1
fi
-# install/upgrade sdkit
-if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
- echo "sdkit is already installed."
-
- # skip sdkit upgrade if in developer-mode
- if [ ! -e "../src/sdkit" ]; then
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- python -m pip install --upgrade sdkit==1.0.60 -q
- fi
-else
- echo "Installing sdkit: https://pypi.org/project/sdkit/"
-
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- if python -m pip install sdkit==1.0.60 ; then
- echo "Installed."
- else
- fail "sdkit install failed"
- fi
-fi
-
-python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
-
-# upgrade stable-diffusion-sdkit
-python -m pip install --upgrade stable-diffusion-sdkit==2.1.4 -q
-python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
-
-# install rich
-if python ../scripts/check_modules.py rich; then
- echo "rich has already been installed."
-else
- echo "Installing rich.."
-
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- if python -m pip install rich ; then
- echo "Installed."
- else
- fail "Install failed for rich"
- fi
-fi
-
-# install ruamel
-if python ../scripts/check_modules.py ruamel.yaml; then
- echo "ruamel.yaml has already been installed."
-else
- echo "Installing ruamel.yaml.."
-
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- if python -m pip install ruamel.yaml==0.17.21 ; then
- echo "Installed."
- else
- fail "Install failed for rich"
- fi
-fi
-
-if python ../scripts/check_modules.py uvicorn fastapi ; then
- echo "Packages necessary for Easy Diffusion were already installed"
-else
- printf "\n\nDownloading packages necessary for Easy Diffusion..\n\n"
-
- export PYTHONNOUSERSITE=1
- export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
-
- if conda install -c conda-forge -y uvicorn fastapi ; then
- echo "Installed. Testing.."
- else
- fail "'conda install uvicorn' failed"
- fi
-
- if ! command -v uvicorn &> /dev/null; then
- fail "UI packages not found!"
- fi
-fi
-
-if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
- model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
-
- if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
- echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
- else
- printf "\n\nThe model file present at models/stable-diffusion/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
- rm ../models/stable-diffusion/sd-v1-4.ckpt
- fi
-fi
-
-if [ ! -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
- echo "Downloading data files (weights) for Stable Diffusion.."
-
- curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ../models/stable-diffusion/sd-v1-4.ckpt
-
- if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
- model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
- if [ ! "$model_size" == "4265380512" ]; then
- fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
- fi
- else
- fail "Error downloading the data files (weights) for Stable Diffusion"
- fi
-fi
-
-
-if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
- model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
-
- if [ "$model_size" -eq "348632874" ]; then
- echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
- else
- printf "\n\nThe model file present at models/gfpgan/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
- rm ../models/gfpgan/GFPGANv1.3.pth
- fi
-fi
-
-if [ ! -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
- echo "Downloading data files (weights) for GFPGAN (Face Correction).."
-
- curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ../models/gfpgan/GFPGANv1.3.pth
-
- if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
- model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
- if [ ! "$model_size" -eq "348632874" ]; then
- fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
- fi
- else
- fail "Error downloading the data files (weights) for GFPGAN (Face Correction)."
- fi
-fi
-
-
-if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
- model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
-
- if [ "$model_size" -eq "67040989" ]; then
- echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
- else
- printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
- rm ../models/realesrgan/RealESRGAN_x4plus.pth
- fi
-fi
-
-if [ ! -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
- echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."
-
- curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ../models/realesrgan/RealESRGAN_x4plus.pth
-
- if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
- model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
- if [ ! "$model_size" -eq "67040989" ]; then
- fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
- fi
- else
- fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus"
- fi
-fi
-
-
-if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
- model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
-
- if [ "$model_size" -eq "17938799" ]; then
- echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
- else
- printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
- rm ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
- fi
-fi
-
-if [ ! -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
- echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."
-
- curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
-
- if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
- model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
- if [ ! "$model_size" -eq "17938799" ]; then
- fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
- fi
- else
- fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime."
- fi
-fi
-
-
-if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
- model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
-
- if [ "$model_size" -eq "334695179" ]; then
- echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
- else
- printf "\n\nThe model file present at models/vae/vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
- rm ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
- fi
-fi
-
-if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
- echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).."
-
- curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
-
- if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
- model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
- if [ ! "$model_size" -eq "334695179" ]; then
- printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
- printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
- read -p "Press any key to continue"
- exit
- fi
- else
- printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
- read -p "Press any key to continue"
- exit
- fi
+if ! command -v uvicorn &> /dev/null; then
+ fail "UI packages not found!"
fi
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
@@ -332,8 +68,17 @@ python --version
cd ..
export SD_UI_PATH=`pwd`/ui
+export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
+case "$( python scripts/get_config.py --default=False net listen_to_network )" in
+ "True")
+ export ED_BIND_IP=0.0.0.0
+ ;;
+ "False")
+ export ED_BIND_IP=127.0.0.1
+ ;;
+esac
cd stable-diffusion
-uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error
+uvicorn main:server_api --app-dir "$SD_UI_PATH" --port "$ED_BIND_PORT" --host "$ED_BIND_IP" --log-level error
read -p "Press any key to continue"
diff --git a/ui/easydiffusion/app.py b/ui/easydiffusion/app.py
index 1f106335..d50ee5d1 100644
--- a/ui/easydiffusion/app.py
+++ b/ui/easydiffusion/app.py
@@ -1,20 +1,23 @@
+import json
+import logging
import os
import socket
import sys
-import json
import traceback
-import logging
import shlex
from ruamel.yaml import YAML
-yaml = YAML()
import urllib
-from rich.logging import RichHandler
-
-from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
+import warnings
from easydiffusion import task_manager
from easydiffusion.utils import log
+from rich.logging import RichHandler
+from rich.console import Console
+from rich.panel import Panel
+from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
+
+yaml = YAML()
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
@@ -58,54 +61,81 @@ APP_CONFIG_DEFAULTS = {
},
}
-IMAGE_EXTENSIONS = [".png", ".apng", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp", ".jxl", ".gif", ".webp", ".avif", ".svg"]
+IMAGE_EXTENSIONS = [
+ ".png",
+ ".apng",
+ ".jpg",
+ ".jpeg",
+ ".jfif",
+ ".pjpeg",
+ ".pjp",
+ ".jxl",
+ ".gif",
+ ".webp",
+ ".avif",
+ ".svg",
+]
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
-CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS=[".portrait", "_portrait", " portrait", "-portrait"]
-CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS=[".landscape", "_landscape", " landscape", "-landscape"]
+CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [
+ ".portrait",
+ "_portrait",
+ " portrait",
+ "-portrait",
+]
+CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [
+ ".landscape",
+ "_landscape",
+ " landscape",
+ "-landscape",
+]
+
def init():
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
+ # https://pytorch.org/docs/stable/storage.html
+ warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
+
load_server_plugins()
update_render_threads()
def getConfig(default_val=APP_CONFIG_DEFAULTS):
- config_yaml_path = os.path.join(CONFIG_DIR, 'config.yaml')
+ config_yaml_path = os.path.join(CONFIG_DIR, "config.yaml")
if os.path.isfile(config_yaml_path):
try:
- log.info('Loading config.yaml')
- with open(config_yaml_path, 'r', encoding='utf-8') as f:
+ log.info("Loading config.yaml")
+ with open(config_yaml_path, "r", encoding="utf-8") as f:
config = yaml.load(f)
- if 'net' not in config:
- config['net'] = {}
- if os.getenv('SD_UI_BIND_PORT') is not None:
- config['net']['listen_port'] = int(os.getenv('SD_UI_BIND_PORT'))
+ if "net" not in config:
+ config["net"] = {}
+ if os.getenv("SD_UI_BIND_PORT") is not None:
+ config["net"]["listen_port"] = int(os.getenv("SD_UI_BIND_PORT"))
else:
config['net']['listen_port'] = 9000
- if os.getenv('SD_UI_BIND_IP') is not None:
- config['net']['listen_to_network'] = (os.getenv('SD_UI_BIND_IP') == '0.0.0.0')
+ if os.getenv("SD_UI_BIND_IP") is not None:
+ config["net"]["listen_to_network"] = os.getenv("SD_UI_BIND_IP") == "0.0.0.0"
else:
- config['net']['listen_to_network'] = True
+ config["net"]["listen_to_network"] = True
return config
except Exception as e:
log.warn(traceback.format_exc())
return default_val
else:
try:
- config_json_path = os.path.join(CONFIG_DIR, 'config.json')
+ config_json_path = os.path.join(CONFIG_DIR, "config.json")
if not os.path.exists(config_json_path):
return default_val
else:
- log.info('Converting old json config file to yaml')
- with open(config_json_path, 'r', encoding='utf-8') as f:
+ log.info("Converting old json config file to yaml")
+ with open(config_json_path, "r", encoding="utf-8") as f:
config = json.load(f)
# Save config in new format
setConfig(config)
- os.rename(config_json_path, config_json_path + '.bak')
- log.info('Saved old config.json as config.json.bak')
+ os.rename(config_json_path, config_json_path + ".bak")
+ log.info("Saved old config.json as config.json.bak")
return getConfig(default_val)
except Exception as e:
log.warn(traceback.format_exc())
@@ -121,50 +151,6 @@ def setConfig(config):
except:
log.error(traceback.format_exc())
- try: # config.bat
- config_bat_path = os.path.join(CONFIG_DIR, "config.bat")
- config_bat = []
-
- if "update_branch" in config:
- config_bat.append(f"@set update_branch={config['update_branch']}")
-
- config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
- bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
- config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
-
- # Preserve these variables if they are set
- for var in PRESERVE_CONFIG_VARS:
- if os.getenv(var) is not None:
- config_bat.append(f"@set {var}={os.getenv(var)}")
-
- if len(config_bat) > 0:
- with open(config_bat_path, "w", encoding="utf-8") as f:
- f.write("\n".join(config_bat))
- except:
- log.error(traceback.format_exc())
-
- try: # config.sh
- config_sh_path = os.path.join(CONFIG_DIR, "config.sh")
- config_sh = ["#!/bin/bash"]
-
- if "update_branch" in config:
- config_sh.append(f"export update_branch={config['update_branch']}")
-
- config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
- bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
- config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
-
- # Preserve these variables if they are set
- for var in PRESERVE_CONFIG_VARS:
- if os.getenv(var) is not None:
- config_bat.append(f'export {var}="{shlex.quote(os.getenv(var))}"')
-
- if len(config_sh) > 1:
- with open(config_sh_path, "w", encoding="utf-8") as f:
- f.write("\n".join(config_sh))
- except:
- log.error(traceback.format_exc())
-
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
config = getConfig()
@@ -253,18 +239,56 @@ def getIPConfig():
def open_browser():
config = getConfig()
ui = config.get("ui", {})
- net = config.get("net", {"listen_port": 9000})
+ net = config.get("net", {})
port = net.get("listen_port", 9000)
+
if ui.get("open_browser_on_start", True):
import webbrowser
webbrowser.open(f"http://localhost:{port}")
+ Console().print(
+ Panel(
+ "\n"
+ + "[white]Easy Diffusion is ready to serve requests.\n\n"
+ + "A new browser tab should have been opened by now.\n"
+ + f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
+ title="Easy Diffusion is ready",
+ style="bold yellow on blue",
+ )
+ )
+
+
+def fail_and_die(fail_type: str, data: str):
+ suggestions = [
+ "Run this installer again.",
+ "If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB",
+ "If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues",
+ ]
+
+ if fail_type == "model_download":
+ fail_label = f"Error downloading the {data} model"
+ suggestions.insert(
+ 1,
+ "If that doesn't fix it, please try to download the file manually. The address to download from, and the destination to save to are printed above this message.",
+ )
+ else:
+ fail_label = "Error while installing Easy Diffusion"
+
+ msg = [f"{fail_label}. Sorry about that, please try to:"]
+ for i, suggestion in enumerate(suggestions):
+ msg.append(f"{i+1}. {suggestion}")
+ msg.append("Thanks!")
+
+ print("\n".join(msg))
+ exit(1)
+
+
def get_image_modifiers():
modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")
modifier_categories = {}
- original_category_order=[]
+ original_category_order = []
with open(modifiers_json_path, "r", encoding="utf-8") as f:
modifiers_file = json.load(f)
@@ -274,14 +298,14 @@ def get_image_modifiers():
# convert modifiers from a list of objects to a dict of dicts
for category_item in modifiers_file:
- category_name = category_item['category']
+ category_name = category_item["category"]
original_category_order.append(category_name)
category = {}
- for modifier_item in category_item['modifiers']:
+ for modifier_item in category_item["modifiers"]:
modifier = {}
- for preview_item in modifier_item['previews']:
- modifier[preview_item['name']] = preview_item['path']
- category[modifier_item['modifier']] = modifier
+ for preview_item in modifier_item["previews"]:
+ modifier[preview_item["name"]] = preview_item["path"]
+ category[modifier_item["modifier"]] = modifier
modifier_categories[category_name] = category
def scan_directory(directory_path: str, category_name="Modifiers"):
@@ -294,12 +318,27 @@ def get_image_modifiers():
modifier_name = entry.name[: -len(file_extension[0])]
modifier_path = f"custom/{entry.path[len(CUSTOM_MODIFIERS_DIR) + 1:]}"
# URL encode path segments
- modifier_path = "/".join(map(lambda segment: urllib.parse.quote(segment), modifier_path.split("/")))
+ modifier_path = "/".join(
+ map(
+ lambda segment: urllib.parse.quote(segment),
+ modifier_path.split("/"),
+ )
+ )
is_portrait = True
is_landscape = True
- portrait_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS))
- landscape_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS))
+ portrait_extension = list(
+ filter(
+ lambda e: modifier_name.lower().endswith(e),
+ CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS,
+ )
+ )
+ landscape_extension = list(
+ filter(
+ lambda e: modifier_name.lower().endswith(e),
+ CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS,
+ )
+ )
if len(portrait_extension) > 0:
is_landscape = False
@@ -307,24 +346,24 @@ def get_image_modifiers():
elif len(landscape_extension) > 0:
is_portrait = False
modifier_name = modifier_name[: -len(landscape_extension[0])]
-
- if (category_name not in modifier_categories):
+
+ if category_name not in modifier_categories:
modifier_categories[category_name] = {}
-
+
category = modifier_categories[category_name]
- if (modifier_name not in category):
+ if modifier_name not in category:
category[modifier_name] = {}
- if (is_portrait or "portrait" not in category[modifier_name]):
+ if is_portrait or "portrait" not in category[modifier_name]:
category[modifier_name]["portrait"] = modifier_path
-
- if (is_landscape or "landscape" not in category[modifier_name]):
+
+ if is_landscape or "landscape" not in category[modifier_name]:
category[modifier_name]["landscape"] = modifier_path
elif entry.is_dir():
scan_directory(
entry.path,
- entry.name if directory_path==CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
+ entry.name if directory_path == CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
)
scan_directory(CUSTOM_MODIFIERS_DIR)
@@ -337,12 +376,12 @@ def get_image_modifiers():
# convert the modifiers back into a list of objects
modifier_categories_list = []
for category_name in [*original_category_order, *custom_categories]:
- category = { 'category': category_name, 'modifiers': [] }
+ category = {"category": category_name, "modifiers": []}
for modifier_name in sorted(modifier_categories[category_name].keys(), key=str.casefold):
- modifier = { 'modifier': modifier_name, 'previews': [] }
+ modifier = {"modifier": modifier_name, "previews": []}
for preview_name, preview_path in modifier_categories[category_name][modifier_name].items():
- modifier['previews'].append({ 'name': preview_name, 'path': preview_path })
- category['modifiers'].append(modifier)
+ modifier["previews"].append({"name": preview_name, "path": preview_path})
+ category["modifiers"].append(modifier)
modifier_categories_list.append(category)
return modifier_categories_list
diff --git a/ui/easydiffusion/device_manager.py b/ui/easydiffusion/device_manager.py
index 18069a82..dc705927 100644
--- a/ui/easydiffusion/device_manager.py
+++ b/ui/easydiffusion/device_manager.py
@@ -1,9 +1,9 @@
import os
import platform
-import torch
-import traceback
import re
+import traceback
+import torch
from easydiffusion.utils import log
"""
@@ -118,7 +118,10 @@ def auto_pick_devices(currently_active_devices):
# These already-running devices probably aren't terrible, since they were picked in the past.
# Worst case, the user can restart the program and that'll get rid of them.
devices = list(
- filter((lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices), devices)
+ filter(
+ (lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices),
+ devices,
+ )
)
devices = list(map(lambda x: x["device"], devices))
return devices
@@ -162,6 +165,7 @@ def needs_to_force_full_precision(context):
and (
" 1660" in device_name
or " 1650" in device_name
+ or " 1630" in device_name
or " t400" in device_name
or " t550" in device_name
or " t600" in device_name
@@ -221,9 +225,9 @@ def is_device_compatible(device):
try:
_, mem_total = torch.cuda.mem_get_info(device)
mem_total /= float(10**9)
- if mem_total < 3.0:
+ if mem_total < 1.9:
if is_device_compatible.history.get(device) == None:
- log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
+ log.warn(f"GPU {device} with less than 2 GB of VRAM is not compatible with Stable Diffusion")
is_device_compatible.history[device] = 1
return False
except RuntimeError as e:
diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index a06c56cf..de2c10ac 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -1,13 +1,24 @@
import os
+import shutil
+from glob import glob
+import traceback
from easydiffusion import app
from easydiffusion.types import TaskData
from easydiffusion.utils import log
-
from sdkit import Context
-from sdkit.models import load_model, unload_model, scan_model
+from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db
+from sdkit.utils import hash_file_quick
-KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan", "lora"]
+KNOWN_MODEL_TYPES = [
+ "stable-diffusion",
+ "vae",
+ "hypernetwork",
+ "gfpgan",
+ "realesrgan",
+ "lora",
+ "codeformer",
+]
MODEL_EXTENSIONS = {
"stable-diffusion": [".ckpt", ".safetensors"],
"vae": [".vae.pt", ".ckpt", ".safetensors"],
@@ -15,14 +26,22 @@ MODEL_EXTENSIONS = {
"gfpgan": [".pth"],
"realesrgan": [".pth"],
"lora": [".ckpt", ".safetensors"],
+ "codeformer": [".pth"],
}
DEFAULT_MODELS = {
- "stable-diffusion": [ # needed to support the legacy installations
- "custom-model", # only one custom model file was supported initially, creatively named 'custom-model'
- "sd-v1-4", # Default fallback.
+ "stable-diffusion": [
+ {"file_name": "sd-v1-4.ckpt", "model_id": "1.4"},
+ ],
+ "gfpgan": [
+ {"file_name": "GFPGANv1.4.pth", "model_id": "1.4"},
+ ],
+ "realesrgan": [
+ {"file_name": "RealESRGAN_x4plus.pth", "model_id": "x4plus"},
+ {"file_name": "RealESRGAN_x4plus_anime_6B.pth", "model_id": "x4plus_anime_6"},
+ ],
+ "vae": [
+ {"file_name": "vae-ft-mse-840000-ema-pruned.ckpt", "model_id": "vae-ft-mse-840000-ema-pruned"},
],
- "gfpgan": ["GFPGANv1.3"],
- "realesrgan": ["RealESRGAN_x4plus"],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
@@ -31,6 +50,8 @@ known_models = {}
def init():
make_model_folders()
+ migrate_legacy_model_location() # if necessary
+ download_default_models_if_necessary()
getModels() # run this once, to cache the picklescan results
@@ -39,26 +60,42 @@ def load_default_models(context: Context):
# init default model paths
for model_type in MODELS_TO_LOAD_ON_START:
- context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
+ context.model_paths[model_type] = resolve_model_to_use(model_type=model_type, fail_if_not_found=False)
try:
- load_model(context, model_type)
+ load_model(
+ context,
+ model_type,
+ scan_model=context.model_paths[model_type] != None
+ and not context.model_paths[model_type].endswith(".safetensors"),
+ )
+ if model_type in context.model_load_errors:
+ del context.model_load_errors[model_type]
except Exception as e:
log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
- log.error(f"[red]Error: {e}[/red]")
- log.error(f"[red]Consider removing the model from the model folder.[red]")
+ if "DefaultCPUAllocator: not enough memory" in str(e):
+ log.error(
+ f"[red]Your PC is low on system RAM. Please add some virtual memory (or swap space) by following the instructions at this link: https://www.ibm.com/docs/en/opw/8.2.0?topic=tuning-optional-increasing-paging-file-size-windows-computers[/red]"
+ )
+ else:
+ log.exception(e)
+ del context.model_paths[model_type]
+
+ context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def unload_all(context: Context):
for model_type in KNOWN_MODEL_TYPES:
unload_model(context, model_type)
+ if model_type in context.model_load_errors:
+ del context.model_load_errors[model_type]
-def resolve_model_to_use(model_name: str = None, model_type: str = None):
+def resolve_model_to_use(model_name: str = None, model_type: str = None, fail_if_not_found: bool = True):
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
default_models = DEFAULT_MODELS.get(model_type, [])
config = app.getConfig()
- model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
+ model_dir = os.path.join(app.MODELS_DIR, model_type)
if not model_name: # When None try user configured model.
# config = getConfig()
if "model" in config and model_type in config["model"]:
@@ -66,42 +103,42 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
if model_name:
# Check models directory
- models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
+ model_path = os.path.join(model_dir, model_name)
+ if os.path.exists(model_path):
+ return model_path
for model_extension in model_extensions:
- if os.path.exists(models_dir_path + model_extension):
- return models_dir_path + model_extension
+ if os.path.exists(model_path + model_extension):
+ return model_path + model_extension
if os.path.exists(model_name + model_extension):
return os.path.abspath(model_name + model_extension)
- # Default locations
- if model_name in default_models:
- default_model_path = os.path.join(app.SD_DIR, model_name)
- for model_extension in model_extensions:
- if os.path.exists(default_model_path + model_extension):
- return default_model_path + model_extension
-
# Can't find requested model, check the default paths.
- for default_model in default_models:
- for model_dir in model_dirs:
- default_model_path = os.path.join(model_dir, default_model)
- for model_extension in model_extensions:
- if os.path.exists(default_model_path + model_extension):
- if model_name is not None:
- log.warn(
- f"Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}"
- )
- return default_model_path + model_extension
+ if model_type == "stable-diffusion" and not fail_if_not_found:
+ for default_model in default_models:
+ default_model_path = os.path.join(model_dir, default_model["file_name"])
+ if os.path.exists(default_model_path):
+ if model_name is not None:
+ log.warn(
+ f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
+ )
+ return default_model_path
- return None
+ if model_name and fail_if_not_found:
+ raise Exception(f"Could not find the desired model {model_name}! Is it present in the {model_dir} folder?")
def reload_models_if_necessary(context: Context, task_data: TaskData):
+ face_fix_lower = task_data.use_face_correction.lower() if task_data.use_face_correction else ""
+ upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
+
model_paths_in_req = {
"stable-diffusion": task_data.use_stable_diffusion_model,
"vae": task_data.use_vae_model,
"hypernetwork": task_data.use_hypernetwork_model,
- "gfpgan": task_data.use_face_correction,
- "realesrgan": task_data.use_upscale,
+ "codeformer": task_data.use_face_correction if "codeformer" in face_fix_lower else None,
+ "gfpgan": task_data.use_face_correction if "gfpgan" in face_fix_lower else None,
+ "realesrgan": task_data.use_upscale if "realesrgan" in upscale_lower else None,
+ "latent_upscaler": True if "latent_upscaler" in upscale_lower else None,
"nsfw_checker": True if task_data.block_nsfw else None,
"lora": task_data.use_lora_model,
}
@@ -111,14 +148,28 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
if context.model_paths.get(model_type) != path
}
- if set_vram_optimizations(context): # reload SD
+ if task_data.codeformer_upscale_faces:
+ if "realesrgan" not in models_to_reload and "realesrgan" not in context.models:
+ default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
+ models_to_reload["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
+ elif "realesrgan" in models_to_reload and models_to_reload["realesrgan"] is None:
+ del models_to_reload["realesrgan"] # don't unload realesrgan
+
+ if set_vram_optimizations(context) or set_clip_skip(context, task_data): # reload SD
models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]
for model_type, model_path_in_req in models_to_reload.items():
context.model_paths[model_type] = model_path_in_req
action_fn = unload_model if context.model_paths[model_type] is None else load_model
- action_fn(context, model_type, scan_model=False) # we've scanned them already
+ try:
+ action_fn(context, model_type, scan_model=False) # we've scanned them already
+ if model_type in context.model_load_errors:
+ del context.model_load_errors[model_type]
+ except Exception as e:
+ log.exception(e)
+ if action_fn == load_model:
+ context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def resolve_model_paths(task_data: TaskData):
@@ -130,11 +181,49 @@ def resolve_model_paths(task_data: TaskData):
task_data.use_lora_model = resolve_model_to_use(task_data.use_lora_model, model_type="lora")
if task_data.use_face_correction:
- task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
- if task_data.use_upscale:
+ if "gfpgan" in task_data.use_face_correction.lower():
+ model_type = "gfpgan"
+ elif "codeformer" in task_data.use_face_correction.lower():
+ model_type = "codeformer"
+ download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0")
+
+ task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, model_type)
+ if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")
+def fail_if_models_did_not_load(context: Context):
+ for model_type in KNOWN_MODEL_TYPES:
+ if model_type in context.model_load_errors:
+ e = context.model_load_errors[model_type]
+ raise Exception(f"Could not load the {model_type} model! Reason: " + e)
+
+
+def download_default_models_if_necessary():
+ for model_type, models in DEFAULT_MODELS.items():
+ for model in models:
+ try:
+ download_if_necessary(model_type, model["file_name"], model["model_id"])
+ except:
+ traceback.print_exc()
+ app.fail_and_die(fail_type="model_download", data=model_type)
+
+ print(model_type, "model(s) found.")
+
+
+def download_if_necessary(model_type: str, file_name: str, model_id: str):
+ model_path = os.path.join(app.MODELS_DIR, model_type, file_name)
+ expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
+
+ other_models_exist = any_model_exists(model_type)
+ known_model_exists = os.path.exists(model_path)
+ known_model_is_corrupt = known_model_exists and hash_file_quick(model_path) != expected_hash
+
+ if known_model_is_corrupt or (not other_models_exist and not known_model_exists):
+ print("> download", model_type, model_id)
+ download_model(model_type, model_id, download_base_dir=app.MODELS_DIR)
+
+
def set_vram_optimizations(context: Context):
config = app.getConfig()
vram_usage_level = config.get("vram_usage_level", "balanced")
@@ -146,6 +235,36 @@ def set_vram_optimizations(context: Context):
return False
+def migrate_legacy_model_location():
+ 'Move the models inside the legacy "stable-diffusion" folder, to their respective folders'
+
+ for model_type, models in DEFAULT_MODELS.items():
+ for model in models:
+ file_name = model["file_name"]
+ legacy_path = os.path.join(app.SD_DIR, file_name)
+ if os.path.exists(legacy_path):
+ shutil.move(legacy_path, os.path.join(app.MODELS_DIR, model_type, file_name))
+
+
+def any_model_exists(model_type: str) -> bool:
+ extensions = MODEL_EXTENSIONS.get(model_type, [])
+ for ext in extensions:
+ if any(glob(f"{app.MODELS_DIR}/{model_type}/**/*{ext}", recursive=True)):
+ return True
+
+ return False
+
+
+def set_clip_skip(context: Context, task_data: TaskData):
+ clip_skip = task_data.clip_skip
+
+ if clip_skip != context.clip_skip:
+ context.clip_skip = clip_skip
+ return True
+
+ return False
+
+
def make_model_folders():
for model_type in KNOWN_MODEL_TYPES:
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
@@ -167,13 +286,23 @@ def is_malicious_model(file_path):
if scan_result.issues_count > 0 or scan_result.infected_files > 0:
log.warn(
":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]"
- % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
+ % (
+ file_path,
+ scan_result.scanned_files,
+ scan_result.issues_count,
+ scan_result.infected_files,
+ )
)
return True
else:
log.debug(
"Scan %s: [green]%d scanned, %d issue, %d infected.[/green]"
- % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
+ % (
+ file_path,
+ scan_result.scanned_files,
+ scan_result.issues_count,
+ scan_result.infected_files,
+ )
)
return False
except Exception as e:
@@ -183,17 +312,12 @@ def is_malicious_model(file_path):
def getModels():
models = {
- "active": {
- "stable-diffusion": "sd-v1-4",
- "vae": "",
- "hypernetwork": "",
- "lora": "",
- },
"options": {
"stable-diffusion": ["sd-v1-4"],
"vae": [],
"hypernetwork": [],
"lora": [],
+ "codeformer": ["codeformer"],
},
}
@@ -201,13 +325,13 @@ def getModels():
class MaliciousModelException(Exception):
"Raised when picklescan reports a problem with a model"
- pass
def scan_directory(directory, suffixes, directoriesFirst: bool = True):
nonlocal models_scanned
tree = []
for entry in sorted(
- os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower())
+ os.scandir(directory),
+ key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()),
):
if entry.is_file():
matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
@@ -243,6 +367,7 @@ def getModels():
except MaliciousModelException as e:
models["scan-error"] = e
+ log.info(f"[green]Scanning all model folders for models...[/]")
# custom models
listModels(model_type="stable-diffusion")
listModels(model_type="vae")
@@ -253,9 +378,4 @@ def getModels():
if models_scanned > 0:
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")
- # legacy
- custom_weight_path = os.path.join(app.SD_DIR, "custom-model.ckpt")
- if os.path.exists(custom_weight_path):
- models["options"]["stable-diffusion"].append("custom-model")
-
return models
diff --git a/ui/easydiffusion/renderer.py b/ui/easydiffusion/renderer.py
index 8270d232..a57dfc6c 100644
--- a/ui/easydiffusion/renderer.py
+++ b/ui/easydiffusion/renderer.py
@@ -1,16 +1,26 @@
-import queue
-import time
import json
import pprint
+import queue
+import time
from easydiffusion import device_manager
-from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
-from easydiffusion.utils import get_printable_request, save_images_to_disk, log
-
+from easydiffusion.types import GenerateImageRequest
+from easydiffusion.types import Image as ResponseImage
+from easydiffusion.types import Response, TaskData, UserInitiatedStop
+from easydiffusion.model_manager import DEFAULT_MODELS, resolve_model_to_use
+from easydiffusion.utils import get_printable_request, log, save_images_to_disk
from sdkit import Context
-from sdkit.generate import generate_images
from sdkit.filter import apply_filters
-from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, diffusers_latent_samples_to_images
+from sdkit.generate import generate_images
+from sdkit.models import load_model
+from sdkit.utils import (
+ diffusers_latent_samples_to_images,
+ gc,
+ img_to_base64_str,
+ img_to_buffer,
+ latent_samples_to_images,
+ get_device_usage,
+)
context = Context() # thread-local
"""
@@ -25,24 +35,39 @@ def init(device):
context.stop_processing = False
context.temp_images = {}
context.partial_x_samples = None
+ context.model_load_errors = {}
+ context.enable_codeformer = True
from easydiffusion import app
app_config = app.getConfig()
- context.test_diffusers = app_config.get("test_diffusers", False)
+ context.test_diffusers = (
+ app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main"
+ )
+
+ log.info("Device usage during initialization:")
+ get_device_usage(device, log_info=True, process_usage_only=False)
device_manager.device_init(context, device)
def make_images(
- req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ data_queue: queue.Queue,
+ task_temp_images: list,
+ step_callback,
):
context.stop_processing = False
print_task_info(req, task_data)
images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
- res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
+ res = Response(
+ req,
+ task_data,
+ images=construct_response(images, seeds, task_data, base_seed=req.seed),
+ )
res = res.json()
data_queue.put(json.dumps(res))
log.info("Task completed")
@@ -51,16 +76,19 @@ def make_images(
def print_task_info(req: GenerateImageRequest, task_data: TaskData):
- req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
+ req_str = pprint.pformat(get_printable_request(req, task_data)).replace("[", "\[")
task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
log.info(f"request: {req_str}")
log.info(f"task data: {task_str}")
def make_images_internal(
- req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ data_queue: queue.Queue,
+ task_temp_images: list,
+ step_callback,
):
-
images, user_stopped = generate_images_internal(
req,
task_data,
@@ -70,7 +98,8 @@ def make_images_internal(
task_data.stream_image_progress,
task_data.stream_image_progress_interval,
)
- filtered_images = filter_images(task_data, images, user_stopped)
+ gc(context)
+ filtered_images = filter_images(req, task_data, images, user_stopped)
if task_data.save_to_disk_path is not None:
save_images_to_disk(images, filtered_images, req, task_data)
@@ -126,28 +155,66 @@ def generate_images_internal(
return images, user_stopped
-def filter_images(task_data: TaskData, images: list, user_stopped):
+def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list, user_stopped):
if user_stopped:
return images
- filters_to_apply = []
if task_data.block_nsfw:
- filters_to_apply.append("nsfw_checker")
- if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
- filters_to_apply.append("gfpgan")
- if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
- filters_to_apply.append("realesrgan")
+ images = apply_filters(context, "nsfw_checker", images)
- if len(filters_to_apply) == 0:
- return images
+ if task_data.use_face_correction and "codeformer" in task_data.use_face_correction.lower():
+ default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
+ prev_realesrgan_path = None
+ if task_data.codeformer_upscale_faces and default_realesrgan not in context.model_paths["realesrgan"]:
+ prev_realesrgan_path = context.model_paths["realesrgan"]
+ context.model_paths["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
+ load_model(context, "realesrgan")
- return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
+ try:
+ images = apply_filters(
+ context,
+ "codeformer",
+ images,
+ upscale_faces=task_data.codeformer_upscale_faces,
+ codeformer_fidelity=task_data.codeformer_fidelity,
+ )
+ finally:
+ if prev_realesrgan_path:
+ context.model_paths["realesrgan"] = prev_realesrgan_path
+ load_model(context, "realesrgan")
+ elif task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
+ images = apply_filters(context, "gfpgan", images)
+
+ if task_data.use_upscale:
+ if "realesrgan" in task_data.use_upscale.lower():
+ images = apply_filters(context, "realesrgan", images, scale=task_data.upscale_amount)
+ elif task_data.use_upscale == "latent_upscaler":
+ images = apply_filters(
+ context,
+ "latent_upscaler",
+ images,
+ scale=task_data.upscale_amount,
+ latent_upscaler_options={
+ "prompt": req.prompt,
+ "negative_prompt": req.negative_prompt,
+ "seed": req.seed,
+ "num_inference_steps": task_data.latent_upscaler_steps,
+ "guidance_scale": 0,
+ },
+ )
+
+ return images
def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
return [
ResponseImage(
- data=img_to_base64_str(img, task_data.output_format, task_data.output_quality, task_data.output_lossless),
+ data=img_to_base64_str(
+ img,
+ task_data.output_format,
+ task_data.output_quality,
+ task_data.output_lossless,
+ ),
seed=seed,
)
for img, seed in zip(images, seeds)
diff --git a/ui/easydiffusion/server.py b/ui/easydiffusion/server.py
index e27f9c5b..d8940bb5 100644
--- a/ui/easydiffusion/server.py
+++ b/ui/easydiffusion/server.py
@@ -2,28 +2,31 @@
Notes:
async endpoints always run on the main thread. Without they run on the thread pool.
"""
+import datetime
+import mimetypes
import os
import traceback
-import datetime
from typing import List, Union
+from easydiffusion import app, model_manager, task_manager
+from easydiffusion.types import GenerateImageRequest, MergeRequest, TaskData
+from easydiffusion.utils import log
from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel, Extra
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
-from pydantic import BaseModel
-
-from easydiffusion import app, model_manager, task_manager
-from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
-from easydiffusion.utils import log
-
-import mimetypes
+from pycloudflared import try_cloudflare
log.info(f"started in {app.SD_DIR}")
log.info(f"started at {datetime.datetime.now():%x %X}")
server_api = FastAPI()
-NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
+NOCACHE_HEADERS = {
+ "Cache-Control": "no-cache, no-store, must-revalidate",
+ "Pragma": "no-cache",
+ "Expires": "0",
+}
class NoCacheStaticFiles(StaticFiles):
@@ -44,7 +47,7 @@ class NoCacheStaticFiles(StaticFiles):
return super().is_not_modified(response_headers, request_headers)
-class SetAppConfigRequest(BaseModel):
+class SetAppConfigRequest(BaseModel, extra=Extra.allow):
update_branch: str = None
render_devices: Union[List[str], List[int], str, int] = None
model_vae: str = None
@@ -65,11 +68,17 @@ def init():
name="custom-thumbnails",
)
- server_api.mount("/media", NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")), name="media")
+ server_api.mount(
+ "/media",
+ NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")),
+ name="media",
+ )
for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
server_api.mount(
- f"/plugins/{dir_prefix}", NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}"
+ f"/plugins/{dir_prefix}",
+ NoCacheStaticFiles(directory=plugins_dir),
+ name=f"plugins-{dir_prefix}",
)
@server_api.post("/app_config")
@@ -105,6 +114,14 @@ def init():
def get_image(task_id: int, img_id: int):
return get_image_internal(task_id, img_id)
+ @server_api.post("/tunnel/cloudflare/start")
+ def start_cloudflare_tunnel(req: dict):
+ return start_cloudflare_tunnel_internal(req)
+
+ @server_api.post("/tunnel/cloudflare/stop")
+ def stop_cloudflare_tunnel(req: dict):
+ return stop_cloudflare_tunnel_internal(req)
+
@server_api.get("/")
def read_root():
return FileResponse(os.path.join(app.SD_UI_DIR, "index.html"), headers=NOCACHE_HEADERS)
@@ -136,6 +153,10 @@ def set_app_config_internal(req: SetAppConfigRequest):
config["test_diffusers"] = req.test_diffusers
+ for property, property_value in req.dict().items():
+ if property_value is not None and property not in req.__fields__:
+ config[property] = property_value
+
try:
app.setConfig(config)
@@ -199,6 +220,8 @@ def ping_internal(session_id: str = None):
session = task_manager.get_cached_session(session_id, update_ttl=True)
response["tasks"] = {id(t): t.status for t in session.tasks}
response["devices"] = task_manager.get_devices()
+ if cloudflare.address != None:
+ response["cloudflare"] = cloudflare.address
return JSONResponse(response, headers=NOCACHE_HEADERS)
@@ -242,8 +265,8 @@ def render_internal(req: dict):
def model_merge_internal(req: dict):
try:
- from sdkit.train import merge_models
from easydiffusion.utils.save_utils import filename_regex
+ from sdkit.train import merge_models
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
@@ -251,7 +274,11 @@ def model_merge_internal(req: dict):
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
mergeReq.ratio,
- os.path.join(app.MODELS_DIR, "stable-diffusion", filename_regex.sub("_", mergeReq.out_path)),
+ os.path.join(
+ app.MODELS_DIR,
+ "stable-diffusion",
+ filename_regex.sub("_", mergeReq.out_path),
+ ),
mergeReq.use_fp16,
)
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
@@ -306,3 +333,47 @@ def get_image_internal(task_id: int, img_id: int):
return StreamingResponse(img_data, media_type="image/jpeg")
except KeyError as e:
raise HTTPException(status_code=500, detail=str(e))
+
+#---- Cloudflare Tunnel ----
+class CloudflareTunnel:
+ def __init__(self):
+ config = app.getConfig()
+ self.urls = None
+ self.port = config.get("net", {}).get("listen_port")
+
+ def start(self):
+ if self.port:
+ self.urls = try_cloudflare(self.port)
+
+ def stop(self):
+ if self.urls:
+ try_cloudflare.terminate(self.port)
+ self.urls = None
+
+ @property
+ def address(self):
+ if self.urls:
+ return self.urls.tunnel
+ else:
+ return None
+
+cloudflare = CloudflareTunnel()
+
+def start_cloudflare_tunnel_internal(req: dict):
+ try:
+ cloudflare.start()
+ log.info(f"- Started cloudflare tunnel. Using address: {cloudflare.address}")
+ return JSONResponse({"address":cloudflare.address})
+ except Exception as e:
+ log.error(str(e))
+ log.error(traceback.format_exc())
+ return HTTPException(status_code=500, detail=str(e))
+
+def stop_cloudflare_tunnel_internal(req: dict):
+ try:
+ cloudflare.stop()
+ except Exception as e:
+ log.error(str(e))
+ log.error(traceback.format_exc())
+ return HTTPException(status_code=500, detail=str(e))
+
diff --git a/ui/easydiffusion/task_manager.py b/ui/easydiffusion/task_manager.py
index 31fdaa6f..a91cd9c6 100644
--- a/ui/easydiffusion/task_manager.py
+++ b/ui/easydiffusion/task_manager.py
@@ -7,16 +7,18 @@ Notes:
import json
import traceback
-TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
+TASK_TTL = 30 * 60 # seconds, Discard last session's task timeout
-import torch
-import queue, threading, time, weakref
+import queue
+import threading
+import time
+import weakref
from typing import Any, Hashable
+import torch
from easydiffusion import device_manager
-from easydiffusion.types import TaskData, GenerateImageRequest
+from easydiffusion.types import GenerateImageRequest, TaskData
from easydiffusion.utils import log
-
from sdkit.utils import gc
THREAD_NAME_PREFIX = ""
@@ -167,7 +169,7 @@ class DataCache:
raise Exception("DataCache.put" + ERR_LOCK_FAILED)
try:
self._base[key] = (self._get_ttl_time(ttl), value)
- except Exception as e:
+ except Exception:
log.error(traceback.format_exc())
return False
else:
@@ -264,7 +266,7 @@ def thread_get_next_task():
def thread_render(device):
global current_state, current_state_error
- from easydiffusion import renderer, model_manager
+ from easydiffusion import model_manager, renderer
try:
renderer.init(device)
@@ -317,6 +319,9 @@ def thread_render(device):
def step_callback():
global current_state_error
+ task_cache.keep(id(task), TASK_TTL)
+ session_cache.keep(task.task_data.session_id, TASK_TTL)
+
if (
isinstance(current_state_error, SystemExit)
or isinstance(current_state_error, StopAsyncIteration)
@@ -331,10 +336,15 @@ def thread_render(device):
current_state = ServerStates.LoadingModel
model_manager.resolve_model_paths(task.task_data)
model_manager.reload_models_if_necessary(renderer.context, task.task_data)
+ model_manager.fail_if_models_did_not_load(renderer.context)
current_state = ServerStates.Rendering
task.response = renderer.make_images(
- task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback
+ task.render_request,
+ task.task_data,
+ task.buffer_queue,
+ task.temp_images,
+ step_callback,
)
# Before looping back to the generator, mark cache as still alive.
task_cache.keep(id(task), TASK_TTL)
diff --git a/ui/easydiffusion/types.py b/ui/easydiffusion/types.py
index bbec0afa..abf8db29 100644
--- a/ui/easydiffusion/types.py
+++ b/ui/easydiffusion/types.py
@@ -1,6 +1,7 @@
-from pydantic import BaseModel
from typing import Any
+from pydantic import BaseModel
+
class GenerateImageRequest(BaseModel):
prompt: str = ""
@@ -22,6 +23,7 @@ class GenerateImageRequest(BaseModel):
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
hypernetwork_strength: float = 0
lora_alpha: float = 0
+ tiling: str = "none" # "none", "x", "y", "xy"
class TaskData(BaseModel):
@@ -31,8 +33,9 @@ class TaskData(BaseModel):
vram_usage_level: str = "balanced" # or "low" or "medium"
use_face_correction: str = None # or "GFPGANv1.3"
- use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
+ use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler"
upscale_amount: int = 4 # or 2
+ latent_upscaler_steps: int = 10
use_stable_diffusion_model: str = "sd-v1-4"
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None
@@ -47,6 +50,9 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5
+ clip_skip: bool = False
+ codeformer_upscale_faces: bool = False
+ codeformer_fidelity: float = 0.5
class MergeRequest(BaseModel):
diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py
index 6affac78..ff2906a6 100644
--- a/ui/easydiffusion/utils/save_utils.py
+++ b/ui/easydiffusion/utils/save_utils.py
@@ -1,42 +1,130 @@
import os
-import time
import re
+import time
+from datetime import datetime
+from functools import reduce
-from easydiffusion.types import TaskData, GenerateImageRequest
-
-from sdkit.utils import save_images, save_dicts
+from easydiffusion import app
+from easydiffusion.types import GenerateImageRequest, TaskData
from numpy import base_repr
+from sdkit.utils import save_dicts, save_images
filename_regex = re.compile("[^a-zA-Z0-9._-]")
+img_number_regex = re.compile("([0-9]{5,})")
# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
"prompt": "Prompt",
+ "negative_prompt": "Negative Prompt",
+ "seed": "Seed",
+ "use_stable_diffusion_model": "Stable Diffusion model",
+ "clip_skip": "Clip Skip",
+ "use_vae_model": "VAE model",
+ "sampler_name": "Sampler",
"width": "Width",
"height": "Height",
- "seed": "Seed",
"num_inference_steps": "Steps",
"guidance_scale": "Guidance Scale",
"prompt_strength": "Prompt Strength",
+ "use_lora_model": "LoRA model",
+ "lora_alpha": "LoRA Strength",
+ "use_hypernetwork_model": "Hypernetwork model",
+ "hypernetwork_strength": "Hypernetwork Strength",
+ "tiling": "Seamless Tiling",
"use_face_correction": "Use Face Correction",
"use_upscale": "Use Upscaling",
"upscale_amount": "Upscale By",
- "sampler_name": "Sampler",
- "negative_prompt": "Negative Prompt",
- "use_stable_diffusion_model": "Stable Diffusion model",
- "use_vae_model": "VAE model",
- "use_hypernetwork_model": "Hypernetwork model",
- "hypernetwork_strength": "Hypernetwork Strength",
- "use_lora_model": "LoRA model",
- # "lora_alpha": "LoRA Strength",
+ "latent_upscaler_steps": "Latent Upscaler Steps"
}
+time_placeholders = {
+ "$yyyy": "%Y",
+ "$MM": "%m",
+ "$dd": "%d",
+ "$HH": "%H",
+ "$mm": "%M",
+ "$ss": "%S",
+}
+
+other_placeholders = {
+ "$id": lambda req, task_data: filename_regex.sub("_", task_data.session_id),
+ "$p": lambda req, task_data: filename_regex.sub("_", req.prompt)[:50],
+ "$s": lambda req, task_data: str(req.seed),
+}
+
+
+class ImageNumber:
+ _factory = None
+ _evaluated = False
+
+ def __init__(self, factory):
+ self._factory = factory
+ self._evaluated = None
+
+ def __call__(self) -> int:
+ if self._evaluated is None:
+ self._evaluated = self._factory()
+ return self._evaluated
+
+
+def format_placeholders(format: str, req: GenerateImageRequest, task_data: TaskData, now=None):
+ if now is None:
+ now = time.time()
+
+ for placeholder, time_format in time_placeholders.items():
+ if placeholder in format:
+ format = format.replace(placeholder, datetime.fromtimestamp(now).strftime(time_format))
+ for placeholder, replace_func in other_placeholders.items():
+ if placeholder in format:
+ format = format.replace(placeholder, replace_func(req, task_data))
+
+ return format
+
+
+def format_folder_name(format: str, req: GenerateImageRequest, task_data: TaskData):
+ format = format_placeholders(format, req, task_data)
+ return filename_regex.sub("_", format)
+
+
+def format_file_name(
+ format: str,
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ now: float,
+ batch_file_number: int,
+ folder_img_number: ImageNumber,
+):
+ format = format_placeholders(format, req, task_data, now)
+
+ if "$n" in format:
+ format = format.replace("$n", f"{folder_img_number():05}")
+
+ if "$tsb64" in format:
+ img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(
+ int(batch_file_number), 36
+ ) # Base 36 conversion, 0-9, A-Z
+ format = format.replace("$tsb64", img_id)
+
+ if "$ts" in format:
+ format = format.replace("$ts", str(int(now * 1000) + batch_file_number))
+
+ return filename_regex.sub("_", format)
+
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
now = time.time()
- save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub("_", task_data.session_id))
+ app_config = app.getConfig()
+ folder_format = app_config.get("folder_format", "$id")
+ save_dir_path = os.path.join(task_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
metadata_entries = get_metadata_entries_for_request(req, task_data)
- make_filename = make_filename_callback(req, now=now)
+ file_number = calculate_img_number(save_dir_path, task_data)
+ make_filename = make_filename_callback(
+ app_config.get("filename_format", "$p_$tsb64"),
+ req,
+ task_data,
+ file_number,
+ now=now,
+ )
if task_data.show_only_filtered_image or filtered_images is images:
save_images(
@@ -47,16 +135,25 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
- if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
- save_dicts(
- metadata_entries,
- save_dir_path,
- file_name=make_filename,
- output_format=task_data.metadata_output_format,
- file_format=task_data.output_format,
- )
+ if task_data.metadata_output_format:
+ for metadata_output_format in task_data.metadata_output_format.split(","):
+ if metadata_output_format.lower() in ["json", "txt", "embed"]:
+ save_dicts(
+ metadata_entries,
+ save_dir_path,
+ file_name=make_filename,
+ output_format=metadata_output_format,
+ file_format=task_data.output_format,
+ )
else:
- make_filter_filename = make_filename_callback(req, now=now, suffix="filtered")
+ make_filter_filename = make_filename_callback(
+ app_config.get("filename_format", "$p_$tsb64"),
+ req,
+ task_data,
+ file_number,
+ now=now,
+ suffix="filtered",
+ )
save_images(
images,
@@ -74,44 +171,23 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
- if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
- save_dicts(
- metadata_entries,
- save_dir_path,
- file_name=make_filter_filename,
- output_format=task_data.metadata_output_format,
- file_format=task_data.output_format,
- )
+ if task_data.metadata_output_format:
+ for metadata_output_format in task_data.metadata_output_format.split(","):
+ if metadata_output_format.lower() in ["json", "txt", "embed"]:
+ save_dicts(
+ metadata_entries,
+ save_dir_path,
+ file_name=make_filter_filename,
+                        output_format=metadata_output_format,
+ file_format=task_data.output_format,
+ )
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
- metadata = get_printable_request(req)
- metadata.update(
- {
- "use_stable_diffusion_model": task_data.use_stable_diffusion_model,
- "use_vae_model": task_data.use_vae_model,
- "use_hypernetwork_model": task_data.use_hypernetwork_model,
- "use_lora_model": task_data.use_lora_model,
- "use_face_correction": task_data.use_face_correction,
- "use_upscale": task_data.use_upscale,
- }
- )
- if metadata["use_upscale"] is not None:
- metadata["upscale_amount"] = task_data.upscale_amount
- if task_data.use_hypernetwork_model is None:
- del metadata["hypernetwork_strength"]
- if task_data.use_lora_model is None:
- if "lora_alpha" in metadata:
- del metadata["lora_alpha"]
-
- from easydiffusion import app
-
- app_config = app.getConfig()
- if not app_config.get("test_diffusers", False) and "use_lora_model" in metadata:
- del metadata["use_lora_model"]
+ metadata = get_printable_request(req, task_data)
# if text, format it in the text format expected by the UI
- is_txt_format = task_data.metadata_output_format.lower() == "txt"
+ is_txt_format = task_data.metadata_output_format and "txt" in task_data.metadata_output_format.lower().split(",")
if is_txt_format:
metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}
@@ -122,25 +198,101 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
return entries
-def get_printable_request(req: GenerateImageRequest):
- metadata = req.dict()
- del metadata["init_image"]
- del metadata["init_image_mask"]
- if req.init_image is None:
+def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
+ req_metadata = req.dict()
+ task_data_metadata = task_data.dict()
+
+ # Save the metadata in the order defined in TASK_TEXT_MAPPING
+ metadata = {}
+ for key in TASK_TEXT_MAPPING.keys():
+ if key in req_metadata:
+ metadata[key] = req_metadata[key]
+ elif key in task_data_metadata:
+ metadata[key] = task_data_metadata[key]
+
+ # Clean up the metadata
+ if req.init_image is None and "prompt_strength" in metadata:
del metadata["prompt_strength"]
+ if task_data.use_upscale is None and "upscale_amount" in metadata:
+ del metadata["upscale_amount"]
+ if task_data.use_hypernetwork_model is None and "hypernetwork_strength" in metadata:
+ del metadata["hypernetwork_strength"]
+ if task_data.use_lora_model is None and "lora_alpha" in metadata:
+ del metadata["lora_alpha"]
+ if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
+ del metadata["latent_upscaler_steps"]
+
+ app_config = app.getConfig()
+ if not app_config.get("test_diffusers", False):
+ for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata):
+ del metadata[key]
+
return metadata
-def make_filename_callback(req: GenerateImageRequest, suffix=None, now=None):
+def make_filename_callback(
+ filename_format: str,
+ req: GenerateImageRequest,
+ task_data: TaskData,
+ folder_img_number: int,
+ suffix=None,
+ now=None,
+):
if now is None:
now = time.time()
def make_filename(i):
- img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(int(i),36) # Base 36 conversion, 0-9, A-Z
-
- prompt_flattened = filename_regex.sub("_", req.prompt)[:50]
- name = f"{prompt_flattened}_{img_id}"
+ name = format_file_name(filename_format, req, task_data, now, i, folder_img_number)
name = name if suffix is None else f"{name}_{suffix}"
+
return name
return make_filename
+
+
+def _calculate_img_number(save_dir_path: str, task_data: TaskData):
+ def get_highest_img_number(accumulator: int, file: os.DirEntry) -> int:
+        if not file.is_file():
+ return accumulator
+
+ if len(list(filter(lambda e: file.name.endswith(e), app.IMAGE_EXTENSIONS))) == 0:
+ return accumulator
+
+ get_highest_img_number.number_of_images = get_highest_img_number.number_of_images + 1
+
+ number_match = img_number_regex.match(file.name)
+ if not number_match:
+ return accumulator
+
+ file_number = number_match.group().lstrip("0")
+
+ # Handle 00000
+ return int(file_number) if file_number else 0
+
+ get_highest_img_number.number_of_images = 0
+
+ highest_file_number = -1
+
+ if os.path.isdir(save_dir_path):
+ existing_files = list(os.scandir(save_dir_path))
+ highest_file_number = reduce(get_highest_img_number, existing_files, -1)
+
+ calculated_img_number = max(highest_file_number, get_highest_img_number.number_of_images - 1)
+
+ if task_data.session_id in _calculate_img_number.session_img_numbers:
+ calculated_img_number = max(
+ _calculate_img_number.session_img_numbers[task_data.session_id],
+ calculated_img_number,
+ )
+
+ calculated_img_number = calculated_img_number + 1
+
+ _calculate_img_number.session_img_numbers[task_data.session_id] = calculated_img_number
+ return calculated_img_number
+
+
+_calculate_img_number.session_img_numbers = {}
+
+
+def calculate_img_number(save_dir_path: str, task_data: TaskData):
+ return ImageNumber(lambda: _calculate_img_number(save_dir_path, task_data))
diff --git a/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142 b/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142
deleted file mode 100644
index 2c19f666..00000000
--- a/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142
+++ /dev/null
@@ -1,171 +0,0 @@
-{
- "_name_or_path": "clip-vit-large-patch14/",
- "architectures": [
- "CLIPModel"
- ],
- "initializer_factor": 1.0,
- "logit_scale_init_value": 2.6592,
- "model_type": "clip",
- "projection_dim": 768,
- "text_config": {
- "_name_or_path": "",
- "add_cross_attention": false,
- "architectures": null,
- "attention_dropout": 0.0,
- "bad_words_ids": null,
- "bos_token_id": 0,
- "chunk_size_feed_forward": 0,
- "cross_attention_hidden_size": null,
- "decoder_start_token_id": null,
- "diversity_penalty": 0.0,
- "do_sample": false,
- "dropout": 0.0,
- "early_stopping": false,
- "encoder_no_repeat_ngram_size": 0,
- "eos_token_id": 2,
- "finetuning_task": null,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "hidden_act": "quick_gelu",
- "hidden_size": 768,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
- "initializer_factor": 1.0,
- "initializer_range": 0.02,
- "intermediate_size": 3072,
- "is_decoder": false,
- "is_encoder_decoder": false,
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
- "layer_norm_eps": 1e-05,
- "length_penalty": 1.0,
- "max_length": 20,
- "max_position_embeddings": 77,
- "min_length": 0,
- "model_type": "clip_text_model",
- "no_repeat_ngram_size": 0,
- "num_attention_heads": 12,
- "num_beam_groups": 1,
- "num_beams": 1,
- "num_hidden_layers": 12,
- "num_return_sequences": 1,
- "output_attentions": false,
- "output_hidden_states": false,
- "output_scores": false,
- "pad_token_id": 1,
- "prefix": null,
- "problem_type": null,
- "projection_dim" : 768,
- "pruned_heads": {},
- "remove_invalid_values": false,
- "repetition_penalty": 1.0,
- "return_dict": true,
- "return_dict_in_generate": false,
- "sep_token_id": null,
- "task_specific_params": null,
- "temperature": 1.0,
- "tie_encoder_decoder": false,
- "tie_word_embeddings": true,
- "tokenizer_class": null,
- "top_k": 50,
- "top_p": 1.0,
- "torch_dtype": null,
- "torchscript": false,
- "transformers_version": "4.16.0.dev0",
- "use_bfloat16": false,
- "vocab_size": 49408
- },
- "text_config_dict": {
- "hidden_size": 768,
- "intermediate_size": 3072,
- "num_attention_heads": 12,
- "num_hidden_layers": 12,
- "projection_dim": 768
- },
- "torch_dtype": "float32",
- "transformers_version": null,
- "vision_config": {
- "_name_or_path": "",
- "add_cross_attention": false,
- "architectures": null,
- "attention_dropout": 0.0,
- "bad_words_ids": null,
- "bos_token_id": null,
- "chunk_size_feed_forward": 0,
- "cross_attention_hidden_size": null,
- "decoder_start_token_id": null,
- "diversity_penalty": 0.0,
- "do_sample": false,
- "dropout": 0.0,
- "early_stopping": false,
- "encoder_no_repeat_ngram_size": 0,
- "eos_token_id": null,
- "finetuning_task": null,
- "forced_bos_token_id": null,
- "forced_eos_token_id": null,
- "hidden_act": "quick_gelu",
- "hidden_size": 1024,
- "id2label": {
- "0": "LABEL_0",
- "1": "LABEL_1"
- },
- "image_size": 224,
- "initializer_factor": 1.0,
- "initializer_range": 0.02,
- "intermediate_size": 4096,
- "is_decoder": false,
- "is_encoder_decoder": false,
- "label2id": {
- "LABEL_0": 0,
- "LABEL_1": 1
- },
- "layer_norm_eps": 1e-05,
- "length_penalty": 1.0,
- "max_length": 20,
- "min_length": 0,
- "model_type": "clip_vision_model",
- "no_repeat_ngram_size": 0,
- "num_attention_heads": 16,
- "num_beam_groups": 1,
- "num_beams": 1,
- "num_hidden_layers": 24,
- "num_return_sequences": 1,
- "output_attentions": false,
- "output_hidden_states": false,
- "output_scores": false,
- "pad_token_id": null,
- "patch_size": 14,
- "prefix": null,
- "problem_type": null,
- "projection_dim" : 768,
- "pruned_heads": {},
- "remove_invalid_values": false,
- "repetition_penalty": 1.0,
- "return_dict": true,
- "return_dict_in_generate": false,
- "sep_token_id": null,
- "task_specific_params": null,
- "temperature": 1.0,
- "tie_encoder_decoder": false,
- "tie_word_embeddings": true,
- "tokenizer_class": null,
- "top_k": 50,
- "top_p": 1.0,
- "torch_dtype": null,
- "torchscript": false,
- "transformers_version": "4.16.0.dev0",
- "use_bfloat16": false
- },
- "vision_config_dict": {
- "hidden_size": 1024,
- "intermediate_size": 4096,
- "num_attention_heads": 16,
- "num_hidden_layers": 24,
- "patch_size": 14,
- "projection_dim": 768
- }
-}
diff --git a/ui/index.html b/ui/index.html
index 0bf90733..77337505 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -16,6 +16,7 @@
+
@@ -30,7 +31,7 @@
+ Type a prompt and press the "Make Image" button.
You can set an "Initial Image" if you want to guide the AI.
+ You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
+ and selecting the desired modifiers.
+ Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.
Enjoy! :)
+
@@ -326,22 +367,22 @@
-
- Type a prompt and press the "Make Image" button.
You can set an "Initial Image" if you want to guide the AI.
- You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
- and selecting the desired modifiers.
- Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.
The Diffusers Tech Preview allows early access to the new features based on Diffusers.
+
This is under active development, and is missing a few features. It is experimental! Please report any bugs to the #beta channel in our Discord server!
+
New upcoming features in our new engine
+
+
LORA support - Place LORA files in the models/lora folder.
no limit to the length of prompts (i.e. long prompts are supported)
+
Use + and - to increase/decrease the weight. E.g. apple, apple+, apple++, apple+++,
+ or apple-, apple-- for different weights.
+
Use exact weights - 0.0 to 1.0 reduces the weight, 1.0 to 2.0 increases the weight.
+ Think of it like a multiplier, like 1.5x or 0.5x: E.g. (apple)0.8 falling from a tree,
+ (apple)1.5 falling from a tree, (apple falling)1.4 from a tree
+
You can group tokens together using parentheses/round-brackets. E.g. (apple falling)++
+ from a tree. Nested parentheses are supported.
+
+ This clarifies a few things:
+
+
colon (:) is NOT used for blending. Neither is it used for weights. It has no impact and
+ will be considered a part of the prompt.
+
(()) and [] do not affect the prompt's weights.
+
+
+
More choices for img2img samplers
+
Support for official inpainting models
+
Generate images that tile seamlessly
+
Clip Skip support allows to skip the last CLIP layer (recommended by some LORA models)
+
New samplers: DDPM and DEIS
+
Memory optimizations that allow the use of 2GB GPUs
+
+
Known issues
+
+
Some LoRA consistently fail to load in EasyDiffusion
+
Some LoRA are far more sensitive to alpha (compared to a11)
+
Hangs sometimes on "compel is ready", while making the token.
The time remaining in browser differs from the one in the console
+
+
+
@@ -442,6 +530,16 @@
Set your custom modifiers (one per line)
Tip: You can include special characters like {} () [] and |. You can also put multiple comma-separated phrases in a single line, to make a single modifier that combines all of those.
'
+ logError(
+ "Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed. Please check the error message in the command-line window.",
+ event,
+ outputMsg
+ )
+ } else if (typeof event?.response === "object") {
+ let msg = "Stable Diffusion had an error reading the response:
"
logError(msg, event, outputMsg)
}
break
}
}
- if ('update' in event) {
+ if ("update" in event) {
const stepUpdate = event.update
- if (!('step' in stepUpdate)) {
+ if (!("step" in stepUpdate)) {
return
}
// task.instances can be a mix of different tasks with uneven number of steps (Render Vs Filter Tasks)
- const overallStepCount = task.instances.reduce(
- (sum, instance) => sum + (instance.isPending ? Math.max(0, instance.step || stepUpdate.step) / (instance.total_steps || stepUpdate.total_steps) : 1),
- 0 // Initial value
- ) * stepUpdate.total_steps // Scale to current number of steps.
+ const overallStepCount =
+ task.instances.reduce(
+ (sum, instance) =>
+ sum +
+ (instance.isPending
+ ? Math.max(0, instance.step || stepUpdate.step) /
+ (instance.total_steps || stepUpdate.total_steps)
+ : 1),
+ 0 // Initial value
+ ) * stepUpdate.total_steps // Scale to current number of steps.
const totalSteps = task.instances.reduce(
(sum, instance) => sum + (instance.total_steps || stepUpdate.total_steps),
stepUpdate.total_steps * (batchCount - task.batchesDone) // Initial value at (unstarted task count * Nbr of steps)
@@ -711,9 +819,9 @@ function getTaskUpdater(task, reqBody, outputContainer) {
const timeTaken = stepUpdate.step_time // sec
const stepsRemaining = Math.max(0, totalSteps - overallStepCount)
- const timeRemaining = (timeTaken < 0 ? '' : millisecondsToStr(stepsRemaining * timeTaken * 1000))
+ const timeRemaining = timeTaken < 0 ? "" : millisecondsToStr(stepsRemaining * timeTaken * 1000)
outputMsg.innerHTML = `Batch ${task.batchesDone} of ${batchCount}. Generating image(s): ${percent}%. Time remaining (approx): ${timeRemaining}`
- outputMsg.style.display = 'block'
+ outputMsg.style.display = "block"
progressBarInner.style.width = `${percent}%`
if (stepUpdate.output) {
@@ -729,8 +837,8 @@ function abortTask(task) {
}
task.isProcessing = false
task.progressBar.classList.remove("active")
- task['taskStatusLabel'].style.display = 'none'
- task['stopTask'].innerHTML = ' Remove'
+ task["taskStatusLabel"].style.display = "none"
+ task["stopTask"].innerHTML = ' Remove'
if (!task.instances?.some((r) => r.isPending)) {
return
}
@@ -747,30 +855,49 @@ function onTaskErrorHandler(task, reqBody, instance, reason) {
if (!task.isProcessing) {
return
}
- console.log('Render request %o, Instance: %o, Error: %s', reqBody, instance, reason)
+ console.log("Render request %o, Instance: %o, Error: %s", reqBody, instance, reason)
abortTask(task)
- const outputMsg = task['outputMsg']
- logError('Stable Diffusion had an error. Please check the logs in the command-line window.
' + reason + '
' + reason.stack + '
', task, outputMsg)
- setStatus('request', 'error', 'error')
+ const outputMsg = task["outputMsg"]
+ logError(
+ "Stable Diffusion had an error. Please check the logs in the command-line window.
" +
+ reason +
+ "
" +
+ reason.stack +
+ "
",
+ task,
+ outputMsg
+ )
+ setStatus("request", "error", "error")
}
function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
- if (typeof stepUpdate === 'object') {
- if (stepUpdate.status === 'succeeded') {
+ if (typeof stepUpdate === "object") {
+ if (stepUpdate.status === "succeeded") {
showImages(reqBody, stepUpdate, outputContainer, false)
} else {
task.isProcessing = false
- const outputMsg = task['outputMsg']
- let msg = ''
- if ('detail' in stepUpdate && typeof stepUpdate.detail === 'string' && stepUpdate.detail.length > 0) {
+ const outputMsg = task["outputMsg"]
+ let msg = ""
+ if ("detail" in stepUpdate && typeof stepUpdate.detail === "string" && stepUpdate.detail.length > 0) {
msg = stepUpdate.detail
- if (msg.toLowerCase().includes('out of memory')) {
+ if (msg.toLowerCase().includes("out of memory")) {
msg += `
Suggestions:
1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.
2. Try picking a lower level in the 'GPU Memory Usage' setting (in the 'Settings' tab).
3. Try generating a smaller image. `
+ } else if (msg.includes("DefaultCPUAllocator: not enough memory")) {
+ msg += `
+ Reason: Your computer is running out of system RAM!
+
+ Suggestions:
+
+ 1. Try closing unnecessary programs and browser tabs.
+ 2. If that doesn't help, please increase your computer's virtual memory by following these steps for
+ Windows or
+ Linux.
+ 3. Try restarting your computer. `
}
} else {
msg = `Unexpected Read Error:
")
+}
+
function createTask(task) {
- let taskConfig = ''
+ let taskConfig = ""
if (task.reqBody.init_image !== undefined) {
let h = 80
- let w = task.reqBody.width * h / task.reqBody.height >>0
+ let w = ((task.reqBody.width * h) / task.reqBody.height) >> 0
taskConfig += `
" +
- "Balanced: nearly as fast as High, much lower VRAM usage " +
- "High: fastest, maximum GPU memory usage" +
- "Low: slowest, recommended for GPUs with 3 to 4 GB memory",
+ note:
+ "Faster performance requires more GPU memory (VRAM)
" +
+ "Balanced: nearly as fast as High, much lower VRAM usage " +
+ "High: fastest, maximum GPU memory usage" +
+ "Low: slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [
- {value: "balanced", label: "Balanced"},
- {value: "high", label: "High"},
- {value: "low", label: "Low"}
+ { value: "balanced", label: "Balanced" },
+ { value: "high", label: "High" },
+ { value: "low", label: "Low" },
],
},
{
@@ -160,7 +179,8 @@ var PARAMETERS = [
id: "confirm_dangerous_actions",
type: ParameterType.checkbox,
label: "Confirm dangerous actions",
- note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
+ note:
+ "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
icon: "fa-check-double",
default: true,
},
@@ -168,25 +188,31 @@ var PARAMETERS = [
id: "listen_to_network",
type: ParameterType.checkbox,
label: "Make Stable Diffusion available on your network",
- note: "Other devices on your network can access this web page",
+ note: "Other devices on your network can access this web page. Please restart the program after changing this.",
icon: "fa-network-wired",
default: true,
+ saveInAppConfig: true,
+ table: networkParametersTable,
},
{
id: "listen_port",
type: ParameterType.custom,
label: "Network port",
- note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
+ note:
+ "Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
icon: "fa-anchor",
render: (parameter) => {
return ``
- }
+ },
+ saveInAppConfig: true,
+ table: networkParametersTable,
},
{
id: "use_beta_channel",
type: ParameterType.checkbox,
label: "Beta channel",
- note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
+ note:
+ "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
@@ -194,14 +220,31 @@ var PARAMETERS = [
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Test Diffusers",
- note: "Experimental! Can have bugs! Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
+ note:
+ "Experimental! Can have bugs! Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: false,
+ saveInAppConfig: true,
},
-];
+ {
+ id: "cloudflare",
+ type: ParameterType.custom,
+ label: "Cloudflare tunnel",
+ note: `Create a VPN tunnel to share your Easy Diffusion instance with your friends. This will
+ generate a web server address on the public Internet for your Easy Diffusion instance.
+
This Easy Diffusion server is available on the Internet using the
+ address:
+ Anyone knowing this address can access your server. The address of your server will change each time
+ you share a session.
+ Uses Cloudflare services.`,
+ icon: ["fa-brands", "fa-cloudflare"],
+ render: () => '',
+ table: networkParametersTable,
+ }
+]
function getParameterSettingsEntry(id) {
- let parameter = PARAMETERS.filter(p => p.id === id)
+ let parameter = PARAMETERS.filter((p) => p.id === id)
if (parameter.length === 0) {
return
}
@@ -209,63 +252,125 @@ function getParameterSettingsEntry(id) {
}
function sliderUpdate(event) {
- if (event.srcElement.id.endsWith('-input')) {
- let slider = document.getElementById(event.srcElement.id.slice(0,-6))
+ if (event.srcElement.id.endsWith("-input")) {
+ let slider = document.getElementById(event.srcElement.id.slice(0, -6))
slider.value = event.srcElement.value
slider.dispatchEvent(new Event("change"))
} else {
- let field = document.getElementById(event.srcElement.id+'-input')
+ let field = document.getElementById(event.srcElement.id + "-input")
field.value = event.srcElement.value
field.dispatchEvent(new Event("change"))
}
}
+/**
+ * @param {Parameter} parameter
+ * @returns {string | HTMLElement}
+ */
function getParameterElement(parameter) {
switch (parameter.type) {
case ParameterType.checkbox:
- var is_checked = parameter.default ? " checked" : "";
+ var is_checked = parameter.default ? " checked" : ""
return ``
case ParameterType.select:
case ParameterType.select_multiple:
- var options = (parameter.options || []).map(option => ``).join("")
- var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
+ var options = (parameter.options || [])
+ .map((option) => ``)
+ .join("")
+ var multiple = parameter.type == ParameterType.select_multiple ? "multiple" : ""
return `${options}`
case ParameterType.slider:
return ` ${parameter.slider_unit}`
case ParameterType.custom:
return parameter.render(parameter)
default:
- console.error(`Invalid type for parameter ${parameter.id}`);
+ console.error(`Invalid type ${parameter.type} for parameter ${parameter.id}`)
return "ERROR: Invalid Type"
}
}
-let parametersTable = document.querySelector("#system-settings .parameters-table")
-/* fill in the system settings popup table */
-function initParameters() {
- PARAMETERS.forEach(parameter => {
- var element = getParameterElement(parameter)
- var note = parameter.note ? `${parameter.note}` : "";
- var icon = parameter.icon ? `` : "";
- var newrow = document.createElement('div')
- newrow.innerHTML = `
-