Compare commits

...

110 Commits

Author SHA1 Message Date
91c4b5865c Pin the sdkit version during fresh installs 2023-02-21 10:40:17 +05:30
14b0dabfdf Merge pull request #904 from JeLuF/patch-16
🔥 Installer: Fix ESRGAN anime model's path
2023-02-20 18:28:31 +05:30
6a2c2152e2 🔥 Installer: Fix ESRGAN anime model's path
The size check fails on every installation. The path name of the check wasn't changed when the model was moved to the models directory.
2023-02-18 14:05:55 +01:00
51b6a2fd2a Pin the version of stable-diffusion-sdkit used, to avoid untested releases from getting used 2023-02-18 14:21:24 +05:30
f36b7ce016 Merge branch 'main' of github.com:cmdr2/stable-diffusion-ui 2023-02-16 19:22:04 +05:30
9fb5cac5d4 Bypass incorrect ERRORLEVEL values in nested code blocks by using something called delayedexpansion. Ugh 2023-02-16 19:21:51 +05:30
7ce223771d Add k40m to list of FP32 cards (#863)
https://discord.com/channels/1014774730907209781/1073819636329631754
2023-02-12 14:37:53 +05:30
ab7ba35639 Revert "sdkit message"
This reverts commit 6ab3133b33.
2023-02-10 22:54:45 +05:30
fe7e398eb4 sdkit message 2023-02-10 19:01:27 +05:30
071a4d6f37 Use a fixed sdkit version, to avoid bumping to the latest sdkit version in the main branch 2023-02-10 18:05:17 +05:30
a589d98cd4 Merge pull request #850 from JeLuF/patch-13
Link to LINUX.zip
2023-02-10 10:51:12 +05:30
14fb115fc8 Link to LINUX.zip 2023-02-09 20:25:27 +01:00
c35a731a60 Update README.md 2023-02-09 19:56:18 +05:30
69c8fc3236 Merge pull request #811 from patriceac/patch-23
Removing the ':' after the tooltip icon
2023-02-09 19:31:10 +05:30
840ff5c363 Merge branch 'main' into patch-23 2023-02-09 19:30:55 +05:30
8386cd5cf7 Merge pull request #817 from fernandoisnaldo/patch-1
Fix aarch64 (arm64) verification
2023-02-09 19:24:13 +05:30
63bf84fdd5 Merge pull request #845 from cmdr2/beta
Changelog
2023-02-09 19:01:53 +05:30
070e51fcab Changelog 2023-02-09 19:01:25 +05:30
50fd64150e Merge pull request #844 from JeLuF/patch-12
Add T500 to list of full precision cards
2023-02-09 09:56:51 +05:30
63c5de2612 Add T500 to list of full precision cards 2023-02-09 01:46:32 +01:00
026a4b6c76 Merge pull request #842 from cmdr2/beta
Don't force a user to 'low' VRAM usage, if their GPU has 4 GB or less VRAM
2023-02-08 19:46:57 +05:30
7bc95b68c8 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-02-08 19:43:31 +05:30
0332cc8cb3 Don't force the user to 'low' VRAM usage automatically, if their GPU has less than 4 GB of VRAM. We need a better way to set 'low' as the default in the UI, but the user should be able to override it if they want 2023-02-08 19:41:55 +05:30
ce192f4ad7 Merge pull request #839 from cmdr2/main
Merge from main
2023-02-08 11:44:51 +05:30
cbdb715918 Merge pull request #838 from cmdr2/beta
Beta
2023-02-08 11:19:59 +05:30
5537102fd3 changelog 2023-02-08 11:19:16 +05:30
1ea294f15c Fix broken auto-save settings. We renamed sampler to sampler_name, which causes old settings to fail 2023-02-08 11:18:28 +05:30
4c8da67bb1 Use "python -m pip" instead of "pip" (#835)
* Use "python -m pip" instead of "pip"

https://discord.com/channels/1014774730907209781/1072423234676461619

* Use "python -m" instead of "pip" (Linux=
2023-02-07 15:39:02 +05:30
43a1c3901f ED favicon (#832) 2023-02-07 11:32:55 +05:30
a4c6f28a70 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-02-07 11:32:06 +05:30
f8bca93170 ED favicon 2023-02-07 11:31:56 +05:30
19b05659b5 Update README.md 2023-02-06 23:07:11 +05:30
7e5c7ca1b7 Easy Diffusion 2.5 2023-02-06 22:50:18 +05:30
1156c159f9 Merge pull request #827 from cmdr2/beta
v2.5
2023-02-06 20:11:18 +05:30
5c6c2303ba Why does this script file keep losing exec permission? 2023-02-06 20:05:40 +05:30
a0a58bcfa8 Merge branch 'main' into beta 2023-02-06 19:42:24 +05:30
8a28b265a3 Preserve the id of the top-level tabs container, to avoid breaking plugins that rely on it 2023-02-06 19:09:39 +05:30
86dc08130b typo 2023-02-06 16:47:48 +05:30
5cd8a732c7 grammar 2023-02-06 16:29:46 +05:30
fafbbf68a4 changelog 2023-02-06 16:20:38 +05:30
0cbb553564 Follow the theme in the popup dialog box 2023-02-06 15:32:54 +05:30
f4512bb291 Color of close button 2023-02-06 15:19:10 +05:30
99205b4d03 Show an X over an image, instead of a remove button in image options 2023-02-06 15:14:47 +05:30
d48e6554d5 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-02-06 13:49:38 +05:30
d0c4e95de3 Simplify the UI of the model merge tab; Allows a user to merge a single model, or a batch of variations; Also fixes a few logging bugs in the model merge tab 2023-02-06 13:49:15 +05:30
0b3a35c4b6 Make the tabs container a class, to make it reusable for other tab groups 2023-02-06 13:48:18 +05:30
ded6a41f86 Only disable the sibling tabs when a particular tab is selected. This allows the 'tab' management code to be reused for nested tabs 2023-02-06 13:46:40 +05:30
f4063e63d3 Merge pull request #824 from JeLuF/pause2
Fix 'Pause All' function
2023-02-06 10:18:51 +05:30
23ba912db0 Fix 'Pause All' function
If 'pause all' is clicked during the last scheduled job, the 'resume all' button gets hidden when the job terminates, making it impossible to unpause the engine.
https://discord.com/channels/1014774730907209781/1014780368890630164/1071584183417323602
2023-02-05 17:33:43 +01:00
b7047dafb2 Fix aarch64 (arm64) verification 2023-02-03 16:36:49 -03:00
368967fbcf Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-02-03 21:41:23 +05:30
a9d0fc9978 changelog 2023-02-03 21:41:12 +05:30
b6f3d2ec02 Formatting 2023-02-03 21:40:08 +05:30
78e917a6fb Fix the broken 'Make Similar Images' button 2023-02-03 21:40:03 +05:30
96b45385e8 Merge pull request #803 from JeLuF/patch-10
Add T600 to list of FP only GPUs
2023-02-03 19:56:54 +05:30
db47888a75 changelog 2023-02-01 11:54:05 +05:30
51443741b8 Proactively delete the partial samples from the callbacks 2023-02-01 11:50:50 +05:30
3e7f14af2c Don't use Rich Tracebacks, can cause a memory leak. It keeps a reference to the Exception object (which in turn keeps references to any torch Tensors in the stack, preventing their garbage-collection) 2023-02-01 11:50:27 +05:30
733439da07 Fix a memory leak. Apparently the Exception object keeps references to torch Tensors in the stack, so keeping a reference to the Exception object prevents those Tensors from getting garbage-collected. 2023-02-01 11:49:18 +05:30
6bff97d6fa Removing the ':' after the tooltip icon
This colon after the tooltip icon just feels out of place.
2023-01-30 23:09:36 -08:00
efba81cb66 Add T1000, make Quadro equivalent to nvidia or geforce 2023-01-28 20:51:01 +01:00
b2cc5dcf4b Add T600 to list of FP only GPUs
https://discord.com/channels/1014774730907209781/1068948110304354314
2023-01-28 20:18:07 +01:00
fab86ddf35 changelog 2023-01-27 09:46:50 +05:30
f3a90ce02d Formatting tweaks and tip about merging similar type of models 2023-01-25 20:05:27 +05:30
4886616c48 changelog 2023-01-25 19:52:28 +05:30
dcd8121009 Revert "Temporarily disable the Merge Models UI"
This reverts commit 59adaf6225.
2023-01-25 19:51:12 +05:30
59adaf6225 Temporarily disable the Merge Models UI 2023-01-25 19:46:55 +05:30
0055cd9b2e Merge pull request #734 from JeLuF/mrguipi
Frontend of the batch merger
2023-01-25 19:39:19 +05:30
fe89d487f6 Merge pull request #733 from JeLuF/mrgui
Backend side merge API
2023-01-25 19:38:21 +05:30
495064985e Reduce VRAM usage of img2img in balanced mode, without reducing the speed of rendering 2023-01-24 18:58:15 +05:30
e12387a377 changelog 2023-01-23 21:40:50 +05:30
5d3fb9091a Reduce the VRAM usage for balanced mode, without sacrificing the rendering speed 2023-01-23 19:36:00 +05:30
e2ae2715a3 Revert "Revert "Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly""
This reverts commit 52458ae273.
2023-01-18 17:03:14 +05:30
52458ae273 Revert "Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly"
This reverts commit 42f9abdfe3.
2023-01-18 10:30:56 +05:30
9b1a9cc7c8 changelog 2023-01-17 21:34:41 +05:30
42f9abdfe3 Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly 2023-01-17 21:33:15 +05:30
0a1197055c changelog 2023-01-16 18:32:09 +05:30
649cbf07e3 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-01-16 18:30:46 +05:30
5089ac5ad1 Fix a bug where the .vae.pt extension wouldn't get picked up. Thanks Madrang, rbertus2000 and JeLuf 2023-01-16 18:30:22 +05:30
d99e3f7974 Merge pull request #776 from JeLuF/patch-8
Add NVIDIA T1200 to the list of FP GPUs
2023-01-16 18:09:06 +05:30
b5d1912c94 Add NVIDIA T1200 to the list of FP GPUs
Fixes https://discord.com/channels/1014774730907209781/1014774732018683926/1064269949339697163
2023-01-16 00:42:02 +01:00
8ee4364065 Merge pull request #768 from rbertus2000/beta
bugfix for FP GPUs
2023-01-13 17:39:49 +05:30
152aa7de09 bugfix for FP GPUs 2023-01-13 12:54:11 +01:00
85c90cbee1 Merge pull request #764 from JeLuF/patch-7
Add NVIDIA T550 to list of FP GPUs #755
2023-01-13 10:18:24 +05:30
7302927e4c Add NVIDIA T550 to list of FP GPUs #755
The Nvidia T550 needs full precision to work correctly.
2023-01-12 14:16:35 +01:00
df3d00ef94 Merge pull request #763 from patriceac/patch-18
Another fix for high res images
2023-01-12 10:23:01 +05:30
bb47835256 Another fix for high res images
This time to address the height.
2023-01-11 17:25:54 -08:00
037512ca5c Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-01-11 18:25:16 +05:30
a13713adaf Don't search for a yaml config file next to the model, since sdkit now does this automatically 2023-01-11 18:23:56 +05:30
ad073252e7 Merge pull request #762 from patriceac/patch-17
Fix the restoring of the previous nested model
2023-01-11 14:58:25 +05:30
d24a7a5c5e Fix the restoring of the last selected model 2023-01-10 19:00:19 -08:00
a671dd8e00 Fix import, remove debug output 2023-01-10 20:34:17 +01:00
8b764a8fd3 changelog 2023-01-10 21:58:29 +05:30
1da4b3d94a Not all browsers return the PerformanceEntry object on performance.measure(). Fix credit @JeLuf 2023-01-10 10:01:24 +05:30
fb0c9405cf changelog 2023-01-09 19:40:17 +05:30
a17a9044ad Check whether the browser supports performance.measure/mark before calling them. Fixes https://github.com/cmdr2/stable-diffusion-ui/pull/757 2023-01-09 19:33:23 +05:30
ac4c5003f1 also empty VAE and hypernetwork fields 2023-01-03 08:23:42 +01:00
23d5f85d17 Frontend batch merger 2022-12-30 10:13:34 +01:00
15a1436c8b Backend side merge API 2022-12-30 10:07:23 +01:00
64ced3b3f6 Tag v2.4.23, to be able to revert back incase of an emergency 2022-12-29 13:04:44 +05:30
493526c478 If downgrading to 2.4 (from 2.5), move the default models back to the legacy location 2022-12-29 13:00:57 +05:30
8cedeb349d Changes to allow rolling back from the upcoming sdkit-based system 2022-12-26 23:04:45 +05:30
72b3598687 Merge pull request #703 from JeLuF/patch-5
Bring back Linux download link
2022-12-26 17:36:43 +05:30
b1a2d36c2d Bring back Linux download link 2022-12-26 10:16:43 +01:00
e636dd3649 Merge pull request #694 from cmdr2/beta
Beta
2022-12-24 19:18:28 +05:30
b87bc033f5 Merge pull request #690 from cmdr2/beta
Update CHANGES.md
2022-12-23 11:26:42 +05:30
4e765a7948 Merge pull request #689 from cmdr2/beta
Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking Make Image and calling the server
2022-12-23 11:25:14 +05:30
6d08082693 Merge branch 'beta' 2022-12-22 13:43:50 +05:30
00597879bc Merge pull request #688 from cmdr2/beta
Update CHANGES.md
2022-12-22 13:26:15 +05:30
9d201f82f1 Merge pull request #687 from cmdr2/beta
Undo/redo buttons in the image editor, Drag handle to reorder tasks, Pause button to pause all the tasks
2022-12-22 13:23:50 +05:30
25 changed files with 637 additions and 107 deletions

View File

@ -4,7 +4,7 @@
### Major Changes
- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use 768x768 models for SD 2.1, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, 4 GB graphics cards can still only support images upto 512x512 resolution.
- **Memory optimized Stable Diffusion 2.1** - you can now use Stable Diffusion 2.1 models, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, the SD 2.0 and 2.1 models require more GPU and System RAM, as compared to the SD 1.4 and 1.5 models.
- **6 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps!
- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
- **Fast loading/unloading of VAEs** - No longer needs to reload the entire Stable Diffusion model, each time you change the VAE
@ -19,8 +19,21 @@
Our focus remains on an easy installation experience and an easy user interface, while still being pretty powerful in terms of features and speed.
### Detailed changelog
* 2.5.6 - 10 Jan 2022 - Find Stable Diffusion models in sub-folders inside `models/stable-diffusion`. This allows you to organize your models into sub-folders, instead of keeping them all in a single folder. Thanks @JeLuf.
* 2.5.5 - 9 Jan 2022 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
* 2.5.15 - 8 Feb 2023 - Allow using 'balanced' VRAM usage mode on GPUs with 4 GB or less of VRAM. This mode used to be called 'Turbo' in the previous version.
* 2.5.14 - 8 Feb 2023 - Fix broken auto-save settings. We renamed `sampler` to `sampler_name`, which caused old settings to fail.
* 2.5.14 - 6 Feb 2023 - Simplify the UI for merging models, and some other minor UI tweaks. Better error reporting if a model failed to load.
* 2.5.14 - 3 Feb 2023 - Fix the 'Make Similar Images' button, which was producing incorrect images (weren't very similar).
* 2.5.13 - 1 Feb 2023 - Fix the remaining GPU memory leaks, including a better fix (more comprehensive) for the change in 2.5.12 (27 Jan).
* 2.5.12 - 27 Jan 2023 - Fix a memory leak, which made the UI unresponsive after an out-of-memory error. The allocated memory is now freed-up after an error.
* 2.5.11 - 25 Jan 2023 - UI for Merging Models. Thanks @JeLuf. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
* 2.5.10 - 24 Jan 2023 - Reduce the VRAM usage for img2img in 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Fix a bug where img2img would produce poorer-quality images for the same settings, as compared to version 2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Reduce the VRAM usage for 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of the UI.
* 2.5.8 - 17 Jan 2023 - Fix a bug where 'Low' VRAM usage would consume a LOT of VRAM (on higher-end GPUs). Also fixed a bug that caused out-of-memory errors on SD 2.1-768 models, on 'high' VRAM usage setting.
* 2.5.7 - 16 Jan 2023 - Fix a bug where VAE files ending with .vae.pt weren't getting displayed. Thanks Madrang, rbertus2000 and JeLuf.
* 2.5.6 - 10 Jan 2023 - `Fill` tool for the Image Editor, to allow filling areas with color (or the entire image). And some bug fixes to the Image Editor. Thanks @mdiller.
* 2.5.6 - 10 Jan 2023 - Find Stable Diffusion models in sub-folders inside `models/stable-diffusion`. This allows you to organize your models into sub-folders, instead of keeping them all in a single folder. Thanks @JeLuf.
* 2.5.5 - 9 Jan 2023 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
* 2.5.4 - 29 Dec 2022 - Press Esc key on the keyboard to close the Image Editor. Thanks @patriceac.
* 2.5.4 - 29 Dec 2022 - Lots of bug fixes in the UI. Thanks @patriceac.
* 2.5.4 - 28 Dec 2022 - Full support for running tasks in parallel on multiple GPUs. Warning: 'Euler Ancestral', 'DPM2 Ancestral' and 'DPM++ 2s Ancestral' may produce slight variations in the image (if run in parallel), so we recommend using the other samplers.

View File

@ -1,19 +1,18 @@
# Stable Diffusion UI
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
# Easy Diffusion 2.5
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer.
[![Discord Server](https://img.shields.io/discord/1014774730907209781?label=Discord)](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting)
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
### New:
Experimental support for Stable Diffusion 2.0 is available in beta!
[Installation guide](#step-1-download-and-extract-the-installer) | [Troubleshooting guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) | <sub>[![Discord Server](https://img.shields.io/discord/1014774730907209781?label=Discord)](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
----
![t2i](https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/assets/stable-samples/txt2img/768/merged-0006.png)
# Step 1: Download and prepare the installer
# Step 1: Download and extract the installer
Click the download button for your operating system:
<p float="left">
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.15/stable-diffusion-ui-windows.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.15/stable-diffusion-ui-linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
</p>
## On Windows:
@ -44,9 +43,18 @@ The installer will take care of whatever is needed. If you face any problems, yo
### User experience
- **Hassle-free installation**: Does not require technical knowledge, does not require pre-installed software. Just download and run!
- **Clutter-free UI**: A friendly and simple UI, while providing a lot of powerful features.
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish.
- **Intelligent Model Detection**: Automatically figures out the YAML config file to use for the chosen model (via a models database).
- **Live Preview**: See the image as the AI is drawing it.
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
- **Multiple Prompts File**: Queue multiple prompts by entering one prompt per line, or by running a text file.
- **Save generated images to disk**: Save your images to your PC!
- **UI Themes**: Customize the program to your liking.
- **Organize your models into sub-folders**
### Image generation
- **Supports**: "*Text to Image*" and "*Image to Image*".
- **14 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`
- **In-Painting**: Specify areas of your image to paint into.
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
- **Face Correction (GFPGAN)**
@ -56,7 +64,6 @@ The installer will take care of whatever is needed. If you face any problems, yo
- **Attention/Emphasis**: () in the prompt increases the model's attention to enclosed words, and [] decreases it.
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`.
- **Prompt Matrix**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`.
- **Lots of Samplers**: ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms.
- **1-click Upscale/Face Correction**: Upscale or correct an image after it has been generated.
- **Make Similar Images**: Click to generate multiple variations of a generated image.
- **NSFW Setting**: A setting in the UI to control *NSFW content*.
@ -64,13 +71,15 @@ The installer will take care of whatever is needed. If you face any problems, yo
### Advanced features
- **Custom Models**: Use your own `.ckpt` or `.safetensors` file, by placing it inside the `models/stable-diffusion` folder!
- **Stable Diffusion 2.0 support (experimental)**: available in beta channel.
- **Stable Diffusion 2.1 support**
- **Merge Models**
- **Use custom VAE models**
- **Use pre-trained Hypernetworks**
- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Plugins), or write your own plugin to add features to the project!
### Performance and security
- **Low Memory Usage**: Creates 512x512 images with less than 4GB of GPU RAM!
- **Fast**: Creates a 512x512 image with euler_a in 5 seconds, on an NVIDIA 3060 12GB.
- **Low Memory Usage**: Create 512x512 images with less than 3 GB of GPU RAM, and 768x768 images with less than 4 GB of GPU RAM!
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
- **Multi-GPU support**: Automatically spreads your tasks across multiple GPUs (if available), for faster performance!
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
@ -78,23 +87,17 @@ The installer will take care of whatever is needed. If you face any problems, yo
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
### Usability:
- **Live Preview**: See the image as the AI is drawing it.
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish.
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
- **Multiple Prompts File**: Queue multiple prompts by entering one prompt per line, or by running a text file.
- **Save generated images to disk**: Save your images to your PC!
- **UI Themes**: Customize the program to your liking.
**(and a lot more)**
----
## Easy for new users:
![Screenshot of the initial UI](media/shot-v10-simple.jpg?raw=true)
![Screenshot of the initial UI](https://user-images.githubusercontent.com/844287/217043152-29454d15-0387-4228-b70d-9a4b84aeb8ba.png)
## Powerful features for advanced users:
![Screenshot of advanced settings](media/shot-v10.jpg?raw=true)
![Screenshot of advanced settings](https://user-images.githubusercontent.com/844287/217042588-fc53c975-bacd-4a9c-af88-37408734ade3.png)
## Live Preview
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.
@ -102,7 +105,9 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
![live-512](https://user-images.githubusercontent.com/844287/192097249-729a0a1e-a677-485e-9ccc-16a9e848fabe.gif)
## Task Queue
![Screenshot of task queue](media/task-queue-v1.jpg?raw=true)
![Screenshot of task queue](https://user-images.githubusercontent.com/844287/217043984-0b35f73b-1318-47cb-9eed-a2a91b430490.png)
# System Requirements
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.

View File

@ -1,4 +1,5 @@
@echo off
setlocal enabledelayedexpansion
@rem This script will install git and conda (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@ -28,10 +29,10 @@ if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
)
call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
if "!ERRORLEVEL!" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
if "!ERRORLEVEL!" EQU "0" set umamba_exists=T
@rem (if necessary) install git and conda into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@ -42,7 +43,7 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" (
mkdir "%MAMBA_ROOT_PREFIX%"
call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
if "%ERRORLEVEL%" NEQ "0" (
if "!ERRORLEVEL!" NEQ "0" (
echo "There was a problem downloading micromamba. Cannot continue."
pause
exit /b

View File

@ -21,6 +21,7 @@ OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) OS_ARCH="64";;
arm64*) OS_ARCH="arm64";;
aarch64*) OS_ARCH="arm64";;
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac
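
For reference, the same architecture mapping expressed as a small Python sketch (the `ARCH_MAP` dict and the error handling are illustrative, not part of the installer):

```python
import platform

# Linux reports "aarch64" for 64-bit ARM, while macOS reports "arm64";
# the installer now maps both to the same "arm64" download.
ARCH_MAP = {"x86_64": "64", "arm64": "arm64", "aarch64": "arm64"}

machine = platform.machine()
os_arch = ARCH_MAP.get(machine)
if os_arch is None:
    raise SystemExit(f"Unknown system architecture: {machine}! This script runs only on x86_64 or arm64")
```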

View File

@ -72,13 +72,15 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 || (
call python -m pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 || (
echo "Error installing torch. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
)
)
set PATH=C:\Windows\System32;%PATH%
@rem install/upgrade sdkit
call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan
if "%ERRORLEVEL%" EQU "0" (
@ -90,7 +92,7 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call pip install --upgrade sdkit -q || (
call python -m pip install --upgrade sdkit==1.0.35 -q || (
echo "Error updating sdkit"
)
)
@ -101,7 +103,7 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call pip install sdkit || (
call python -m pip install sdkit==1.0.35 || (
echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
@ -111,7 +113,7 @@ if "%ERRORLEVEL%" EQU "0" (
call python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
@rem upgrade stable-diffusion-sdkit
call pip install --upgrade stable-diffusion-sdkit -q || (
call python -m pip install --upgrade stable-diffusion-sdkit==2.1.1 -q || (
echo "Error updating stable-diffusion-sdkit"
)
call python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
@ -126,7 +128,7 @@ if "%ERRORLEVEL%" EQU "0" (
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call pip install rich || (
call python -m pip install rich || (
echo "Error installing rich. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
@ -277,7 +279,7 @@ call WHERE uvicorn > .tmp
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
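
The recurring change in this script (and in the Linux script below) replaces bare `pip` calls with `python -m pip`, which ties the install to the interpreter that will later import the packages, and pins `sdkit==1.0.35` / `stable-diffusion-sdkit==2.1.1`. A minimal Python sketch of the same idea, using `sys.executable` purely for illustration:

```python
import subprocess
import sys

# "python -m pip" guarantees the package lands in this interpreter's
# environment, even if a different "pip" happens to be first on the PATH.
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "sdkit==1.0.35"])
```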

View File

@ -64,7 +64,7 @@ else
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 ; then
if python -m pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 ; then
echo "Installed."
else
fail "torch install failed"
@ -80,7 +80,7 @@ if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
pip install --upgrade sdkit -q
python -m pip install --upgrade sdkit==1.0.35 -q
fi
else
echo "Installing sdkit: https://pypi.org/project/sdkit/"
@ -88,7 +88,7 @@ else
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if pip install sdkit ; then
if python -m pip install sdkit==1.0.35 ; then
echo "Installed."
else
fail "sdkit install failed"
@ -98,7 +98,7 @@ fi
python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
# upgrade stable-diffusion-sdkit
pip install --upgrade stable-diffusion-sdkit -q
python -m pip install --upgrade stable-diffusion-sdkit==2.1.1 -q
python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
# install rich
@ -110,7 +110,7 @@ else
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if pip install rich ; then
if python -m pip install rich ; then
echo "Installed."
else
fail "Install failed for rich"

View File

@ -20,7 +20,7 @@ logging.basicConfig(
level=logging.INFO,
format=LOG_FORMAT,
datefmt="%X",
handlers=[RichHandler(markup=True, rich_tracebacks=True, show_time=False, show_level=False)]
handlers=[RichHandler(markup=True, rich_tracebacks=False, show_time=False, show_level=False)],
)
SD_DIR = os.getcwd()

View File

@ -125,7 +125,7 @@ def needs_to_force_full_precision(context):
return True
device_name = context.device_name.lower()
return (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name or ' t400' in device_name)) or ('Quadro T2000' in device_name)
return (('nvidia' in device_name or 'geforce' in device_name or 'quadro' in device_name) and (' 1660' in device_name or ' 1650' in device_name or ' t400' in device_name or ' t500' in device_name or ' t550' in device_name or ' t600' in device_name or ' t1000' in device_name or ' t1200' in device_name or ' t2000' in device_name)) or ('tesla k40m' in device_name)
def get_max_vram_usage_level(device):
if device != 'cpu':
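
The updated condition is a single long boolean; a hedged rewrite of the same check, purely for readability (the brand and card lists are copied from the diff, and `needs_full_precision` is an illustrative name):

```python
FULL_PRECISION_BRANDS = ('nvidia', 'geforce', 'quadro')
FULL_PRECISION_CARDS = (' 1660', ' 1650', ' t400', ' t500', ' t550',
                        ' t600', ' t1000', ' t1200', ' t2000')

def needs_full_precision(device_name: str) -> bool:
    # Same logic as the new return statement: certain NVIDIA/GeForce/Quadro
    # cards, plus the Tesla K40m, must run in full (fp32) precision.
    name = device_name.lower()
    if 'tesla k40m' in name:
        return True
    return (any(brand in name for brand in FULL_PRECISION_BRANDS)
            and any(card in name for card in FULL_PRECISION_CARDS))
```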

View File

@ -24,11 +24,6 @@ DEFAULT_MODELS = {
'gfpgan': ['GFPGANv1.3'],
'realesrgan': ['RealESRGAN_x4plus'],
}
VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
'high': {},
}
MODELS_TO_LOAD_ON_START = ['stable-diffusion', 'vae', 'hypernetwork']
known_models = {}
@ -43,13 +38,12 @@ def load_default_models(context: Context):
# init default model paths
for model_type in MODELS_TO_LOAD_ON_START:
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
set_model_config_path(context, model_type)
try:
load_model(context, model_type)
except Exception as e:
log.error(f'[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]')
log.error(f'[red]Error: {e}[/red]')
log.error(f'[red]Consider to remove the model from the model folder.[red]')
log.error(f'[red]Consider removing the model from the model folder.[/red]')
def unload_all(context: Context):
@ -110,24 +104,10 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
for model_type, model_path_in_req in models_to_reload.items():
context.model_paths[model_type] = model_path_in_req
set_model_config_path(context, model_type)
action_fn = unload_model if context.model_paths[model_type] is None else load_model
action_fn(context, model_type, scan_model=False) # we've scanned them already
def set_model_config_path(context: Context, model_type: str):
if model_type != 'stable-diffusion':
return
context.model_configs['stable-diffusion'] = None # reset this, to avoid loading the last config
# look for a yaml file next to the model, otherwise let sdkit match it to a known model
model_path = context.model_paths['stable-diffusion']
file_path, _ = os.path.splitext(model_path)
config_path = file_path + '.yaml'
if os.path.exists(config_path):
context.model_configs['stable-diffusion'] = config_path
def resolve_model_paths(task_data: TaskData):
task_data.use_stable_diffusion_model = resolve_model_to_use(task_data.use_stable_diffusion_model, model_type='stable-diffusion')
task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type='vae')
@ -138,20 +118,10 @@ def resolve_model_paths(task_data: TaskData):
def set_vram_optimizations(context: Context):
config = app.getConfig()
max_usage_level = device_manager.get_max_vram_usage_level(context.device)
vram_usage_level = config.get('vram_usage_level', 'balanced')
v = {'low': 0, 'balanced': 1, 'high': 2}
if v[vram_usage_level] > v[max_usage_level]:
log.error(f'Requested GPU Memory Usage level ({vram_usage_level}) is higher than what is ' + \
f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
vram_usage_level = max_usage_level
vram_optimizations = VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS[vram_usage_level]
if vram_optimizations != context.vram_optimizations:
context.vram_optimizations = vram_optimizations
if vram_usage_level != context.vram_usage_level:
context.vram_usage_level = vram_usage_level
return True
return False
@ -205,7 +175,11 @@ def getModels():
nonlocal models_scanned
tree = []
for entry in os.scandir(directory):
if entry.is_file() and True in [entry.name.endswith(s) for s in suffixes]:
if entry.is_file():
matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
if len(matching_suffix) == 0: continue
matching_suffix = matching_suffix[0]
mtime = entry.stat().st_mtime
mod_time = known_models[entry.path] if entry.path in known_models else -1
if mod_time != mtime:
@ -213,7 +187,7 @@ def getModels():
if is_malicious_model(entry.path):
raise MaliciousModelException(entry.path)
known_models[entry.path] = mtime
tree.append(entry.name.rsplit('.',1)[0])
tree.append(entry.name[:-len(matching_suffix)])
elif entry.is_dir():
scan=scan_directory(entry.path, suffixes)
if len(scan) != 0:
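
The change above stops using `rsplit('.', 1)` and instead strips whichever known suffix matched, which is what lets multi-part extensions like `.vae.pt` produce a clean display name. A small sketch of the difference (the file name is hypothetical):

```python
suffixes = ['.ckpt', '.safetensors', '.vae.pt']
name = 'orangemix.vae.pt'   # hypothetical model file name

# Old behaviour: split on the last dot only, leaving a stray '.vae'
old_display = name.rsplit('.', 1)[0]                           # -> 'orangemix.vae'

# New behaviour: strip the full matching suffix
matching = [s for s in suffixes if name.endswith(s)]
new_display = name[:-len(matching[0])] if matching else name   # -> 'orangemix'
```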

View File

@ -65,6 +65,8 @@ def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, dat
callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress)
try:
if req.init_image is not None: req.sampler_name = 'ddim'
images = generate_images(context, callback=callback, **req.dict())
user_stopped = False
except UserInitiatedStop:
@ -72,9 +74,10 @@ def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, dat
user_stopped = True
if context.partial_x_samples is not None:
images = latent_samples_to_images(context, context.partial_x_samples)
context.partial_x_samples = None
finally:
gc(context)
if hasattr(context, 'partial_x_samples') and context.partial_x_samples is not None:
del context.partial_x_samples
context.partial_x_samples = None
return images, user_stopped

View File

@ -13,7 +13,7 @@ from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
from easydiffusion import app, model_manager, task_manager
from easydiffusion.types import TaskData, GenerateImageRequest
from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
from easydiffusion.utils import log
log.info(f'started in {app.SD_DIR}')
@ -61,6 +61,11 @@ def init():
def render(req: dict):
return render_internal(req)
@server_api.post('/model/merge')
def model_merge(req: dict):
print(req)
return model_merge_internal(req)
@server_api.get('/image/stream/{task_id:int}')
def stream(task_id:int):
return stream_internal(task_id)
@ -181,6 +186,23 @@ def render_internal(req: dict):
log.error(traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
def model_merge_internal(req: dict):
try:
from sdkit.train import merge_models
from easydiffusion.utils.save_utils import filename_regex
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
merge_models(model_manager.resolve_model_to_use(mergeReq.model0,'stable-diffusion'),
model_manager.resolve_model_to_use(mergeReq.model1,'stable-diffusion'),
mergeReq.ratio,
os.path.join(app.MODELS_DIR, 'stable-diffusion', filename_regex.sub('_', mergeReq.out_path)),
mergeReq.use_fp16
)
return JSONResponse({'status':'OK'}, headers=NOCACHE_HEADERS)
except Exception as e:
log.error(traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
def stream_internal(task_id:int):
#TODO Move to WebSockets ??
task = task_manager.get_cached_task(task_id, update_ttl=True)
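
The new `/model/merge` route parses its body into the `MergeRequest` model added in `types.py` (see the diff below), so a client only needs to POST those fields. A hedged sketch of such a call, assuming the server runs on its default local port and that the `requests` package is available; the model names and output name are placeholders:

```python
import requests

payload = {
    "model0": "sd-v1-4",            # placeholder model names
    "model1": "my-custom-model",
    "ratio": 0.5,                   # merge ratio, passed straight to sdkit's merge_models
    "out_path": "my-merged-model",  # sanitized and saved under models/stable-diffusion
    "use_fp16": True,
}
response = requests.post("http://localhost:9000/model/merge", json=payload)
print(response.json())              # {'status': 'OK'} on success
```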

View File

@ -17,6 +17,8 @@ from easydiffusion import device_manager
from easydiffusion.types import TaskData, GenerateImageRequest
from easydiffusion.utils import log
from sdkit.utils import gc
THREAD_NAME_PREFIX = ''
ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
@ -287,13 +289,12 @@ def thread_render(device):
task_cache.keep(id(task), TASK_TTL)
session_cache.keep(task.task_data.session_id, TASK_TTL)
except Exception as e:
task.error = e
task.error = str(e)
task.response = {"status": 'failed', "detail": str(task.error)}
task.buffer_queue.put(json.dumps(task.response))
log.error(traceback.format_exc())
continue
finally:
# Task completed
gc(renderer.context)
task.lock.release()
task_cache.keep(id(task), TASK_TTL)
session_cache.keep(task.task_data.session_id, TASK_TTL)
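
Storing `str(e)` instead of the exception object is the heart of the memory-leak fix noted in the changelog: an exception keeps its traceback, the traceback keeps every stack frame, and those frames keep their locals (including large torch tensors) alive. A minimal sketch of the mechanism, with a tiny probe object standing in for a GPU tensor:

```python
import gc

class Probe:
    """Stands in for a large torch tensor; counts live instances."""
    alive = 0
    def __init__(self): Probe.alive += 1
    def __del__(self): Probe.alive -= 1

def render():
    big_buffer = Probe()               # local that would hold GPU memory
    raise RuntimeError("out of memory")

saved_error = None
try:
    render()
except RuntimeError as e:
    saved_error = e                    # old behaviour: keep the Exception object

gc.collect()
print(Probe.alive)                     # 1 -- the traceback's frame still holds big_buffer

saved_error = str(saved_error)         # new behaviour: keep only the message
gc.collect()
print(Probe.alive)                     # 0 -- the frame, and the buffer, can now be freed
```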

View File

@ -41,6 +41,13 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
class MergeRequest(BaseModel):
model0: str = None
model1: str = None
ratio: float = None
out_path: str = "mix"
use_fp16 = True
class Image:
data: str # base64
seed: int

View File

@ -7,7 +7,7 @@ from easydiffusion.types import TaskData, GenerateImageRequest
from sdkit.utils import save_images, save_dicts
filename_regex = re.compile('[^a-zA-Z0-9]')
filename_regex = re.compile('[^a-zA-Z0-9._-]')
# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
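
Widening the character class means dots, underscores and hyphens in user-chosen names now survive sanitization instead of being flattened to underscores. A quick before/after sketch (the sample name is made up):

```python
import re

old_regex = re.compile('[^a-zA-Z0-9]')
new_regex = re.compile('[^a-zA-Z0-9._-]')

name = 'my-merge_v1.5 (test)'        # hypothetical output file name
print(old_regex.sub('_', name))      # my_merge_v1_5__test_
print(new_regex.sub('_', name))      # my-merge_v1.5__test_
```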

View File

@ -6,6 +6,7 @@
<meta name="theme-color" content="#673AB6">
<link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
<link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
<link rel="stylesheet" href="/media/css/jquery-confirm.min.css">
<link rel="stylesheet" href="/media/css/fonts.css">
<link rel="stylesheet" href="/media/css/themes.css">
<link rel="stylesheet" href="/media/css/main.css">
@ -13,7 +14,6 @@
<link rel="stylesheet" href="/media/css/modifier-thumbnails.css">
<link rel="stylesheet" href="/media/css/fontawesome-all.min.css">
<link rel="stylesheet" href="/media/css/image-editor.css">
<link rel="stylesheet" href="/media/css/jquery-confirm.min.css">
<link rel="manifest" href="/media/manifest.webmanifest">
<script src="/media/js/jquery-3.6.1.min.js"></script>
<script src="/media/js/jquery-confirm.min.js"></script>
@ -25,14 +25,14 @@
<div id="logo">
<h1>
Easy Diffusion
<small>v2.5.6 <span id="updateBranchLabel"></span></small>
<small>v2.5.15 <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
<div id="server-status-color"></div>
<span id="server-status-msg">Stable Diffusion is starting..</span>
</div>
<div id="tab-container">
<div id="tab-container" class="tab-container">
<span id="tab-main" class="tab active">
<span><i class="fa fa-image icon"></i> Generate</span>
</span>
@ -97,7 +97,7 @@
</div>
<div id="editor-inputs-tags-container" class="row">
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, right-click to temporarily disable it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, right-click to temporarily disable it, use Ctrl+Mouse Wheel to adjust its weight</span></i></label>
<div id="editor-inputs-tags-list"></div>
</div>

View File

@ -107,6 +107,7 @@ code {
.imgContainer {
display: flex;
justify-content: flex-end;
position: relative;
}
.imgItemInfo {
padding-bottom: 0.5em;
@ -114,16 +115,29 @@ code {
align-items: flex-end;
flex-direction: column;
position: absolute;
padding: 5px;
padding-right: 5pt;
padding-top: 6pt;
opacity: 0;
transition: 0.1s all;
}
.imgPreviewItemClearBtn {
opacity: 0;
}
.imgPreviewItemClearBtn:hover {
background: rgb(177, 27, 0);
}
.imgContainer:hover > .imgItemInfo {
opacity: 1;
}
.imgContainer:hover > .imgPreviewItemClearBtn {
opacity: 1;
}
.imgItemInfo * {
margin-bottom: 7px;
}
.imgItem .image_clear_btn {
transform: translate(40%, -50%);
}
#container {
min-height: 100vh;
width: 100%;
@ -254,6 +268,7 @@ img {
div.img-preview img {
width:100%;
height: 100%;
max-height: 70vh;
}
.line-separator {
background: var(--background-color3);
@ -1028,7 +1043,7 @@ input::file-selector-button {
}
/* TABS */
#tab-container {
.tab-container {
display: flex;
align-items: flex-end;
}
@ -1181,3 +1196,10 @@ body.wait-pause {
50% { border: solid 12px var(--background-color1); }
100% { border: solid 12px var(--accent-color); }
}
.jconfirm.jconfirm-modern .jconfirm-box div.jconfirm-title-c {
color: var(--button-text-color);
}
.jconfirm.jconfirm-modern .jconfirm-box {
background-color: var(--background-color1);
}

Binary file not shown. Before: 466 B · After: 1.4 KiB

Binary file not shown. Before: 973 B · After: 3.2 KiB

Binary file not shown. After: 329 KiB

View File

@ -262,10 +262,12 @@ function tryLoadOldSettings() {
var saved_settings = JSON.parse(saved_settings_text)
Object.keys(saved_settings.should_save).forEach(key => {
key = key in old_map ? old_map[key] : key
if (!(key in SETTINGS)) return
SETTINGS[key].ignore = !saved_settings.should_save[key]
});
Object.keys(saved_settings.values).forEach(key => {
key = key in old_map ? old_map[key] : key
if (!(key in SETTINGS)) return
var setting = SETTINGS[key]
if (!setting.ignore) {
setting.value = saved_settings.values[key]

View File

@ -718,7 +718,7 @@
"height": 'number',
"seed": 'number',
"sampler": 'string',
"sampler_name": 'string',
"use_stable_diffusion_model": 'string',
"num_inference_steps": 'number',
"guidance_scale": 'number',
@ -731,7 +731,7 @@
"output_quality": 'number',
}
const TASK_DEFAULTS = {
"sampler": "plms",
"sampler_name": "plms",
"use_stable_diffusion_model": "sd-v1-4",
"num_inference_steps": 50,
"guidance_scale": 7.5,
@ -835,11 +835,10 @@
* @memberof Task
*/
async post(timeout=-1) {
if (typeof performance == "object" && performance.mark && performance.measure) {
performance.mark('make-render-request')
if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
}
performance.mark('make-render-request')
if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
performance.measure('diff', 'click-makeImage', 'make-render-request')
console.log('delay between clicking and making the server request:', performance.getEntriesByName('diff', 'measure')[0].duration + ' ms')
}
let jsonResponse = await super.post('/render', timeout)

View File

@ -263,6 +263,7 @@ function showImages(reqBody, res, outputContainer, livePreview) {
<div class="imgItemInfo">
<span class="imgSeedLabel"></span>
</div>
<button class="imgPreviewItemClearBtn image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
</div>
`
outputContainer.appendChild(imageItemElem)
@ -275,6 +276,11 @@ function showImages(reqBody, res, outputContainer, livePreview) {
imageElem.setAttribute('data-steps', imageInferenceSteps)
imageElem.setAttribute('data-guidance', imageGuidanceScale)
const imageRemoveBtn = imageItemElem.querySelector('.imgPreviewItemClearBtn')
imageRemoveBtn.addEventListener('click', (e) => {
console.log(e)
shiftOrConfirm(e, "Remove the image from the results?", () => { imageItemElem.style.display = 'none' })
})
const imageInfo = imageItemElem.querySelector('.imgItemInfo')
imageInfo.style.visibility = (livePreview ? 'hidden' : 'visible')
@ -288,7 +294,6 @@ function showImages(reqBody, res, outputContainer, livePreview) {
imageSeedLabel.innerText = 'Seed: ' + req.seed
let buttons = [
{ text: 'Remove', on_click: onRemoveClick, class: 'secondaryButton' },
{ text: 'Use as Input', on_click: onUseAsInputClick },
{ text: 'Download', on_click: onDownloadImageClick },
{ text: 'Make Similar Images', on_click: onMakeSimilarClick },
@ -325,10 +330,6 @@ function showImages(reqBody, res, outputContainer, livePreview) {
})
}
function onRemoveClick(req, img, event) {
shiftOrConfirm(event, "Remove the image from the results?", () => { findClosestAncestor(img, '.imgItem').style.display='none' })
}
function onUseAsInputClick(req, img) {
const imgData = img.src
@ -658,7 +659,7 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
task.progressBar.classList.remove("active")
setStatus('request', 'done', 'success')
} else {
task.outputMsg.innerText += `Task ended after ${time}`
task.outputMsg.innerText += `. Task ended after ${time}`
}
if (randomSeedField.checked) {
@ -673,6 +674,9 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
return
}
if (pauseClient) {
resumeBtn.click()
}
renderButtons.style.display = 'none'
renameMakeImageButton()
@ -1313,7 +1317,7 @@ async function getModels() {
modelOption.value = path + modelName
modelOption.innerHTML = modelName !== '' ? (path != "" ? "&nbsp;&nbsp;"+modelName : modelName) : 'None'
if (modelName === selectedModel) {
if (path + modelName === selectedModel) {
modelOption.selected = true
}
modelField.appendChild(modelOption)
@ -1445,7 +1449,7 @@ function selectTab(tab_id) {
let tabInfo = tabElements.find(t => t.tab.id == tab_id)
if (!tabInfo.tab.classList.contains("active")) {
tabElements.forEach(info => {
if (info.tab.classList.contains("active")) {
if (info.tab.classList.contains("active") && info.tab.parentNode === tabInfo.tab.parentNode) {
info.tab.classList.toggle("active")
info.content.classList.toggle("active")
}
@ -1466,6 +1470,9 @@ function linkTabContents(tab) {
tab.addEventListener("click", event => selectTab(tab.id))
}
function isTabActive(tab) {
return tab.classList.contains("active")
}
let pauseClient = false

View File

@ -101,7 +101,7 @@ var PARAMETERS = [
note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
"<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
"<b>High:</b> fastest, maximum GPU memory usage</br>" +
"<b>Low:</b> slowest, force-used for GPUs with 3 to 4 GB memory",
"<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [

View File

@ -0,0 +1,471 @@
(function() {
"use strict"
///////////////////// Function section
function smoothstep(x) {
return x * x * (3 - 2 * x)
}
function smootherstep(x) {
return x * x * x * (x * (x * 6 - 15) + 10)
}
function smootheststep(x) {
let y = -20 * Math.pow(x, 7)
y += 70 * Math.pow(x, 6)
y -= 84 * Math.pow(x, 5)
y += 35 * Math.pow(x, 4)
return y
}
function getCurrentTime() {
const now = new Date();
let hours = now.getHours();
let minutes = now.getMinutes();
let seconds = now.getSeconds();
hours = hours < 10 ? `0${hours}` : hours;
minutes = minutes < 10 ? `0${minutes}` : minutes;
seconds = seconds < 10 ? `0${seconds}` : seconds;
return `${hours}:${minutes}:${seconds}`;
}
function addLogMessage(message) {
const logContainer = document.getElementById('merge-log');
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`;
// Scroll to the bottom of the log
logContainer.scrollTop = logContainer.scrollHeight;
document.querySelector('#merge-log-container').style.display = 'block'
}
function addLogSeparator() {
const logContainer = document.getElementById('merge-log');
logContainer.innerHTML += '<hr>'
logContainer.scrollTop = logContainer.scrollHeight;
}
function drawDiagram(fn) {
const SIZE = 300
const canvas = document.getElementById('merge-canvas');
canvas.height = canvas.width = SIZE
const ctx = canvas.getContext('2d');
// Draw coordinate system
ctx.scale(1, -1);
ctx.translate(0, -canvas.height);
ctx.lineWidth = 1;
ctx.beginPath();
ctx.strokeStyle = 'white'
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([1,2])
const n = SIZE / 10
for (let i=n; i<SIZE; i+=n) {
ctx.moveTo(0,i)
ctx.lineTo(SIZE,i)
ctx.moveTo(i,0)
ctx.lineTo(i,SIZE)
}
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([])
ctx.beginPath();
ctx.strokeStyle = 'black'
ctx.lineWidth = 3;
// Plot function
const numSamples = 20;
for (let i = 0; i <= numSamples; i++) {
const x = i / numSamples;
const y = fn(x);
const canvasX = x * SIZE;
const canvasY = y * SIZE;
if (i === 0) {
ctx.moveTo(canvasX, canvasY);
} else {
ctx.lineTo(canvasX, canvasY);
}
}
ctx.stroke()
// Plot alpha values (yellow boxes)
let start = parseFloat( document.querySelector('#merge-start').value )
let step = parseFloat( document.querySelector('#merge-step').value )
let iterations = document.querySelector('#merge-count').value>>0
ctx.beginPath()
ctx.fillStyle = "yellow"
for (let i=0; i< iterations; i++) {
const alpha = ( start + i * step ) / 100
const x = alpha*SIZE
const y = fn(alpha) * SIZE
if (x <= SIZE) {
ctx.rect(x-3,y-3,6,6)
ctx.fill()
} else {
ctx.strokeStyle = 'red'
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
ctx.stroke()
addLogMessage('<i>Warning: maximum ratio is &#8805; 100%</i>')
}
}
}
function updateChart() {
let fn = (x) => x
switch (document.querySelector('#merge-interpolation').value) {
case 'SmoothStep':
fn = smoothstep
break
case 'SmootherStep':
fn = smootherstep
break
case 'SmoothestStep':
fn = smootheststep
break
}
drawDiagram(fn)
}
/////////////////////// Tab implementation
document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
<span id="tab-merge" class="tab">
<span><i class="fa fa-code-merge icon"></i> Merge models <small>(beta)</small></span>
</span>
`)
document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
<div id="tab-content-merge" class="tab-content">
<div id="merge" class="tab-content-inner">
Loading..
</div>
</div>
`)
const tabMerge = document.querySelector('#tab-merge')
if (tabMerge) {
linkTabContents(tabMerge)
}
const merge = document.querySelector('#merge')
if (!merge) {
// merge tab not found, dont exec plugin code.
return
}
document.querySelector('body').insertAdjacentHTML('beforeend', `
<style>
#tab-content-merge .tab-content-inner {
max-width: 100%;
padding: 10pt;
}
.merge-container {
margin-left: 15%;
margin-right: 15%;
text-align: left;
display: inline-grid;
grid-template-columns: 1fr 1fr;
grid-template-rows: auto auto auto;
gap: 0px 0px;
grid-auto-flow: row;
grid-template-areas:
"merge-input merge-config"
"merge-buttons merge-buttons";
}
.merge-container p {
margin-top: 3pt;
margin-bottom: 3pt;
}
.merge-config .tab-content {
background: var(--background-color1);
border-radius: 3pt;
}
.merge-config .tab-content-inner {
text-align: left;
}
.merge-input {
grid-area: merge-input;
padding-left:1em;
}
.merge-config {
grid-area: merge-config;
padding:1em;
}
.merge-config input {
margin-bottom: 3px;
}
.merge-config select {
margin-bottom: 3px;
}
.merge-buttons {
grid-area: merge-buttons;
padding:1em;
text-align: center;
}
#merge-button {
padding: 8px;
width:20em;
}
div#merge-log {
height:150px;
overflow-x:hidden;
overflow-y:scroll;
background:var(--background-color1);
border-radius: 3pt;
}
div#merge-log i {
color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
font-family: monospace;
}
.disabled {
background: var(--background-color4);
color: var(--text-color);
}
#merge-type-tabs {
border-bottom: 1px solid black;
}
#merge-log-container {
display: none;
}
.merge-container #merge-warning {
color: rgb(153, 153, 153);
}
</style>
`)
merge.innerHTML = `
<div class="merge-container panel-box">
<div class="merge-input">
<p><label for="#mergeModelA">Select Model A:</label></p>
<select id="mergeModelA">
<option>A</option>
</select>
<p><label for="#mergeModelB">Select Model B:</label></p>
<select id="mergeModelB">
<option>A</option>
</select>
<br/><br/>
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
<br/>
<table>
<tr>
<td><label for="#merge-filename">Output file name:</label></td>
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
</tr>
<tr>
<td><label for="#merge-fp">Output precision:</label></td>
<td><select id="merge-fp">
<option value="fp16">fp16 (smaller file size)</option>
<option value="fp32">fp32 (larger file size)</option>
</select>
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
</td>
</tr>
<tr>
<td><label for="#merge-format">Output file format:</label></td>
<td><select id="merge-format">
<option value="safetensors">Safetensors (recommended)</option>
<option value="ckpt">CKPT/Pickle (legacy format)</option>
</select>
</td>
</tr>
</table>
<br/>
<div id="merge-log-container">
<p><label for="#merge-log">Log messages:</label></p>
<div id="merge-log"></div>
</div>
</div>
<div class="merge-config">
<div class="tab-container">
<span id="tab-merge-opts-single" class="tab active">
<span>Make a single file</span>
</span>
<span id="tab-merge-opts-batch" class="tab">
<span>Make multiple variations</span>
</span>
</div>
<div>
<div id="tab-content-merge-opts-single" class="tab-content active">
<div class="tab-content-inner">
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
<label for="#single-merge-ratio-slider">Merge ratio:</label>
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
<input id="single-merge-ratio" size=2 value="5">%
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
</div>
</div>
<div id="tab-content-merge-opts-batch" class="tab-content">
<div class="tab-content-inner">
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
<table>
<tr><td><label for="#merge-count">Number of variations:</label></td>
<td> <input id="merge-count" size=2 value="5"></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
<td> <input id="merge-start" size=2 value="5">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
<tr><td><label for="#merge-step">Increment each step:</label></td>
<td> <input id="merge-step" size=2 value="10">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
<td> <select id="merge-interpolation">
<option>Exact</option>
<option>SmoothStep</option>
<option>SmootherStep</option>
<option>SmoothestStep</option>
</select></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
</table>
<br/>
<small>Preview of variation ratios:</small><br/>
<canvas id="merge-canvas" width="400" height="400"></canvas>
</div>
</div>
</div>
</div>
<div class="merge-buttons">
<button id="merge-button" class="primaryButton">Merge models</button>
</div>
</div>`
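// Wire the single-file / batch sub-tabs inside the merge panel to their content panes.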
const tabSettingsSingle = document.querySelector('#tab-merge-opts-single')
const tabSettingsBatch = document.querySelector('#tab-merge-opts-batch')
linkTabContents(tabSettingsSingle)
linkTabContents(tabSettingsBatch)
/////////////////////// Event Listener
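// When the Merge tab is activated, clone the main Stable Diffusion model dropdown into
// both merge selects (so they list the installed models) and redraw the ratio preview chart.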
document.addEventListener('tabClick', (e) => {
if (e.detail.name == 'merge') {
console.log('Activate')
let modelList = stableDiffusionModelField.cloneNode(true)
modelList.id = "mergeModelA"
document.querySelector("#mergeModelA").replaceWith(modelList)
modelList = stableDiffusionModelField.cloneNode(true)
modelList.id = "mergeModelB"
document.querySelector("#mergeModelB").replaceWith(modelList)
updateChart()
}
})
// Keep the merge-ratio slider (range 1-1000, i.e. 0.1%-100%) and the percentage text field in sync
const singleMergeRatioField = document.querySelector('#single-merge-ratio')
const singleMergeRatioSlider = document.querySelector('#single-merge-ratio-slider')
function updateSingleMergeRatio() {
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
singleMergeRatioField.dispatchEvent(new Event("change"))
}
function updateSingleMergeRatioSlider() {
if (singleMergeRatioField.value < 0) {
singleMergeRatioField.value = 0
} else if (singleMergeRatioField.value > 100) {
singleMergeRatioField.value = 100
}
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
singleMergeRatioSlider.dispatchEvent(new Event("change"))
}
singleMergeRatioSlider.addEventListener('input', updateSingleMergeRatio)
singleMergeRatioField.addEventListener('input', updateSingleMergeRatioSlider)
updateSingleMergeRatio()
document.querySelector('.merge-config').addEventListener('change', updateChart)
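// Main merge handler: validate the inputs, disable the button while merging, then send
// one POST request per variation to the backend's /model/merge route.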
document.querySelector('#merge-button').addEventListener('click', async function(e) {
// Build request template
let model0 = document.querySelector('#mergeModelA').value
let model1 = document.querySelector('#mergeModelB').value
let request = { model0: model0, model1: model1 }
request['use_fp16'] = document.querySelector('#merge-fp').value == 'fp16'
let iterations = document.querySelector('#merge-count').value>>0
let start = parseFloat( document.querySelector('#merge-start').value )
let step = parseFloat( document.querySelector('#merge-step').value )
if (isTabActive(tabSettingsSingle)) {
start = parseFloat(singleMergeRatioField.value)
step = 0
iterations = 1
addLogMessage(`merge ratio = ${start}%`)
} else {
addLogMessage(`start = ${start}%`)
addLogMessage(`step = ${step}%`)
}
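// Sanity check: the largest ratio in the batch (start + (iterations-1) * step) must stay
// below 100%, since a 100% ratio would simply reproduce model A.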
if (start + (iterations-1) * step >= 100) {
addLogMessage('<i>Aborting: the largest merge ratio would be &#8805; 100%</i>')
addLogMessage('Reduce the number of variations or the step size')
addLogSeparator()
document.querySelector('#merge-count').focus()
return
}
if (document.querySelector('#merge-filename').value == "") {
addLogMessage('<i>Aborting: No output file name specified</i>')
addLogSeparator()
document.querySelector('#merge-filename').focus()
return
}
// Disable merge button
e.target.disabled=true
e.target.classList.add('disabled')
let cursor = $("body").css("cursor");
let label = document.querySelector('#merge-button').innerHTML
$("body").css("cursor", "progress");
document.querySelector('#merge-button').innerHTML = 'Merging models ...'
addLogMessage("Merging models")
addLogMessage("Model A: "+model0)
addLogMessage("Model B: "+model1)
// Batch main loop
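// Each iteration computes alpha (model A's share of the mix, 0..1) from the start ratio
// and step size, optionally remaps it through an easing curve, then merges one output file.
// smoothstep/smootherstep/smootheststep are helpers defined earlier in this plugin,
// assumed to be the usual polynomial easings (e.g. smoothstep(x) = 3x^2 - 2x^3).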
for (let i=0; i<iterations; i++) {
let alpha = ( start + i * step ) / 100
switch (document.querySelector('#merge-interpolation').value) {
case 'SmoothStep':
alpha = smoothstep(alpha)
break
case 'SmootherStep':
alpha = smootherstep(alpha)
break
case 'SmoothestStep':
alpha = smootheststep(alpha)
break
}
addLogMessage(`merging batch job ${i+1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
request['out_path'] = document.querySelector('#merge-filename').value
request['out_path'] += '-' + alpha.toFixed(5) + '.' + document.querySelector('#merge-format').value
addLogMessage(`&nbsp;&nbsp;filename: ${request['out_path']}`)
request['ratio'] = alpha
let res = await fetch('/model/merge', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request) })
const data = await res.json();
addLogMessage(JSON.stringify(data))
}
addLogMessage("<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder.")
addLogSeparator()
// Re-enable merge button
$("body").css("cursor", cursor);
document.querySelector('#merge-button').innerHTML = label
e.target.disabled=false
e.target.classList.remove('disabled')
// Update model list
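// Clearing the dropdowns before getModels() (defined elsewhere in the UI code) makes them
// repopulate, so the freshly merged files should appear without reloading the page.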
stableDiffusionModelField.innerHTML = ''
vaeModelField.innerHTML = ''
hypernetworkModelField.innerHTML = ''
await getModels()
})
})()

View File

@@ -9,7 +9,7 @@
}
}
-document.querySelector('#tab-container')?.insertAdjacentHTML('beforeend', `
+document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
<span id="tab-news" class="tab">
<span><i class="fa fa-bolt icon"></i> What's new?</span>
</span>