Compare commits


142 Commits

Author SHA1 Message Date
645b596eb0 Revert "Frontend of the batch merger" 2023-01-25 19:45:52 +05:30
0055cd9b2e Merge pull request #734 from JeLuF/mrguipi
Frontend of the batch merger
2023-01-25 19:39:19 +05:30
fe89d487f6 Merge pull request #733 from JeLuF/mrgui
Backend side merge API
2023-01-25 19:38:21 +05:30
495064985e Reduce VRAM usage of img2img in balanced mode, without reducing the speed of rendering 2023-01-24 18:58:15 +05:30
e12387a377 changelog 2023-01-23 21:40:50 +05:30
5d3fb9091a Reduce the VRAM usage for balanced mode, without sacrificing the rendering speed 2023-01-23 19:36:00 +05:30
e2ae2715a3 Revert "Revert "Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly""
This reverts commit 52458ae273.
2023-01-18 17:03:14 +05:30
52458ae273 Revert "Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly"
This reverts commit 42f9abdfe3.
2023-01-18 10:30:56 +05:30
9b1a9cc7c8 changelog 2023-01-17 21:34:41 +05:30
42f9abdfe3 Don't set the specific vram optimizations to use, instead use the new sdkit API for setting the vram usage level directly 2023-01-17 21:33:15 +05:30
0a1197055c changelog 2023-01-16 18:32:09 +05:30
649cbf07e3 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-01-16 18:30:46 +05:30
5089ac5ad1 Fix a bug where the .vae.pt extension wouldn't get picked up. Thanks Madrang, rbertus2000 and JeLuf 2023-01-16 18:30:22 +05:30
d99e3f7974 Merge pull request #776 from JeLuF/patch-8
Add NVIDIA T1200 to the list of FP GPUs
2023-01-16 18:09:06 +05:30
b5d1912c94 Add NVIDIA T1200 to the list of FP GPUs
Fixes https://discord.com/channels/1014774730907209781/1014774732018683926/1064269949339697163
2023-01-16 00:42:02 +01:00
8ee4364065 Merge pull request #768 from rbertus2000/beta
bugfix for FP GPUs
2023-01-13 17:39:49 +05:30
152aa7de09 bugfix for FP GPUs 2023-01-13 12:54:11 +01:00
85c90cbee1 Merge pull request #764 from JeLuF/patch-7
Add NVIDIA T550 to list of FP GPUs #755
2023-01-13 10:18:24 +05:30
7302927e4c Add NVIDIA T550 to list of FP GPUs #755
The Nvidia T550 needs full precision to work correctly.
2023-01-12 14:16:35 +01:00
df3d00ef94 Merge pull request #763 from patriceac/patch-18
Another fix for high res images
2023-01-12 10:23:01 +05:30
bb47835256 Another fix for high res images
This time to address the height.
2023-01-11 17:25:54 -08:00
037512ca5c Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-01-11 18:25:16 +05:30
a13713adaf Don't search for a yaml config file next to the model, since sdkit now does this automatically 2023-01-11 18:23:56 +05:30
ad073252e7 Merge pull request #762 from patriceac/patch-17
Fix the restoring of the previous nested model
2023-01-11 14:58:25 +05:30
d24a7a5c5e Fix the restoring of the last selected model 2023-01-10 19:00:19 -08:00
a671dd8e00 Fix import, remove debug output 2023-01-10 20:34:17 +01:00
8b764a8fd3 changelog 2023-01-10 21:58:29 +05:30
aa576e68e3 Bring back the default opacity of 0.4 for inpainting mask, even though it leads to some other bugs. It's not a good UX to have an inpainting mask with full opacity 2023-01-10 21:56:26 +05:30
ad5508a14d Fix typo 2023-01-10 21:54:31 +05:30
4fafc8aa67 Merge pull request #685 from mdiller/mdiller_bugfixes
Mdiller bugfixes
2023-01-10 21:44:40 +05:30
0aab3d0f12 Merge pull request #744 from AssassinJN/patch-2
return taskEntry.id on createTask
2023-01-10 21:41:56 +05:30
a5d88bdfcc changelog 2023-01-10 21:09:08 +05:30
5173957368 Minor refactor of save file 2023-01-10 20:13:39 +05:30
4b3e3d900d Merge pull request #745 from JeLuF/sync-fn
Synchronize .img and .txt autosave file names
2023-01-10 20:07:17 +05:30
9ea51b174a Merge branch 'beta' into sync-fn 2023-01-10 20:06:58 +05:30
80e265e547 Merge pull request #746 from JeLuF/modelload
Don't crash on unsupported models
2023-01-10 20:01:24 +05:30
c3e6e63023 Merge pull request #754 from patriceac/patch-15
Fix display of very large images
2023-01-10 20:00:00 +05:30
9b5a262d63 Merge pull request #758 from patriceac/patch-16
Fix image editor display
2023-01-10 19:56:18 +05:30
1309f1480c Tabs to spaces 2023-01-10 19:48:36 +05:30
12ba5b8096 Merge pull request #753 from JeLuF/modeldir
Recursive scanning for models
2023-01-10 19:29:27 +05:30
156c5f4792 Fix incorrect seeds returned when no filters were applied. Fixes https://github.com/cmdr2/stable-diffusion-ui/pull/748 2023-01-10 19:23:17 +05:30
18aca98e41 Fix image editor display
Fix for the cut off controls
2023-01-09 09:29:31 -08:00
a88afb0956 Add paths to the value field 2023-01-09 18:24:04 +01:00
bfa1f57930 Fix rendering of very large images
See comments for screenshots.
2023-01-09 09:21:16 -08:00
a5350eb3cc changelog 2023-01-09 19:42:06 +05:30
3ed4d792b3 Check whether the browser supports performance.measure/mark before calling them. Fixes https://github.com/cmdr2/stable-diffusion-ui/pull/757 2023-01-09 19:41:10 +05:30
73af7f5481 Use a boolean .includes() instead of a regex match() for checking string contains 2023-01-09 19:19:30 +05:30
57ead7f0c0 Merge pull request #752 from patriceac/patch-14
Fix parsing of text file tasks
2023-01-09 19:16:36 +05:30
bf490c910a changelog 2023-01-09 18:48:15 +05:30
40f806efa8 Merge pull request #742 from JeLuF/noise
Prevent flooding the log with warnings for GPU<3GB
2023-01-09 18:47:20 +05:30
226ba8b06e Bump version 2023-01-09 18:39:24 +05:30
b11aa4833d Merge pull request #724 from patriceac/img2img-settings-restoration
Img2img settings restoration
2023-01-09 18:36:32 +05:30
8d9cd0e30b Fix display of very large images 2023-01-07 15:04:07 -08:00
9532928998 Recursive scanning for models 2023-01-07 19:04:15 +01:00
420f7549a2 Fix parsing of text file tasks
parseContent(text) doesn't check that the text content being passed actually describes a task, which causes some corner-case scenarios to break (image task settings are incorrectly cleared because an empty image task is created).
2023-01-07 00:47:30 -08:00
ed64b9bfed Don't crash on unsupported models 2023-01-06 01:41:55 +01:00
5d5ebfdef6 Synchronize .img and .txt autosave file names 2023-01-04 16:51:18 +01:00
567c02bf5d return taskEntry.id on createTask
I would like to have createTask return the taskEntry.id in order to allow for watchers or callbacks to be able to reference tasks by id more easily.
2023-01-04 10:04:52 -05:00
60f7c73c8a prevent flooding the log with warnings for GPU<3GB 2023-01-04 02:45:51 +01:00
ac4c5003f1 also empty VAE and hypernetwork fields 2023-01-03 08:23:42 +01:00
23d5f85d17 Frontend batch merger 2022-12-30 10:13:34 +01:00
f75adc1e22 added fill tool and updated as requested in pull request 2022-12-30 01:07:46 -08:00
15a1436c8b Backend side merge API 2022-12-30 10:07:23 +01:00
813edec808 Removing one more unnecessary custom event 2022-12-29 09:43:12 -08:00
21e3299b7a Applying changes from latest CR
- Replaced custom event with load event
- Removed the custom event dispatch
2022-12-29 09:26:32 -08:00
f7193966fb Addressing Cmdr2's comments and more
Only triggers events when there actually was a state change. Also opportunistically removed the hardcoded delay in favor of an event-driven flow, which makes the whole thing more robust and much more reactive.
2022-12-29 01:16:44 -08:00
2d9853f1f4 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2022-12-29 13:25:25 +05:30
ced79a187d changelog 2022-12-29 13:25:13 +05:30
7832524963 Merge pull request #729 from patriceac/patch-12
ESC keyboard shortcut to close the image editor
2022-12-29 13:23:00 +05:30
58c7f3ba15 ESC keyboard shortcut to close the image editor 2022-12-28 23:50:56 -08:00
90ec8f0575 changelog 2022-12-29 13:17:26 +05:30
b86617e3af Merge pull request #720 from patriceac/restore-inactive-modifiers
Proper restoration of inactive image modifiers
2022-12-29 10:28:28 +05:30
f3db6d84fb Merge pull request #721 from patriceac/patch-8
Fix restoration of hypernetwork dropdown
2022-12-29 10:26:54 +05:30
f9b9ecf754 Merge branch 'beta' into patch-8 2022-12-29 10:26:48 +05:30
af43a92a2f Merge pull request #725 from patriceac/patch-9
Limit the size of zoomed-in source images
2022-12-29 10:18:17 +05:30
4dbdc642f9 Merge pull request #726 from patriceac/patch-10
Persist the processing order toggle across sessions
2022-12-29 10:17:24 +05:30
8f2c87ce94 Merge pull request #717 from jsuelwald/patch-1
Restore download link for Linux in beta, ...
2022-12-29 10:16:59 +05:30
5149040496 Merge pull request #727 from patriceac/patch-11
Restore the original prompt if provided
2022-12-29 10:15:22 +05:30
5b1078e0db Merge pull request #719 from patriceac/fix-duplicate-image
Fix for duplicate images
2022-12-29 10:13:51 +05:30
ae31813239 Restore the original prompt if provided
Restore the original prompt if provided... including if it's empty now that empty prompts are allowed if there are modifiers.
2022-12-28 18:52:18 -08:00
f6b3cde286 Persist the process order toggle across sessions
🤷
2022-12-28 17:50:18 -08:00
0f05f9c32c Limit the size of zoomed-in source images
If the source image has a high enough resolution it won't fit on the screen when hovering over it. This simple fix limits the max size so the user always has a chance to see the full image.
2022-12-28 17:30:59 -08:00
89170af721 Proper source image unloading 2022-12-28 17:00:38 -08:00
5fddae589b Reverting duplicate hypernetwork fix 2022-12-28 16:54:36 -08:00
19c16af5fa Fix img2img task restoration
Fix source image, mask, and color profile restoration for use settings, copy/paste, and d&d.
2022-12-28 16:43:35 -08:00
019f8f69f4 Fix restoration of hypernetwork dropdown
Fix for https://discord.com/channels/1014774730907209781/1014774732018683928/1055508538228748368
2022-12-28 15:55:59 -08:00
ad8d1f77df Proper restoration of inactive image modifiers
Inactive image modifiers (right click on image tag) are not properly restored by Use Settings and Copy/Paste settings. This PR fixes that.
2022-12-28 13:41:36 -08:00
e82a8a7f3d Fix for duplicate images
When eye correction, upscaling, and only show filtered image are ALL disabled, the UI still generates two of the same image, and increments the second's seed by 1 (although it's the same image). It doesn't perform an additional process, but the item is shown twice.
2022-12-28 12:06:36 -08:00
ad07aeb041 Restore download link for Linux in beta, ...
and make shellscripts in scripts/ executable
2022-12-28 17:52:49 +01:00
451ab7e84c Create the folders before moving to them 2022-12-28 19:40:08 +05:30
083390da83 Fix a bug where the task and req data needed to print with a backslash 2022-12-28 19:23:36 +05:30
dc6d48580b Merge pull request #715 from jsuelwald/beta
Convert [ to \[ so the logging backend...
2022-12-28 19:20:28 +05:30
27d69e2ac3 Upgrade stable-diffusion-sdkit during startup 2022-12-28 19:19:53 +05:30
91274a4df8 Move the mandatory models to the models folder, instead of the legacy location inside the stable-diffusion folder 2022-12-28 19:08:39 +05:30
6eafcdfafd Update renderer.py
Use .replace on pformat in both lines
2022-12-28 14:27:07 +01:00
5e44744ff7 Update renderer.py
Updated (replace doesn't work on sets)
2022-12-28 13:49:52 +01:00
37b293fe74 Force full precision on NVIDIA T400 2022-12-28 17:46:24 +05:30
280f0be690 Disable symlink warnings on Windows for huggingface cache 2022-12-28 16:48:12 +05:30
183bc8321c Convert [ to \[ so the logging backend...
doesn't interpret that as a colour or other command
2022-12-28 10:43:39 +01:00
a973e4d1ef version 2022-12-28 14:30:01 +05:30
eed1066967 Merge pull request #714 from patriceac/patch-7
Default to 4x in taskConfig when factor not present in task
2022-12-28 13:09:27 +05:30
2859c94fea Applying Madrang's suggestion 2022-12-27 23:36:43 -08:00
dbcce2ee5d Default to 4x in taskConfig 2022-12-27 23:27:25 -08:00
25071c238c Remove the width for better formatting (uses what Bonsi suggested in the first place) 2022-12-27 21:14:31 +05:30
9995ffb5f3 Merge pull request #711 from jsuelwald/patch-1
Update renderer.py for better readable console output
2022-12-27 21:11:44 +05:30
c867c35e45 Update renderer.py 2022-12-27 16:23:36 +01:00
6f60e88ca6 Update renderer.py for better readable console output 2022-12-27 15:41:10 +01:00
11730dcbe4 changelog 2022-12-27 17:07:43 +05:30
e155bac445 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2022-12-27 17:06:55 +05:30
15a4682665 Fix broken drag-and-drop for text files and clipboard paste 2022-12-27 17:06:46 +05:30
08675b39f7 Merge pull request #710 from patriceac/image-modifiers-events
Adding image modifier events to core plugins
2022-12-27 16:39:11 +05:30
2c7d5adb80 Adding image modifier events to core plugins
Sorry, forgot these in the first PR.
2022-12-27 02:58:46 -08:00
51c7faee3c Changelog 2022-12-27 16:23:57 +05:30
852e129f9c Support upscaling by 2x or 4x (previously only supported 4x) 2022-12-27 16:20:16 +05:30
6eb2d800fa Tweak low GPU wording 2022-12-27 14:58:08 +05:30
0a2c70595d Turbo be gone 2022-12-27 14:51:03 +05:30
f13e16af15 Disable unused config for now 2022-12-27 12:21:51 +05:30
f364958c13 Merge pull request #705 from patriceac/fix-cut-off-tooltips-display
Fix cut off tooltips display
2022-12-27 10:26:46 +05:30
e65150647d Merge pull request #708 from patriceac/patch-6
Add icon to "Process newest jobs first" setting
2022-12-27 10:25:45 +05:30
3c435b9593 Merge pull request #707 from patriceac/image-modifiers-events
Adding image modifiers events
2022-12-27 10:25:20 +05:30
871b96a450 Add icon to "Process newest jobs first" setting 2022-12-26 19:10:37 -08:00
48a3254ad2 Adding image modifiers events
Adding events to allow plugins to listen for image modifiers loaded and refreshed events respectively.
2022-12-26 12:16:36 -08:00
2c0bdd6377 Fix cut off tooltips display 2022-12-26 10:04:36 -08:00
e241ef25e5 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2022-12-26 21:00:57 +05:30
5e553dd958 Skip sdkit upgrade if in developer mode 2022-12-26 21:00:46 +05:30
19ee87d2cd Merge pull request #692 from JeLuF/remove-result
Add "Remove" button to each image's hover menu (Fixes #682)
2022-12-26 17:38:00 +05:30
33b120f6cd Merge pull request #702 from patriceac/fix-copy-to-clipboard
Fix copy image settings to clipboard
2022-12-26 16:25:44 +05:30
0bfb9d00c8 Fix copy image settings to clipboard
Regression was caused by the processing of the legacy turbo field, which I understand to now be obsolete.
2022-12-26 02:10:36 -08:00
517ddca22d Changelog 2022-12-26 13:12:56 +05:30
41c7b08418 Keep euler_a as the default 2022-12-26 11:59:44 +05:30
c7c1b5a570 changelog 2022-12-25 17:18:31 +05:30
87b6dfb1a9 Changelog 2022-12-25 17:17:10 +05:30
46c56f3706 Use a model config yaml file if placed next to the model (with the same name). This can override a known model as well 2022-12-25 17:07:00 +05:30
32bab80508 Show sdkit version during startup 2022-12-25 16:38:37 +05:30
b6f1194c93 Typo 2022-12-25 00:23:51 +05:30
206f9b97bb Merge pull request #695 from cmdr2/refactor
v2.5 - move to sdkit
2022-12-24 23:28:10 +05:30
4eae540086 Add "Remove" button to each image's hover menu 2022-12-24 01:02:38 +01:00
21108650f7 add findClosestAncestor
Function to find the closest ancestor of an element that matches the selection criterion
2022-12-24 00:58:52 +01:00
5474d1786f updated inpainter to not auto-clear itself whenever you draw etc 2022-12-21 16:35:03 -08:00
7f36473544 added a fill action 2022-12-21 16:20:07 -08:00
9d19698bf3 fixed opacity on inpainter to be 100% by default so no weird erasing 2022-12-21 16:09:56 -08:00
582b2d936f fixed theme css properties not being updated properly 2022-12-21 16:03:52 -08:00
30 changed files with 638 additions and 240 deletions

View File

@ -3,20 +3,44 @@
## v2.5
### Major Changes
- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Full support for Stable Diffusion 2.1** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names.
- **Memory optimized Stable Diffusion 2.1** - you can now use 768x768 models for SD 2.1, with the same low VRAM optimizations that we've always had for SD 1.4.
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use 768x768 models for SD 2.1, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, 4 GB graphics cards can still only support images upto 512x512 resolution.
- **6 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps!
- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
- **Fast loading/unloading of VAEs** - No longer needs to reload the entire Stable Diffusion model, each time you change the VAE
- **Database of known models** - automatically picks the right configuration for known models. E.g. we automatically detect and apply "v" parameterization (required for some SD 2.0 models), and "fp32" attention precision (required for some SD 2.1 models).
- **Color correction for img2img** - an option to preserve the color profile (histogram) of the initial image. This is especially useful if you're getting red-tinted images after inpainting/masking.
- **Three GPU Memory Usage Settings** - `High` (fastest, maximum VRAM usage), `Balanced` (default - almost as fast, significantly lower VRAM usage), `Low` (slowest, very low VRAM usage). The `Low` setting is applied automatically for GPUs with less than 4 GB of VRAM.
- **Find models in sub-folders** - This allows you to organize your models into sub-folders inside `models/stable-diffusion`, instead of keeping them all in a single folder.
- **Save metadata as JSON** - You can now save the metadata files as either text or json files (choose in the Settings tab).
- **Major rewrite of the code** - Most of the codebase has been reorganized and rewritten, to make it more manageable and easier for new developers to contribute features. We've separated our core engine into a new project called `sdkit`, which allows anyone to easily integrate Stable Diffusion (and related modules like GFPGAN etc) into their programming projects (via a simple `pip install sdkit`): https://github.com/easydiffusion/sdkit/
- **Name change** - Last, and probably the least, the UI is now called "Easy Diffusion". It indicates the focus of this project - an easy way for people to play with Stable Diffusion.
Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
### Detailed changelog
* 2.5.10 - 24 Jan 2023 - Reduce the VRAM usage for img2img in 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Fix a bug where img2img would produce poorer-quality images for the same settings, as compared to version 2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Reduce the VRAM usage for 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of the UI.
* 2.5.8 - 17 Jan 2023 - Fix a bug where 'Low' VRAM usage would consume a LOT of VRAM (on higher-end GPUs). Also fixed a bug that caused out-of-memory errors on SD 2.1-768 models, on 'high' VRAM usage setting.
* 2.5.7 - 16 Jan 2023 - Fix a bug where VAE files ending with .vae.pt weren't getting displayed. Thanks Madrang, rbertus2000 and JeLuf.
* 2.5.6 - 10 Jan 2023 - `Fill` tool for the Image Editor, to allow filling areas with color (or the entire image). And some bug fixes to the Image Editor. Thanks @mdiller.
* 2.5.6 - 10 Jan 2023 - Find Stable Diffusion models in sub-folders inside `models/stable-diffusion`. This allows you to organize your models into sub-folders, instead of keeping them all in a single folder. Thanks @JeLuf.
* 2.5.5 - 9 Jan 2023 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
* 2.5.4 - 29 Dec 2022 - Press Esc key on the keyboard to close the Image Editor. Thanks @patriceac.
* 2.5.4 - 29 Dec 2022 - Lots of bug fixes in the UI. Thanks @patriceac.
* 2.5.4 - 28 Dec 2022 - Full support for running tasks in parallel on multiple GPUs. Warning: 'Euler Ancestral', 'DPM2 Ancestral' and 'DPM++ 2s Ancestral' may produce slight variations in the image (if run in parallel), so we recommend using the other samplers.
* 2.5.3 - 27 Dec 2022 - Fix broken drag-and-drop for text metadata files (as well as paste in clipboard).
* 2.5.3 - 27 Dec 2022 - Allow upscaling by 2x as well as 4x.
* 2.5.3 - 27 Dec 2022 - Fix broken renders on a second GPU.
* 2.5.3 - 26 Dec 2022 - Add a `Remove` button on each image. Thanks @JeLuf.
* 2.5.2 - 26 Dec 2022 - Fix broken inpainting if using non-square target images.
* 2.5.2 - 26 Dec 2022 - Fix a bug where an incorrect model config would get used for some SD 2.1 models.
* 2.5.2 - 26 Dec 2022 - Slight performance and memory improvement while rendering using SD 2.1 models.
* 2.5.1 - 25 Dec 2022 - Allow custom config yaml files for models. You can put a config file (`.yaml`) next to the model file, with the same name as the model. For e.g. if you put `robo-diffusion-v2-base.yaml` next to `robo-diffusion-v2-base.ckpt`, it'll automatically use that config file.
* 2.5.1 - 25 Dec 2022 - Fix broken rendering for SD 2.1-768 models. Fix broken rendering SD 2.0 safetensor models.
* 2.5.0 - 25 Dec 2022 - Major new release! Nearly twice as fast, Full support for SD 2.1 (including low GPU RAM optimizations), 6 new samplers, Model Merging, Fast loading/unloading of VAEs, Database of known models, Color correction for img2img, Three GPU Memory Usage Settings, Save metadata as JSON, Major rewrite of the code, Name change.
## v2.4
### Major Changes
- **Allow reordering the task queue** (by dragging and dropping tasks). Thanks @madrang
@ -44,6 +68,8 @@ Our focus continues to remain on an easy installation experience, and an easy us
- Support loading models in the safetensor format, for improved safety
### Detailed changelog
* 2.4.24 - 9 Jan 2022 - Urgent fix for failures on old/long-term-support browsers. Thanks @JeLuf.
* 2.4.23/22 - 29 Dec 2022 - Allow rolling back from the upcoming v2.5 change (in beta).
* 2.4.21 - 23 Dec 2022 - Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking the `Make Image` button and calling the server.
* 2.4.20 - 22 Dec 2022 - `Pause All` button to pause all the pending tasks. Thanks @JeLuf
* 2.4.20 - 22 Dec 2022 - `Undo`/`Redo` buttons in the image editor. Thanks @JeLuf
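
The v2.5 notes above describe moving the core engine into the `sdkit` package (installable via `pip install sdkit`). As rough orientation only, a minimal generation script along the lines of sdkit's public README might look like the sketch below; `Context`, `generate_images`, and their keyword arguments are not shown in this diff and should be treated as assumptions.

```python
from sdkit import Context                     # assumed top-level export
from sdkit.models import load_model
from sdkit.generate import generate_images    # assumed from sdkit's README, not this diff
from sdkit.utils import save_images

context = Context()

# point the context at a Stable Diffusion checkpoint and load it
context.model_paths['stable-diffusion'] = 'models/stable-diffusion/sd-v1-4.ckpt'
load_model(context, 'stable-diffusion')

# render one image and save it to disk
images = generate_images(context, prompt='Photograph of an astronaut riding a horse',
                         seed=42, width=512, height=512)
save_images(images, 'outputs')
```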

View File

@ -13,7 +13,7 @@ Click the download button for your operating system:
<p float="left">
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
</p>
## On Windows:

View File

@ -49,6 +49,18 @@ if exist "env" (
if exist src rename src src-old
if exist ldm rename ldm ldm-old
if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
if not exist "..\models\gfpgan" mkdir "..\models\gfpgan"
if not exist "..\models\realesrgan" mkdir "..\models\realesrgan"
if not exist "..\models\vae" mkdir "..\models\vae"
@rem migrate the legacy models to the correct path (if already downloaded)
if exist "sd-v1-4.ckpt" move sd-v1-4.ckpt ..\models\stable-diffusion\
if exist "custom-model.ckpt" move custom-model.ckpt ..\models\stable-diffusion\
if exist "GFPGANv1.3.pth" move GFPGANv1.3.pth ..\models\gfpgan\
if exist "RealESRGAN_x4plus.pth" move RealESRGAN_x4plus.pth ..\models\realesrgan\
if exist "RealESRGAN_x4plus_anime_6B.pth" move RealESRGAN_x4plus_anime_6B.pth ..\models\realesrgan\
@rem install torch and torchvision
call python ..\scripts\check_modules.py torch torchvision
if "%ERRORLEVEL%" EQU "0" (
@ -72,12 +84,15 @@ call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers nump
if "%ERRORLEVEL%" EQU "0" (
echo "sdkit is already installed."
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
@rem skip sdkit upgrade if in developer-mode
if not exist "..\src\sdkit" (
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call >nul pip install --upgrade sdkit || (
echo "Error updating sdkit"
call pip install --upgrade sdkit -q || (
echo "Error updating sdkit"
)
)
) else (
echo "Installing sdkit: https://pypi.org/project/sdkit/"
@ -93,6 +108,14 @@ if "%ERRORLEVEL%" EQU "0" (
)
)
call python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
@rem upgrade stable-diffusion-sdkit
call pip install --upgrade stable-diffusion-sdkit -q || (
echo "Error updating stable-diffusion-sdkit"
)
call python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
@rem install rich
call python ..\scripts\check_modules.py rich
if "%ERRORLEVEL%" EQU "0" (
@ -141,34 +164,30 @@ call WHERE uvicorn > .tmp
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
)
if not exist "..\models\vae" mkdir "..\models\vae"
@if exist "sd-v1-4.ckpt" (
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
) else (
for %%J in ("sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
for %%J in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 7 GB Model."
) else (
for %%K in ("sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
for %%K in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the Waifu Model."
) else (
echo. & echo "The model file present at %cd%\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
del "sd-v1-4.ckpt"
echo. & echo "The model file present at models\stable-diffusion\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
del "..\models\stable-diffusion\sd-v1-4.ckpt"
)
)
)
)
@if not exist "sd-v1-4.ckpt" (
@if not exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
@call curl -L -k https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
@call curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ..\models\stable-diffusion\sd-v1-4.ckpt
@if exist "sd-v1-4.ckpt" (
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
echo. & echo "Error: The downloaded model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
@ -183,22 +202,22 @@ if not exist "..\models\vae" mkdir "..\models\vae"
@if exist "GFPGANv1.3.pth" (
for %%I in ("GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
) else (
echo. & echo "The GFPGAN model file present at %cd%\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "GFPGANv1.3.pth"
echo. & echo "The GFPGAN model file present at models\gfpgan\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\gfpgan\GFPGANv1.3.pth"
)
)
@if not exist "GFPGANv1.3.pth" (
@if not exist "..\models\gfpgan\GFPGANv1.3.pth" (
@echo. & echo "Downloading data files (weights) for GFPGAN (Face Correction).." & echo.
@call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > GFPGANv1.3.pth
@call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ..\models\gfpgan\GFPGANv1.3.pth
@if exist "GFPGANv1.3.pth" (
for %%I in ("GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
echo. & echo "Error: The downloaded GFPGAN model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
@ -213,22 +232,22 @@ if not exist "..\models\vae" mkdir "..\models\vae"
@if exist "RealESRGAN_x4plus.pth" (
for %%I in ("RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
) else (
echo. & echo "The RealESRGAN model file present at %cd%\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "RealESRGAN_x4plus.pth"
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\realesrgan\RealESRGAN_x4plus.pth"
)
)
@if not exist "RealESRGAN_x4plus.pth" (
@if not exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.." & echo.
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > RealESRGAN_x4plus.pth
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ..\models\realesrgan\RealESRGAN_x4plus.pth
@if exist "RealESRGAN_x4plus.pth" (
for %%I in ("RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
echo. & echo "Error: The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
@ -243,21 +262,21 @@ if not exist "..\models\vae" mkdir "..\models\vae"
@if exist "RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
) else (
echo. & echo "The RealESRGAN model file present at %cd%\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "RealESRGAN_x4plus_anime_6B.pth"
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth"
)
)
@if not exist "RealESRGAN_x4plus_anime_6B.pth" (
@if not exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.." & echo.
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > RealESRGAN_x4plus_anime_6B.pth
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth
@if exist "RealESRGAN_x4plus_anime_6B.pth" (
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
@ -321,6 +340,9 @@ call python --version
@set SD_UI_PATH=%cd%\ui
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error

View File

@ -43,6 +43,18 @@ fi
if [ -e "src" ]; then mv src src-old; fi
if [ -e "ldm" ]; then mv ldm ldm-old; fi
mkdir -p "../models/stable-diffusion"
mkdir -p "../models/gfpgan"
mkdir -p "../models/realesrgan"
mkdir -p "../models/vae"
# migrate the legacy models to the correct path (if already downloaded)
if [ -e "sd-v1-4.ckpt" ]; then mv sd-v1-4.ckpt ../models/stable-diffusion/; fi
if [ -e "custom-model.ckpt" ]; then mv custom-model.ckpt ../models/stable-diffusion/; fi
if [ -e "GFPGANv1.3.pth" ]; then mv GFPGANv1.3.pth ../models/gfpgan/; fi
if [ -e "RealESRGAN_x4plus.pth" ]; then mv RealESRGAN_x4plus.pth ../models/realesrgan/; fi
if [ -e "RealESRGAN_x4plus_anime_6B.pth" ]; then mv RealESRGAN_x4plus_anime_6B.pth ../models/realesrgan/; fi
# install torch and torchvision
if python ../scripts/check_modules.py torch torchvision; then
echo "torch and torchvision have already been installed."
@ -63,10 +75,13 @@ fi
if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
echo "sdkit is already installed."
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
# skip sdkit upgrade if in developer-mode
if [ ! -e "../src/sdkit" ]; then
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
pip install --upgrade sdkit > /dev/null
pip install --upgrade sdkit -q
fi
else
echo "Installing sdkit: https://pypi.org/project/sdkit/"
@ -80,6 +95,12 @@ else
fi
fi
python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
# upgrade stable-diffusion-sdkit
pip install --upgrade stable-diffusion-sdkit -q
python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
# install rich
if python ../scripts/check_modules.py rich; then
echo "rich has already been installed."
@ -115,26 +136,24 @@ else
fi
fi
mkdir -p "../models/vae"
if [ -f "sd-v1-4.ckpt" ]; then
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
model_size=`find "../models/stable-diffusion/sd-v1-4.ckpt" -printf "%s"`
if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
else
printf "\n\nThe model file present at $PWD/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
rm sd-v1-4.ckpt
printf "\n\nThe model file present at models/stable-diffusion/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/stable-diffusion/sd-v1-4.ckpt
fi
fi
if [ ! -f "sd-v1-4.ckpt" ]; then
if [ ! -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
echo "Downloading data files (weights) for Stable Diffusion.."
curl -L -k https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ../models/stable-diffusion/sd-v1-4.ckpt
if [ -f "sd-v1-4.ckpt" ]; then
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
model_size=`find "../models/stable-diffusion/sd-v1-4.ckpt" -printf "%s"`
if [ ! "$model_size" == "4265380512" ]; then
fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
fi
@ -144,24 +163,24 @@ if [ ! -f "sd-v1-4.ckpt" ]; then
fi
if [ -f "GFPGANv1.3.pth" ]; then
model_size=`find "GFPGANv1.3.pth" -printf "%s"`
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
model_size=`find "../models/gfpgan/GFPGANv1.3.pth" -printf "%s"`
if [ "$model_size" -eq "348632874" ]; then
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
else
printf "\n\nThe model file present at $PWD/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm GFPGANv1.3.pth
printf "\n\nThe model file present at models/gfpgan/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/gfpgan/GFPGANv1.3.pth
fi
fi
if [ ! -f "GFPGANv1.3.pth" ]; then
if [ ! -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
echo "Downloading data files (weights) for GFPGAN (Face Correction).."
curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > GFPGANv1.3.pth
curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ../models/gfpgan/GFPGANv1.3.pth
if [ -f "GFPGANv1.3.pth" ]; then
model_size=`find "GFPGANv1.3.pth" -printf "%s"`
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
model_size=`find "../models/gfpgan/GFPGANv1.3.pth" -printf "%s"`
if [ ! "$model_size" -eq "348632874" ]; then
fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
fi
@ -171,24 +190,24 @@ if [ ! -f "GFPGANv1.3.pth" ]; then
fi
if [ -f "RealESRGAN_x4plus.pth" ]; then
model_size=`find "RealESRGAN_x4plus.pth" -printf "%s"`
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
model_size=`find "../models/realesrgan/RealESRGAN_x4plus.pth" -printf "%s"`
if [ "$model_size" -eq "67040989" ]; then
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
else
printf "\n\nThe model file present at $PWD/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm RealESRGAN_x4plus.pth
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/realesrgan/RealESRGAN_x4plus.pth
fi
fi
if [ ! -f "RealESRGAN_x4plus.pth" ]; then
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > RealESRGAN_x4plus.pth
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ../models/realesrgan/RealESRGAN_x4plus.pth
if [ -f "RealESRGAN_x4plus.pth" ]; then
model_size=`find "RealESRGAN_x4plus.pth" -printf "%s"`
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
model_size=`find "../models/realesrgan/RealESRGAN_x4plus.pth" -printf "%s"`
if [ ! "$model_size" -eq "67040989" ]; then
fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
fi
@ -198,24 +217,24 @@ if [ ! -f "RealESRGAN_x4plus.pth" ]; then
fi
if [ -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`find "RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`find "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
if [ "$model_size" -eq "17938799" ]; then
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
else
printf "\n\nThe model file present at $PWD/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm RealESRGAN_x4plus_anime_6B.pth
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
fi
fi
if [ ! -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > RealESRGAN_x4plus_anime_6B.pth
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
if [ -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`find "RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`find "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
if [ ! "$model_size" -eq "17938799" ]; then
fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
fi

View File

@ -19,4 +19,5 @@ which conda
conda --version || exit 1
# Download the rest of the installer and UI
chmod +x scripts/*.sh
scripts/on_env_start.sh

View File

@ -125,7 +125,7 @@ def needs_to_force_full_precision(context):
return True
device_name = context.device_name.lower()
return (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)) or ('Quadro T2000' in device_name)
return (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name or ' t400' in device_name or ' t550' in device_name or ' t1200' in device_name)) or ('Quadro T2000' in device_name)
def get_max_vram_usage_level(device):
if device != 'cpu':
@ -156,6 +156,8 @@ def is_device_compatible(device):
'''
Returns True/False, and prints any compatibility errors
'''
# static variable "history".
is_device_compatible.history = getattr(is_device_compatible, 'history', {})
try:
validate_device_id(device, log_prefix='is_device_compatible')
except:
@ -168,7 +170,9 @@ def is_device_compatible(device):
_, mem_total = torch.cuda.mem_get_info(device)
mem_total /= float(10**9)
if mem_total < 3.0:
log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
if is_device_compatible.history.get(device) == None:
log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
is_device_compatible.history[device] = 1
return False
except RuntimeError as e:
log.error(str(e))
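
The hunk above adds a per-device, warn-once guard so that an incompatible GPU (less than 3 GB of VRAM) is logged only the first time it is checked, instead of flooding the log. A standalone sketch of that function-attribute pattern (illustrative names, not the project's code):

```python
def check_vram(device, mem_total_gb):
    # a dict stored on the function itself survives across calls,
    # acting like a static variable that remembers warned devices
    check_vram.history = getattr(check_vram, 'history', {})
    if mem_total_gb < 3.0:
        if device not in check_vram.history:
            print(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
            check_vram.history[device] = 1   # warn only once per device
        return False
    return True
```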

View File

@ -24,11 +24,6 @@ DEFAULT_MODELS = {
'gfpgan': ['GFPGANv1.3'],
'realesrgan': ['RealESRGAN_x4plus'],
}
VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
'high': {},
}
MODELS_TO_LOAD_ON_START = ['stable-diffusion', 'vae', 'hypernetwork']
known_models = {}
@ -43,7 +38,13 @@ def load_default_models(context: Context):
# init default model paths
for model_type in MODELS_TO_LOAD_ON_START:
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
load_model(context, model_type)
try:
load_model(context, model_type)
except Exception as e:
log.error(f'[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]')
log.error(f'[red]Error: {e}[/red]')
log.error(f'[red]Consider to remove the model from the model folder.[red]')
def unload_all(context: Context):
for model_type in KNOWN_MODEL_TYPES:
@ -101,10 +102,6 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
if set_vram_optimizations(context): # reload SD
models_to_reload['stable-diffusion'] = model_paths_in_req['stable-diffusion']
if 'stable-diffusion' in models_to_reload:
quick_hash = hash_file_quick(models_to_reload['stable-diffusion'])
known_model_info = get_model_info_from_db(quick_hash=quick_hash)
for model_type, model_path_in_req in models_to_reload.items():
context.model_paths[model_type] = model_path_in_req
@ -131,10 +128,8 @@ def set_vram_optimizations(context: Context):
f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
vram_usage_level = max_usage_level
vram_optimizations = VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS[vram_usage_level]
if vram_optimizations != context.vram_optimizations:
context.vram_optimizations = vram_optimizations
if vram_usage_level != context.vram_usage_level:
context.vram_usage_level = vram_usage_level
return True
return False
@ -179,6 +174,34 @@ def getModels():
}
models_scanned = 0
class MaliciousModelException(Exception):
"Raised when picklescan reports a problem with a model"
pass
def scan_directory(directory, suffixes):
nonlocal models_scanned
tree = []
for entry in os.scandir(directory):
if entry.is_file():
matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
if len(matching_suffix) == 0: continue
matching_suffix = matching_suffix[0]
mtime = entry.stat().st_mtime
mod_time = known_models[entry.path] if entry.path in known_models else -1
if mod_time != mtime:
models_scanned += 1
if is_malicious_model(entry.path):
raise MaliciousModelException(entry.path)
known_models[entry.path] = mtime
tree.append(entry.name[:-len(matching_suffix)])
elif entry.is_dir():
scan=scan_directory(entry.path, suffixes)
if len(scan) != 0:
tree.append( (entry.name, scan ) )
return tree
def listModels(model_type):
nonlocal models_scanned
@ -187,26 +210,10 @@ def getModels():
if not os.path.exists(models_dir):
os.makedirs(models_dir)
for file in os.listdir(models_dir):
for model_extension in model_extensions:
if not file.endswith(model_extension):
continue
model_path = os.path.join(models_dir, file)
mtime = os.path.getmtime(model_path)
mod_time = known_models[model_path] if model_path in known_models else -1
if mod_time != mtime:
models_scanned += 1
if is_malicious_model(model_path):
models['scan-error'] = file
return
known_models[model_path] = mtime
model_name = file[:-len(model_extension)]
models['options'][model_type].append(model_name)
models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates
models['options'][model_type].sort()
try:
models['options'][model_type] = scan_directory(models_dir, model_extensions)
except MaliciousModelException as e:
models['scan-error'] = e
# custom models
listModels(model_type='stable-diffusion')
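
In the new `scan_directory` above, a plain string entry is a model file (with its extension stripped) and a `(folder_name, children)` tuple is a sub-folder. A small illustrative helper (not part of the repository) shows how such a nested tree maps back to relative model paths:

```python
import os

def flatten_tree(tree, prefix=''):
    # turn the nested scan result back into relative paths, e.g.
    # ['sd-v1-4', ('anime', ['model-a'])] -> ['sd-v1-4', 'anime/model-a']
    paths = []
    for node in tree:
        if isinstance(node, tuple):              # (folder_name, children)
            folder, children = node
            paths.extend(flatten_tree(children, os.path.join(prefix, folder)))
        else:                                    # bare model name
            paths.append(os.path.join(prefix, node))
    return paths
```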

View File

@ -1,6 +1,7 @@
import queue
import time
import json
import pprint
from easydiffusion import device_manager
from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
@ -28,18 +29,23 @@ def init(device):
def make_images(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback):
context.stop_processing = False
log.info(f'request: {get_printable_request(req)}')
log.info(f'task data: {task_data.dict()}')
print_task_info(req, task_data)
images = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
res = Response(req, task_data, images=construct_response(images, task_data, base_seed=req.seed))
res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
res = res.json()
data_queue.put(json.dumps(res))
log.info('Task completed')
return res
def print_task_info(req: GenerateImageRequest, task_data: TaskData):
req_str = pprint.pformat(get_printable_request(req)).replace("[","\[")
task_str = pprint.pformat(task_data.dict()).replace("[","\[")
log.info(f'request: {req_str}')
log.info(f'task data: {task_str}')
def make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback):
images, user_stopped = generate_images_internal(req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress)
filtered_images = filter_images(task_data, images, user_stopped)
@ -47,7 +53,11 @@ def make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_qu
if task_data.save_to_disk_path is not None:
save_images_to_disk(images, filtered_images, req, task_data)
return filtered_images if task_data.show_only_filtered_image else images + filtered_images
seeds = [*range(req.seed, req.seed + len(images))]
if task_data.show_only_filtered_image or filtered_images is images:
return filtered_images, seeds
else:
return images + filtered_images, seeds + seeds
def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
context.temp_images.clear()
@ -76,14 +86,14 @@ def filter_images(task_data: TaskData, images: list, user_stopped):
if task_data.use_face_correction and 'gfpgan' in task_data.use_face_correction.lower(): filters_to_apply.append('gfpgan')
if task_data.use_upscale and 'realesrgan' in task_data.use_upscale.lower(): filters_to_apply.append('realesrgan')
return apply_filters(context, filters_to_apply, images)
return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
def construct_response(images: list, task_data: TaskData, base_seed: int):
def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
return [
ResponseImage(
data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
seed=base_seed + i
) for i, img in enumerate(images)
seed=seed,
) for img, seed in zip(images, seeds)
]
def make_step_callback(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
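
The `seeds` change above fixes the case where both the raw renders and their filtered copies are returned: each filtered image must reuse the seed of the render it came from, rather than continuing to increment past the raw images. A small self-contained illustration (placeholder strings instead of real images):

```python
base_seed = 100
images = ['img0', 'img1']            # raw renders
filtered = ['img0_f', 'img1_f']      # e.g. upscaled / face-corrected copies

# old behaviour: seed = base_seed + index over the combined list
old = [(img, base_seed + i) for i, img in enumerate(images + filtered)]
# -> [('img0', 100), ('img1', 101), ('img0_f', 102), ('img1_f', 103)]  (wrong seeds)

# new behaviour: one seed per raw image, repeated for the filtered copies
seeds = [*range(base_seed, base_seed + len(images))]
new = list(zip(images + filtered, seeds + seeds))
# -> [('img0', 100), ('img1', 101), ('img0_f', 100), ('img1_f', 101)]
```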

View File

@ -13,7 +13,7 @@ from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
from easydiffusion import app, model_manager, task_manager
from easydiffusion.types import TaskData, GenerateImageRequest
from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
from easydiffusion.utils import log
log.info(f'started in {app.SD_DIR}')
@ -61,6 +61,11 @@ def init():
def render(req: dict):
return render_internal(req)
@server_api.post('/model/merge')
def model_merge(req: dict):
print(req)
return model_merge_internal(req)
@server_api.get('/image/stream/{task_id:int}')
def stream(task_id:int):
return stream_internal(task_id)
@ -181,6 +186,23 @@ def render_internal(req: dict):
log.error(traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
def model_merge_internal(req: dict):
try:
from sdkit.train import merge_models
from easydiffusion.utils.save_utils import filename_regex
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
merge_models(model_manager.resolve_model_to_use(mergeReq.model0,'stable-diffusion'),
model_manager.resolve_model_to_use(mergeReq.model1,'stable-diffusion'),
mergeReq.ratio,
os.path.join(app.MODELS_DIR, 'stable-diffusion', filename_regex.sub('_', mergeReq.out_path)),
mergeReq.use_fp16
)
return JSONResponse({'status':'OK'}, headers=NOCACHE_HEADERS)
except Exception as e:
log.error(traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
def stream_internal(task_id:int):
#TODO Move to WebSockets ??
task = task_manager.get_cached_task(task_id, update_ttl=True)
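
The new `/model/merge` endpoint above accepts the fields defined on `MergeRequest` (shown in the next file's diff). A hedged example of calling it with Python's standard library, assuming the default `0.0.0.0:9000` bind address and placeholder model names:

```python
import json
import urllib.request

payload = {
    'model0': 'sd-v1-4',             # resolved against models/stable-diffusion
    'model1': 'my-custom-model',     # placeholder name
    'ratio': 0.5,                    # blend ratio between the two models
    'out_path': 'my-merged-model',   # written under models/stable-diffusion
    'use_fp16': True,
}
req = urllib.request.Request(
    'http://localhost:9000/model/merge',
    data=json.dumps(payload).encode('utf-8'),
    headers={'Content-Type': 'application/json'},
    method='POST',
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode())     # expect {"status": "OK"} on success
```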

View File

@ -29,8 +29,9 @@ class TaskData(BaseModel):
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
upscale_amount: int = 4 # or 2
use_stable_diffusion_model: str = "sd-v1-4"
use_stable_diffusion_config: str = "v1-inference"
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None
use_hypernetwork_model: str = None
@ -40,6 +41,13 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
class MergeRequest(BaseModel):
model0: str = None
model1: str = None
ratio: float = None
out_path: str = "mix"
use_fp16 = True
class Image:
data: str # base64
seed: int

View File

@ -7,7 +7,7 @@ from easydiffusion.types import TaskData, GenerateImageRequest
from sdkit.utils import save_images, save_dicts
filename_regex = re.compile('[^a-zA-Z0-9]')
filename_regex = re.compile('[^a-zA-Z0-9._-]')
# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
@ -20,6 +20,7 @@ TASK_TEXT_MAPPING = {
'prompt_strength': 'Prompt Strength',
'use_face_correction': 'Use Face Correction',
'use_upscale': 'Use Upscaling',
'upscale_amount': 'Upscale By',
'sampler_name': 'Sampler',
'negative_prompt': 'Negative Prompt',
'use_stable_diffusion_model': 'Stable Diffusion model',
@ -28,16 +29,20 @@ TASK_TEXT_MAPPING = {
}
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
now = time.time()
save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub('_', task_data.session_id))
metadata_entries = get_metadata_entries_for_request(req, task_data)
make_filename = make_filename_callback(req, now=now)
if task_data.show_only_filtered_image or filtered_images == images:
save_images(filtered_images, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.output_format, output_quality=task_data.output_quality)
save_dicts(metadata_entries, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.metadata_output_format)
if task_data.show_only_filtered_image or filtered_images is images:
save_images(filtered_images, save_dir_path, file_name=make_filename, output_format=task_data.output_format, output_quality=task_data.output_quality)
save_dicts(metadata_entries, save_dir_path, file_name=make_filename, output_format=task_data.metadata_output_format)
else:
save_images(images, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.output_format, output_quality=task_data.output_quality)
save_images(filtered_images, save_dir_path, file_name=make_filename_callback(req, suffix='filtered'), output_format=task_data.output_format, output_quality=task_data.output_quality)
save_dicts(metadata_entries, save_dir_path, file_name=make_filename_callback(req, suffix='filtered'), output_format=task_data.metadata_output_format)
make_filter_filename = make_filename_callback(req, now=now, suffix='filtered')
save_images(images, save_dir_path, file_name=make_filename, output_format=task_data.output_format, output_quality=task_data.output_quality)
save_images(filtered_images, save_dir_path, file_name=make_filter_filename, output_format=task_data.output_format, output_quality=task_data.output_quality)
save_dicts(metadata_entries, save_dir_path, file_name=make_filter_filename, output_format=task_data.metadata_output_format)
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
metadata = get_printable_request(req)
@ -48,6 +53,8 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
'use_face_correction': task_data.use_face_correction,
'use_upscale': task_data.use_upscale,
})
if metadata['use_upscale'] is not None:
metadata['upscale_amount'] = task_data.upscale_amount
# if text, format it in the text format expected by the UI
is_txt_format = (task_data.metadata_output_format.lower() == 'txt')
@ -66,9 +73,11 @@ def get_printable_request(req: GenerateImageRequest):
del metadata['init_image_mask']
return metadata
def make_filename_callback(req: GenerateImageRequest, suffix=None):
def make_filename_callback(req: GenerateImageRequest, suffix=None, now=None):
if now is None:
now = time.time()
def make_filename(i):
img_id = base64.b64encode(int(time.time()+i).to_bytes(8, 'big')).decode() # Generate unique ID based on time.
img_id = base64.b64encode(int(now+i).to_bytes(8, 'big')).decode() # Generate unique ID based on time.
img_id = img_id.translate({43:None, 47:None, 61:None})[-8:] # Remove + / = and keep last 8 chars.
prompt_flattened = filename_regex.sub('_', req.prompt)[:50]
@ -76,4 +85,4 @@ def make_filename_callback(req: GenerateImageRequest, suffix=None):
name = name if suffix is None else f'{name}_{suffix}'
return name
return make_filename
return make_filename
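The change above threads one shared now timestamp through make_filename_callback so that an image and its "_filtered" counterpart get the same time-based ID. A small sketch of that ID scheme, copied from the visible logic; the exact way the prompt and ID are joined is elided in this hunk, so the name format below is an assumption:
# Hedged sketch of the filename scheme in make_filename above.
# 'now' is captured once so plain and '_filtered' files share matching IDs.
# The '{prompt}_{id}' join is an assumption; that line is elided in the hunk.
import base64, re, time
filename_regex = re.compile('[^a-zA-Z0-9._-]')
def make_filename_callback(prompt, suffix=None, now=None):
    if now is None:
        now = time.time()
    def make_filename(i):
        img_id = base64.b64encode(int(now + i).to_bytes(8, 'big')).decode()
        img_id = img_id.translate({43: None, 47: None, 61: None})[-8:]  # drop + / =, keep last 8 chars
        prompt_flattened = filename_regex.sub('_', prompt)[:50]
        name = f'{prompt_flattened}_{img_id}'
        return name if suffix is None else f'{name}_{suffix}'
    return make_filename
now = time.time()
plain = make_filename_callback("a cat", now=now)
filtered = make_filename_callback("a cat", suffix='filtered', now=now)
print(plain(0), filtered(0))  # same ID, second name ends in _filtered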

View File

@ -1,7 +1,7 @@
<!DOCTYPE html>
<html>
<head>
<title>Stable Diffusion UI</title>
<title>Easy Diffusion</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="theme-color" content="#673AB6">
<link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
@ -25,7 +25,7 @@
<div id="logo">
<h1>
Easy Diffusion
<small>v2.5.0 <span id="updateBranchLabel"></span></small>
<small>v2.5.10 <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
@ -55,7 +55,7 @@
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->
<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
Negative Prompt
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Negative Prompts</span></i></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">Click to learn more about Negative Prompts</span></i></a>
<small>(optional)</small>
</label>
<div class="collapsible-content">
@ -92,10 +92,12 @@
</div>
</div>
<div id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></div>
</div>
<div id="editor-inputs-tags-container" class="row">
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">click an Image Modifier to remove it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, right-click to temporarily disable it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
<div id="editor-inputs-tags-list"></div>
</div>
@ -150,7 +152,7 @@
<option value="dpm2_a">DPM2 Ancestral</option>
<option value="lms">LMS</option>
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
<option value="dpmpp_2s_a" selected>DPM++ 2s Ancestral</option>
<option value="dpmpp_2s_a">DPM++ 2s Ancestral</option>
<option value="dpmpp_2m">DPM++ 2m</option>
<option value="dpmpp_sde">DPM++ SDE</option>
<option value="dpm_fast">DPM Fast</option>
@ -230,10 +232,14 @@
<div><ul>
<li><b class="settings-subheader">Render Settings</b></li>
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
<li id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></li>
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
<li class="pl-5">
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
<select id="upscale_amount" name="upscale_amount">
<option value="2">2x</option>
<option value="4" selected>4x</option>
</select>
with
<select id="upscale_model" name="upscale_model">
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>

View File

@ -2,12 +2,12 @@
padding-left: 32px;
text-align: left;
padding-bottom: 20px;
max-width: min-content;
}
.editor-options-container {
display: flex;
row-gap: 10px;
max-width: 210px;
}
.editor-options-container > * {

View File

@ -251,6 +251,11 @@ button#resume {
img {
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
div.img-preview img {
width:100%;
height: 100%;
max-height: 70vh;
}
.line-separator {
background: var(--background-color3);
height: 1pt;
@ -876,10 +881,11 @@ input::file-selector-button {
font-size: 12px;
background-color: var(--background-color3);
visibility: hidden;
visibility: hidden;
opacity: 0;
position: absolute;
white-space: nowrap;
width: max-content;
max-width: 300px;
padding: 8px 12px;
transition: 0.3s all;
@ -895,7 +901,7 @@ input::file-selector-button {
.simple-tooltip.right {
right: 0px;
top: 50%;
transform: translate(calc(100% - 15%), -50%);
transform: translate(100%, -50%);
}
:hover > .simple-tooltip.right {
transform: translate(100%, -50%);
@ -1099,11 +1105,11 @@ button:active {
div.task-initimg > img {
margin-right: 6px;
display: block;
display: block;
}
div.task-fs-initimage {
display: none;
# position: absolute;
display: none;
position: absolute;
}
div.task-initimg:hover div.task-fs-initimage {
display: block;
@ -1111,6 +1117,8 @@ div.task-initimg:hover div.task-fs-initimage {
z-index: 9999;
box-shadow: 0 0 30px #000;
margin-top:-64px;
max-width: 75vw;
max-height: 75vh;
}
div.top-right {
position: absolute;

View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 576" width="24" height="24">
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
<path style="filter: drop-shadow(0px 0px 20px white)" d="M290.7 57.4 57.4 290.7c-25 25-25 65.5 0 90.5l80 80c12 12 28.3 18.7 45.3 18.7H512c17.7 0 32-14.3 32-32s-14.3-32-32-32H387.9l130.7-130.6c25-25 25-65.5 0-90.5L381.3 57.4c-25-25-65.5-25-90.5 0zm6.7 358.6H182.6l-80-80 124.7-124.7 137.4 137.4-67.3 67.3z"/>
</svg>


View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="24" height="24">
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
<path style="filter: drop-shadow(0px 0px 20px white)" d="M341.6 29.2 240.1 130.8l-9.4-9.4c-12.5-12.5-32.8-12.5-45.3 0s-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3l-9.4-9.4 101.5-101.6c39-39 39-102.2 0-141.1s-102.2-39-141.1 0zM55.4 323.3c-15 15-23.4 35.4-23.4 56.6v42.4L5.4 462.2c-8.5 12.7-6.8 29.6 4 40.4s27.7 12.5 40.4 4L89.7 480h42.4c21.2 0 41.6-8.4 56.6-23.4l120.7-120.7-45.3-45.3-120.7 120.7c-3 3-7.1 4.7-11.3 4.7H96v-36.1c0-4.2 1.7-8.3 4.7-11.3l120.7-120.7-45.3-45.3L55.4 323.3z"/>
</svg>


View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 576" width="24" height="24">
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
<path style="filter: drop-shadow(0px 0px 20px white)" d="M118.6 9.4c-12.5-12.5-32.7-12.5-45.2 0s-12.5 32.8 0 45.3l81.3 81.3-92.1 92.1c-37.5 37.5-37.5 98.3 0 135.8l117.5 117.5c37.5 37.5 98.3 37.5 135.8 0l190.4-190.5c28.1-28.1 28.1-73.7 0-101.8L354.9 37.7c-28.1-28.1-73.7-28.1-101.8 0l-53.1 53-81.4-81.3zM200 181.3l49.4 49.4c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L245.3 136l53.1-53.1c3.1-3.1 8.2-3.1 11.3 0l151.4 151.4c3.1 3.1 3.1 8.2 0 11.3L418.7 288H99.5c1.4-5.4 4.2-10.4 8.4-14.6l92.1-92.1z"/>
</svg>


View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="24" height="24">
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
<path style="filter: drop-shadow(0px 0px 20px white)" d="m410.3 231 11.3-11.3-33.9-33.9-62.1-62.1-33.9-33.9-11.3 11.3-22.6 22.6L58.6 322.9c-10.4 10.4-18 23.3-22.2 37.4L1 480.7c-2.5 8.4-.2 17.5 6.1 23.7s15.3 8.5 23.7 6.1l120.3-35.4c14.1-4.2 27-11.8 37.4-22.2l199.2-199.2 22.6-22.7zM160 399.4l-9.1 22.7c-4 3.1-8.5 5.4-13.3 6.9l-78.2 23 23-78.1c1.4-4.9 3.8-9.4 6.9-13.3l22.7-9.1v32c0 8.8 7.2 16 16 16h32zM362.7 18.7l-14.4 14.5-22.6 22.6-11.4 11.3 33.9 33.9 62.1 62.1 33.9 33.9 11.3-11.3 22.6-22.6 14.5-14.5c25-25 25-65.5 0-90.5l-39.3-39.4c-25-25-65.5-25-90.5 0zm-47.4 168-144 144c-6.2 6.2-16.4 6.2-22.6 0s-6.2-16.4 0-22.6l144-144c6.2-6.2 16.4-6.2 22.6 0s6.2 16.4 0 22.6z"/>
</svg>


View File

@ -28,6 +28,7 @@ const SETTINGS_IDS_LIST = [
"stream_image_progress",
"use_face_correction",
"use_upscale",
"upscale_amount",
"show_only_filtered_image",
"upscale_model",
"preview-image",
@ -40,7 +41,8 @@ const SETTINGS_IDS_LIST = [
"confirm_dangerous_actions",
"metadata_output_format",
"auto_save_settings",
"apply_color_correction"
"apply_color_correction",
"process_order_toggle"
]
const IGNORE_BY_DEFAULT = [
@ -278,7 +280,6 @@ function tryLoadOldSettings() {
"soundEnabled": "sound_toggle",
"saveToDisk": "save_to_disk",
"useCPU": "use_cpu",
"useTurboMode": "turbo",
"diskPath": "diskPath",
"useFaceCorrection": "use_face_correction",
"useUpscaling": "use_upscale",

View File

@ -59,6 +59,13 @@ const TASK_MAPPING = {
readUI: () => activeTags.map(x => x.name),
parse: (val) => val
},
inactive_tags: { name: "Inactive Image Modifiers",
setUI: (inactive_tags) => {
refreshInactiveTags(inactive_tags)
},
readUI: () => activeTags.filter(tag => tag.inactive === true).map(x => x.name),
parse: (val) => val
},
width: { name: 'Width',
setUI: (width) => {
const oldVal = widthField.value
@ -137,7 +144,14 @@ const TASK_MAPPING = {
readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
parse: (val) => val
},
preserve_init_image_color_profile: { name: 'Preserve Color Profile',
setUI: (preserve_init_image_color_profile) => {
applyColorCorrectionField.checked = parseBoolean(preserve_init_image_color_profile)
},
readUI: () => applyColorCorrectionField.checked,
parse: (val) => parseBoolean(val)
},
use_face_correction: { name: 'Use Face Correction',
setUI: (use_face_correction) => {
useFaceCorrectionField.checked = parseBoolean(use_face_correction)
@ -148,12 +162,14 @@ const TASK_MAPPING = {
use_upscale: { name: 'Use Upscaling',
setUI: (use_upscale) => {
const oldVal = upscaleModelField.value
upscaleModelField.value = use_upscale
upscaleModelField.value = getModelPath(use_upscale, ['.pth'])
if (upscaleModelField.value) { // Is a valid value for the field.
useUpscalingField.checked = true
upscaleModelField.disabled = false
upscaleAmountField.disabled = false
} else { // Not a valid value, restore the old value and disable the filter.
upscaleModelField.disabled = true
upscaleAmountField.disabled = true
upscaleModelField.value = oldVal
useUpscalingField.checked = false
}
@ -161,6 +177,13 @@ const TASK_MAPPING = {
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
parse: (val) => val
},
upscale_amount: { name: 'Upscale By',
setUI: (upscale_amount) => {
upscaleAmountField.value = upscale_amount
},
readUI: () => upscaleAmountField.value,
parse: (val) => val
},
sampler_name: { name: 'Sampler',
setUI: (sampler_name) => {
samplerField.value = sampler_name
@ -235,13 +258,6 @@ const TASK_MAPPING = {
readUI: () => useCPUField.checked,
parse: (val) => val
},
turbo: { name: 'Turbo',
setUI: (turbo) => {
turboField.checked = turbo
},
readUI: () => turboField.checked,
parse: (val) => Boolean(val)
},
stream_image_progress: { name: 'Stream Image Progress',
setUI: (stream_image_progress) => {
@ -273,6 +289,7 @@ const TASK_MAPPING = {
parse: (val) => val
}
}
function restoreTaskToUI(task, fieldsToSkip) {
fieldsToSkip = fieldsToSkip || []
@ -292,9 +309,18 @@ function restoreTaskToUI(task, fieldsToSkip) {
}
}
// restore the original tag
promptField.value = task.reqBody.original_prompt || task.reqBody.prompt
// properly reset fields not present in the task
if (!('use_hypernetwork_model' in task.reqBody)) {
hypernetworkModelField.value = ""
hypernetworkModelField.dispatchEvent(new Event("change"))
}
// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)
promptField.value = task.reqBody.original_prompt
if (!('original_prompt' in task.reqBody)) {
promptField.value = task.reqBody.prompt
}
// properly reset checkboxes
if (!('use_face_correction' in task.reqBody)) {
useFaceCorrectionField.checked = false
@ -302,19 +328,26 @@ function restoreTaskToUI(task, fieldsToSkip) {
if (!('use_upscale' in task.reqBody)) {
useUpscalingField.checked = false
}
if (!('mask' in task.reqBody)) {
if (!('mask' in task.reqBody) && maskSetting.checked) {
maskSetting.checked = false
maskSetting.dispatchEvent(new Event("click"))
}
upscaleModelField.disabled = !useUpscalingField.checked
upscaleAmountField.disabled = !useUpscalingField.checked
// Show the source picture if present
initImagePreview.src = (task.reqBody.init_image == undefined ? '' : task.reqBody.init_image)
if (IMAGE_REGEX.test(initImagePreview.src)) {
if (Boolean(task.reqBody.mask)) {
setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
// hide/show source picture as needed
if (IMAGE_REGEX.test(initImagePreview.src) && task.reqBody.init_image == undefined) {
// hide source image
initImageClearBtn.dispatchEvent(new Event("click"))
}
else if (task.reqBody.init_image !== undefined) {
// listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
initImagePreview.addEventListener('load', function() {
if (Boolean(task.reqBody.mask)) {
imageInpainter.setImg(task.reqBody.mask)
}, 250)
}
}
}, { once: true })
initImagePreview.src = task.reqBody.init_image
}
}
function readUI() {
@ -355,26 +388,39 @@ const TASK_TEXT_MAPPING = {
prompt_strength: 'Prompt Strength',
use_face_correction: 'Use Face Correction',
use_upscale: 'Use Upscaling',
upscale_amount: 'Upscale By',
sampler_name: 'Sampler',
negative_prompt: 'Negative Prompt',
use_stable_diffusion_model: 'Stable Diffusion model',
use_hypernetwork_model: 'Hypernetwork model',
hypernetwork_strength: 'Hypernetwork Strength'
}
const afterPromptRe = /^\s*Width\s*:\s*\d+\s*(?:\r\n|\r|\n)+\s*Height\s*:\s*\d+\s*(\r\n|\r|\n)+Seed\s*:\s*\d+\s*$/igm
function parseTaskFromText(str) {
const taskReqBody = {}
const lines = str.split('\n')
if (lines.length === 0) {
return
}
// Prompt
afterPromptRe.lastIndex = 0
const match = afterPromptRe.exec(str)
if (match) {
let prompt = str.slice(0, match.index)
str = str.slice(prompt.length)
taskReqBody.prompt = prompt.trim()
let knownKeyOnFirstLine = false
for (let key in TASK_TEXT_MAPPING) {
if (lines[0].startsWith(TASK_TEXT_MAPPING[key] + ':')) {
knownKeyOnFirstLine = true
break
}
}
if (!knownKeyOnFirstLine) {
taskReqBody.prompt = lines[0]
console.log('Prompt:', taskReqBody.prompt)
}
for (const key in TASK_TEXT_MAPPING) {
if (key in taskReqBody) {
continue
}
const name = TASK_TEXT_MAPPING[key];
let val = undefined
@ -419,7 +465,7 @@ async function parseContent(text) {
}
// Normal txt file.
const task = parseTaskFromText(text)
if (task) {
if (text.toLowerCase().includes('seed:') && task) { // only parse valid task content
restoreTaskToUI(task)
return true
} else {
@ -476,7 +522,6 @@ document.addEventListener("dragover", dragOverHandler)
const TASK_REQ_NO_EXPORT = [
"use_cpu",
"turbo",
"save_to_disk_path"
]
const resetSettings = document.getElementById('reset-image-settings')

View File

@ -727,7 +727,6 @@
"stream_progress_updates": 'boolean',
"stream_image_progress": 'boolean',
"show_only_filtered_image": 'boolean',
"turbo": 'boolean',
"output_format": 'string',
"output_quality": 'number',
}
@ -742,7 +741,6 @@
"stream_progress_updates": true,
"stream_image_progress": true,
"show_only_filtered_image": true,
"turbo": false,
"output_format": "png",
"output_quality": 75,
}
@ -837,10 +835,13 @@
* @memberof Task
*/
async post(timeout=-1) {
performance.mark('make-render-request')
if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
if (typeof performance == "object" && performance.mark && performance.measure) {
performance.mark('make-render-request')
if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
}
}
let jsonResponse = await super.post('/render', timeout)
if (typeof jsonResponse?.task !== 'number') {
console.warn('Endpoint error response: ', jsonResponse)

View File

@ -36,13 +36,14 @@ const defaultToolEnd = (editor, ctx, x, y, is_overlay = false) => {
ctx.clearRect(0, 0, editor.width, editor.height)
}
}
const toolDoNothing = (editor, ctx, x, y, is_overlay = false) => {}
const IMAGE_EDITOR_TOOLS = [
{
id: "draw",
name: "Draw",
icon: "fa-solid fa-pencil",
cursor: "url(/media/images/fa-pencil.png) 0 24, pointer",
cursor: "url(/media/images/fa-pencil.svg) 0 24, pointer",
begin: defaultToolBegin,
move: defaultToolMove,
end: defaultToolEnd
@ -51,7 +52,7 @@ const IMAGE_EDITOR_TOOLS = [
id: "erase",
name: "Erase",
icon: "fa-solid fa-eraser",
cursor: "url(/media/images/fa-eraser.png) 0 18, pointer",
cursor: "url(/media/images/fa-eraser.svg) 0 14, pointer",
begin: defaultToolBegin,
move: (editor, ctx, x, y, is_overlay = false) => {
ctx.lineTo(x, y)
@ -78,27 +79,56 @@ const IMAGE_EDITOR_TOOLS = [
}
},
{
id: "colorpicker",
name: "Color Picker",
icon: "fa-solid fa-eye-dropper",
cursor: "url(/media/images/fa-eye-dropper.png) 0 24, pointer",
id: "fill",
name: "Fill",
icon: "fa-solid fa-fill",
cursor: "url(/media/images/fa-fill.svg) 20 6, pointer",
begin: (editor, ctx, x, y, is_overlay = false) => {
var img_rgb = editor.layers.background.ctx.getImageData(x, y, 1, 1).data
var drawn_rgb = editor.ctx_current.getImageData(x, y, 1, 1).data
var drawn_opacity = drawn_rgb[3] / 255
editor.custom_color_input.value = rgbToHex({
r: (drawn_rgb[0] * drawn_opacity) + (img_rgb[0] * (1 - drawn_opacity)),
g: (drawn_rgb[1] * drawn_opacity) + (img_rgb[1] * (1 - drawn_opacity)),
b: (drawn_rgb[2] * drawn_opacity) + (img_rgb[2] * (1 - drawn_opacity)),
})
editor.custom_color_input.dispatchEvent(new Event("change"))
if (!is_overlay) {
var color = hexToRgb(ctx.fillStyle)
color.a = parseInt(ctx.globalAlpha * 255) // layer.ctx.globalAlpha
flood_fill(editor, ctx, parseInt(x), parseInt(y), color)
}
},
move: (editor, ctx, x, y, is_overlay = false) => {},
end: (editor, ctx, x, y, is_overlay = false) => {}
move: toolDoNothing,
end: toolDoNothing
},
{
id: "colorpicker",
name: "Picker",
icon: "fa-solid fa-eye-dropper",
cursor: "url(/media/images/fa-eye-dropper.svg) 0 24, pointer",
begin: (editor, ctx, x, y, is_overlay = false) => {
if (!is_overlay) {
var img_rgb = editor.layers.background.ctx.getImageData(x, y, 1, 1).data
var drawn_rgb = editor.ctx_current.getImageData(x, y, 1, 1).data
var drawn_opacity = drawn_rgb[3] / 255
editor.custom_color_input.value = rgbToHex({
r: (drawn_rgb[0] * drawn_opacity) + (img_rgb[0] * (1 - drawn_opacity)),
g: (drawn_rgb[1] * drawn_opacity) + (img_rgb[1] * (1 - drawn_opacity)),
b: (drawn_rgb[2] * drawn_opacity) + (img_rgb[2] * (1 - drawn_opacity)),
})
editor.custom_color_input.dispatchEvent(new Event("change"))
}
},
move: toolDoNothing,
end: toolDoNothing
}
]
const IMAGE_EDITOR_ACTIONS = [
{
id: "fill_all",
name: "Fill all",
icon: "fa-solid fa-paint-roller",
handler: (editor) => {
editor.ctx_current.globalCompositeOperation = "source-over"
editor.ctx_current.rect(0, 0, editor.width, editor.height)
editor.ctx_current.fill()
editor.setBrush()
},
trackHistory: true
},
{
id: "clear",
name: "Clear",
@ -467,8 +497,8 @@ class ImageEditor {
width = (multiplier * width).toFixed()
height = (multiplier * height).toFixed()
}
this.width = width
this.height = height
this.width = parseInt(width)
this.height = parseInt(height)
this.container.style.width = width + "px"
this.container.style.height = height + "px"
@ -494,8 +524,10 @@ class ImageEditor {
}
setImage(url, width, height) {
this.setSize(width, height)
this.layers.drawing.ctx.clearRect(0, 0, this.width, this.height)
this.layers.background.ctx.clearRect(0, 0, this.width, this.height)
if (!(url && this.inpainter)) {
this.layers.drawing.ctx.clearRect(0, 0, this.width, this.height)
}
if (url) {
var image = new Image()
image.onload = () => {
@ -604,6 +636,9 @@ class ImageEditor {
if (event.key == "y" && event.ctrlKey) {
this.history.redo()
}
if (event.key === "Escape") {
this.hide()
}
}
// dropper ctrl holding handler stuff
@ -682,14 +717,6 @@ class ImageEditor {
}
}
function rgbToHex(rgb) {
function componentToHex(c) {
var hex = parseInt(c).toString(16)
return hex.length == 1 ? "0" + hex : hex
}
return "#" + componentToHex(rgb.r) + componentToHex(rgb.g) + componentToHex(rgb.b)
}
const imageEditor = new ImageEditor(document.getElementById("image-editor"))
const imageInpainter = new ImageEditor(document.getElementById("image-inpainter"), true)
@ -704,3 +731,107 @@ document.getElementById("init_image_button_inpaint").addEventListener("click", (
})
img2imgUnload() // no init image when the app starts
function rgbToHex(rgb) {
function componentToHex(c) {
var hex = parseInt(c).toString(16)
return hex.length == 1 ? "0" + hex : hex
}
return "#" + componentToHex(rgb.r) + componentToHex(rgb.g) + componentToHex(rgb.b)
}
function hexToRgb(hex) {
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
return result ? {
r: parseInt(result[1], 16),
g: parseInt(result[2], 16),
b: parseInt(result[3], 16)
} : null;
}
function pixelCompare(int1, int2) {
return Math.abs(int1 - int2) < 4
}
// adapted from https://ben.akrin.com/canvas_fill/fill_04.html
function flood_fill(editor, the_canvas_context, x, y, color) {
pixel_stack = [{x:x, y:y}] ;
pixels = the_canvas_context.getImageData( 0, 0, editor.width, editor.height ) ;
var linear_cords = ( y * editor.width + x ) * 4 ;
var original_color = {r:pixels.data[linear_cords],
g:pixels.data[linear_cords+1],
b:pixels.data[linear_cords+2],
a:pixels.data[linear_cords+3]} ;
var opacity = color.a / 255;
var new_color = {
r: parseInt((color.r * opacity) + (original_color.r * (1 - opacity))),
g: parseInt((color.g * opacity) + (original_color.g * (1 - opacity))),
b: parseInt((color.b * opacity) + (original_color.b * (1 - opacity)))
}
if ((pixelCompare(new_color.r, original_color.r) &&
pixelCompare(new_color.g, original_color.g) &&
pixelCompare(new_color.b, original_color.b)))
{
return; // This color is already the color we want, so do nothing
}
var max_stack_size = editor.width * editor.height;
while( pixel_stack.length > 0 && pixel_stack.length < max_stack_size ) {
new_pixel = pixel_stack.shift() ;
x = new_pixel.x ;
y = new_pixel.y ;
linear_cords = ( y * editor.width + x ) * 4 ;
while( y-->=0 &&
(pixelCompare(pixels.data[linear_cords], original_color.r) &&
pixelCompare(pixels.data[linear_cords+1], original_color.g) &&
pixelCompare(pixels.data[linear_cords+2], original_color.b))) {
linear_cords -= editor.width * 4 ;
}
linear_cords += editor.width * 4 ;
y++ ;
var reached_left = false ;
var reached_right = false ;
while( y++<editor.height &&
(pixelCompare(pixels.data[linear_cords], original_color.r) &&
pixelCompare(pixels.data[linear_cords+1], original_color.g) &&
pixelCompare(pixels.data[linear_cords+2], original_color.b))) {
pixels.data[linear_cords] = new_color.r ;
pixels.data[linear_cords+1] = new_color.g ;
pixels.data[linear_cords+2] = new_color.b ;
pixels.data[linear_cords+3] = 255 ;
if( x>0 ) {
if( pixelCompare(pixels.data[linear_cords-4], original_color.r) &&
pixelCompare(pixels.data[linear_cords-4+1], original_color.g) &&
pixelCompare(pixels.data[linear_cords-4+2], original_color.b)) {
if( !reached_left ) {
pixel_stack.push( {x:x-1, y:y} ) ;
reached_left = true ;
}
} else if( reached_left ) {
reached_left = false ;
}
}
if( x<editor.width-1 ) {
if( pixelCompare(pixels.data[linear_cords+4], original_color.r) &&
pixelCompare(pixels.data[linear_cords+4+1], original_color.g) &&
pixelCompare(pixels.data[linear_cords+4+2], original_color.b)) {
if( !reached_right ) {
pixel_stack.push( {x:x+1,y:y} ) ;
reached_right = true ;
}
} else if( reached_right ) {
reached_right = false ;
}
}
linear_cords += editor.width * 4 ;
}
}
the_canvas_context.putImageData( pixels, 0, 0 ) ;
}
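The scanline fill above blends the brush color into the original pixel by its alpha and uses a tolerance of 4 per channel (pixelCompare) when matching the seed color. A compact Python sketch of the same idea, simplified to a plain 4-connected queue fill rather than the scanline variant, run on placeholder image data:
# Hedged, simplified sketch of the flood fill above: queue-based 4-connected fill
# over a flat RGBA byte array, reusing the tolerance-of-4 comparison idea.
from collections import deque
def pixel_compare(a, b, tol=4):
    return abs(a - b) < tol
def flood_fill(pixels, width, height, x, y, color):  # color = (r, g, b, a)
    i = (y * width + x) * 4
    orig = pixels[i:i + 3]
    opacity = color[3] / 255
    new = [int(color[c] * opacity + orig[c] * (1 - opacity)) for c in range(3)]
    if all(pixel_compare(new[c], orig[c]) for c in range(3)):
        return  # already the target color, nothing to do
    queue = deque([(x, y)])
    while queue:
        px, py = queue.popleft()
        if not (0 <= px < width and 0 <= py < height):
            continue
        j = (py * width + px) * 4
        if not all(pixel_compare(pixels[j + c], orig[c]) for c in range(3)):
            continue  # not part of the seed-colored region (or already filled)
        pixels[j:j + 3] = bytes(new)
        pixels[j + 3] = 255
        queue.extend([(px + 1, py), (px - 1, py), (px, py + 1), (px, py - 1)])
# usage with a tiny blank 8x8 canvas (placeholder data)
w = h = 8
canvas = bytearray([255, 255, 255, 255] * w * h)
flood_fill(canvas, w, h, 3, 3, (0, 0, 0, 255))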

View File

@ -104,6 +104,7 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
}
refreshTagsList()
document.dispatchEvent(new Event('refreshImageModifiers'))
})
}
})
@ -146,6 +147,7 @@ async function loadModifiers() {
}
loadCustomModifiers()
document.dispatchEvent(new Event('loadImageModifiers'))
}
function refreshModifiersState(newTags) {
@ -202,6 +204,26 @@ function refreshModifiersState(newTags) {
refreshTagsList()
}
function refreshInactiveTags(inactiveTags) {
// update inactive tags
if (inactiveTags !== undefined && inactiveTags.length > 0) {
activeTags.forEach (tag => {
if (inactiveTags.find(element => element === tag.name) !== undefined) {
tag.inactive = true
}
})
}
// update cards
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
if (inactiveTags.find(element => element === modifierName) !== undefined) {
i.parentElement.classList.add('modifier-toggle-inactive')
}
})
}
function refreshTagsList() {
editorModifierTagsList.innerHTML = ''
@ -227,6 +249,7 @@ function refreshTagsList() {
activeTags.splice(idx, 1)
refreshTagsList()
}
document.dispatchEvent(new Event('refreshImageModifiers'))
})
})

View File

@ -35,6 +35,7 @@ let samplerSelectionContainer = document.querySelector("#samplerSelection")
let useFaceCorrectionField = document.querySelector("#use_face_correction")
let useUpscalingField = document.querySelector("#use_upscale")
let upscaleModelField = document.querySelector("#upscale_model")
let upscaleAmountField = document.querySelector("#upscale_amount")
let stableDiffusionModelField = document.querySelector('#stable_diffusion_model')
let vaeModelField = document.querySelector('#vae_model')
let hypernetworkModelField = document.querySelector('#hypernetwork_model')
@ -287,6 +288,7 @@ function showImages(reqBody, res, outputContainer, livePreview) {
imageSeedLabel.innerText = 'Seed: ' + req.seed
let buttons = [
{ text: 'Remove', on_click: onRemoveClick, class: 'secondaryButton' },
{ text: 'Use as Input', on_click: onUseAsInputClick },
{ text: 'Download', on_click: onDownloadImageClick },
{ text: 'Make Similar Images', on_click: onMakeSimilarClick },
@ -304,9 +306,12 @@ function showImages(reqBody, res, outputContainer, livePreview) {
const newButton = document.createElement('button')
newButton.classList.add('tasksBtns')
newButton.innerText = btnInfo.text
newButton.addEventListener('click', function() {
btnInfo.on_click(req, img)
newButton.addEventListener('click', function(event) {
btnInfo.on_click(req, img, event)
})
if (btnInfo.class !== undefined) {
newButton.classList.add(btnInfo.class)
}
imgItemInfo.appendChild(newButton)
}
buttons.forEach(btn => {
@ -320,6 +325,10 @@ function showImages(reqBody, res, outputContainer, livePreview) {
})
}
function onRemoveClick(req, img, event) {
shiftOrConfirm(event, "Remove the image from the results?", () => { findClosestAncestor(img, '.imgItem').style.display='none' })
}
function onUseAsInputClick(req, img) {
const imgData = img.src
@ -431,7 +440,10 @@ function getUncompletedTaskEntries() {
}
function makeImage() {
performance.mark('click-makeImage')
if (typeof performance == "object" && performance.mark) {
performance.mark('click-makeImage')
}
if (!SD.isServerAvailable()) {
alert('The server is not available.')
return
@ -806,7 +818,7 @@ function createTask(task) {
taskConfig += `, <b>Fix Faces:</b> ${task.reqBody.use_face_correction}`
}
if (task.reqBody.use_upscale) {
taskConfig += `, <b>Upscale:</b> ${task.reqBody.use_upscale}`
taskConfig += `, <b>Upscale:</b> ${task.reqBody.use_upscale} (${task.reqBody.upscale_amount || 4}x)`
}
if (task.reqBody.use_hypernetwork_model) {
taskConfig += `, <b>Hypernetwork:</b> ${task.reqBody.use_hypernetwork_model}`
@ -897,7 +909,7 @@ function createTask(task) {
if (task.previewPrompt.innerText.trim() === '') {
task.previewPrompt.innerHTML = '&nbsp;' // allows the results to be collapsed
}
return taskEntry.id
}
function getCurrentUserRequest() {
@ -931,7 +943,8 @@ function getCurrentUserRequest() {
output_quality: parseInt(outputQualityField.value),
metadata_output_format: document.querySelector('#metadata_output_format').value,
original_prompt: promptField.value,
active_tags: (activeTags.map(x => x.name))
active_tags: (activeTags.map(x => x.name)),
inactive_tags: (activeTags.filter(tag => tag.inactive === true).map(x => x.name))
}
}
if (IMAGE_REGEX.test(initImagePreview.src)) {
@ -957,6 +970,7 @@ function getCurrentUserRequest() {
}
if (useUpscalingField.checked) {
newTask.reqBody.use_upscale = upscaleModelField.value
newTask.reqBody.upscale_amount = upscaleAmountField.value
}
if (hypernetworkModelField.value) {
newTask.reqBody.use_hypernetwork_model = hypernetworkModelField.value
@ -1152,8 +1166,10 @@ function onDimensionChange() {
diskPathField.disabled = !saveToDiskField.checked
upscaleModelField.disabled = !useUpscalingField.checked
upscaleAmountField.disabled = !useUpscalingField.checked
useUpscalingField.addEventListener('change', function(e) {
upscaleModelField.disabled = !this.checked
upscaleAmountField.disabled = !this.checked
})
makeImageBtn.addEventListener('click', makeImage)
@ -1290,17 +1306,23 @@ async function getModels() {
vaeOptions.unshift('') // add a None option
hypernetworkOptions.unshift('') // add a None option
function createModelOptions(modelField, selectedModel) {
return function(modelName) {
const modelOption = document.createElement('option')
modelOption.value = modelName
modelOption.innerText = modelName !== '' ? modelName : 'None'
function createModelOptions(modelField, selectedModel, path="") {
return function fn(modelName) {
if (typeof(modelName) == 'string') {
const modelOption = document.createElement('option')
modelOption.value = path + modelName
modelOption.innerHTML = modelName !== '' ? (path != "" ? "&nbsp;&nbsp;"+modelName : modelName) : 'None'
if (modelName === selectedModel) {
modelOption.selected = true
if (path + modelName === selectedModel) {
modelOption.selected = true
}
modelField.appendChild(modelOption)
} else {
const modelGroup = document.createElement('optgroup')
modelGroup.label = path + modelName[0]
modelField.appendChild(modelGroup)
modelName[1].forEach( createModelOptions(modelField, selectedModel, path + modelName[0] + "/" ) )
}
modelField.appendChild(modelOption)
}
}
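createModelOptions above now accepts either a plain model name or a [folder, children] pair and recurses with an extended path prefix, which is how nested model folders become optgroups with "folder/model" option values. A small Python sketch of the same traversal over a placeholder nested model list:
# Hedged sketch of the nested-model traversal in createModelOptions above,
# flattening [folder, children] pairs into "folder/model" paths. Data is placeholder.
def flatten_models(entries, path=""):
    options = []
    for entry in entries:
        if isinstance(entry, str):
            options.append(path + entry)       # plain model file
        else:
            folder, children = entry           # [folder_name, [children...]]
            options.extend(flatten_models(children, path + folder + "/"))
    return options
models = ["sd-v1-4", ["anime", ["modelA", "modelB"]]]
print(flatten_models(models))  # ['sd-v1-4', 'anime/modelA', 'anime/modelB']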

View File

@ -83,6 +83,7 @@ var PARAMETERS = [
type: ParameterType.checkbox,
label: "Process newest jobs first",
note: "reverse the normal processing order",
icon: "fa-arrow-down-short-wide",
default: false,
},
{
@ -100,7 +101,7 @@ var PARAMETERS = [
note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
"<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
"<b>High:</b> fastest, maximum GPU memory usage</br>" +
"<b>Low:</b> slowest, force-used for GPUs with 4 GB (or less) memory",
"<b>Low:</b> slowest, force-used for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [

View File

@ -13,8 +13,15 @@ function initTheme() {
.filter(sheet => sheet.href?.startsWith(window.location.origin))
.flatMap(sheet => Array.from(sheet.cssRules))
.forEach(rule => {
var selector = rule.selectorText; // TODO: also do selector == ":root", re-run un-set props
var selector = rule.selectorText;
if (selector && selector.startsWith(".theme-") && !selector.includes(" ")) {
if (DEFAULT_THEME) { // re-add props that dont change (css needs this so they update correctly)
Array.from(DEFAULT_THEME.rule.style)
.filter(cssVariable => !Array.from(rule.style).includes(cssVariable))
.forEach(cssVariable => {
rule.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
});
}
var theme_key = selector.substring(1);
THEMES.push({
key: theme_key,
@ -62,12 +69,6 @@ function themeFieldChanged() {
var theme = THEMES.find(t => t.key == theme_key);
let borderColor = undefined
if (theme) {
// refresh variables incase they are back referencing
Array.from(DEFAULT_THEME.rule.style)
.filter(cssVariable => !Array.from(theme.rule.style).includes(cssVariable))
.forEach(cssVariable => {
body.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
});
borderColor = theme.rule.style.getPropertyValue('--input-border-color').trim()
if (!borderColor.startsWith('#')) {
borderColor = theme.rule.style.getPropertyValue('--theme-color-fallback')

View File

@ -20,6 +20,19 @@ function getNextSibling(elem, selector) {
}
}
function findClosestAncestor(element, selector) {
if (!element || !element.parentNode) {
// reached the top of the DOM tree, return null
return null;
} else if (element.parentNode.matches(selector)) {
// found an ancestor that matches the selector, return it
return element.parentNode;
} else {
// continue searching upwards
return findClosestAncestor(element.parentNode, selector);
}
}
/* Panel Stuff */

View File

@ -74,6 +74,7 @@
// update activeTags
const tag = activeTags.splice(currentPos, 1)
activeTags.splice(droppedPos, 0, tag[0])
document.dispatchEvent(new Event('refreshImageModifiers'))
}
}
};

View File

@ -58,6 +58,7 @@
break
}
}
document.dispatchEvent(new Event('refreshImageModifiers'))
}
}
})

View File

@ -46,7 +46,7 @@
return obj;
});
console.log(activeTags)
document.dispatchEvent(new Event('refreshImageModifiers'))
}
})
}