Compare commits


295 Commits
v2.5.30 ... cf

Author SHA1 Message Date
7417c1af48 changelog 2023-06-03 10:02:34 +05:30
dd95df8f02 Refactor the default model download code, remove check_models.py, don't check in legacy paths since that's already migrated during initialization; Download CodeFormer's model only when it's used for the first time 2023-06-02 16:34:29 +05:30
0860e35d17 sdkit 1.0.101 - CodeFormer as an option to improve faces 2023-06-01 16:50:01 +05:30
32c4f10626 Merge pull request #1274 from patriceac/beta
Support for CodeFormer face restoration
2023-06-01 15:28:25 +05:30
3e90eafafb Merge branch 'cf' into beta 2023-06-01 15:27:37 +05:30
16fcb4ed79 Merge pull request #1314 from JeLuF/dndgan
Fix GFPGAN settings import
2023-05-29 15:46:28 +05:30
9be48b3fc5 Merge pull request #1317 from ogmaresca/fix-metadata-SyntaxWarning
Fix SyntaxWarning on startup
2023-05-29 10:15:29 +05:30
7830ec7ca2 Fix SyntaxWarning on startup
Fixes
```
/ssd2/easydiffusion/ui/easydiffusion/utils/save_utils.py:222: SyntaxWarning: "is not" with a literal. Did you mean "!="?
  if task_data.use_upscale is not "latent_upscaler" and "latent_upscaler_steps" in metadata:
```
2023-05-28 14:39:36 -04:00
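For context, the warning flags an identity comparison against a string literal; the conventional fix (presumably what this commit applies) is to switch to a value comparison:
```
use_upscale = "latent_upscaler"

# Before (triggers SyntaxWarning and relies on string interning):
#     if task_data.use_upscale is not "latent_upscaler" and "latent_upscaler_steps" in metadata: ...

# After (value comparison):
if use_upscale != "latent_upscaler":
    print("not using the latent upscaler")
```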
0ebf9df207 Merge pull request #1316 from JeLuF/fix1312
Fix #1312 - invert model A and B ratio in merge
2023-05-28 17:33:26 +05:30
40682405cc Merge pull request #1309 from ogmaresca/add-tiling-to-metadata
Add tiling and latent upscaler steps to metadata
2023-05-28 17:33:00 +05:30
9fdd482811 Merge pull request #1311 from patriceac/patch-3
Fix regression in restore task to UI flow
2023-05-28 17:32:26 +05:30
7202ffba6e Fix #1312 - invert model A and B ratio in merge 2023-05-28 02:36:56 +02:00
30dcc7477f Fix GFPGAN settings import
The word "None", which many txt metadata files contain as the value for the GFPGAN field, should not be treated as a model name.
If the value is None, disable the checkbox.
2023-05-28 01:43:58 +02:00
6826435046 Fix restore task to UI flow
Fixes a regression introduced by https://github.com/cmdr2/stable-diffusion-ui/pull/1304
2023-05-27 00:26:25 -07:00
69d937e0b1 Add tiling and latent upscaler steps to metadata
Also fix txt metadata labels when also embedding metadata
2023-05-26 19:51:30 -04:00
edd92b724f UniPC TU 2 isn't working with diffusers either 2023-05-26 19:47:00 +05:30
0990d8fc4d Merge pull request #1304 from JeLuF/dndfix
Remove warning when reusing settings - Fixes #1290
2023-05-26 15:24:56 +05:30
1da35e89f6 Capitalization 2023-05-25 18:38:40 +05:30
d818107953 Remove warning when reusing settings - Fixes #1290 2023-05-25 13:36:45 +02:00
b3f65c0b3c changelog 2023-05-25 15:52:05 +05:30
59c322dc3b Show seamless tiling only in diffusers mode 2023-05-25 15:41:41 +05:30
096f9ad3a6 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-25 15:39:02 +05:30
5c8965b3ab changelog 2023-05-25 15:38:49 +05:30
090f8f6070 Merge pull request #1300 from JeLuF/tile
Add seamless tiling support
2023-05-25 15:38:15 +05:30
5f4fc63645 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-25 15:37:37 +05:30
a0b3b5af53 sdkit 1.0.98 - seamless tiling 2023-05-25 15:36:27 +05:30
351dd97500 Merge pull request #1303 from cmdr2/main
Main
2023-05-25 15:05:05 +05:30
b511000441 Merge pull request #1302 from cmdr2/beta
Beta
2023-05-25 14:57:51 +05:30
523131de79 Merge pull request #1298 from JeLuF/confix
Fix confirmation dialog
2023-05-25 07:19:21 +05:30
9dfa300083 Add seamless tiling support 2023-05-25 00:16:14 +02:00
3ea74af76d Fix confirmation dialog
By splitting the confirmation function into two halves, the closure was lost
2023-05-24 19:29:54 +02:00
3d7e16cfd9 changelog 2023-05-24 16:29:58 +05:30
db265309a5 Show an explanation for why the CPU toggle is disabled; utility class for alert() and confirm() that matches the ED theme; code formatting 2023-05-24 16:24:29 +05:30
8554b0eab2 Better reporting of model load errors - sends the report to the browser UI during the next image rendering task 2023-05-24 16:02:53 +05:30
f641e6e69d Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-24 15:40:26 +05:30
30c07eab6b Cleaner reporting of errors in the UI; Suggest increasing the page size if that's the error 2023-05-24 15:30:55 +05:30
eba83386c1 make a note about a flood fill library 2023-05-24 10:08:00 +05:30
d3334f9dfa Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-23 16:55:52 +05:30
a87dca1ef4 changelog 2023-05-23 16:55:42 +05:30
2bab4341a3 Add 'Latent Upscaler' as an option in the upscaling dropdown 2023-05-23 16:53:53 +05:30
01fb2fde47 Merge pull request #1293 from JeLuF/edready
Add 'ED is ready, go to localhost:9000' msg to log
2023-05-23 15:15:08 +05:30
e93a49134a Merge pull request #1296 from cmdr2/beta
Beta
2023-05-23 15:12:27 +05:30
0127714929 Add 'ED is ready, go to localhost:9000' msg to log
Sometimes the browser window does not open (esp. on Linux and Mac).
Show a prominent message in the log so that users don't wait for hours.
2023-05-22 21:19:31 +02:00
29ec34169c Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-22 18:07:00 +05:30
d60cb61e58 sdkit 1.0.97 - flatten arguments sent to latent upscaler 2023-05-22 18:06:38 +05:30
d4582e9e6e Merge pull request #1288 from JeLuF/getconfx
get_config: return default value if conf file is corrupted
2023-05-22 16:11:05 +05:30
a84d29c49c Merge pull request #1286 from JeLuF/vramsetting
Automatically use 'Low' when VRAM<4.5GB
2023-05-22 16:10:43 +05:30
e76a91a78d Merge pull request #1287 from JeLuF/typo1
Network settings - Fix typo.
2023-05-22 16:08:38 +05:30
ea9861d180 Less min VRAM requirement 2023-05-22 16:07:50 +05:30
e48c73d277 Merge pull request #1289 from cmdr2/beta
Beta
2023-05-22 16:02:22 +05:30
0f6caaec33 get_config: return default value if conf file is corrupted 2023-05-22 10:21:19 +02:00
a5a1d33589 Fix face restoration model selection 2023-05-21 18:32:48 -07:00
cac4bd11d2 Network settings - Fix typo.
The 'Please restart' text should be part of the note, not the label
2023-05-21 17:59:06 +02:00
70a3beeaa2 Automatically use 'Low' when VRAM<4.5GB 2023-05-21 17:42:47 +02:00
566cb55f36 Merge pull request #1285 from ogmaresca/save-clip-skip-in-metadata-files
Add Clip Skip to metadata files
2023-05-20 10:44:58 +05:30
a6dbdf664b Add Clip Skip to metadata files
Also, force the properties to be in a consistent order so that, for example, LoRA strength will always be the line below LoRA model. I've rearranged the properties so that they are saved in the same order that the properties are laid out in the UI
2023-05-19 19:05:32 -04:00
bdf36a8dab sdkit 1.0.96 - missing xformers import 2023-05-19 18:36:37 +05:30
760909f495 Update CHANGES.md 2023-05-19 18:23:10 +05:30
40c9f1f51d Merge pull request #1276 from ogmaresca/ddpm_deis_samplers
Add DDPM and DEIS samplers for diffusers
2023-05-19 18:21:09 +05:30
4349c595b8 Merge branch 'beta' into ddpm_deis_samplers 2023-05-19 18:21:03 +05:30
da27fc7782 Merge pull request #1278 from JeLuF/clipskip
Add Clip Skip support
2023-05-19 18:19:45 +05:30
11e1436e2e 2 GB cards aren't exactly 2 GB 2023-05-19 17:56:20 +05:30
107323d8e7 sdkit 1.0.95 - lower vram usage for high mode 2023-05-19 17:42:47 +05:30
83557d4b3c Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-19 17:29:20 +05:30
b08e9b7982 changelog 2023-05-19 17:29:10 +05:30
415213878d sdkit 1.0.94 - vram optimizations - perform softmax in half precision 2023-05-19 17:28:54 +05:30
53b23756a4 formatting 2023-05-19 17:26:04 +05:30
063d14d2ac Allow GPUs with less than 2 GB, instead of restricting to 3 GB 2023-05-19 17:25:53 +05:30
3d99f0dd9c Merge pull request #1279 from JeLuF/pbpdev
Fail gracefully if proc access isn't possible
2023-05-19 17:07:36 +05:30
f3ce5ed279 Merge pull request #1283 from cmdr2/beta
Beta
2023-05-19 17:06:00 +05:30
b77036443f Fail gracefully if proc access isn't possible 2023-05-18 16:04:28 +02:00
00603ce124 Add Clip Skip support 2023-05-18 13:55:45 +02:00
d3dd15eb63 Fix unipc_tu 2023-05-17 21:44:28 -04:00
9d408a62bf Add DDPM and DEIS samplers for diffusers
These new samplers will be hidden when diffusers is disabled.
Also, samplers that aren't implemented in diffusers yet will be disabled when using diffusers
2023-05-17 21:13:06 -04:00
e4a7537952 Merge pull request #1273 from patriceac/patch-2
Fix error when removing image
2023-05-17 14:47:51 +05:30
4313166dbf Merge pull request #1275 from cmdr2/beta
Beta
2023-05-17 14:40:28 +05:30
a25364732b Support for CodeFormer
Depends on https://github.com/easydiffusion/sdkit/pull/34.
2023-05-17 02:04:20 -07:00
0adaf6c0a0 Merge branch 'beta' of https://github.com/patriceac/stable-diffusion-ui into beta 2023-05-16 18:20:46 -07:00
9410879b73 Fix error when removing image
Error report: https://discord.com/channels/1014774730907209781/1085803885500825600/1108150298289115187
2023-05-16 17:43:14 -07:00
1dc326cc41 Merge pull request #1270 from JeLuF/patch-26
Add GTX1630 to list of FP32 GPUs
2023-05-16 18:11:39 +05:30
1605c5fbcc changelog 2023-05-16 16:03:12 +05:30
7562a882f4 sdkit 1.0.93 - lower vram usage for balanced mode, by using attention slice of 1 2023-05-16 16:02:20 +05:30
366bc72759 Add GTX1630 to list of FP32 GPUs
https://discord.com/channels/1014774730907209781/1014774732018683926/1107677076233912340
2023-05-15 17:01:21 +02:00
2f7990b9cf Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-12 16:49:29 +05:30
45db4bb036 sdkit 1.0.92 - more vram optimizations for low,balanced,high - reduces VRAM usage by 20% (especially with larger images) 2023-05-12 16:49:13 +05:30
4a04226cc0 Merge pull request #1265 from cmdr2/beta
Beta
2023-05-12 14:50:22 +05:30
8142fd0701 Update CHANGES.md 2023-05-12 14:50:04 +05:30
7240c91db7 Update CHANGES.md 2023-05-12 14:48:48 +05:30
a9318a9ba0 Merge pull request #1264 from cmdr2/beta
Beta
2023-05-12 14:46:55 +05:30
1cba62af24 changelog 2023-05-11 16:30:32 +05:30
add05228bd sdkit 1.0.91 - use slice size 1 for low vram usage mode, to reduce VRAM usage 2023-05-11 16:30:06 +05:30
4bca739b3d changelog 2023-05-11 14:52:30 +05:30
566a83ce3f sdkit 1.0.89 - use half precision in test diffusers for low vram usage mode 2023-05-11 14:49:15 +05:30
ca19a488a8 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-10 20:21:09 +05:30
08f44472f8 changelog 2023-05-10 20:20:59 +05:30
2d1be6186e sdkit 1.0.88 - Fix LoRA in low VRAM mode 2023-05-10 20:19:17 +05:30
ca362ef78d Merge pull request #1248 from patriceac/patch-1
Fix restoration of inactive image modifiers
2023-05-08 16:19:39 +05:30
a255d74abf Merge pull request #1250 from ogmaresca/allow-dragging-image-modal
Allow grabbing the image to scroll zoomed in images
2023-05-08 16:19:18 +05:30
fec2140896 Allow grabbing the image to scroll zoomed in images 2023-05-04 19:36:10 -04:00
3f9ec378a0 Fix restoration of inactive image modifiers 2023-05-04 09:16:19 -07:00
64cfd55065 sdkit 1.0.87 - typo 2023-05-04 16:31:40 +05:30
f9cfe1da45 sdkit 1.0.86 - don't use cpu offload for mps/mac, doesn't make sense since the memory is shared between GPU/CPU 2023-05-04 16:09:28 +05:30
b27a14b1b4 sdkit 1.0.85 - torch.Generator fix for mps/mac 2023-05-04 16:04:45 +05:30
4dbdb802b6 Merge pull request #1243 from cmdr2/beta
Beta
2023-05-04 10:14:35 +05:30
cbd74e7510 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-04 10:12:15 +05:30
8e416cef25 Disable self test link 2023-05-04 10:12:06 +05:30
ae9afab6c1 Merge pull request #1242 from JeLuF/fix_hyper
Default value for hypernetworkStrength
2023-05-04 10:07:56 +05:30
06c990e94d Default value for hypernetworkStrength
Don't fail when the Hypernetwork Strength text input field is empty
2023-05-04 00:05:11 +02:00
4d31078579 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-03 18:47:18 +05:30
01c7712961 Increase task timeout from 15 mins to 30 mins 2023-05-03 18:47:09 +05:30
c18bf3e413 Update CHANGES.md 2023-05-03 18:18:18 +05:30
5c95bcc65d changelog 2023-05-03 16:13:57 +05:30
75f0780bd1 sdkit 1.0.84 - VRAM optimizations for the diffusers version 2023-05-03 16:12:11 +05:30
843d22d0d4 Update README.md 2023-05-03 14:53:18 +05:30
33a49a57e6 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-05-02 18:26:38 +05:30
eaba64a64a Log device usage stats during thread startup 2023-05-02 18:26:29 +05:30
679b828cf5 Toast notification support (#1228)
* Toast notifications for ED

Adding support for toast notifications for use in Core and user plugins.

* Revert "Toast notifications for ED"

This reverts commit dde51c0cef.

* Toast notifications for ED

Adding support for toast notifications for use in Core and user plugins.
2023-05-02 16:00:16 +05:30
d231c533ae "Please restart" note for network settings (#1233)
* "Please restart" note for network changes

https://discord.com/channels/1014774730907209781/1101629831839494344

* typo
2023-05-02 15:59:02 +05:30
5a9e74cef7 Update PRIVACY.md 2023-05-02 11:05:58 +05:30
49599dc3ba Update PRIVACY.md 2023-05-02 11:03:09 +05:30
c1e5c8dc86 Update PRIVACY.md 2023-05-02 11:00:57 +05:30
35d36f9eb3 Update PRIVACY.md 2023-05-02 10:53:17 +05:30
2b8c199e56 Create PRIVACY.md 2023-05-02 10:52:58 +05:30
654749de40 Revert "Toast notifications for ED"
This reverts commit dde51c0cef.
2023-04-29 19:26:11 -07:00
dde51c0cef Toast notifications for ED
Adding support for toast notifications for use in Core and user plugins.
2023-04-29 19:25:10 -07:00
729f7eb24a Remove prettier github action 2023-04-29 08:18:36 +05:30
51e067b050 Try using the pinned version of prettier 2023-04-29 08:04:15 +05:30
4dc2a96d41 Print the diff 2023-04-29 07:55:11 +05:30
1b40a6baa3 Print the diff 2023-04-29 07:54:14 +05:30
50fdc32ff8 Print the diff 2023-04-29 07:44:38 +05:30
e7fd0b3a05 Print the diff 2023-04-29 07:42:10 +05:30
20d6e17d4d Print the diff 2023-04-29 07:40:24 +05:30
262a1464c3 Print the diff 2023-04-29 07:38:11 +05:30
31c54c4a41 Print the diff 2023-04-29 07:34:13 +05:30
6cf05df5ee Add the config explicitly 2023-04-29 07:31:09 +05:30
f90a13571c typo 2023-04-29 07:25:20 +05:30
a6fe023519 Try using a third-party action for prettier 2023-04-29 07:23:54 +05:30
e550b15094 Use the prettier config file 2023-04-29 07:14:42 +05:30
0ebad77083 One more try 2023-04-29 07:09:57 +05:30
3100fae118 Delete prettier.yml 2023-04-29 07:05:50 +05:30
01202c5c2e Merge pull request #1224 from JeLuF/patch-23
Disable "TypedStorage is deprecated" user warnings
2023-04-29 07:04:46 +05:30
ae52d9ef22 Disable "TypedStorage is deprecated" user warnings
These warnings clog up the logfiles and worry users.
2023-04-28 22:52:21 +02:00
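A minimal sketch of one way to silence these warnings (the actual patch may differ), using Python's standard warnings filter:
```
import warnings

# Hide PyTorch's "TypedStorage is deprecated" UserWarnings,
# which otherwise clog the log files and worry users.
warnings.filterwarnings("ignore", message=".*TypedStorage is deprecated.*", category=UserWarning)
```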
70a37fda57 prettier 2023-04-28 18:15:57 +05:30
da3f894ed4 another 2023-04-28 18:13:38 +05:30
2e362d57eb Adjust prettier config 2023-04-28 18:10:58 +05:30
03fedfd0d5 fix formatting 2023-04-28 18:07:43 +05:30
039395f221 Fix formatting 2023-04-28 17:57:26 +05:30
1381be16ad Fix formatting 2023-04-28 17:55:03 +05:30
9af511e457 Check prettier style in a github action 2023-04-28 16:42:38 +05:30
d18cefc519 Formatting 2023-04-28 16:38:55 +05:30
07f52c38ef sdkit 1.0.83 - formatting 2023-04-28 16:35:30 +05:30
a46ff731d8 sdkit 1.0.82 - VAE slicing for pytorch 2.0, don't fail to hash files smaller than 3 MB 2023-04-28 16:03:35 +05:30
469585ddda Use ES5 style trailing commas, to avoid unnecessary lines during code diffs 2023-04-28 15:50:44 +05:30
3000e53cc0 Show suggestions for handling system RAM exhaustion 2023-04-28 15:38:52 +05:30
db0722aca7 Allow ES5 style trailing commas in prettier 2023-04-28 15:32:42 +05:30
af0058d2aa Merge pull request #1219 from lucasmarcelli/prettier-beta-take-two
add prettier for JS style
2023-04-28 15:16:32 +05:30
aad1afb70e add prettier for JS style 2023-04-27 13:56:56 -04:00
228a5c4552 Merge pull request #1217 from cmdr2/beta
Beta
2023-04-27 15:58:55 +05:30
dc43eb29e1 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-27 15:58:17 +05:30
400cb218ba Don't override net config if env variables don't exist 2023-04-27 15:58:06 +05:30
0fbe3cfb8f Merge pull request #1216 from cmdr2/main
Main
2023-04-27 15:50:26 +05:30
9a01e917c6 Merge pull request #1215 from cmdr2/beta
Beta
2023-04-27 15:50:06 +05:30
e01d68fce3 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-27 15:48:51 +05:30
60f2f5ea19 changelog 2023-04-27 15:48:40 +05:30
2333beda5f Hardware req 2023-04-27 15:40:39 +05:30
8174f94172 Merge pull request #1207 from cmdr2/main
Main
2023-04-26 16:41:12 +05:30
6a6ea5009a Merge pull request #1182 from JeLuF/get_config
Don't write config.bat and config.sh any more
2023-04-26 16:35:52 +05:30
24d0e7566f Copy get_config.py in on_sd_start for the first run, when on_env_start hasn't yet been updated 2023-04-26 16:34:27 +05:30
fe8c208e7c Copy get_config.py in on_sd_start for the first run, when on_env_start hasn't yet been updated 2023-04-26 16:33:43 +05:30
ba7a49e834 Merge pull request #1204 from JeLuF/patch-22
Don't use python packages from the user's home directory
2023-04-26 16:29:00 +05:30
216323fcf4 No longer close to the fastest, the arms race continues 2023-04-26 16:25:42 +05:30
fb18c93bd6 Suppress debug log 2023-04-26 16:25:02 +05:30
382dee1fd1 Merge pull request #1116 from ogmaresca/createTab-utility-function
Add a utility function to create tabs with lazy loading functionality
2023-04-26 16:21:51 +05:30
9399fb5371 Don't use python packages from the user's home directory
PYTHONNOUSERSITE is required to ignore packages installed to `/home/user/.local/`. Since these folders are outside of our control, they can cause conflicts in ED's python env.

https://discord.com/channels/1014774730907209781/1100375010650103808

Fixes #1193
2023-04-25 21:02:36 +02:00
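A quick way to verify the effect from inside ED's environment (assuming the launcher exported `PYTHONNOUSERSITE` before starting Python):
```
import site
import sys

# With PYTHONNOUSERSITE set, the user site directory (e.g. ~/.local/lib/pythonX.Y/site-packages)
# is disabled, so packages installed there can no longer shadow ED's bundled packages.
print("user site enabled:", site.ENABLE_USER_SITE)                  # expected: False
print("user-site paths:", [p for p in sys.path if ".local" in p])   # expected: []
```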
92d8dfe963 Merge pull request #1198 from cmdr2/revert-1197-revert-1195-patch-21
Revert "Revert "Stop messing with %USERPROFILE%""
2023-04-24 14:32:37 +05:30
3ae851ab1f Revert "Revert "Stop messing with %USERPROFILE%"" 2023-04-24 14:32:18 +05:30
fb1e3de3c7 Merge pull request #1197 from cmdr2/revert-1195-patch-21
Revert "Stop messing with %USERPROFILE%"
2023-04-24 14:31:14 +05:30
6fbb24ae3d Revert "Stop messing with %USERPROFILE%" 2023-04-24 14:30:52 +05:30
943776dd14 Merge pull request #1195 from JeLuF/patch-21
Stop messing with %USERPROFILE%
2023-04-24 14:30:26 +05:30
bb607927d0 Stop messing with %USERPROFILE%
Set HF_HOME, so that the models don't get downloaded again.
2023-04-23 12:54:20 +02:00
ce95072845 Update README.md 2023-04-22 19:53:13 +05:30
36344732ac Update README.md 2023-04-22 19:47:19 +05:30
1f4e4d8d82 change 2023-04-22 19:43:15 +05:30
4ef10222e1 changelog 2023-04-22 15:48:03 +05:30
d7b91db204 changelog 2023-04-22 15:47:26 +05:30
5acf5949a6 sdkit 1.0.81 - use tf32 = True for ampere GPUs 2023-04-22 15:42:24 +05:30
6b075256e8 Merge pull request #1190 from cmdr2/beta
Force mac to downgrade from torch 2.0
2023-04-22 14:56:30 +05:30
3d740555c3 Force mac to downgrade from torch 2.0 2023-04-22 14:54:52 +05:30
49b2fc5b33 Merge pull request #1189 from cmdr2/beta
Keep the task alive during step callbacks. Thanks Madrang
2023-04-22 14:35:57 +05:30
f7235cf82c Keep the task alive during step callbacks. Thanks Madrang 2023-04-21 20:59:14 +05:30
56dbddd472 Merge pull request #1186 from cmdr2/beta
Beta
2023-04-21 19:12:08 +05:30
eb16296873 Restrict AMD cards on Linux to torch 1.13.1 and ROCm 5.2. Avoids black images on some AMD cards. Temp hack until AMD works properly on torch 2.0 2023-04-21 19:08:51 +05:30
1967299417 Download GFPGAN 1.4 by default on new Windows installations (NSIS) 2023-04-21 16:21:24 +05:30
c7d8164c48 Merge pull request #1184 from cmdr2/beta
Don't copy bootstrap.bat unnecessarily
2023-04-21 16:10:02 +05:30
1864921d1d Don't copy bootstrap.bat unnecessarily 2023-04-21 16:09:32 +05:30
75bdb214c7 Merge pull request #1183 from cmdr2/beta
Install PyTorch 2.0 by default on new installations
2023-04-21 16:03:10 +05:30
0b19adba75 changelog 2023-04-21 16:01:24 +05:30
2e84a421f3 Show sdkit installation progress during the first run 2023-04-21 15:49:38 +05:30
fea77e97a0 actually fix the img2img error in the new diffusers version 2023-04-21 15:26:14 +05:30
e1b6cc2a86 typo 2023-04-21 15:13:29 +05:30
0921573644 sdkit 1.0.78 - fix the 'astype' error with the new diffusers version 2023-04-21 15:11:26 +05:30
5eec05c0c4 Don't write config.bat and config.sh any more 2023-04-21 00:09:27 +02:00
526fc989c1 Allow any version of torch/torchvision 2023-04-20 18:40:45 +05:30
023b78d1c9 Allow rocm5.2 2023-04-20 17:46:34 +05:30
670410b539 sdkit 1.0.77 - fix inpainting bug on diffusers 2023-04-20 17:42:12 +05:30
76e379d7e1 Don't install xformers, it downgrades the torch version. Still need to fix this 2023-04-20 17:07:10 +05:30
cde57109e4 Revert "Fetch release notes only from the main or beta branches"
This reverts commit bc142c9ecd.
2023-04-20 16:57:52 +05:30
bc142c9ecd Fetch release notes only from the main or beta branches 2023-04-20 16:55:34 +05:30
6c148f1791 Don't install xformers for AMD on Linux; changelog 2023-04-20 16:48:38 +05:30
534bb2dd84 Use xformers 0.0.16 to speed up image generation 2023-04-20 16:44:06 +05:30
d0f4476ba5 Suggest downloading a model in the troubleshooting steps. Thanks JeLuf 2023-04-20 16:22:42 +05:30
6287bcd00a sdkit 1.0.76 - use 256 as the tile size for realesrgan, instead of 128. slightly more VRAM, but faster upscaling 2023-04-20 16:17:27 +05:30
fcbcb7d471 changelog 2023-04-19 16:46:34 +05:30
cb527919a2 sdkit 1.0.75 - upgrade to diffusers 0.15.1 2023-04-19 16:45:28 +05:30
83c34ea52f Remove unnecessary hotfix 2023-04-19 16:31:04 +05:30
35c75115de Log errors during module and model initialization 2023-04-19 16:20:08 +05:30
7c75a61700 Typo 2023-04-19 16:15:15 +05:30
34ea49147c Update the check_models.py script during startup 2023-04-19 16:13:29 +05:30
c1e8637a9f Re-implement the code for downloading models in python. Save some eyeballs from bleeding 2023-04-19 16:11:16 +05:30
becbef4fac Include ROCm in the list of allowed versions 2023-04-18 17:36:52 +05:30
f22ecc454a Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-18 17:14:44 +05:30
bf3df097b8 Don't use ROCm on Linux if an NVIDIA card is present 2023-04-18 17:14:24 +05:30
7fc2ed28b1 Merge pull request #1166 from JeLuF/not_yet
Don't save model_path if initial load fails
2023-04-18 16:34:12 +05:30
30a133bad9 Allow torch 1.11 to continue being installed 2023-04-18 16:10:46 +05:30
d8d44c579c Typo 2023-04-18 15:43:56 +05:30
80384e6ee1 Install PyTorch 2.0 by default, but allow existing PyTorch 1.13.1 installations to continue running; Unify and streamline the installation of dependencies 2023-04-18 15:42:33 +05:30
0898f98355 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-18 15:05:53 +05:30
e7dc41e271 Automatic AMD GPU detection on Linux (#1078)
* Automatic AMD GPU detection on Linux

Automatically detects AMD GPUs and installs the ROCm version of PyTorch instead of the cuda one

A later improvement may be to detect the GPU ROCm version and handle GPUs that don't work on upstream ROCm, either because they're too old and need a special patched version, or too new and need `HSA_OVERRIDE_GFX_VERSION=10.3.0` added; possibly check through `rocminfo`?

* Address stdout suppression and download failure

* If any NVIDIA GPU is found, always use it

* Use /proc/bus/pci/devices to detect GPUs

* Fix comparisons

`-eq` and `-ne` only work for numbers

* Add back -q

---------

Co-authored-by: JeLuF <jf@mormo.org>
2023-04-18 15:02:39 +05:30
0f0f475241 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-18 14:44:23 +05:30
127ee68486 Merge pull request #1171 from JeLuF/p0417
Don't download 1.4 if other models are available
2023-04-18 14:43:45 +05:30
b204b02b05 Merge pull request #1173 from JeLuF/patch-20
Add "Start scanning..." to getModels()
2023-04-18 14:41:09 +05:30
893b6d985c Add "Start scanning..." to getModels()
Provide a hint to users about what ED is currently doing.
Use case: a user has built an infinite loop using symlinks, so the ED model scan will never finish.

https://discord.com/channels/1014774730907209781/1097764777553580092
2023-04-18 09:12:26 +02:00
44824fb5f9 Don't download 1.4 if other models are available 2023-04-17 23:22:44 +02:00
dc21cbe59d Typo 2023-04-17 16:25:49 +05:30
e16d9f4742 Merge branch 'torch2' into beta 2023-04-17 16:24:41 +05:30
f2b5843e6c merge beta 2023-04-17 15:50:51 +05:30
16229caa8e Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-17 15:43:18 +05:30
0c0525e11b sdkit 1.0.72 - use the extra attn precision yaml code for diffusers, which doesn't auto-detect black images yet 2023-04-17 15:43:08 +05:30
11517b0969 Merge pull request #1168 from cmdr2/beta
Tiled upscaling for better VRAM usage, auto-detect black images and try fp32 attention precision, and full precision if that fails too
2023-04-17 15:11:41 +05:30
1ba3a139d9 Don't save model_path if initial load fails
Fixes #882

If the load of the model fails during the initialization, an attempt to render an
image using the same model fails because ED doesn't notice that the model has to
be loaded. This PR ensures that the model is being reloaded if the initial load
fails. If the second load attempt fails as well, the user will get a more helpful
error message than 'model not loaded yet'.
2023-04-16 21:59:29 +02:00
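A minimal, hypothetical sketch of the retry-on-demand idea described above (the names and structure are illustrative, not the actual Easy Diffusion code):
```
from typing import Any, Callable, Optional

class ModelHolder:
    """Hypothetical sketch: remember a model path only after it loads successfully."""

    def __init__(self, loader: Callable[[str], Any]):
        self.loader = loader              # e.g. a function that loads weights and raises on failure
        self.loaded_path: Optional[str] = None
        self.model: Any = None

    def load(self, path: str) -> Any:
        self.model = self.loader(path)    # may raise; nothing is recorded in that case
        self.loaded_path = path           # recorded only after a successful load
        return self.model

    def get_model(self, path: str) -> Any:
        # If the initial load failed, loaded_path is still None, so a render request
        # retries the load and surfaces the real error, instead of failing later
        # with "model not loaded yet".
        if self.loaded_path != path:
            return self.load(path)
        return self.model
```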
80bcfabc48 Upgrade to PyTorch 2.0; Doesn't use a special repo url for pytorch on Linux 2023-04-14 17:32:27 +05:30
4192f87d6b Don't scan safetensors files in load_default_models() (#1155)
* Don't scan safetensors when loading them

* Don't scan safetensors files

* Update model_manager.py

---------

Co-authored-by: cmdr2 <shashank.shekhar.global@gmail.com>
2023-04-14 17:11:35 +05:30
03c8a0fca5 sdkit 1.0.70 - use plms for warming up the model, avoiding any non-deterministic effects from the default ancestral sampler 2023-04-12 15:31:10 +05:30
a3d2c71ed6 sdkit 1.0.69 - allow loading models without vae weights in diffusers 2023-04-11 15:34:44 +05:30
8ba0b34853 Log the stack trace when the model fails to load 2023-04-11 15:13:41 +05:30
424ec40fa5 sdkit 1.0.68 - fix brainfade 2023-04-11 12:00:03 +05:30
210429a259 sdkit 1.0.67 - detect black-images on model load and use fp32 attention precision or full precision if needed 2023-04-10 17:59:14 +05:30
df806d5dfa changelog 2023-04-10 16:31:58 +05:30
b07046f6a2 gc between rendering images and applying filters 2023-04-10 16:31:24 +05:30
4cdb8a7d2a sdkit 1.0.66 - lower VRAM usage for realesrgan upscaling 2023-04-10 16:29:30 +05:30
2f83f2bd48 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-10 15:56:07 +05:30
8a6cf3cfae Remove redundant hotfix 2023-04-10 15:55:55 +05:30
514e40569e Merge pull request #1138 from patriceac/patch-65
Properly reset LoRA selection from Use Settings
2023-04-10 10:54:12 +05:30
f30f98abd8 Merge pull request #1143 from patriceac/patch-67
Fix the tooltip display for long modifiers
2023-04-10 10:53:46 +05:30
c611d26306 Merge pull request #1144 from patriceac/patch-68
Fix the styling of disabled image modifiers
2023-04-10 10:53:06 +05:30
9ee38d0b70 Fix the styling of disabled image modifiers
Long custom modifiers in a disabled state (e.g. right-click on the image tag) are properly restored as disabled but are incorrectly shown as "active" in the UI. I somehow missed that in https://github.com/cmdr2/stable-diffusion-ui/pull/1062, so here is the fix for that. This change only applies to plugins that try to restore image modifiers, no change to the regular UI.
2023-04-09 18:38:36 -07:00
5e45f37232 Fix the tooltip display for long modifiers 2023-04-09 15:33:05 -07:00
4f2df2d188 Properly reset LoRA selection from Use Settings 2023-04-08 18:19:03 -07:00
ae470e35c8 Merge pull request #1137 from cmdr2/beta
Beta
2023-04-08 20:16:31 +05:30
0f4b62cb97 Hotfix - apply the config overrides to the Settings UI *after* the default config-apply function, not before it 2023-04-08 20:13:44 +05:30
c086098af1 Update README.md 2023-04-08 11:11:25 +05:30
4c7b4c7592 Merge pull request #1135 from JeLuF/ctrlv
Don't paste empty prompts
2023-04-08 09:01:07 +05:30
9e244f758c Don't paste empty prompts
When pasting e.g. an image, `window.clipboardData.getData('text')` returns an empty string, which would delete the prompt.
https://discord.com/channels/1014774730907209781/1093186485563424838
2023-04-07 22:24:35 +02:00
e7c0b9bd76 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-07 15:52:02 +05:30
d7317e8252 sdkit 1.0.65 - upgrade compel to 1.0.5 2023-04-07 15:51:44 +05:30
36a187d3c5 Merge pull request #1133 from cmdr2/beta
Beta
2023-04-07 15:12:53 +05:30
a6ec401440 Merge pull request #1130 from JeLuF/callback
fix filename_format for 'show only upscaled'==false
2023-04-07 15:12:33 +05:30
7a2048b2cb Merge pull request #1132 from cmdr2/beta
Beta
2023-04-07 15:08:25 +05:30
37082ad430 Merge pull request #1131 from cmdr2/main
Main
2023-04-07 15:04:17 +05:30
c571521e87 Merge pull request #1129 from JeLuF/font.css
fix font definition for some users
2023-04-07 12:42:11 +05:30
c438cd47b9 fix filename_format for 'show only upscaled'
https://discord.com/channels/1014774730907209781/1093703857516843019
2023-04-07 09:08:46 +02:00
3ce6c3dc61 fix font definition for some users
I don't know why it breaks only for a few users, but this patch seems to fix the issue.
https://discord.com/channels/1014774730907209781/1081867905907572746
https://discord.com/channels/1014774730907209781/1093215237307637901/1093223612615491604
2023-04-07 08:46:19 +02:00
17f60d3c7f Disable ding sound when the UI loads. It'll now only play upon task completion 2023-04-07 09:56:07 +05:30
7d4d85284b Disable ding sound when the UI loads. It'll now only play upon task completion 2023-04-07 09:55:30 +05:30
9c091a9edf Fix JSDoc 2023-04-06 16:37:17 -04:00
60ca5641ae Merge remote-tracking branch 'origin/beta' into createTab-utility-function 2023-04-06 16:36:25 -04:00
c115a9aa3d changelog 2023-04-06 16:39:20 +05:30
6529240808 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-04-06 16:21:14 +05:30
0d570b3fae version 2023-04-06 16:21:07 +05:30
0778078350 Merge pull request #1087 from ogmaresca/custom-folder-filename-formats-2
Allow loading/saving app.config from plugins and support custom folder/filename formats from app.config
2023-04-06 16:19:59 +05:30
888dc05cde Merge pull request #1107 from ogmaresca/save-lora-strength-in-metadata
Add LoRA Strength to metadata files
2023-04-06 15:47:23 +05:30
687da5b64a Undo UI cleanup (#1106)
* Moving to InvokeAI attention weighting syntax

* Fix restoration of disabled image tags

Fix the restoration of inactive image tags.

* Undo feature UX cleanup

Just show the undo button when there's no task for a more consistent UI.

* cleanup code

* Revert "cleanup code"

This reverts commit 03199c5a4f.

* Update image-modifiers.js

* Update image-modifiers.js
2023-04-06 15:42:48 +05:30
5b00d54c76 Merge pull request #1108 from ogmaresca/increase-random-seed-range
Increase the random seed range
2023-04-06 15:39:43 +05:30
38b4a7856e Merge pull request #1109 from patriceac/patch-64
Reset the LoRA dropdown if not present in the task
2023-04-06 15:39:16 +05:30
4f899bd83d Merge pull request #1112 from DianaNites/patch-2
Persistentish user configuration
2023-04-06 15:32:42 +05:30
7f80f7c46b Merge pull request #1117 from ogmaresca/allow-switching-between-previewed-images
Add back/forward buttons to switch between images in tasks
2023-04-06 15:28:38 +05:30
88d415d3f9 Add back/forward buttons to switch between images in tasks 2023-04-03 19:35:37 -04:00
3fc93e2c57 Add a utility function to create tabs with lazy loading functionality 2023-04-03 17:39:34 -04:00
ec0b08e4d0 user configuration 2023-04-02 09:16:33 -07:00
6aa048e3ad Reset the LoRA dropdown if not present in the task 2023-04-02 02:04:03 -07:00
bc711414a8 Increase the random seed range 2023-04-01 21:26:32 -04:00
07b467e4bc Add LoRA Strength to metadata files 2023-04-01 11:42:20 -04:00
afc18619db Remove console.log 2023-03-31 17:26:16 -04:00
fe635db3ab Remove log.infos 2023-03-29 21:02:35 -04:00
7e53eb658c Allow loading/saving app.config from plugins and support custom folder/filename formats from app.config 2023-03-29 20:56:24 -04:00
55 changed files with 15083 additions and 14786 deletions

.gitignore (1 line changed)

@ -3,3 +3,4 @@ installer
installer.tar
dist
.idea/*
node_modules/*

.prettierignore (new file, 9 lines)

@ -0,0 +1,9 @@
*.min.*
*.py
*.json
*.html
/*
!/ui
/ui/easydiffusion
!/ui/plugins
!/ui/media

.prettierrc.json (new file, 7 lines)

@ -0,0 +1,7 @@
{
    "printWidth": 120,
    "tabWidth": 4,
    "semi": false,
    "arrowParens": "always",
    "trailingComma": "es5"
}

CHANGES.md

@ -2,8 +2,9 @@
## v2.5
### Major Changes
- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Nearly twice as fast** - significantly faster speed of image generation. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Mac M1/M2 support** - Experimental support for Mac M1/M2. Thanks @michaelgallacher, @JeLuf and vishae.
- **AMD support for Linux** - Experimental support for AMD GPUs on Linux. Thanks @DianaNites and @JeLuf.
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use Stable Diffusion 2.1 models, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, the SD 2.0 and 2.1 models require more GPU and System RAM, as compared to the SD 1.4 and 1.5 models.
- **11 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps! We've added the Karras and UniPC samplers. Thanks @Schorny for the UniPC samplers.
@ -21,6 +22,26 @@
Our focus remains on an easy installation experience and an easy user interface, while still being pretty powerful in terms of features and speed.
### Detailed changelog
* 2.5.40 - 3 Jun 2023 - Added CodeFormer as another option for fixing faces and eyes. CodeFormer tends to perform better than GFPGAN for many images. Thanks @patriceac for the implementation, and for contacting the CodeFormer team (who were supportive of it being integrated into Easy Diffusion).
* 2.5.39 - 25 May 2023 - (beta-only) Seamless Tiling - make seamlessly tiled images, e.g. rock and grass textures. Thanks @JeLuf.
* 2.5.38 - 24 May 2023 - Better reporting of errors, and show an explanation if the user cannot disable the "Use CPU" setting.
* 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model.
* 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
* 2.5.37 - 19 May 2023 - (beta-only) Support CLIP-Skip. You can set this option under the models dropdown. Thanks @JeLuf.
* 2.5.37 - 19 May 2023 - (beta-only) More VRAM optimizations for all modes in diffusers. The VRAM usage for diffusers in "low" and "balanced" should now be equal or less than the non-diffusers version. Performs softmax in half precision, like sdkit does.
* 2.5.36 - 16 May 2023 - (beta-only) More VRAM optimizations for "balanced" VRAM usage mode.
* 2.5.36 - 11 May 2023 - (beta-only) More VRAM optimizations for "low" VRAM usage mode.
* 2.5.36 - 10 May 2023 - (beta-only) Bug fix for "meta" error when using a LoRA in 'low' VRAM usage mode.
* 2.5.35 - 8 May 2023 - Allow dragging a zoomed-in image (after opening an image with the "expand" button). Thanks @ogmaresca.
* 2.5.35 - 3 May 2023 - (beta-only) First round of VRAM Optimizations for the "Test Diffusers" version. This change significantly reduces the amount of VRAM used by the diffusers version during image generation. The VRAM usage is still not equal to the "non-diffusers" version, but more optimizations are coming soon.
* 2.5.34 - 22 Apr 2023 - Don't start the browser in an incognito new profile (on Windows). Thanks @JeLuf.
* 2.5.33 - 21 Apr 2023 - Install PyTorch 2.0 on new installations (on Windows and Linux).
* 2.5.32 - 19 Apr 2023 - Automatically check for black images, and set full-precision if necessary (for attn). This means custom models based on Stable Diffusion v2.1 will just work, without needing special command-line arguments or editing of yaml config files.
* 2.5.32 - 18 Apr 2023 - Automatic support for AMD graphics cards on Linux. Thanks @DianaNites and @JeLuf.
* 2.5.31 - 10 Apr 2023 - Reduce VRAM usage while upscaling.
* 2.5.31 - 6 Apr 2023 - Allow seeds up to `4,294,967,295`. Thanks @ogmaresca.
* 2.5.31 - 6 Apr 2023 - Buttons to show the previous/next image in the image popup. Thanks @ogmaresca.
* 2.5.30 - 5 Apr 2023 - Fix a bug where the JPEG image quality wasn't being respected when embedding the metadata into it. Thanks @JeLuf.
* 2.5.30 - 1 Apr 2023 - (beta-only) Slider to control the strength of the LoRA model.
* 2.5.30 - 28 Mar 2023 - Refactor task entry config to use a generating method. Added ability for plugins to easily add to this. Removed confusing sentence from `contributing.md`
* 2.5.30 - 28 Mar 2023 - Allow the user to undo the deletion of tasks or images, instead of showing a pop-up each time. The new `Undo` button will be present at the top of the UI. Thanks @JeLuf.

NSIS installer script

@ -235,7 +235,7 @@ Section "MainSection" SEC01
NScurl::http get "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt" "$INSTDIR\models\stable-diffusion\sd-v1-4.ckpt" /CANCEL /INSIST /END
DetailPrint 'Downloading the GFPGAN model...'
NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth" "$INSTDIR\models\gfpgan\GFPGANv1.3.pth" /CANCEL /INSIST /END
NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" "$INSTDIR\models\gfpgan\GFPGANv1.4.pth" /CANCEL /INSIST /END
DetailPrint 'Downloading the RealESRGAN_x4plus model...'
NScurl::http get "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth" "$INSTDIR\models\realesrgan\RealESRGAN_x4plus.pth" /CANCEL /INSIST /END

PRIVACY.md (new file, 9 lines)

@ -0,0 +1,9 @@
// Placeholder until a more formal and legal-sounding privacy policy document is written, but the information below is true.
This is a summary of whether Easy Diffusion uses your data or tracks you:
* The short answer is - Easy Diffusion does *not* use your data, and does *not* track you.
* Easy Diffusion does not send your prompts or usage or analytics to anyone. There is no tracking. We don't even know how many people use Easy Diffusion, let alone their prompts.
* Easy Diffusion fetches updates to the code whenever it starts up. It does this by contacting GitHub directly, via SSL (secure connection). Only your computer, GitHub, and [this repository](https://github.com/cmdr2/stable-diffusion-ui) are involved, and no third party is involved. Some countries intercept SSL connections; that's not something we can do much about. GitHub does *not* share statistics (even with me) about how many people fetched code updates.
* Easy Diffusion fetches the models from huggingface.co and github.com, if they don't exist on your PC. For example, if the safety checker (NSFW) model doesn't exist, it'll try to download it.
* Easy Diffusion fetches code packages from pypi.org, which is the standard hosting service for all Python projects. That's where packages installed via `pip install` are stored.
* Occasionally, antivirus software is known to *incorrectly* flag and delete some model files, which will result in Easy Diffusion re-downloading `pytorch_model.bin`. This *incorrect deletion* affects other Stable Diffusion UIs as well, like Invoke AI - https://itch.io/post/7509488

README.md

@ -1,5 +1,5 @@
# Easy Diffusion 2.5
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer.
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your computer.
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
@ -16,6 +16,13 @@ Click the download button for your operating system:
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.24/Easy-Diffusion-Mac.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-mac.png" width="200" /></a>
</p>
**Hardware requirements:**
- **Windows:** NVIDIA graphics card (minimum 2 GB RAM), or run on your CPU.
- **Linux:** NVIDIA or AMD graphics card (minimum 2 GB RAM), or run on your CPU.
- **Mac:** M1 or M2, or run on your CPU.
- Minimum 8 GB of system RAM.
- At least 25 GB of space on the hard disk.
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
## On Windows:
@ -53,7 +60,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
### Image generation
- **Supports**: "*Text to Image*" and "*Image to Image*".
- **19 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
- **21 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `ddpm`, `deis`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
- **In-Painting**: Specify areas of your image to paint into.
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
- **Face Correction (GFPGAN)**
@ -79,7 +86,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
### Performance and security
- **Fast**: Creates a 512x512 image with euler_a in 5 seconds, on an NVIDIA 3060 12GB.
- **Low Memory Usage**: Create 512x512 images with less than 3 GB of GPU RAM, and 768x768 images with less than 4 GB of GPU RAM!
- **Low Memory Usage**: Create 512x512 images with less than 2 GB of GPU RAM, and 768x768 images with less than 3 GB of GPU RAM!
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
- **Multi-GPU support**: Automatically spreads your tasks across multiple GPUs (if available), for faster performance!
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
@ -108,14 +115,6 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
![Screenshot of task queue](https://user-images.githubusercontent.com/844287/217043984-0b35f73b-1318-47cb-9eed-a2a91b430490.png)
# System Requirements
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
2. An NVIDIA graphics card, preferably with 4GB or more of VRAM. If you don't have a compatible graphics card, it'll automatically run in the slower "CPU Mode".
3. Minimum 8 GB of RAM and 25GB of disk space.
You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.
----
# How to use?

package.json (new file, 9 lines)

@ -0,0 +1,9 @@
{
    "scripts": {
        "prettier-fix": "npx prettier --write \"./**/*.js\"",
        "prettier-check": "npx prettier --check \"./**/*.js\""
    },
    "devDependencies": {
        "prettier": "^1.19.1"
    }
}
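Usage note: the two entries above can be run as `npm run prettier-check` and `npm run prettier-fix`; both invoke `npx prettier` on all JS files outside the paths listed in `.prettierignore`.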

scripts/check_modules.py

@ -1,13 +1,158 @@
Removed (the old script, which only checked that the given modules exist):

'''
This script checks if the given modules exist
'''

import sys
import pkgutil

modules = sys.argv[1:]
missing_modules = []
for m in modules:
    if pkgutil.find_loader(m) is None:
        print('module', m, 'not found')
        exit(1)

Added (the new script, which checks the installed versions and installs or upgrades the required modules):

"""
This script checks and installs the required modules.

This script runs inside the legacy "stable-diffusion" folder

TODO - Maybe replace the bulk of this script with a call to `pip install -f requirements.txt`, with
a custom index URL depending on the platform.
"""

import os
from importlib.metadata import version as pkg_version
import platform
import traceback

os_name = platform.system()

modules_to_check = {
    "torch": ("1.11.0", "1.13.1", "2.0.0"),
    "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
    "sdkit": "1.0.101",
    "stable-diffusion-sdkit": "2.1.4",
    "rich": "12.6.0",
    "uvicorn": "0.19.0",
    "fastapi": "0.85.1",
    # "xformers": "0.0.16",
}


def version(module_name: str) -> str:
    try:
        return pkg_version(module_name)
    except:
        return None


def install(module_name: str, module_version: str):
    if module_name == "xformers" and (os_name == "Darwin" or is_amd_on_linux()):
        return

    index_url = None
    if module_name in ("torch", "torchvision"):
        module_version, index_url = apply_torch_install_overrides(module_version)

    if is_amd_on_linux():  # hack until AMD works properly on torch 2.0 (avoids black images on some cards)
        if module_name == "torch":
            module_version = "1.13.1+rocm5.2"
        elif module_name == "torchvision":
            module_version = "0.14.1+rocm5.2"
    elif os_name == "Darwin":
        if module_name == "torch":
            module_version = "1.13.1"
        elif module_name == "torchvision":
            module_version = "0.14.1"

    install_cmd = f"python -m pip install --upgrade {module_name}=={module_version}"
    if index_url:
        install_cmd += f" --index-url {index_url}"
    if module_name == "sdkit" and version("sdkit") is not None:
        install_cmd += " -q"

    print(">", install_cmd)
    os.system(install_cmd)


def init():
    for module_name, allowed_versions in modules_to_check.items():
        if os.path.exists(f"../src/{module_name}"):
            print(f"Skipping {module_name} update, since it's in developer/editable mode")
            continue

        allowed_versions, latest_version = get_allowed_versions(module_name, allowed_versions)

        requires_install = False
        if module_name in ("torch", "torchvision"):
            if version(module_name) is None:  # allow any torch version
                requires_install = True
            elif os_name == "Darwin" and (  # force mac to downgrade from torch 2.0
                version("torch").startswith("2.") or version("torchvision").startswith("0.15.")
            ):
                requires_install = True
        elif version(module_name) not in allowed_versions:
            requires_install = True

        if requires_install:
            try:
                install(module_name, latest_version)
            except:
                traceback.print_exc()
                fail(module_name)

        print(f"{module_name}: {version(module_name)}")


### utilities


def get_allowed_versions(module_name: str, allowed_versions: tuple):
    allowed_versions = (allowed_versions,) if isinstance(allowed_versions, str) else allowed_versions
    latest_version = allowed_versions[-1]

    if module_name in ("torch", "torchvision"):
        allowed_versions = include_cuda_versions(allowed_versions)

    return allowed_versions, latest_version


def apply_torch_install_overrides(module_version: str):
    index_url = None
    if os_name == "Windows":
        module_version += "+cu117"
        index_url = "https://download.pytorch.org/whl/cu117"
    elif is_amd_on_linux():
        index_url = "https://download.pytorch.org/whl/rocm5.2"

    return module_version, index_url


def include_cuda_versions(module_versions: tuple) -> tuple:
    "Adds CUDA-specific versions to the list of allowed version numbers"

    allowed_versions = tuple(module_versions)
    allowed_versions += tuple(f"{v}+cu116" for v in module_versions)
    allowed_versions += tuple(f"{v}+cu117" for v in module_versions)
    allowed_versions += tuple(f"{v}+rocm5.2" for v in module_versions)
    allowed_versions += tuple(f"{v}+rocm5.4.2" for v in module_versions)

    return allowed_versions


def is_amd_on_linux():
    if os_name == "Linux":
        try:
            with open("/proc/bus/pci/devices", "r") as f:
                device_info = f.read()
                if "amdgpu" in device_info and "nvidia" not in device_info:
                    return True
        except:
            return False

    return False


def fail(module_name):
    print(
        f"""Error installing {module_name}. Sorry about that, please try to:
1. Run this installer again.
2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
Thanks!"""
    )
    exit(1)


### start

init()

scripts/get_config.py (new file, 46 lines)

@ -0,0 +1,46 @@
import os
import argparse
import sys

# The config file is in the same directory as this script
config_directory = os.path.dirname(__file__)
config_yaml = os.path.join(config_directory, "config.yaml")
config_json = os.path.join(config_directory, "config.json")

parser = argparse.ArgumentParser(description='Get values from config file')
parser.add_argument('--default', dest='default', action='store',
                    help='default value, to be used if the setting is not defined in the config file')
parser.add_argument('key', metavar='key', nargs='+',
                    help='config key to return')

args = parser.parse_args()

if os.path.isfile(config_yaml):
    import yaml

    with open(config_yaml, 'r') as configfile:
        try:
            config = yaml.safe_load(configfile)
        except Exception as e:
            print(e, file=sys.stderr)
            config = {}
elif os.path.isfile(config_json):
    import json

    with open(config_json, 'r') as configfile:
        try:
            config = json.load(configfile)
        except Exception as e:
            print(e, file=sys.stderr)
            config = {}
else:
    config = {}

for k in args.key:
    if k in config:
        config = config[k]
    else:
        if args.default != None:
            print(args.default)
        exit()

print(config)

scripts/on_env_start.bat

@ -8,6 +8,20 @@ if exist "scripts\config.bat" (
@call scripts\config.bat
)
if exist "scripts\user_config.bat" (
@call scripts\user_config.bat
)
if exist "stable-diffusion\env" (
@set PYTHONPATH=%PYTHONPATH%;%cd%\stable-diffusion\env\lib\site-packages
)
if exist "scripts\get_config.py" (
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=main update_branch`) DO (
@SET update_branch=%%F
)
)
if "%update_branch%"=="" (
set update_branch=main
)
@ -53,6 +67,8 @@ if "%update_branch%"=="" (
@xcopy sd-ui-files\ui ui /s /i /Y /q
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\check_models.py scripts\ /Y
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y

scripts/on_env_start.sh

@ -4,10 +4,22 @@ source ./scripts/functions.sh
printf "\n\nEasy Diffusion\n\n"
export PYTHONNOUSERSITE=y
if [ -f "scripts/config.sh" ]; then
source scripts/config.sh
fi
if [ -f "scripts/user_config.sh" ]; then
source scripts/user_config.sh
fi
export PYTHONPATH=$(pwd)/installer_files/env/lib/python3.8/site-packages:$(pwd)/stable-diffusion/env/lib/python3.8/site-packages
if [ -f "scripts/get_config.py" ]; then
export update_branch="$( python scripts/get_config.py --default=main update_branch )"
fi
if [ "$update_branch" == "" ]; then
export update_branch="main"
fi
@ -38,6 +50,8 @@ cp -Rf sd-ui-files/ui .
cp sd-ui-files/scripts/on_sd_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/check_models.py scripts/
cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/start.sh .
cp sd-ui-files/scripts/developer_console.sh .
cp sd-ui-files/scripts/functions.sh scripts/

scripts/on_sd_start.bat

@ -4,11 +4,12 @@
@REM Note to self: Please rewrite this in Python. For the sake of your own sanity.
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\check_models.py scripts\ /Y
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
if exist "%cd%\profile" (
set USERPROFILE=%cd%\profile
set HF_HOME=%cd%\profile\.cache\huggingface
)
@rem set the correct installer path (current vs legacy)
@ -34,8 +35,6 @@ call conda activate
@REM remove the old version of the dev console script, if it's still present
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
@rem create the stable-diffusion folder, to work with legacy installations
if not exist "stable-diffusion" mkdir stable-diffusion
cd stable-diffusion
@ -49,111 +48,22 @@ if exist "env" (
if exist src rename src src-old
if exist ldm rename ldm ldm-old
if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
if not exist "..\models\gfpgan" mkdir "..\models\gfpgan"
if not exist "..\models\realesrgan" mkdir "..\models\realesrgan"
if not exist "..\models\vae" mkdir "..\models\vae"
@rem migrate the legacy models to the correct path (if already downloaded)
if exist "sd-v1-4.ckpt" move sd-v1-4.ckpt ..\models\stable-diffusion\
if exist "custom-model.ckpt" move custom-model.ckpt ..\models\stable-diffusion\
if exist "GFPGANv1.3.pth" move GFPGANv1.3.pth ..\models\gfpgan\
if exist "RealESRGAN_x4plus.pth" move RealESRGAN_x4plus.pth ..\models\realesrgan\
if exist "RealESRGAN_x4plus_anime_6B.pth" move RealESRGAN_x4plus_anime_6B.pth ..\models\realesrgan\
if not exist "%INSTALL_ENV_DIR%\DLLs\libssl-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libssl-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
if not exist "%INSTALL_ENV_DIR%\DLLs\libcrypto-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libcrypto-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
@rem install torch and torchvision
call python ..\scripts\check_modules.py torch torchvision
if "%ERRORLEVEL%" EQU "0" (
echo "torch and torchvision have already been installed."
) else (
echo "Installing torch and torchvision.."
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 || (
echo "Error installing torch. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
)
)
@rem install or upgrade the required modules
set PATH=C:\Windows\System32;%PATH%
@rem install/upgrade sdkit
call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan
if "%ERRORLEVEL%" EQU "0" (
echo "sdkit is already installed."
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
@rem skip sdkit upgrade if in developer-mode
if not exist "..\src\sdkit" (
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install --upgrade sdkit==1.0.64 -q || (
echo "Error updating sdkit"
)
)
) else (
echo "Installing sdkit: https://pypi.org/project/sdkit/"
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install sdkit==1.0.64 || (
echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
)
)
call python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
@rem upgrade stable-diffusion-sdkit
call python -m pip install --upgrade stable-diffusion-sdkit==2.1.4 -q || (
echo "Error updating stable-diffusion-sdkit"
)
call python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
@rem install rich
call python ..\scripts\check_modules.py rich
if "%ERRORLEVEL%" EQU "0" (
echo "rich has already been installed."
) else (
echo "Installing rich.."
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
call python -m pip install rich || (
echo "Error installing rich. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
)
)
set PATH=C:\Windows\System32;%PATH%
call python ..\scripts\check_modules.py uvicorn fastapi
@if "%ERRORLEVEL%" EQU "0" (
echo "Packages necessary for Easy Diffusion were already installed"
) else (
@echo. & echo "Downloading packages necessary for Easy Diffusion..." & echo.
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
@call conda install -c conda-forge -y uvicorn fastapi || (
echo "Error installing the packages necessary for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
pause
exit /b
)
@rem Download the required packages
call python ..\scripts\check_modules.py
if "%ERRORLEVEL%" NEQ "0" (
pause
exit /b
)
call WHERE uvicorn > .tmp
@@ -169,162 +79,6 @@ call WHERE uvicorn > .tmp
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
)
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
) else (
for %%J in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 7 GB Model."
) else (
for %%K in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the Waifu Model."
) else (
echo. & echo "The model file present at models\stable-diffusion\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
del "..\models\stable-diffusion\sd-v1-4.ckpt"
)
)
)
)
@if not exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
@call curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ..\models\stable-diffusion\sd-v1-4.ckpt
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
echo. & echo "Error: The downloaded model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
) else (
@echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
)
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
) else (
echo. & echo "The GFPGAN model file present at models\gfpgan\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\gfpgan\GFPGANv1.3.pth"
)
)
@if not exist "..\models\gfpgan\GFPGANv1.3.pth" (
@echo. & echo "Downloading data files (weights) for GFPGAN (Face Correction).." & echo.
@call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ..\models\gfpgan\GFPGANv1.3.pth
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
echo. & echo "Error: The downloaded GFPGAN model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
) else (
@echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
)
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
) else (
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\realesrgan\RealESRGAN_x4plus.pth"
)
)
@if not exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.." & echo.
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ..\models\realesrgan\RealESRGAN_x4plus.pth
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
echo. & echo "Error: The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
) else (
@echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
)
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
) else (
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth"
)
)
@if not exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.." & echo.
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
) else (
@echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
)
@if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" EQU "334695179" (
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
) else (
echo. & echo "The default VAE (sd-vae-ft-mse-original) file present at models\vae\vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
del "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt"
)
)
@if not exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
@echo. & echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).." & echo.
@call curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt
@if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" NEQ "334695179" (
echo. & echo "Error: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
) else (
@echo. & echo "Error downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
exit /b
)
)
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" NEQ "0" (
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
@@ -343,14 +97,25 @@ call python --version
@cd ..
@set SD_UI_PATH=%cd%\ui
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=9000 net listen_port`) DO (
@SET ED_BIND_PORT=%%F
)
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
if "%%F" EQU "True" (
@SET ED_BIND_IP=0.0.0.0
) else (
@SET ED_BIND_IP=127.0.0.1
)
)
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@pause
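
The launcher above now resolves the listen port and bind address through `scripts/get_config.py` (via `ED_BIND_PORT`/`ED_BIND_IP`) instead of the older `SD_UI_BIND_PORT`/`SD_UI_BIND_IP` environment variables. A minimal sketch of what such a config-reading helper could look like, assuming the settings live in a JSON file; the real `scripts/get_config.py` may read a different file or format:

```
# get_config_sketch.py -- illustrative only; the real scripts/get_config.py may differ.
# Usage: python get_config_sketch.py --default=9000 net listen_port
import argparse
import json
import os

parser = argparse.ArgumentParser()
parser.add_argument("--default", default="", help="value to print when the key is missing")
parser.add_argument("keys", nargs="+", help="nested config keys, e.g. 'net listen_port'")
args = parser.parse_args()

value = None
config_path = "config.json"  # assumed location and format
if os.path.exists(config_path):
    with open(config_path, "r", encoding="utf-8") as f:
        value = json.load(f)
    for key in args.keys:
        if isinstance(value, dict) and key in value:
            value = value[key]
        else:
            value = None
            break

# Print the resolved value (or the default) so the launcher can capture it from stdout.
print(args.default if value is None else value)
```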

View File

@@ -4,6 +4,8 @@ cp sd-ui-files/scripts/functions.sh scripts/
cp sd-ui-files/scripts/on_env_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/check_models.py scripts/
cp sd-ui-files/scripts/get_config.py scripts/
source ./scripts/functions.sh
@@ -18,11 +20,6 @@ if [ -e "open_dev_console.sh" ]; then
rm "open_dev_console.sh"
fi
python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
# Note to self: Please rewrite this in Python. For the sake of your own sanity.
# set the correct installer path (current vs legacy)
if [ -e "installer_files/env" ]; then
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
@@ -44,258 +41,14 @@ fi
if [ -e "src" ]; then mv src src-old; fi
if [ -e "ldm" ]; then mv ldm ldm-old; fi
mkdir -p "../models/stable-diffusion"
mkdir -p "../models/gfpgan"
mkdir -p "../models/realesrgan"
mkdir -p "../models/vae"
# migrate the legacy models to the correct path (if already downloaded)
if [ -e "sd-v1-4.ckpt" ]; then mv sd-v1-4.ckpt ../models/stable-diffusion/; fi
if [ -e "custom-model.ckpt" ]; then mv custom-model.ckpt ../models/stable-diffusion/; fi
if [ -e "GFPGANv1.3.pth" ]; then mv GFPGANv1.3.pth ../models/gfpgan/; fi
if [ -e "RealESRGAN_x4plus.pth" ]; then mv RealESRGAN_x4plus.pth ../models/realesrgan/; fi
if [ -e "RealESRGAN_x4plus_anime_6B.pth" ]; then mv RealESRGAN_x4plus_anime_6B.pth ../models/realesrgan/; fi
OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="macos";;
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac
# install torch and torchvision
if python ../scripts/check_modules.py torch torchvision; then
# temp fix for installations that installed torch 2.0 by mistake
if [ "$OS_NAME" == "linux" ]; then
python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 -q
elif [ "$OS_NAME" == "macos" ]; then
python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 -q
fi
echo "torch and torchvision have already been installed."
else
echo "Installing torch and torchvision.."
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if [ "$OS_NAME" == "linux" ]; then
if python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 ; then
echo "Installed."
else
fail "torch install failed"
fi
elif [ "$OS_NAME" == "macos" ]; then
if python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 ; then
echo "Installed."
else
fail "torch install failed"
fi
fi
# Download the required packages
if ! python ../scripts/check_modules.py; then
read -p "Press any key to continue"
exit 1
fi
# install/upgrade sdkit
if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
echo "sdkit is already installed."
# skip sdkit upgrade if in developer-mode
if [ ! -e "../src/sdkit" ]; then
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
python -m pip install --upgrade sdkit==1.0.64 -q
fi
else
echo "Installing sdkit: https://pypi.org/project/sdkit/"
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if python -m pip install sdkit==1.0.64 ; then
echo "Installed."
else
fail "sdkit install failed"
fi
fi
python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
# upgrade stable-diffusion-sdkit
python -m pip install --upgrade stable-diffusion-sdkit==2.1.4 -q
python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
# install rich
if python ../scripts/check_modules.py rich; then
echo "rich has already been installed."
else
echo "Installing rich.."
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if python -m pip install rich ; then
echo "Installed."
else
fail "Install failed for rich"
fi
fi
if python ../scripts/check_modules.py uvicorn fastapi ; then
echo "Packages necessary for Easy Diffusion were already installed"
else
printf "\n\nDownloading packages necessary for Easy Diffusion..\n\n"
export PYTHONNOUSERSITE=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
if conda install -c conda-forge -y uvicorn fastapi ; then
echo "Installed. Testing.."
else
fail "'conda install uvicorn' failed"
fi
if ! command -v uvicorn &> /dev/null; then
fail "UI packages not found!"
fi
fi
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
else
printf "\n\nThe model file present at models/stable-diffusion/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/stable-diffusion/sd-v1-4.ckpt
fi
fi
if [ ! -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
echo "Downloading data files (weights) for Stable Diffusion.."
curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ../models/stable-diffusion/sd-v1-4.ckpt
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
if [ ! "$model_size" == "4265380512" ]; then
fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
fi
else
fail "Error downloading the data files (weights) for Stable Diffusion"
fi
fi
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
if [ "$model_size" -eq "348632874" ]; then
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
else
printf "\n\nThe model file present at models/gfpgan/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/gfpgan/GFPGANv1.3.pth
fi
fi
if [ ! -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
echo "Downloading data files (weights) for GFPGAN (Face Correction).."
curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ../models/gfpgan/GFPGANv1.3.pth
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
if [ ! "$model_size" -eq "348632874" ]; then
fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
fi
else
fail "Error downloading the data files (weights) for GFPGAN (Face Correction)."
fi
fi
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
if [ "$model_size" -eq "67040989" ]; then
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
else
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/realesrgan/RealESRGAN_x4plus.pth
fi
fi
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ../models/realesrgan/RealESRGAN_x4plus.pth
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
if [ ! "$model_size" -eq "67040989" ]; then
fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
fi
else
fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus"
fi
fi
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
if [ "$model_size" -eq "17938799" ]; then
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
else
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
fi
fi
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
if [ ! "$model_size" -eq "17938799" ]; then
fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
fi
else
fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime."
fi
fi
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
if [ "$model_size" -eq "334695179" ]; then
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
else
printf "\n\nThe model file present at models/vae/vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
rm ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
fi
fi
if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).."
curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
if [ ! "$model_size" -eq "334695179" ]; then
printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
read -p "Press any key to continue"
exit
fi
else
printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
read -p "Press any key to continue"
exit
fi
if ! command -v uvicorn &> /dev/null; then
fail "UI packages not found!"
fi
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
@@ -316,8 +69,17 @@ python --version
cd ..
export SD_UI_PATH=`pwd`/ui
export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
case "$( python scripts/get_config.py --default=False net listen_to_network )" in
"True")
export ED_BIND_IP=0.0.0.0
;;
"False")
export ED_BIND_IP=127.0.0.1
;;
esac
cd stable-diffusion
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port "$ED_BIND_PORT" --host "$ED_BIND_IP" --log-level error
read -p "Press any key to continue"
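
Both launchers gate their installs on the exit status of `scripts/check_modules.py <module> ...`. A minimal sketch of how such a checker could behave, assuming it simply tries to import each named module and reports failure through its exit code; the real script may also verify versions:

```
# check_modules_sketch.py -- illustrative only; the real scripts/check_modules.py may differ.
# Exits 0 if every named module imports cleanly, non-zero otherwise.
import importlib
import sys

missing = []
for module_name in sys.argv[1:]:
    try:
        importlib.import_module(module_name)
    except ImportError:
        missing.append(module_name)

if missing:
    print("Missing modules:", ", ".join(missing))
    sys.exit(1)
print("All requested modules are importable.")
```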

View File

@@ -1,17 +1,18 @@
import json
import logging
import os
import socket
import sys
import json
import traceback
import logging
import shlex
import urllib
from rich.logging import RichHandler
from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
import warnings
from easydiffusion import task_manager
from easydiffusion.utils import log
from rich.logging import RichHandler
from rich.console import Console
from rich.panel import Panel
from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
@@ -55,15 +56,42 @@ APP_CONFIG_DEFAULTS = {
},
}
IMAGE_EXTENSIONS = [".png", ".apng", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp", ".jxl", ".gif", ".webp", ".avif", ".svg"]
IMAGE_EXTENSIONS = [
".png",
".apng",
".jpg",
".jpeg",
".jfif",
".pjpeg",
".pjp",
".jxl",
".gif",
".webp",
".avif",
".svg",
]
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS=[".portrait", "_portrait", " portrait", "-portrait"]
CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS=[".landscape", "_landscape", " landscape", "-landscape"]
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [
".portrait",
"_portrait",
" portrait",
"-portrait",
]
CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [
".landscape",
"_landscape",
" landscape",
"-landscape",
]
def init():
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
# https://pytorch.org/docs/stable/storage.html
warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
load_server_plugins()
update_render_threads()
@@ -81,14 +109,10 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
config["net"] = {}
if os.getenv("SD_UI_BIND_PORT") is not None:
config["net"]["listen_port"] = int(os.getenv("SD_UI_BIND_PORT"))
else:
config["net"]["listen_port"] = 9000
if os.getenv("SD_UI_BIND_IP") is not None:
config["net"]["listen_to_network"] = os.getenv("SD_UI_BIND_IP") == "0.0.0.0"
else:
config["net"]["listen_to_network"] = True
return config
except Exception as e:
except Exception:
log.warn(traceback.format_exc())
return default_val
@@ -101,50 +125,6 @@ def setConfig(config):
except:
log.error(traceback.format_exc())
try: # config.bat
config_bat_path = os.path.join(CONFIG_DIR, "config.bat")
config_bat = []
if "update_branch" in config:
config_bat.append(f"@set update_branch={config['update_branch']}")
config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
# Preserve these variables if they are set
for var in PRESERVE_CONFIG_VARS:
if os.getenv(var) is not None:
config_bat.append(f"@set {var}={os.getenv(var)}")
if len(config_bat) > 0:
with open(config_bat_path, "w", encoding="utf-8") as f:
f.write("\n".join(config_bat))
except:
log.error(traceback.format_exc())
try: # config.sh
config_sh_path = os.path.join(CONFIG_DIR, "config.sh")
config_sh = ["#!/bin/bash"]
if "update_branch" in config:
config_sh.append(f"export update_branch={config['update_branch']}")
config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
# Preserve these variables if they are set
for var in PRESERVE_CONFIG_VARS:
if os.getenv(var) is not None:
config_bat.append(f'export {var}="{shlex.quote(os.getenv(var))}"')
if len(config_sh) > 1:
with open(config_sh_path, "w", encoding="utf-8") as f:
f.write("\n".join(config_sh))
except:
log.error(traceback.format_exc())
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
config = getConfig()
@@ -233,18 +213,56 @@ def getIPConfig():
def open_browser():
config = getConfig()
ui = config.get("ui", {})
net = config.get("net", {"listen_port": 9000})
net = config.get("net", {})
port = net.get("listen_port", 9000)
if ui.get("open_browser_on_start", True):
import webbrowser
webbrowser.open(f"http://localhost:{port}")
Console().print(
Panel(
"\n"
+ "[white]Easy Diffusion is ready to serve requests.\n\n"
+ "A new browser tab should have been opened by now.\n"
+ f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
title="Easy Diffusion is ready",
style="bold yellow on blue",
)
)
def fail_and_die(fail_type: str, data: str):
suggestions = [
"Run this installer again.",
"If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB",
"If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues",
]
if fail_type == "model_download":
fail_label = f"Error downloading the {data} model"
suggestions.insert(
1,
"If that doesn't fix it, please try to download the file manually. The address to download from, and the destination to save to are printed above this message.",
)
else:
fail_label = "Error while installing Easy Diffusion"
msg = [f"{fail_label}. Sorry about that, please try to:"]
for i, suggestion in enumerate(suggestions):
msg.append(f"{i+1}. {suggestion}")
msg.append("Thanks!")
print("\n".join(msg))
exit(1)
def get_image_modifiers():
modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")
modifier_categories = {}
original_category_order=[]
original_category_order = []
with open(modifiers_json_path, "r", encoding="utf-8") as f:
modifiers_file = json.load(f)
@@ -254,14 +272,14 @@ def get_image_modifiers():
# convert modifiers from a list of objects to a dict of dicts
for category_item in modifiers_file:
category_name = category_item['category']
category_name = category_item["category"]
original_category_order.append(category_name)
category = {}
for modifier_item in category_item['modifiers']:
for modifier_item in category_item["modifiers"]:
modifier = {}
for preview_item in modifier_item['previews']:
modifier[preview_item['name']] = preview_item['path']
category[modifier_item['modifier']] = modifier
for preview_item in modifier_item["previews"]:
modifier[preview_item["name"]] = preview_item["path"]
category[modifier_item["modifier"]] = modifier
modifier_categories[category_name] = category
def scan_directory(directory_path: str, category_name="Modifiers"):
@@ -274,12 +292,27 @@ def get_image_modifiers():
modifier_name = entry.name[: -len(file_extension[0])]
modifier_path = f"custom/{entry.path[len(CUSTOM_MODIFIERS_DIR) + 1:]}"
# URL encode path segments
modifier_path = "/".join(map(lambda segment: urllib.parse.quote(segment), modifier_path.split("/")))
modifier_path = "/".join(
map(
lambda segment: urllib.parse.quote(segment),
modifier_path.split("/"),
)
)
is_portrait = True
is_landscape = True
portrait_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS))
landscape_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS))
portrait_extension = list(
filter(
lambda e: modifier_name.lower().endswith(e),
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS,
)
)
landscape_extension = list(
filter(
lambda e: modifier_name.lower().endswith(e),
CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS,
)
)
if len(portrait_extension) > 0:
is_landscape = False
@@ -287,24 +320,24 @@ def get_image_modifiers():
elif len(landscape_extension) > 0:
is_portrait = False
modifier_name = modifier_name[: -len(landscape_extension[0])]
if (category_name not in modifier_categories):
if category_name not in modifier_categories:
modifier_categories[category_name] = {}
category = modifier_categories[category_name]
if (modifier_name not in category):
if modifier_name not in category:
category[modifier_name] = {}
if (is_portrait or "portrait" not in category[modifier_name]):
if is_portrait or "portrait" not in category[modifier_name]:
category[modifier_name]["portrait"] = modifier_path
if (is_landscape or "landscape" not in category[modifier_name]):
if is_landscape or "landscape" not in category[modifier_name]:
category[modifier_name]["landscape"] = modifier_path
elif entry.is_dir():
scan_directory(
entry.path,
entry.name if directory_path==CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
entry.name if directory_path == CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
)
scan_directory(CUSTOM_MODIFIERS_DIR)
@@ -317,12 +350,12 @@ def get_image_modifiers():
# convert the modifiers back into a list of objects
modifier_categories_list = []
for category_name in [*original_category_order, *custom_categories]:
category = { 'category': category_name, 'modifiers': [] }
category = {"category": category_name, "modifiers": []}
for modifier_name in sorted(modifier_categories[category_name].keys(), key=str.casefold):
modifier = { 'modifier': modifier_name, 'previews': [] }
modifier = {"modifier": modifier_name, "previews": []}
for preview_name, preview_path in modifier_categories[category_name][modifier_name].items():
modifier['previews'].append({ 'name': preview_name, 'path': preview_path })
category['modifiers'].append(modifier)
modifier["previews"].append({"name": preview_name, "path": preview_path})
category["modifiers"].append(modifier)
modifier_categories_list.append(category)
return modifier_categories_list
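
For reference, the list-of-objects shape that `get_image_modifiers()` rebuilds before returning looks roughly like this (category, modifier and path values are illustrative, not taken from the real modifiers.json):

```
# Illustrative return value of get_image_modifiers(); names and paths are made up.
modifier_categories_list = [
    {
        "category": "Drawing Style",
        "modifiers": [
            {
                "modifier": "Sketch",
                "previews": [
                    {"name": "portrait", "path": "custom/Drawing%20Style/Sketch.portrait.jpg"},
                    {"name": "landscape", "path": "custom/Drawing%20Style/Sketch.landscape.jpg"},
                ],
            },
        ],
    },
]
print(modifier_categories_list[0]["modifiers"][0]["modifier"])  # -> "Sketch"
```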

View File

@@ -1,9 +1,9 @@
import os
import platform
import torch
import traceback
import re
import traceback
import torch
from easydiffusion.utils import log
"""
@@ -118,7 +118,10 @@ def auto_pick_devices(currently_active_devices):
# These already-running devices probably aren't terrible, since they were picked in the past.
# Worst case, the user can restart the program and that'll get rid of them.
devices = list(
filter((lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices), devices)
filter(
(lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices),
devices,
)
)
devices = list(map(lambda x: x["device"], devices))
return devices
@@ -162,6 +165,7 @@ def needs_to_force_full_precision(context):
and (
" 1660" in device_name
or " 1650" in device_name
or " 1630" in device_name
or " t400" in device_name
or " t550" in device_name
or " t600" in device_name
@@ -221,9 +225,9 @@ def is_device_compatible(device):
try:
_, mem_total = torch.cuda.mem_get_info(device)
mem_total /= float(10**9)
if mem_total < 3.0:
if mem_total < 1.9:
if is_device_compatible.history.get(device) == None:
log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
log.warn(f"GPU {device} with less than 2 GB of VRAM is not compatible with Stable Diffusion")
is_device_compatible.history[device] = 1
return False
except RuntimeError as e:
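
On the 3.0 → 1.9 threshold change: `torch.cuda.mem_get_info` returns bytes and the code divides by 10**9 (decimal GB), so a nominal 2 GiB card reports roughly 2.15 in these units and now passes, while 1 GiB cards are still rejected. A quick check of that arithmetic:

```
# mem_total in is_device_compatible() is bytes / 10**9 (decimal GB), so:
two_gib_card = 2 * 1024**3 / 10**9   # ~2.147 -> passes the new 1.9 threshold
one_gib_card = 1 * 1024**3 / 10**9   # ~1.074 -> still rejected
print(round(two_gib_card, 3), round(one_gib_card, 3))
```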

View File

@@ -1,13 +1,24 @@
import os
import shutil
from glob import glob
import traceback
from easydiffusion import app
from easydiffusion.types import TaskData
from easydiffusion.utils import log
from sdkit import Context
from sdkit.models import load_model, unload_model, scan_model
from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db
from sdkit.utils import hash_file_quick
KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan", "lora"]
KNOWN_MODEL_TYPES = [
"stable-diffusion",
"vae",
"hypernetwork",
"gfpgan",
"realesrgan",
"lora",
"codeformer",
]
MODEL_EXTENSIONS = {
"stable-diffusion": [".ckpt", ".safetensors"],
"vae": [".vae.pt", ".ckpt", ".safetensors"],
@@ -15,14 +26,22 @@ MODEL_EXTENSIONS = {
"gfpgan": [".pth"],
"realesrgan": [".pth"],
"lora": [".ckpt", ".safetensors"],
"codeformer": [".pth"],
}
DEFAULT_MODELS = {
"stable-diffusion": [ # needed to support the legacy installations
"custom-model", # only one custom model file was supported initially, creatively named 'custom-model'
"sd-v1-4", # Default fallback.
"stable-diffusion": [
{"file_name": "sd-v1-4.ckpt", "model_id": "1.4"},
],
"gfpgan": [
{"file_name": "GFPGANv1.4.pth", "model_id": "1.4"},
],
"realesrgan": [
{"file_name": "RealESRGAN_x4plus.pth", "model_id": "x4plus"},
{"file_name": "RealESRGAN_x4plus_anime_6B.pth", "model_id": "x4plus_anime_6"},
],
"vae": [
{"file_name": "vae-ft-mse-840000-ema-pruned.ckpt", "model_id": "vae-ft-mse-840000-ema-pruned"},
],
"gfpgan": ["GFPGANv1.3"],
"realesrgan": ["RealESRGAN_x4plus"],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
@@ -31,6 +50,8 @@ known_models = {}
def init():
make_model_folders()
migrate_legacy_model_location() # if necessary
download_default_models_if_necessary()
getModels() # run this once, to cache the picklescan results
@@ -41,16 +62,27 @@ def load_default_models(context: Context):
for model_type in MODELS_TO_LOAD_ON_START:
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
try:
load_model(context, model_type)
load_model(
context,
model_type,
scan_model=context.model_paths[model_type] != None
and not context.model_paths[model_type].endswith(".safetensors"),
)
if model_type in context.model_load_errors:
del context.model_load_errors[model_type]
except Exception as e:
log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
log.error(f"[red]Error: {e}[/red]")
log.error(f"[red]Consider removing the model from the model folder.[red]")
log.exception(e)
del context.model_paths[model_type]
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def unload_all(context: Context):
for model_type in KNOWN_MODEL_TYPES:
unload_model(context, model_type)
if model_type in context.model_load_errors:
del context.model_load_errors[model_type]
def resolve_model_to_use(model_name: str = None, model_type: str = None):
@@ -58,7 +90,7 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
default_models = DEFAULT_MODELS.get(model_type, [])
config = app.getConfig()
model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
model_dir = os.path.join(app.MODELS_DIR, model_type)
if not model_name: # When None try user configured model.
# config = getConfig()
if "model" in config and model_type in config["model"]:
@@ -66,42 +98,41 @@ def resolve_model_to_use(model_name: str = None, model_type: str = None):
if model_name:
# Check models directory
models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
model_path = os.path.join(model_dir, model_name)
if os.path.exists(model_path):
return model_path
for model_extension in model_extensions:
if os.path.exists(models_dir_path + model_extension):
return models_dir_path + model_extension
if os.path.exists(model_path + model_extension):
return model_path + model_extension
if os.path.exists(model_name + model_extension):
return os.path.abspath(model_name + model_extension)
# Default locations
if model_name in default_models:
default_model_path = os.path.join(app.SD_DIR, model_name)
for model_extension in model_extensions:
if os.path.exists(default_model_path + model_extension):
return default_model_path + model_extension
# Can't find requested model, check the default paths.
for default_model in default_models:
for model_dir in model_dirs:
default_model_path = os.path.join(model_dir, default_model)
for model_extension in model_extensions:
if os.path.exists(default_model_path + model_extension):
if model_name is not None:
log.warn(
f"Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}"
)
return default_model_path + model_extension
if model_type == "stable-diffusion":
for default_model in default_models:
default_model_path = os.path.join(model_dir, default_model["file_name"])
if os.path.exists(default_model_path):
if model_name is not None:
log.warn(
f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
)
return default_model_path
return None
def reload_models_if_necessary(context: Context, task_data: TaskData):
face_fix_lower = task_data.use_face_correction.lower() if task_data.use_face_correction else ""
upscale_lower = task_data.use_upscale.lower() if task_data.use_upscale else ""
model_paths_in_req = {
"stable-diffusion": task_data.use_stable_diffusion_model,
"vae": task_data.use_vae_model,
"hypernetwork": task_data.use_hypernetwork_model,
"gfpgan": task_data.use_face_correction,
"realesrgan": task_data.use_upscale,
"codeformer": task_data.use_face_correction if "codeformer" in face_fix_lower else None,
"gfpgan": task_data.use_face_correction if "gfpgan" in face_fix_lower else None,
"realesrgan": task_data.use_upscale if "realesrgan" in upscale_lower else None,
"latent_upscaler": True if "latent_upscaler" in upscale_lower else None,
"nsfw_checker": True if task_data.block_nsfw else None,
"lora": task_data.use_lora_model,
}
@@ -111,14 +142,26 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
if context.model_paths.get(model_type) != path
}
if set_vram_optimizations(context): # reload SD
if task_data.codeformer_upscale_faces and "realesrgan" not in models_to_reload.keys():
models_to_reload["realesrgan"] = resolve_model_to_use(
DEFAULT_MODELS["realesrgan"][0]["file_name"], "realesrgan"
)
if set_vram_optimizations(context) or set_clip_skip(context, task_data): # reload SD
models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]
for model_type, model_path_in_req in models_to_reload.items():
context.model_paths[model_type] = model_path_in_req
action_fn = unload_model if context.model_paths[model_type] is None else load_model
action_fn(context, model_type, scan_model=False) # we've scanned them already
try:
action_fn(context, model_type, scan_model=False) # we've scanned them already
if model_type in context.model_load_errors:
del context.model_load_errors[model_type]
except Exception as e:
log.exception(e)
if action_fn == load_model:
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def resolve_model_paths(task_data: TaskData):
@@ -130,11 +173,49 @@ def resolve_model_paths(task_data: TaskData):
task_data.use_lora_model = resolve_model_to_use(task_data.use_lora_model, model_type="lora")
if task_data.use_face_correction:
task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
if task_data.use_upscale:
if "gfpgan" in task_data.use_face_correction.lower():
model_type = "gfpgan"
elif "codeformer" in task_data.use_face_correction.lower():
model_type = "codeformer"
download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0")
task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, model_type)
if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")
def fail_if_models_did_not_load(context: Context):
for model_type in KNOWN_MODEL_TYPES:
if model_type in context.model_load_errors:
e = context.model_load_errors[model_type]
raise Exception(f"Could not load the {model_type} model! Reason: " + e)
def download_default_models_if_necessary():
for model_type, models in DEFAULT_MODELS.items():
for model in models:
try:
download_if_necessary(model_type, model["file_name"], model["model_id"])
except:
traceback.print_exc()
app.fail_and_die(fail_type="model_download", data=model_type)
print(model_type, "model(s) found.")
def download_if_necessary(model_type: str, file_name: str, model_id: str):
model_path = os.path.join(app.MODELS_DIR, model_type, file_name)
expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
other_models_exist = any_model_exists(model_type)
known_model_exists = os.path.exists(model_path)
known_model_is_corrupt = known_model_exists and hash_file_quick(model_path) != expected_hash
if known_model_is_corrupt or (not other_models_exist and not known_model_exists):
print("> download", model_type, model_id)
download_model(model_type, model_id, download_base_dir=app.MODELS_DIR)
def set_vram_optimizations(context: Context):
config = app.getConfig()
vram_usage_level = config.get("vram_usage_level", "balanced")
@@ -146,6 +227,36 @@ def set_vram_optimizations(context: Context):
return False
def migrate_legacy_model_location():
'Move the models inside the legacy "stable-diffusion" folder, to their respective folders'
for model_type, models in DEFAULT_MODELS.items():
for model in models:
file_name = model["file_name"]
legacy_path = os.path.join(app.SD_DIR, file_name)
if os.path.exists(legacy_path):
shutil.move(legacy_path, os.path.join(app.MODELS_DIR, model_type, file_name))
def any_model_exists(model_type: str) -> bool:
extensions = MODEL_EXTENSIONS.get(model_type, [])
for ext in extensions:
if any(glob(f"{app.MODELS_DIR}/{model_type}/**/*{ext}", recursive=True)):
return True
return False
def set_clip_skip(context: Context, task_data: TaskData):
clip_skip = task_data.clip_skip
if clip_skip != context.clip_skip:
context.clip_skip = clip_skip
return True
return False
def make_model_folders():
for model_type in KNOWN_MODEL_TYPES:
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
@@ -167,13 +278,23 @@ def is_malicious_model(file_path):
if scan_result.issues_count > 0 or scan_result.infected_files > 0:
log.warn(
":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]"
% (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
% (
file_path,
scan_result.scanned_files,
scan_result.issues_count,
scan_result.infected_files,
)
)
return True
else:
log.debug(
"Scan %s: [green]%d scanned, %d issue, %d infected.[/green]"
% (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
% (
file_path,
scan_result.scanned_files,
scan_result.issues_count,
scan_result.infected_files,
)
)
return False
except Exception as e:
@@ -183,17 +304,12 @@ def is_malicious_model(file_path):
def getModels():
models = {
"active": {
"stable-diffusion": "sd-v1-4",
"vae": "",
"hypernetwork": "",
"lora": "",
},
"options": {
"stable-diffusion": ["sd-v1-4"],
"vae": [],
"hypernetwork": [],
"lora": [],
"codeformer": ["codeformer"],
},
}
@@ -201,13 +317,13 @@ def getModels():
class MaliciousModelException(Exception):
"Raised when picklescan reports a problem with a model"
pass
def scan_directory(directory, suffixes, directoriesFirst: bool = True):
nonlocal models_scanned
tree = []
for entry in sorted(
os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower())
os.scandir(directory),
key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()),
):
if entry.is_file():
matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
@@ -243,6 +359,7 @@ def getModels():
except MaliciousModelException as e:
models["scan-error"] = e
log.info(f"[green]Scanning all model folders for models...[/]")
# custom models
listModels(model_type="stable-diffusion")
listModels(model_type="vae")
@@ -253,9 +370,4 @@ def getModels():
if models_scanned > 0:
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")
# legacy
custom_weight_path = os.path.join(app.SD_DIR, "custom-model.ckpt")
if os.path.exists(custom_weight_path):
models["options"]["stable-diffusion"].append("custom-model")
return models
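
The new startup path (`init()` → `download_default_models_if_necessary()` → `download_if_necessary()`) fetches a default model only when the known file is corrupt, or when no model of that type exists at all. A condensed restatement of that decision rule (the real code uses sdkit's `hash_file_quick`, `get_model_info_from_db` and `download_model` for the actual checks and download):

```
# Condensed restatement of the decision inside download_if_necessary() (illustrative).
def should_download(known_model_exists: bool, known_model_is_corrupt: bool, other_models_exist: bool) -> bool:
    return known_model_is_corrupt or (not other_models_exist and not known_model_exists)

# Fresh install: no sd-v1-4.ckpt and no other checkpoints -> download the default.
assert should_download(False, False, False)
# The user already has their own checkpoint in models/stable-diffusion/ -> skip the default download.
assert not should_download(False, False, True)
# The known default file exists but fails the quick-hash check -> re-download it.
assert should_download(True, True, True)
```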

View File

@@ -1,16 +1,24 @@
import queue
import time
import json
import pprint
import queue
import time
from easydiffusion import device_manager
from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
from easydiffusion.utils import get_printable_request, save_images_to_disk, log
from easydiffusion.types import GenerateImageRequest
from easydiffusion.types import Image as ResponseImage
from easydiffusion.types import Response, TaskData, UserInitiatedStop
from easydiffusion.utils import get_printable_request, log, save_images_to_disk
from sdkit import Context
from sdkit.generate import generate_images
from sdkit.filter import apply_filters
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, diffusers_latent_samples_to_images
from sdkit.generate import generate_images
from sdkit.utils import (
diffusers_latent_samples_to_images,
gc,
img_to_base64_str,
img_to_buffer,
latent_samples_to_images,
get_device_usage,
)
context = Context() # thread-local
"""
@@ -25,6 +33,8 @@ def init(device):
context.stop_processing = False
context.temp_images = {}
context.partial_x_samples = None
context.model_load_errors = {}
context.enable_codeformer = True
from easydiffusion import app
@@ -33,18 +43,29 @@ def init(device):
app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main"
)
log.info("Device usage during initialization:")
get_device_usage(device, log_info=True, process_usage_only=False)
device_manager.device_init(context, device)
def make_images(
req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
req: GenerateImageRequest,
task_data: TaskData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
):
context.stop_processing = False
print_task_info(req, task_data)
images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
res = Response(
req,
task_data,
images=construct_response(images, seeds, task_data, base_seed=req.seed),
)
res = res.json()
data_queue.put(json.dumps(res))
log.info("Task completed")
@@ -53,16 +74,19 @@ def make_images(
def print_task_info(req: GenerateImageRequest, task_data: TaskData):
req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
req_str = pprint.pformat(get_printable_request(req, task_data)).replace("[", "\[")
task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
log.info(f"request: {req_str}")
log.info(f"task data: {task_str}")
def make_images_internal(
req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
req: GenerateImageRequest,
task_data: TaskData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
):
images, user_stopped = generate_images_internal(
req,
task_data,
@@ -72,7 +96,8 @@ def make_images_internal(
task_data.stream_image_progress,
task_data.stream_image_progress_interval,
)
filtered_images = filter_images(task_data, images, user_stopped)
gc(context)
filtered_images = filter_images(req, task_data, images, user_stopped)
if task_data.save_to_disk_path is not None:
save_images_to_disk(images, filtered_images, req, task_data)
@@ -128,28 +153,51 @@ def generate_images_internal(
return images, user_stopped
def filter_images(task_data: TaskData, images: list, user_stopped):
def filter_images(req: GenerateImageRequest, task_data: TaskData, images: list, user_stopped):
if user_stopped:
return images
filters_to_apply = []
filter_params = {}
if task_data.block_nsfw:
filters_to_apply.append("nsfw_checker")
if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
if task_data.use_face_correction and "codeformer" in task_data.use_face_correction.lower():
filters_to_apply.append("codeformer")
filter_params["upscale_faces"] = task_data.codeformer_upscale_faces
elif task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
filters_to_apply.append("gfpgan")
if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
filters_to_apply.append("realesrgan")
if task_data.use_upscale:
if "realesrgan" in task_data.use_upscale.lower():
filters_to_apply.append("realesrgan")
elif task_data.use_upscale == "latent_upscaler":
filters_to_apply.append("latent_upscaler")
filter_params["latent_upscaler_options"] = {
"prompt": req.prompt,
"negative_prompt": req.negative_prompt,
"seed": req.seed,
"num_inference_steps": task_data.latent_upscaler_steps,
"guidance_scale": 0,
}
filter_params["scale"] = task_data.upscale_amount
if len(filters_to_apply) == 0:
return images
return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
return apply_filters(context, filters_to_apply, images, **filter_params)
def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
return [
ResponseImage(
data=img_to_base64_str(img, task_data.output_format, task_data.output_quality, task_data.output_lossless),
data=img_to_base64_str(
img,
task_data.output_format,
task_data.output_quality,
task_data.output_lossless,
),
seed=seed,
)
for img, seed in zip(images, seeds)

View File

@@ -2,28 +2,30 @@
Notes:
async endpoints always run on the main thread. Without async, they run on the thread pool.
"""
import datetime
import mimetypes
import os
import traceback
import datetime
from typing import List, Union
from easydiffusion import app, model_manager, task_manager
from easydiffusion.types import GenerateImageRequest, MergeRequest, TaskData
from easydiffusion.utils import log
from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, Extra
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
from easydiffusion import app, model_manager, task_manager
from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
from easydiffusion.utils import log
import mimetypes
log.info(f"started in {app.SD_DIR}")
log.info(f"started at {datetime.datetime.now():%x %X}")
server_api = FastAPI()
NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
NOCACHE_HEADERS = {
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
}
class NoCacheStaticFiles(StaticFiles):
@ -44,7 +46,7 @@ class NoCacheStaticFiles(StaticFiles):
return super().is_not_modified(response_headers, request_headers)
class SetAppConfigRequest(BaseModel):
class SetAppConfigRequest(BaseModel, extra=Extra.allow):
update_branch: str = None
render_devices: Union[List[str], List[int], str, int] = None
model_vae: str = None
@ -65,11 +67,17 @@ def init():
name="custom-thumbnails",
)
server_api.mount("/media", NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")), name="media")
server_api.mount(
"/media",
NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")),
name="media",
)
for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
server_api.mount(
f"/plugins/{dir_prefix}", NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}"
f"/plugins/{dir_prefix}",
NoCacheStaticFiles(directory=plugins_dir),
name=f"plugins-{dir_prefix}",
)
@server_api.post("/app_config")
@ -136,6 +144,10 @@ def set_app_config_internal(req: SetAppConfigRequest):
config["test_diffusers"] = req.test_diffusers
for property, property_value in req.dict().items():
if property_value is not None and property not in req.__fields__:
config[property] = property_value
try:
app.setConfig(config)
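With Pydantic 1.x, the combination of `extra=Extra.allow` and the loop above lets unrecognized request fields flow straight into the saved config. A self-contained sketch (the extra field name here is made up):

```python
from pydantic import BaseModel, Extra

class ConfigRequest(BaseModel, extra=Extra.allow):
    update_branch: str = None

req = ConfigRequest(update_branch="beta", ui_open_browser_on_start=False)
config = {}
for prop, value in req.dict().items():
    # declared fields are handled explicitly above; only the extras land here
    if value is not None and prop not in req.__fields__:
        config[prop] = value

print(config)  # {'ui_open_browser_on_start': False}
```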
@ -242,8 +254,8 @@ def render_internal(req: dict):
def model_merge_internal(req: dict):
try:
from sdkit.train import merge_models
from easydiffusion.utils.save_utils import filename_regex
from sdkit.train import merge_models
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
@ -251,7 +263,11 @@ def model_merge_internal(req: dict):
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
mergeReq.ratio,
os.path.join(app.MODELS_DIR, "stable-diffusion", filename_regex.sub("_", mergeReq.out_path)),
os.path.join(
app.MODELS_DIR,
"stable-diffusion",
filename_regex.sub("_", mergeReq.out_path),
),
mergeReq.use_fp16,
)
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)

View File

@ -7,16 +7,18 @@ Notes:
import json
import traceback
TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
TASK_TTL = 30 * 60 # seconds, Discard last session's task timeout
import torch
import queue, threading, time, weakref
import queue
import threading
import time
import weakref
from typing import Any, Hashable
import torch
from easydiffusion import device_manager
from easydiffusion.types import TaskData, GenerateImageRequest
from easydiffusion.types import GenerateImageRequest, TaskData
from easydiffusion.utils import log
from sdkit.utils import gc
THREAD_NAME_PREFIX = ""
@ -167,7 +169,7 @@ class DataCache:
raise Exception("DataCache.put" + ERR_LOCK_FAILED)
try:
self._base[key] = (self._get_ttl_time(ttl), value)
except Exception as e:
except Exception:
log.error(traceback.format_exc())
return False
else:
@ -264,7 +266,7 @@ def thread_get_next_task():
def thread_render(device):
global current_state, current_state_error
from easydiffusion import renderer, model_manager
from easydiffusion import model_manager, renderer
try:
renderer.init(device)
@ -317,6 +319,9 @@ def thread_render(device):
def step_callback():
global current_state_error
task_cache.keep(id(task), TASK_TTL)
session_cache.keep(task.task_data.session_id, TASK_TTL)
if (
isinstance(current_state_error, SystemExit)
or isinstance(current_state_error, StopAsyncIteration)
@ -331,10 +336,15 @@ def thread_render(device):
current_state = ServerStates.LoadingModel
model_manager.resolve_model_paths(task.task_data)
model_manager.reload_models_if_necessary(renderer.context, task.task_data)
model_manager.fail_if_models_did_not_load(renderer.context)
current_state = ServerStates.Rendering
task.response = renderer.make_images(
task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback
task.render_request,
task.task_data,
task.buffer_queue,
task.temp_images,
step_callback,
)
# Before looping back to the generator, mark cache as still alive.
task_cache.keep(id(task), TASK_TTL)
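The extra `keep()` calls in `step_callback` refresh the cache TTL on every progress step, so a long render isn't evicted mid-task. A toy cache (not the project's DataCache) illustrating the pattern:

```python
import time

class TinyTTLCache:
    def __init__(self):
        self._base = {}

    def put(self, key, value, ttl):
        self._base[key] = (time.time() + ttl, value)

    def keep(self, key, ttl):
        # refresh the expiry without touching the stored value
        entry = self._base.get(key)
        if entry is not None:
            self._base[key] = (time.time() + ttl, entry[1])

    def tryGet(self, key):
        expiry, value = self._base.get(key, (0, None))
        return value if expiry > time.time() else None

TASK_TTL = 30 * 60  # matches the new timeout above
cache = TinyTTLCache()
cache.put("task", {"state": "rendering"}, TASK_TTL)
cache.keep("task", TASK_TTL)  # called from step_callback on every preview step
```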

View File

@ -1,6 +1,7 @@
from pydantic import BaseModel
from typing import Any
from pydantic import BaseModel
class GenerateImageRequest(BaseModel):
prompt: str = ""
@ -22,6 +23,7 @@ class GenerateImageRequest(BaseModel):
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
hypernetwork_strength: float = 0
lora_alpha: float = 0
tiling: str = "none" # "none", "x", "y", "xy"
class TaskData(BaseModel):
@ -31,8 +33,9 @@ class TaskData(BaseModel):
vram_usage_level: str = "balanced" # or "low" or "medium"
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" or "latent_upscaler"
upscale_amount: int = 4 # or 2
latent_upscaler_steps: int = 10
use_stable_diffusion_model: str = "sd-v1-4"
# use_stable_diffusion_config: str = "v1-inference"
use_vae_model: str = None
@ -47,6 +50,8 @@ class TaskData(BaseModel):
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5
clip_skip: bool = False
codeformer_upscale_faces: bool = False
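An illustrative payload exercising the new fields (values are examples only; the face-correction model name is a placeholder):

```python
payload = {
    "prompt": "a lighthouse at dusk",
    "tiling": "xy",                       # "none", "x", "y" or "xy"
    "use_upscale": "latent_upscaler",
    "latent_upscaler_steps": 10,
    "clip_skip": True,
    "use_face_correction": "codeformer",  # enables the CodeFormer filter
    "codeformer_upscale_faces": True,
}
```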
class MergeRequest(BaseModel):

View File

@ -1,42 +1,130 @@
import os
import time
import re
import time
from datetime import datetime
from functools import reduce
from easydiffusion.types import TaskData, GenerateImageRequest
from sdkit.utils import save_images, save_dicts
from easydiffusion import app
from easydiffusion.types import GenerateImageRequest, TaskData
from numpy import base_repr
from sdkit.utils import save_dicts, save_images
filename_regex = re.compile("[^a-zA-Z0-9._-]")
img_number_regex = re.compile("([0-9]{5,})")
# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
"prompt": "Prompt",
"negative_prompt": "Negative Prompt",
"seed": "Seed",
"use_stable_diffusion_model": "Stable Diffusion model",
"clip_skip": "Clip Skip",
"use_vae_model": "VAE model",
"sampler_name": "Sampler",
"width": "Width",
"height": "Height",
"seed": "Seed",
"num_inference_steps": "Steps",
"guidance_scale": "Guidance Scale",
"prompt_strength": "Prompt Strength",
"use_lora_model": "LoRA model",
"lora_alpha": "LoRA Strength",
"use_hypernetwork_model": "Hypernetwork model",
"hypernetwork_strength": "Hypernetwork Strength",
"tiling": "Seamless Tiling",
"use_face_correction": "Use Face Correction",
"use_upscale": "Use Upscaling",
"upscale_amount": "Upscale By",
"sampler_name": "Sampler",
"negative_prompt": "Negative Prompt",
"use_stable_diffusion_model": "Stable Diffusion model",
"use_vae_model": "VAE model",
"use_hypernetwork_model": "Hypernetwork model",
"hypernetwork_strength": "Hypernetwork Strength",
"use_lora_model": "LoRA model",
# "lora_alpha": "LoRA Strength",
"latent_upscaler_steps": "Latent Upscaler Steps"
}
time_placeholders = {
"$yyyy": "%Y",
"$MM": "%m",
"$dd": "%d",
"$HH": "%H",
"$mm": "%M",
"$ss": "%S",
}
other_placeholders = {
"$id": lambda req, task_data: filename_regex.sub("_", task_data.session_id),
"$p": lambda req, task_data: filename_regex.sub("_", req.prompt)[:50],
"$s": lambda req, task_data: str(req.seed),
}
class ImageNumber:
_factory = None
_evaluated = False
def __init__(self, factory):
self._factory = factory
self._evaluated = None
def __call__(self) -> int:
if self._evaluated is None:
self._evaluated = self._factory()
return self._evaluated
def format_placeholders(format: str, req: GenerateImageRequest, task_data: TaskData, now=None):
if now is None:
now = time.time()
for placeholder, time_format in time_placeholders.items():
if placeholder in format:
format = format.replace(placeholder, datetime.fromtimestamp(now).strftime(time_format))
for placeholder, replace_func in other_placeholders.items():
if placeholder in format:
format = format.replace(placeholder, replace_func(req, task_data))
return format
def format_folder_name(format: str, req: GenerateImageRequest, task_data: TaskData):
format = format_placeholders(format, req, task_data)
return filename_regex.sub("_", format)
def format_file_name(
format: str,
req: GenerateImageRequest,
task_data: TaskData,
now: float,
batch_file_number: int,
folder_img_number: ImageNumber,
):
format = format_placeholders(format, req, task_data, now)
if "$n" in format:
format = format.replace("$n", f"{folder_img_number():05}")
if "$tsb64" in format:
img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(
int(batch_file_number), 36
) # Base 36 conversion, 0-9, A-Z
format = format.replace("$tsb64", img_id)
if "$ts" in format:
format = format.replace("$ts", str(int(now * 1000) + batch_file_number))
return filename_regex.sub("_", format)
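As a worked example of the default `$p_$tsb64` filename format, the stand-alone snippet below mirrors the replacement steps above; the prompt and timestamp are arbitrary:

```python
import re
import time
from numpy import base_repr

filename_regex = re.compile("[^a-zA-Z0-9._-]")

now, batch_file_number = time.time(), 0
prompt = "a photo of an astronaut riding a horse"

name = "$p_$tsb64"
# $p: sanitized prompt, truncated to 50 characters
name = name.replace("$p", filename_regex.sub("_", prompt)[:50])
# $tsb64: base-36 timestamp (last 7 chars) plus the image's index in the batch
img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(batch_file_number, 36)
name = name.replace("$tsb64", img_id)
print(filename_regex.sub("_", name))
# e.g. "a_photo_of_an_astronaut_riding_a_horse_XXXXXXX0"
```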
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
now = time.time()
save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub("_", task_data.session_id))
app_config = app.getConfig()
folder_format = app_config.get("folder_format", "$id")
save_dir_path = os.path.join(task_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
metadata_entries = get_metadata_entries_for_request(req, task_data)
make_filename = make_filename_callback(req, now=now)
file_number = calculate_img_number(save_dir_path, task_data)
make_filename = make_filename_callback(
app_config.get("filename_format", "$p_$tsb64"),
req,
task_data,
file_number,
now=now,
)
if task_data.show_only_filtered_image or filtered_images is images:
save_images(
@ -48,7 +136,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
output_lossless=task_data.output_lossless,
)
if task_data.metadata_output_format:
for metadata_output_format in task_data.metadata_output_format.split(','):
for metadata_output_format in task_data.metadata_output_format.split(","):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
@ -58,7 +146,14 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
file_format=task_data.output_format,
)
else:
make_filter_filename = make_filename_callback(req, now=now, suffix="filtered")
make_filter_filename = make_filename_callback(
app_config.get("filename_format", "$p_$tsb64"),
req,
task_data,
file_number,
now=now,
suffix="filtered",
)
save_images(
images,
@ -76,44 +171,23 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
output_quality=task_data.output_quality,
output_lossless=task_data.output_lossless,
)
if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
save_dir_path,
file_name=make_filter_filename,
output_format=task_data.metadata_output_format,
file_format=task_data.output_format,
)
if task_data.metadata_output_format:
for metadata_output_format in task_data.metadata_output_format.split(","):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
save_dir_path,
file_name=make_filter_filename,
output_format=task_data.metadata_output_format,
file_format=task_data.output_format,
)
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
metadata = get_printable_request(req)
metadata.update(
{
"use_stable_diffusion_model": task_data.use_stable_diffusion_model,
"use_vae_model": task_data.use_vae_model,
"use_hypernetwork_model": task_data.use_hypernetwork_model,
"use_lora_model": task_data.use_lora_model,
"use_face_correction": task_data.use_face_correction,
"use_upscale": task_data.use_upscale,
}
)
if metadata["use_upscale"] is not None:
metadata["upscale_amount"] = task_data.upscale_amount
if task_data.use_hypernetwork_model is None:
del metadata["hypernetwork_strength"]
if task_data.use_lora_model is None:
if "lora_alpha" in metadata:
del metadata["lora_alpha"]
from easydiffusion import app
app_config = app.getConfig()
if not app_config.get("test_diffusers", False) and "use_lora_model" in metadata:
del metadata["use_lora_model"]
metadata = get_printable_request(req, task_data)
# if text, format it in the text format expected by the UI
is_txt_format = task_data.metadata_output_format.lower() == "txt"
is_txt_format = task_data.metadata_output_format and "txt" in task_data.metadata_output_format.lower().split(",")
if is_txt_format:
metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}
@ -124,25 +198,101 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
return entries
def get_printable_request(req: GenerateImageRequest):
metadata = req.dict()
del metadata["init_image"]
del metadata["init_image_mask"]
if req.init_image is None:
def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
req_metadata = req.dict()
task_data_metadata = task_data.dict()
# Save the metadata in the order defined in TASK_TEXT_MAPPING
metadata = {}
for key in TASK_TEXT_MAPPING.keys():
if key in req_metadata:
metadata[key] = req_metadata[key]
elif key in task_data_metadata:
metadata[key] = task_data_metadata[key]
# Clean up the metadata
if req.init_image is None and "prompt_strength" in metadata:
del metadata["prompt_strength"]
if task_data.use_upscale is None and "upscale_amount" in metadata:
del metadata["upscale_amount"]
if task_data.use_hypernetwork_model is None and "hypernetwork_strength" in metadata:
del metadata["hypernetwork_strength"]
if task_data.use_lora_model is None and "lora_alpha" in metadata:
del metadata["lora_alpha"]
if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
del metadata["latent_upscaler_steps"]
app_config = app.getConfig()
if not app_config.get("test_diffusers", False):
for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata):
del metadata[key]
return metadata
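The effect of the rewrite is easiest to see on a small subset of `TASK_TEXT_MAPPING`: keys come out in mapping order, and keys that don't apply to the request are dropped:

```python
order = ["prompt", "seed", "use_stable_diffusion_model",
         "use_hypernetwork_model", "hypernetwork_strength"]  # subset of TASK_TEXT_MAPPING
values = {"seed": 7, "prompt": "a cat", "use_stable_diffusion_model": "sd-v1-4",
          "use_hypernetwork_model": None, "hypernetwork_strength": 0}

metadata = {key: values[key] for key in order if key in values}
if metadata.get("use_hypernetwork_model") is None:
    del metadata["hypernetwork_strength"]

print(list(metadata))
# ['prompt', 'seed', 'use_stable_diffusion_model', 'use_hypernetwork_model']
```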
def make_filename_callback(req: GenerateImageRequest, suffix=None, now=None):
def make_filename_callback(
filename_format: str,
req: GenerateImageRequest,
task_data: TaskData,
folder_img_number: int,
suffix=None,
now=None,
):
if now is None:
now = time.time()
def make_filename(i):
img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(int(i),36) # Base 36 conversion, 0-9, A-Z
prompt_flattened = filename_regex.sub("_", req.prompt)[:50]
name = f"{prompt_flattened}_{img_id}"
name = format_file_name(filename_format, req, task_data, now, i, folder_img_number)
name = name if suffix is None else f"{name}_{suffix}"
return name
return make_filename
def _calculate_img_number(save_dir_path: str, task_data: TaskData):
def get_highest_img_number(accumulator: int, file: os.DirEntry) -> int:
if not file.is_file():
return accumulator
if len(list(filter(lambda e: file.name.endswith(e), app.IMAGE_EXTENSIONS))) == 0:
return accumulator
get_highest_img_number.number_of_images = get_highest_img_number.number_of_images + 1
number_match = img_number_regex.match(file.name)
if not number_match:
return accumulator
file_number = number_match.group().lstrip("0")
# Handle 00000
return int(file_number) if file_number else 0
get_highest_img_number.number_of_images = 0
highest_file_number = -1
if os.path.isdir(save_dir_path):
existing_files = list(os.scandir(save_dir_path))
highest_file_number = reduce(get_highest_img_number, existing_files, -1)
calculated_img_number = max(highest_file_number, get_highest_img_number.number_of_images - 1)
if task_data.session_id in _calculate_img_number.session_img_numbers:
calculated_img_number = max(
_calculate_img_number.session_img_numbers[task_data.session_id],
calculated_img_number,
)
calculated_img_number = calculated_img_number + 1
_calculate_img_number.session_img_numbers[task_data.session_id] = calculated_img_number
return calculated_img_number
_calculate_img_number.session_img_numbers = {}
def calculate_img_number(save_dir_path: str, task_data: TaskData):
return ImageNumber(lambda: _calculate_img_number(save_dir_path, task_data))
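A simplified stand-in for `_calculate_img_number` (without the per-session memo), showing how the next `$n` value is derived from files already in the folder; the image extensions are assumed:

```python
import os
import re

img_number_regex = re.compile("([0-9]{5,})")

def next_image_number(save_dir_path: str) -> int:
    highest, count = -1, 0
    if os.path.isdir(save_dir_path):
        for entry in os.scandir(save_dir_path):
            if not entry.is_file():
                continue
            if not entry.name.lower().endswith((".png", ".jpg", ".jpeg", ".webp")):
                continue
            count += 1
            match = img_number_regex.match(entry.name)
            if match:
                highest = max(highest, int(match.group()))
    # next number: one past the highest prefix, or past the file count if larger
    return max(highest, count - 1) + 1

print(next_image_number("outputs/session_123"))  # 0 for a new/empty folder
```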

View File

@ -1,171 +0,0 @@
{
"_name_or_path": "clip-vit-large-patch14/",
"architectures": [
"CLIPModel"
],
"initializer_factor": 1.0,
"logit_scale_init_value": 2.6592,
"model_type": "clip",
"projection_dim": 768,
"text_config": {
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_dropout": 0.0,
"bad_words_ids": null,
"bos_token_id": 0,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.0,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "quick_gelu",
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 3072,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 77,
"min_length": 0,
"model_type": "clip_text_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 12,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 12,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 1,
"prefix": null,
"problem_type": null,
"projection_dim" : 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"task_specific_params": null,
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.16.0.dev0",
"use_bfloat16": false,
"vocab_size": 49408
},
"text_config_dict": {
"hidden_size": 768,
"intermediate_size": 3072,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"projection_dim": 768
},
"torch_dtype": "float32",
"transformers_version": null,
"vision_config": {
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_dropout": 0.0,
"bad_words_ids": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"dropout": 0.0,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "quick_gelu",
"hidden_size": 1024,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 224,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "clip_vision_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 16,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 24,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 14,
"prefix": null,
"problem_type": null,
"projection_dim" : 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"task_specific_params": null,
"temperature": 1.0,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"transformers_version": "4.16.0.dev0",
"use_bfloat16": false
},
"vision_config_dict": {
"hidden_size": 1024,
"intermediate_size": 4096,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"patch_size": 14,
"projection_dim": 768
}
}

View File

@ -30,7 +30,7 @@
<h1>
<img id="logo_img" src="/media/images/icon-512x512.png" >
Easy Diffusion
<small>v2.5.30 <span id="updateBranchLabel"></span></small>
<small>v2.5.40 <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
@ -135,11 +135,14 @@
<button id="reload-models" class="secondaryButton reloadModels"><i class='fa-solid fa-rotate'></i></button>
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about custom models</span></i></a>
</td></tr>
<!-- <tr id="modelConfigSelection" class="pl-5"><td><label for="model_config">Model Config:</i></label></td><td>
<select id="model_config" name="model_config">
</select>
</td></tr> -->
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
<tr class="pl-5 displayNone" id="clip_skip_config">
<td><label for="clip_skip">Clip Skip:</label></td>
<td>
<input id="clip_skip" name="clip_skip" type="checkbox">
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Clip-Skip" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Clip Skip</span></i></a>
</td>
</tr>
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</label></td><td>
<input id="vae_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
</td></tr>
@ -154,16 +157,18 @@
<option value="dpm2_a">DPM2 Ancestral</option>
<option value="lms">LMS</option>
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
<option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
<option value="dpmpp_2s_a" class="k_diffusion-only">DPM++ 2s Ancestral (Karras)</option>
<option value="dpmpp_2m">DPM++ 2m (Karras)</option>
<option value="dpmpp_sde">DPM++ SDE (Karras)</option>
<option value="dpm_fast">DPM Fast (Karras)</option>
<option value="dpm_adaptive">DPM Adaptive (Karras)</option>
<option value="unipc_snr">UniPC SNR</option>
<option value="dpmpp_sde" class="k_diffusion-only">DPM++ SDE (Karras)</option>
<option value="dpm_fast" class="k_diffusion-only">DPM Fast (Karras)</option>
<option value="dpm_adaptive" class="k_diffusion-only">DPM Adaptive (Karras)</option>
<option value="ddpm" class="diffusers-only">DDPM</option>
<option value="deis" class="diffusers-only">DEIS</option>
<option value="unipc_snr" class="k_diffusion-only">UniPC SNR</option>
<option value="unipc_tu">UniPC TU</option>
<option value="unipc_snr_2">UniPC SNR 2</option>
<option value="unipc_tu_2">UniPC TU 2</option>
<option value="unipc_tq">UniPC TQ</option>
<option value="unipc_snr_2" class="k_diffusion-only">UniPC SNR 2</option>
<option value="unipc_tu_2" class="k_diffusion-only">UniPC TU 2</option>
<option value="unipc_tq" class="k_diffusion-only">UniPC TQ</option>
</select>
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
</td></tr>
@ -217,20 +222,29 @@
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
<tr id="lora_model_container" class="pl-5"><td><label for="lora_model">LoRA:</i></label></td><td>
<tr id="lora_model_container" class="pl-5"><td><label for="lora_model">LoRA:</label></td><td>
<input id="lora_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="lora_alpha_container" class="pl-5">
<td><label for="lora_alpha_slider">LoRA strength:</label></td>
<td><label for="lora_alpha_slider">LoRA Strength:</label></td>
<td> <input id="lora_alpha_slider" name="lora_alpha_slider" class="editor-slider" value="50" type="range" min="0" max="100"> <input id="lora_alpha" name="lora_alpha" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
</tr>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="hypernetwork_strength_container" class="pl-5">
<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
</tr>
<tr id="tiling_container" class="pl-5"><td><label for="tiling">Seamless Tiling:</label></td><td>
<select id="tiling" name="tiling">
<option value="none" selected>None</option>
<option value="x">Horizontal</option>
<option value="y">Vertical</option>
<option value="xy">Both</option>
</select>
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Seamless-Tiling" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Seamless Tiling</span></i></a>
</td></tr>
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
<select id="output_format" name="output_format">
<option value="jpeg" selected>jpeg</option>
@ -238,7 +252,7 @@
<option value="webp">webp</option>
</select>
<span id="output_lossless_container" class="displayNone">
<input id="output_lossless" name="output_lossless" type="checkbox"><label for="output_lossless">Lossless</label></td></tr>
<input id="output_lossless" name="output_lossless" type="checkbox"><label for="output_lossless">Lossless</label>
</span>
</td></tr>
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
@ -249,18 +263,27 @@
<div><ul>
<li><b class="settings-subheader">Render Settings</b></li>
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div></li>
<li class="pl-5">
<input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div>
<div id="codeformer_settings" class="displayNone sub-settings">
<input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox"><label for="codeformer_upscale_faces">Upscale Faces <small>(improves the resolution of faces)</small></label>
</div>
</li>
<li class="pl-5">
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
<select id="upscale_amount" name="upscale_amount">
<option value="2">2x</option>
<option value="4" selected>4x</option>
<option id="upscale_amount_2x" value="2">2x</option>
<option id="upscale_amount_4x" value="4" selected>4x</option>
</select>
with
<select id="upscale_model" name="upscale_model">
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
<option value="latent_upscaler">Latent Upscaler 2x</option>
</select>
<div id="latent_upscaler_settings" class="displayNone sub-settings">
<label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td> <input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
</div>
</li>
<li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
</ul></div>
@ -293,6 +316,12 @@
<div id="preview" class="col-free">
<div id="initial-text">
Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>
You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
and selecting the desired modifiers.<br/><br/>
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
</div>
<div id="preview-content">
<div id="preview-tools" class="displayNone">
<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can icon"></i> Clear All</button>
@ -326,12 +355,6 @@
<div class="clearfix" style="clear: both;"></div>
</div>
</div>
<div id="initial-text">
Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>
You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
and selecting the desired modifiers.<br/><br/>
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
</div>
</div>
</div>
@ -520,7 +543,7 @@ async function init() {
}
})
playSound()
// playSound()
}
init()

View File

@ -3,7 +3,7 @@
font-family: 'Work Sans';
font-style: normal;
font-weight: 400;
src: local(''),
src: local('Work Sans'),
url('/media/fonts/work-sans-v18-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
url('/media/fonts/work-sans-v18-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}
@ -13,7 +13,7 @@
font-family: 'Work Sans';
font-style: normal;
font-weight: 600;
src: local(''),
src: local('Work Sans'),
url('/media/fonts/work-sans-v18-latin-600.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
url('/media/fonts/work-sans-v18-latin-600.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}
@ -23,7 +23,7 @@
font-family: 'Work Sans';
font-style: normal;
font-weight: 700;
src: local(''),
src: local('Work Sans'),
url('/media/fonts/work-sans-v18-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
url('/media/fonts/work-sans-v18-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}
@ -33,8 +33,8 @@
font-family: 'Work Sans';
font-style: normal;
font-weight: 800;
src: local(''),
src: local('Work Sans'),
url('/media/fonts/work-sans-v18-latin-800.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
url('/media/fonts/work-sans-v18-latin-800.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}

View File

@ -70,6 +70,14 @@
max-height: calc(100vh - (var(--popup-padding) * 2) - 4px);
}
#viewFullSizeImgModal img:not(.natural-zoom) {
cursor: grab;
}
#viewFullSizeImgModal .grabbing img:not(.natural-zoom) {
cursor: grabbing;
}
#viewFullSizeImgModal .content > div::-webkit-scrollbar-track, #viewFullSizeImgModal .content > div::-webkit-scrollbar-corner {
background: rgba(0, 0, 0, .5)
}

View File

@ -238,6 +238,10 @@ code {
#stopImage:hover {
background: rgb(177, 27, 0);
}
#undo {
float: right;
margin-left: 5px;
}
div#render-buttons {
gap: 3px;
@ -1298,3 +1302,60 @@ body.wait-pause {
.displayNone {
display:none !important;
}
.sub-settings {
padding-top: 3pt;
padding-bottom: 3pt;
padding-left: 5pt;
}
/* TOAST NOTIFICATIONS */
.toast-notification {
position: fixed;
bottom: 10px;
right: -300px;
width: 300px;
background-color: #333;
color: #fff;
padding: 10px 20px;
border-radius: 5px;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.5);
z-index: 9999;
animation: slideInRight 0.5s ease forwards;
transition: bottom 0.5s ease; /* Add a transition to smoothly reposition the toasts */
}
.toast-notification-error {
color: red;
}
@keyframes slideInRight {
from {
right: -300px;
}
to {
right: 10px;
}
}
.toast-notification.hide {
animation: slideOutRight 0.5s ease forwards;
}
@keyframes slideOutRight {
from {
right: 10px;
}
to {
right: -300px;
}
}
@keyframes slideDown {
from {
bottom: 10px;
}
to {
bottom: 0;
}
}

View File

@ -153,6 +153,10 @@
position: absolute;
z-index: 3;
}
.modifier-card-overlay:hover ~ .modifier-card-container .modifier-card-label.tooltip .tooltip-text {
visibility: visible;
opacity: 1;
}
.modifier-card:hover > .modifier-card-image-container .modifier-card-image-overlay {
opacity: 1;
}
@ -220,4 +224,4 @@
#modifier-settings-config textarea {
width: 90%;
height: 150px;
}
}

View File

@ -58,7 +58,7 @@
font-size: 10pt;
font-weight: normal;
transition: none;
transition:property: none;
transition-property: none;
cursor: default;
}

View File

@ -33,7 +33,7 @@
--input-height: 18px;
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (2 * var(--value-step))));
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3 * var(--value-step))));
--tertiary-color: var(--input-text-color)
--tertiary-color: var(--input-text-color);
/* Main theme color, hex color fallback. */
--theme-color-fallback: #673AB6;

View File

@ -13,6 +13,7 @@ const SETTINGS_IDS_LIST = [
"num_outputs_total",
"num_outputs_parallel",
"stable_diffusion_model",
"clip_skip",
"vae_model",
"hypernetwork_model",
"lora_model",
@ -24,6 +25,7 @@ const SETTINGS_IDS_LIST = [
"prompt_strength",
"hypernetwork_strength",
"lora_alpha",
"tiling",
"output_format",
"output_quality",
"output_lossless",
@ -33,6 +35,7 @@ const SETTINGS_IDS_LIST = [
"gfpgan_model",
"use_upscale",
"upscale_amount",
"latent_upscaler_steps",
"block_nsfw",
"show_only_filtered_image",
"upscale_model",
@ -52,27 +55,27 @@ const SETTINGS_IDS_LIST = [
"auto_scroll",
"zip_toggle",
"tree_toggle",
"json_toggle"
"json_toggle",
]
const IGNORE_BY_DEFAULT = [
"prompt"
]
const IGNORE_BY_DEFAULT = ["prompt"]
const SETTINGS_SECTIONS = [ // gets the "keys" property filled in with an ordered list of settings in this section via initSettings
{ id: "editor-inputs", name: "Prompt" },
const SETTINGS_SECTIONS = [
// gets the "keys" property filled in with an ordered list of settings in this section via initSettings
{ id: "editor-inputs", name: "Prompt" },
{ id: "editor-settings", name: "Image Settings" },
{ id: "system-settings", name: "System Settings" },
{ id: "container", name: "Other" }
{ id: "container", name: "Other" },
]
async function initSettings() {
SETTINGS_IDS_LIST.forEach(id => {
SETTINGS_IDS_LIST.forEach((id) => {
var element = document.getElementById(id)
if (!element) {
console.error(`Missing settings element ${id}`)
}
if (id in SETTINGS) { // don't create it again
if (id in SETTINGS) {
// don't create it again
return
}
SETTINGS[id] = {
@ -81,28 +84,28 @@ async function initSettings() {
label: getSettingLabel(element),
default: getSetting(element),
value: getSetting(element),
ignore: IGNORE_BY_DEFAULT.includes(id)
ignore: IGNORE_BY_DEFAULT.includes(id),
}
element.addEventListener("input", settingChangeHandler)
element.addEventListener("change", settingChangeHandler)
})
var unsorted_settings_ids = [...SETTINGS_IDS_LIST]
SETTINGS_SECTIONS.forEach(section => {
SETTINGS_SECTIONS.forEach((section) => {
var name = section.name
var element = document.getElementById(section.id)
var unsorted_ids = unsorted_settings_ids.map(id => `#${id}`).join(",")
var children = unsorted_ids == "" ? [] : Array.from(element.querySelectorAll(unsorted_ids));
var unsorted_ids = unsorted_settings_ids.map((id) => `#${id}`).join(",")
var children = unsorted_ids == "" ? [] : Array.from(element.querySelectorAll(unsorted_ids))
section.keys = []
children.forEach(e => {
children.forEach((e) => {
section.keys.push(e.id)
})
unsorted_settings_ids = unsorted_settings_ids.filter(id => children.find(e => e.id == id) == undefined)
unsorted_settings_ids = unsorted_settings_ids.filter((id) => children.find((e) => e.id == id) == undefined)
})
loadSettings()
}
function getSetting(element) {
if (element.dataset && 'path' in element.dataset) {
if (element.dataset && "path" in element.dataset) {
return element.dataset.path
}
if (typeof element === "string" || element instanceof String) {
@ -114,7 +117,7 @@ function getSetting(element) {
return element.value
}
function setSetting(element, value) {
if (element.dataset && 'path' in element.dataset) {
if (element.dataset && "path" in element.dataset) {
element.dataset.path = value
return // no need to dispatch any event here because the models are not loaded yet
}
@ -127,8 +130,7 @@ function setSetting(element, value) {
}
if (element.type == "checkbox") {
element.checked = value
}
else {
} else {
element.value = value
}
element.dispatchEvent(new Event("input"))
@ -136,11 +138,11 @@ function setSetting(element, value) {
}
function saveSettings() {
var saved_settings = Object.values(SETTINGS).map(setting => {
var saved_settings = Object.values(SETTINGS).map((setting) => {
return {
key: setting.key,
value: setting.value,
ignore: setting.ignore
ignore: setting.ignore,
}
})
localStorage.setItem(SETTINGS_KEY, JSON.stringify(saved_settings))
@ -151,16 +153,16 @@ function loadSettings() {
var saved_settings_text = localStorage.getItem(SETTINGS_KEY)
if (saved_settings_text) {
var saved_settings = JSON.parse(saved_settings_text)
if (saved_settings.find(s => s.key == "auto_save_settings")?.value == false) {
if (saved_settings.find((s) => s.key == "auto_save_settings")?.value == false) {
setSetting("auto_save_settings", false)
return
}
CURRENTLY_LOADING_SETTINGS = true
saved_settings.forEach(saved_setting => {
saved_settings.forEach((saved_setting) => {
var setting = SETTINGS[saved_setting.key]
if (!setting) {
console.warn(`Attempted to load setting ${saved_setting.key}, but no setting found`);
return null;
console.warn(`Attempted to load setting ${saved_setting.key}, but no setting found`)
return null
}
setting.ignore = saved_setting.ignore
if (!setting.ignore) {
@ -169,10 +171,25 @@ function loadSettings() {
}
})
CURRENTLY_LOADING_SETTINGS = false
}
else {
} else if (localStorage.length < 2) {
// localStorage is too short for OldSettings
// So this is likely the first time Easy Diffusion is running.
// Initialize vram_usage_level based on the available VRAM
function initGPUProfile(event) {
if ( "detail" in event
&& "active" in event.detail
&& "cuda:0" in event.detail.active
&& event.detail.active["cuda:0"].mem_total <4.5 )
{
vramUsageLevelField.value = "low"
vramUsageLevelField.dispatchEvent(new Event("change"))
}
document.removeEventListener("system_info_update", initGPUProfile)
}
document.addEventListener("system_info_update", initGPUProfile)
} else {
CURRENTLY_LOADING_SETTINGS = true
tryLoadOldSettings();
tryLoadOldSettings()
CURRENTLY_LOADING_SETTINGS = false
saveSettings()
}
@ -180,9 +197,9 @@ function loadSettings() {
function loadDefaultSettingsSection(section_id) {
CURRENTLY_LOADING_SETTINGS = true
var section = SETTINGS_SECTIONS.find(s => s.id == section_id);
section.keys.forEach(key => {
var setting = SETTINGS[key];
var section = SETTINGS_SECTIONS.find((s) => s.id == section_id)
section.keys.forEach((key) => {
var setting = SETTINGS[key]
setting.value = setting.default
setSetting(setting.element, setting.value)
})
@ -218,10 +235,10 @@ function getSettingLabel(element) {
function fillSaveSettingsConfigTable() {
saveSettingsConfigTable.textContent = ""
SETTINGS_SECTIONS.forEach(section => {
SETTINGS_SECTIONS.forEach((section) => {
var section_row = `<tr><th>${section.name}</th><td></td></tr>`
saveSettingsConfigTable.insertAdjacentHTML("beforeend", section_row)
section.keys.forEach(key => {
section.keys.forEach((key) => {
var setting = SETTINGS[key]
var element = setting.element
var checkbox_id = `shouldsave_${element.id}`
@ -234,7 +251,7 @@ function fillSaveSettingsConfigTable() {
var newrow = `<tr><td><label for="${checkbox_id}">${setting.label}</label></td><td><input id="${checkbox_id}" name="${checkbox_id}" ${is_checked} type="checkbox" ></td><td><small>(${value})</small></td></tr>`
saveSettingsConfigTable.insertAdjacentHTML("beforeend", newrow)
var checkbox = document.getElementById(checkbox_id)
checkbox.addEventListener("input", event => {
checkbox.addEventListener("input", (event) => {
setting.ignore = !checkbox.checked
saveSettings()
})
@ -245,9 +262,6 @@ function fillSaveSettingsConfigTable() {
// configureSettingsSaveBtn
var autoSaveSettings = document.getElementById("auto_save_settings")
var configSettingsButton = document.createElement("button")
configSettingsButton.textContent = "Configure"
@ -256,33 +270,32 @@ autoSaveSettings.insertAdjacentElement("beforebegin", configSettingsButton)
autoSaveSettings.addEventListener("change", () => {
configSettingsButton.style.display = autoSaveSettings.checked ? "block" : "none"
})
configSettingsButton.addEventListener('click', () => {
configSettingsButton.addEventListener("click", () => {
fillSaveSettingsConfigTable()
saveSettingsConfigOverlay.classList.add("active")
})
resetImageSettingsButton.addEventListener('click', event => {
loadDefaultSettingsSection("editor-settings");
resetImageSettingsButton.addEventListener("click", (event) => {
loadDefaultSettingsSection("editor-settings")
event.stopPropagation()
})
function tryLoadOldSettings() {
console.log("Loading old user settings")
// load v1 auto-save.js settings
var old_map = {
"guidance_scale_slider": "guidance_scale",
"prompt_strength_slider": "prompt_strength"
guidance_scale_slider: "guidance_scale",
prompt_strength_slider: "prompt_strength",
}
var settings_key_v1 = "user_settings"
var saved_settings_text = localStorage.getItem(settings_key_v1)
if (saved_settings_text) {
var saved_settings = JSON.parse(saved_settings_text)
Object.keys(saved_settings.should_save).forEach(key => {
Object.keys(saved_settings.should_save).forEach((key) => {
key = key in old_map ? old_map[key] : key
if (!(key in SETTINGS)) return
SETTINGS[key].ignore = !saved_settings.should_save[key]
});
Object.keys(saved_settings.values).forEach(key => {
})
Object.keys(saved_settings.values).forEach((key) => {
key = key in old_map ? old_map[key] : key
if (!(key in SETTINGS)) return
var setting = SETTINGS[key]
@ -290,38 +303,42 @@ function tryLoadOldSettings() {
setting.value = saved_settings.values[key]
setSetting(setting.element, setting.value)
}
});
})
localStorage.removeItem(settings_key_v1)
}
// load old individually stored items
var individual_settings_map = { // maps old localStorage-key to new SETTINGS-key
"soundEnabled": "sound_toggle",
"saveToDisk": "save_to_disk",
"useCPU": "use_cpu",
"diskPath": "diskPath",
"useFaceCorrection": "use_face_correction",
"useUpscaling": "use_upscale",
"showOnlyFilteredImage": "show_only_filtered_image",
"streamImageProgress": "stream_image_progress",
"outputFormat": "output_format",
"autoSaveSettings": "auto_save_settings",
};
Object.keys(individual_settings_map).forEach(localStorageKey => {
var localStorageValue = localStorage.getItem(localStorageKey);
var individual_settings_map = {
// maps old localStorage-key to new SETTINGS-key
soundEnabled: "sound_toggle",
saveToDisk: "save_to_disk",
useCPU: "use_cpu",
diskPath: "diskPath",
useFaceCorrection: "use_face_correction",
useUpscaling: "use_upscale",
showOnlyFilteredImage: "show_only_filtered_image",
streamImageProgress: "stream_image_progress",
outputFormat: "output_format",
autoSaveSettings: "auto_save_settings",
}
Object.keys(individual_settings_map).forEach((localStorageKey) => {
var localStorageValue = localStorage.getItem(localStorageKey)
if (localStorageValue !== null) {
let key = individual_settings_map[localStorageKey]
var setting = SETTINGS[key]
if (!setting) {
console.warn(`Attempted to map old setting ${key}, but no setting found`);
return null;
console.warn(`Attempted to map old setting ${key}, but no setting found`)
return null
}
if (setting.element.type == "checkbox" && (typeof localStorageValue === "string" || localStorageValue instanceof String)) {
if (
setting.element.type == "checkbox" &&
(typeof localStorageValue === "string" || localStorageValue instanceof String)
) {
localStorageValue = localStorageValue == "true"
}
setting.value = localStorageValue
setSetting(setting.element, setting.value)
localStorage.removeItem(localStorageKey);
localStorage.removeItem(localStorageKey)
}
})
}

View File

@ -1,25 +1,25 @@
"use strict" // Opt in to a restricted variant of JavaScript
const EXT_REGEX = /(?:\.([^.]+))?$/
const TEXT_EXTENSIONS = ['txt', 'json']
const IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'tga', 'webp']
const TEXT_EXTENSIONS = ["txt", "json"]
const IMAGE_EXTENSIONS = ["jpg", "jpeg", "png", "bmp", "tiff", "tif", "tga", "webp"]
function parseBoolean(stringValue) {
if (typeof stringValue === 'boolean') {
if (typeof stringValue === "boolean") {
return stringValue
}
if (typeof stringValue === 'number') {
if (typeof stringValue === "number") {
return stringValue !== 0
}
if (typeof stringValue !== 'string') {
if (typeof stringValue !== "string") {
return false
}
switch(stringValue?.toLowerCase()?.trim()) {
switch (stringValue?.toLowerCase()?.trim()) {
case "true":
case "yes":
case "on":
case "1":
return true;
return true
case "false":
case "no":
@ -28,67 +28,77 @@ function parseBoolean(stringValue) {
case "none":
case null:
case undefined:
return false;
return false
}
try {
return Boolean(JSON.parse(stringValue));
return Boolean(JSON.parse(stringValue))
} catch {
return Boolean(stringValue)
}
}
// keep in sync with `ui/easydiffusion/utils/save_utils.py`
const TASK_MAPPING = {
prompt: { name: 'Prompt',
prompt: {
name: "Prompt",
setUI: (prompt) => {
promptField.value = prompt
},
readUI: () => promptField.value,
parse: (val) => val
parse: (val) => val,
},
negative_prompt: { name: 'Negative Prompt',
negative_prompt: {
name: "Negative Prompt",
setUI: (negative_prompt) => {
negativePromptField.value = negative_prompt
},
readUI: () => negativePromptField.value,
parse: (val) => val
parse: (val) => val,
},
active_tags: { name: "Image Modifiers",
active_tags: {
name: "Image Modifiers",
setUI: (active_tags) => {
refreshModifiersState(active_tags)
},
readUI: () => activeTags.map(x => x.name),
parse: (val) => val
readUI: () => activeTags.map((x) => x.name),
parse: (val) => val,
},
inactive_tags: { name: "Inactive Image Modifiers",
inactive_tags: {
name: "Inactive Image Modifiers",
setUI: (inactive_tags) => {
refreshInactiveTags(inactive_tags)
},
readUI: () => activeTags.filter(tag => tag.inactive === true).map(x => x.name),
parse: (val) => val
readUI: () => activeTags.filter((tag) => tag.inactive === true).map((x) => x.name),
parse: (val) => val,
},
width: { name: 'Width',
width: {
name: "Width",
setUI: (width) => {
const oldVal = widthField.value
widthField.value = width
if (!widthField.value) {
widthField.value = oldVal
}
widthField.dispatchEvent(new Event("change"))
},
readUI: () => parseInt(widthField.value),
parse: (val) => parseInt(val)
parse: (val) => parseInt(val),
},
height: { name: 'Height',
height: {
name: "Height",
setUI: (height) => {
const oldVal = heightField.value
heightField.value = height
if (!heightField.value) {
heightField.value = oldVal
}
heightField.dispatchEvent(new Event("change"))
},
readUI: () => parseInt(heightField.value),
parse: (val) => parseInt(val)
parse: (val) => parseInt(val),
},
seed: { name: 'Seed',
seed: {
name: "Seed",
setUI: (seed) => {
if (!seed) {
randomSeedField.checked = true
@ -97,89 +107,108 @@ const TASK_MAPPING = {
return
}
randomSeedField.checked = false
randomSeedField.dispatchEvent(new Event('change')) // let plugins know that the state of the random seed toggle changed
randomSeedField.dispatchEvent(new Event("change")) // let plugins know that the state of the random seed toggle changed
seedField.disabled = false
seedField.value = seed
},
readUI: () => parseInt(seedField.value), // just return the value the user is seeing in the UI
parse: (val) => parseInt(val)
parse: (val) => parseInt(val),
},
num_inference_steps: { name: 'Steps',
num_inference_steps: {
name: "Steps",
setUI: (num_inference_steps) => {
numInferenceStepsField.value = num_inference_steps
},
readUI: () => parseInt(numInferenceStepsField.value),
parse: (val) => parseInt(val)
parse: (val) => parseInt(val),
},
guidance_scale: { name: 'Guidance Scale',
guidance_scale: {
name: "Guidance Scale",
setUI: (guidance_scale) => {
guidanceScaleField.value = guidance_scale
updateGuidanceScaleSlider()
},
readUI: () => parseFloat(guidanceScaleField.value),
parse: (val) => parseFloat(val)
parse: (val) => parseFloat(val),
},
prompt_strength: { name: 'Prompt Strength',
prompt_strength: {
name: "Prompt Strength",
setUI: (prompt_strength) => {
promptStrengthField.value = prompt_strength
updatePromptStrengthSlider()
},
readUI: () => parseFloat(promptStrengthField.value),
parse: (val) => parseFloat(val)
parse: (val) => parseFloat(val),
},
init_image: { name: 'Initial Image',
init_image: {
name: "Initial Image",
setUI: (init_image) => {
initImagePreview.src = init_image
},
readUI: () => initImagePreview.src,
parse: (val) => val
parse: (val) => val,
},
mask: { name: 'Mask',
mask: {
name: "Mask",
setUI: (mask) => {
setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
setTimeout(() => {
// add a delay to ensure this happens AFTER the main image loads (which reloads the inpainter)
imageInpainter.setImg(mask)
}, 250)
maskSetting.checked = Boolean(mask)
},
readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
parse: (val) => val
parse: (val) => val,
},
preserve_init_image_color_profile: { name: 'Preserve Color Profile',
preserve_init_image_color_profile: {
name: "Preserve Color Profile",
setUI: (preserve_init_image_color_profile) => {
applyColorCorrectionField.checked = parseBoolean(preserve_init_image_color_profile)
},
readUI: () => applyColorCorrectionField.checked,
parse: (val) => parseBoolean(val)
parse: (val) => parseBoolean(val),
},
use_face_correction: { name: 'Use Face Correction',
use_face_correction: {
name: "Use Face Correction",
setUI: (use_face_correction) => {
const oldVal = gfpganModelField.value
gfpganModelField.value = getModelPath(use_face_correction, ['.pth'])
if (gfpganModelField.value) { // Is a valid value for the field.
useFaceCorrectionField.checked = true
gfpganModelField.disabled = false
} else { // Not a valid value, restore the old value and disable the filter.
console.log("use face correction", use_face_correction)
if (use_face_correction == null || use_face_correction == "None") {
gfpganModelField.disabled = true
gfpganModelField.value = oldVal
useFaceCorrectionField.checked = false
} else {
gfpganModelField.value = getModelPath(use_face_correction, [".pth"])
if (gfpganModelField.value) {
// Is a valid value for the field.
useFaceCorrectionField.checked = true
gfpganModelField.disabled = false
} else {
// Not a valid value, restore the old value and disable the filter.
gfpganModelField.disabled = true
gfpganModelField.value = oldVal
useFaceCorrectionField.checked = false
}
}
//useFaceCorrectionField.checked = parseBoolean(use_face_correction)
},
readUI: () => (useFaceCorrectionField.checked ? gfpganModelField.value : undefined),
parse: (val) => val
parse: (val) => val,
},
use_upscale: { name: 'Use Upscaling',
use_upscale: {
name: "Use Upscaling",
setUI: (use_upscale) => {
const oldVal = upscaleModelField.value
upscaleModelField.value = getModelPath(use_upscale, ['.pth'])
if (upscaleModelField.value) { // Is a valid value for the field.
upscaleModelField.value = getModelPath(use_upscale, [".pth"])
if (upscaleModelField.value) {
// Is a valid value for the field.
useUpscalingField.checked = true
upscaleModelField.disabled = false
upscaleAmountField.disabled = false
} else { // Not a valid value, restore the old value and disable the filter.
} else {
// Not a valid value, restore the old value and disable the filter.
upscaleModelField.disabled = true
upscaleAmountField.disabled = true
upscaleModelField.value = oldVal
@ -187,27 +216,38 @@ const TASK_MAPPING = {
}
},
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
parse: (val) => val
parse: (val) => val,
},
upscale_amount: { name: 'Upscale By',
upscale_amount: {
name: "Upscale By",
setUI: (upscale_amount) => {
upscaleAmountField.value = upscale_amount
},
readUI: () => upscaleAmountField.value,
parse: (val) => val
parse: (val) => val,
},
sampler_name: { name: 'Sampler',
latent_upscaler_steps: {
name: "Latent Upscaler Steps",
setUI: (latent_upscaler_steps) => {
latentUpscalerStepsField.value = latent_upscaler_steps
},
readUI: () => latentUpscalerStepsField.value,
parse: (val) => val,
},
sampler_name: {
name: "Sampler",
setUI: (sampler_name) => {
samplerField.value = sampler_name
},
readUI: () => samplerField.value,
parse: (val) => val
parse: (val) => val,
},
use_stable_diffusion_model: { name: 'Stable Diffusion model',
use_stable_diffusion_model: {
name: "Stable Diffusion model",
setUI: (use_stable_diffusion_model) => {
const oldVal = stableDiffusionModelField.value
use_stable_diffusion_model = getModelPath(use_stable_diffusion_model, ['.ckpt', '.safetensors'])
use_stable_diffusion_model = getModelPath(use_stable_diffusion_model, [".ckpt", ".safetensors"])
stableDiffusionModelField.value = use_stable_diffusion_model
if (!stableDiffusionModelField.value) {
@ -215,118 +255,162 @@ const TASK_MAPPING = {
}
},
readUI: () => stableDiffusionModelField.value,
parse: (val) => val
parse: (val) => val,
},
use_vae_model: { name: 'VAE model',
clip_skip: {
name: "Clip Skip",
setUI: (value) => {
clip_skip.checked = value
},
readUI: () => clip_skip.checked,
parse: (val) => Boolean(val),
},
tiling: {
name: "Tiling",
setUI: (val) => {
tilingField.value = val
},
readUI: () => tilingField.value,
parse: (val) => val,
},
use_vae_model: {
name: "VAE model",
setUI: (use_vae_model) => {
const oldVal = vaeModelField.value
use_vae_model = (use_vae_model === undefined || use_vae_model === null || use_vae_model === 'None' ? '' : use_vae_model)
use_vae_model =
use_vae_model === undefined || use_vae_model === null || use_vae_model === "None" ? "" : use_vae_model
if (use_vae_model !== '') {
use_vae_model = getModelPath(use_vae_model, ['.vae.pt', '.ckpt'])
use_vae_model = use_vae_model !== '' ? use_vae_model : oldVal
if (use_vae_model !== "") {
use_vae_model = getModelPath(use_vae_model, [".vae.pt", ".ckpt"])
use_vae_model = use_vae_model !== "" ? use_vae_model : oldVal
}
vaeModelField.value = use_vae_model
},
readUI: () => vaeModelField.value,
parse: (val) => val
parse: (val) => val,
},
use_lora_model: { name: 'LoRA model',
use_lora_model: {
name: "LoRA model",
setUI: (use_lora_model) => {
const oldVal = loraModelField.value
use_lora_model = (use_lora_model === undefined || use_lora_model === null || use_lora_model === 'None' ? '' : use_lora_model)
use_lora_model =
use_lora_model === undefined || use_lora_model === null || use_lora_model === "None"
? ""
: use_lora_model
if (use_lora_model !== '') {
use_lora_model = getModelPath(use_lora_model, ['.ckpt', '.safetensors'])
use_lora_model = use_lora_model !== '' ? use_lora_model : oldVal
if (use_lora_model !== "") {
use_lora_model = getModelPath(use_lora_model, [".ckpt", ".safetensors"])
use_lora_model = use_lora_model !== "" ? use_lora_model : oldVal
}
loraModelField.value = use_lora_model
},
readUI: () => loraModelField.value,
parse: (val) => val
parse: (val) => val,
},
use_hypernetwork_model: { name: 'Hypernetwork model',
lora_alpha: {
name: "LoRA Strength",
setUI: (lora_alpha) => {
loraAlphaField.value = lora_alpha
updateLoraAlphaSlider()
},
readUI: () => parseFloat(loraAlphaField.value),
parse: (val) => parseFloat(val),
},
use_hypernetwork_model: {
name: "Hypernetwork model",
setUI: (use_hypernetwork_model) => {
const oldVal = hypernetworkModelField.value
use_hypernetwork_model = (use_hypernetwork_model === undefined || use_hypernetwork_model === null || use_hypernetwork_model === 'None' ? '' : use_hypernetwork_model)
use_hypernetwork_model =
use_hypernetwork_model === undefined ||
use_hypernetwork_model === null ||
use_hypernetwork_model === "None"
? ""
: use_hypernetwork_model
if (use_hypernetwork_model !== '') {
use_hypernetwork_model = getModelPath(use_hypernetwork_model, ['.pt'])
use_hypernetwork_model = use_hypernetwork_model !== '' ? use_hypernetwork_model : oldVal
if (use_hypernetwork_model !== "") {
use_hypernetwork_model = getModelPath(use_hypernetwork_model, [".pt"])
use_hypernetwork_model = use_hypernetwork_model !== "" ? use_hypernetwork_model : oldVal
}
hypernetworkModelField.value = use_hypernetwork_model
hypernetworkModelField.dispatchEvent(new Event('change'))
hypernetworkModelField.dispatchEvent(new Event("change"))
},
readUI: () => hypernetworkModelField.value,
parse: (val) => val
parse: (val) => val,
},
hypernetwork_strength: { name: 'Hypernetwork Strength',
hypernetwork_strength: {
name: "Hypernetwork Strength",
setUI: (hypernetwork_strength) => {
hypernetworkStrengthField.value = hypernetwork_strength
updateHypernetworkStrengthSlider()
},
readUI: () => parseFloat(hypernetworkStrengthField.value),
parse: (val) => parseFloat(val)
parse: (val) => parseFloat(val),
},
num_outputs: { name: 'Parallel Images',
num_outputs: {
name: "Parallel Images",
setUI: (num_outputs) => {
numOutputsParallelField.value = num_outputs
},
readUI: () => parseInt(numOutputsParallelField.value),
parse: (val) => val
parse: (val) => val,
},
use_cpu: { name: 'Use CPU',
use_cpu: {
name: "Use CPU",
setUI: (use_cpu) => {
useCPUField.checked = use_cpu
},
readUI: () => useCPUField.checked,
parse: (val) => val
parse: (val) => val,
},
stream_image_progress: { name: 'Stream Image Progress',
stream_image_progress: {
name: "Stream Image Progress",
setUI: (stream_image_progress) => {
streamImageProgressField.checked = (parseInt(numOutputsTotalField.value) > 50 ? false : stream_image_progress)
streamImageProgressField.checked = parseInt(numOutputsTotalField.value) > 50 ? false : stream_image_progress
},
readUI: () => streamImageProgressField.checked,
parse: (val) => Boolean(val)
parse: (val) => Boolean(val),
},
show_only_filtered_image: { name: 'Show only the corrected/upscaled image',
show_only_filtered_image: {
name: "Show only the corrected/upscaled image",
setUI: (show_only_filtered_image) => {
showOnlyFilteredImageField.checked = show_only_filtered_image
},
readUI: () => showOnlyFilteredImageField.checked,
parse: (val) => Boolean(val)
parse: (val) => Boolean(val),
},
output_format: { name: 'Output Format',
output_format: {
name: "Output Format",
setUI: (output_format) => {
outputFormatField.value = output_format
},
readUI: () => outputFormatField.value,
parse: (val) => val
parse: (val) => val,
},
save_to_disk_path: { name: 'Save to disk path',
save_to_disk_path: {
name: "Save to disk path",
setUI: (save_to_disk_path) => {
saveToDiskField.checked = Boolean(save_to_disk_path)
diskPathField.value = save_to_disk_path
},
readUI: () => diskPathField.value,
parse: (val) => val
}
parse: (val) => val,
},
}
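For reference, every entry in TASK_MAPPING follows the same small contract: setUI(value) pushes a restored value into the form, readUI() reads the current form value back out, and parse(val) converts the raw string taken from a metadata file into the type the request expects. A minimal sketch of one such entry; `exampleField` and the "Example Steps" label are hypothetical and not part of the real UI:

// Hypothetical entry illustrating the setUI/readUI/parse contract.
const exampleField = document.createElement("input") // stand-in for a real form field
const exampleMapping = {
    name: "Example Steps",                       // label used when parsing "Example Steps: 25" from a .txt file
    setUI: (value) => {                          // restore a saved task value into the form
        exampleField.value = value
    },
    readUI: () => parseInt(exampleField.value),  // read the form back into a request body
    parse: (val) => parseInt(val),               // convert the raw metadata string into a number
}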
function restoreTaskToUI(task, fieldsToSkip) {
fieldsToSkip = fieldsToSkip || []
if ('numOutputsTotal' in task) {
if ("numOutputsTotal" in task) {
numOutputsTotalField.value = task.numOutputsTotal
}
if ('seed' in task) {
if ("seed" in task) {
randomSeedField.checked = false
seedField.value = task.seed
}
if (!('reqBody' in task)) {
if (!("reqBody" in task)) {
return
}
for (const key in TASK_MAPPING) {
@ -336,26 +420,32 @@ function restoreTaskToUI(task, fieldsToSkip) {
}
// properly reset fields not present in the task
if (!('use_hypernetwork_model' in task.reqBody)) {
if (!("use_hypernetwork_model" in task.reqBody)) {
hypernetworkModelField.value = ""
hypernetworkModelField.dispatchEvent(new Event("change"))
}
if (!("use_lora_model" in task.reqBody)) {
loraModelField.value = ""
loraModelField.dispatchEvent(new Event("change"))
}
// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)
promptField.value = task.reqBody.original_prompt
if (!('original_prompt' in task.reqBody)) {
if (!("original_prompt" in task.reqBody)) {
promptField.value = task.reqBody.prompt
}
promptField.dispatchEvent(new Event("input"))
// properly reset checkboxes
if (!('use_face_correction' in task.reqBody)) {
if (!("use_face_correction" in task.reqBody)) {
useFaceCorrectionField.checked = false
gfpganModelField.disabled = true
}
if (!('use_upscale' in task.reqBody)) {
if (!("use_upscale" in task.reqBody)) {
useUpscalingField.checked = false
}
if (!('mask' in task.reqBody) && maskSetting.checked) {
if (!("mask" in task.reqBody) && maskSetting.checked) {
maskSetting.checked = false
maskSetting.dispatchEvent(new Event("click"))
}
@ -366,15 +456,18 @@ function restoreTaskToUI(task, fieldsToSkip) {
if (IMAGE_REGEX.test(initImagePreview.src) && task.reqBody.init_image == undefined) {
// hide source image
initImageClearBtn.dispatchEvent(new Event("click"))
}
else if (task.reqBody.init_image !== undefined) {
} else if (task.reqBody.init_image !== undefined) {
// listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
initImagePreview.addEventListener('load', function() {
if (Boolean(task.reqBody.mask)) {
imageInpainter.setImg(task.reqBody.mask)
maskSetting.checked = true
}
}, { once: true })
initImagePreview.addEventListener(
"load",
function() {
if (Boolean(task.reqBody.mask)) {
imageInpainter.setImg(task.reqBody.mask)
maskSetting.checked = true
}
},
{ once: true }
)
initImagePreview.src = task.reqBody.init_image
}
}
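As a usage sketch (the request body keys come from TASK_MAPPING above; the values here are only illustrative), restoring a previously saved task could look like this:

// Illustrative only: restore a saved task into the editor.
const savedTask = {
    numOutputsTotal: 4,
    seed: 1234567,
    reqBody: {
        prompt: "a watercolor landscape",
        seed: 1234567,
        num_inference_steps: 25,
        use_stable_diffusion_model: "sd-v1-4",
    },
}
restoreTaskToUI(savedTask) // the optional second argument lists TASK_MAPPING keys to leave untouched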
@ -384,28 +477,26 @@ function readUI() {
reqBody[key] = TASK_MAPPING[key].readUI()
}
return {
'numOutputsTotal': parseInt(numOutputsTotalField.value),
'seed': TASK_MAPPING['seed'].readUI(),
'reqBody': reqBody
numOutputsTotal: parseInt(numOutputsTotalField.value),
seed: TASK_MAPPING["seed"].readUI(),
reqBody: reqBody,
}
}
function getModelPath(filename, extensions)
{
function getModelPath(filename, extensions) {
if (typeof filename !== "string") {
return
}
let pathIdx
if (filename.includes('/models/stable-diffusion/')) {
pathIdx = filename.indexOf('/models/stable-diffusion/') + 25 // Linux, Mac paths
}
else if (filename.includes('\\models\\stable-diffusion\\')) {
pathIdx = filename.indexOf('\\models\\stable-diffusion\\') + 25 // Windows paths
if (filename.includes("/models/stable-diffusion/")) {
pathIdx = filename.indexOf("/models/stable-diffusion/") + 25 // Linux, Mac paths
} else if (filename.includes("\\models\\stable-diffusion\\")) {
pathIdx = filename.indexOf("\\models\\stable-diffusion\\") + 25 // Windows paths
}
if (pathIdx >= 0) {
filename = filename.slice(pathIdx)
}
extensions.forEach(ext => {
extensions.forEach((ext) => {
if (filename.endsWith(ext)) {
filename = filename.slice(0, filename.length - ext.length)
}
@ -414,26 +505,26 @@ function getModelPath(filename, extensions)
}
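getModelPath normalises a model reference taken from metadata: it strips everything up to and including the models/stable-diffusion/ folder (on either path style) and removes a known extension, so the value matches the entries in the model dropdowns. A few illustrative inputs and outputs (the file names are made up):

getModelPath("D:\\ED\\models\\stable-diffusion\\sd-v1-4.ckpt", [".ckpt", ".safetensors"])
// -> "sd-v1-4"
getModelPath("/home/user/models/stable-diffusion/deliberate_v2.safetensors", [".ckpt", ".safetensors"])
// -> "deliberate_v2"
getModelPath("GFPGANv1.4.pth", [".pth"]) // already a bare name: only the extension is removed
// -> "GFPGANv1.4"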
const TASK_TEXT_MAPPING = {
prompt: 'Prompt',
width: 'Width',
height: 'Height',
seed: 'Seed',
num_inference_steps: 'Steps',
guidance_scale: 'Guidance Scale',
prompt_strength: 'Prompt Strength',
use_face_correction: 'Use Face Correction',
use_upscale: 'Use Upscaling',
upscale_amount: 'Upscale By',
sampler_name: 'Sampler',
negative_prompt: 'Negative Prompt',
use_stable_diffusion_model: 'Stable Diffusion model',
use_hypernetwork_model: 'Hypernetwork model',
hypernetwork_strength: 'Hypernetwork Strength'
prompt: "Prompt",
width: "Width",
height: "Height",
seed: "Seed",
num_inference_steps: "Steps",
guidance_scale: "Guidance Scale",
prompt_strength: "Prompt Strength",
use_face_correction: "Use Face Correction",
use_upscale: "Use Upscaling",
upscale_amount: "Upscale By",
sampler_name: "Sampler",
negative_prompt: "Negative Prompt",
use_stable_diffusion_model: "Stable Diffusion model",
use_hypernetwork_model: "Hypernetwork model",
hypernetwork_strength: "Hypernetwork Strength",
}
function parseTaskFromText(str) {
const taskReqBody = {}
const lines = str.split('\n')
const lines = str.split("\n")
if (lines.length === 0) {
return
}
@ -441,14 +532,14 @@ function parseTaskFromText(str) {
// Prompt
let knownKeyOnFirstLine = false
for (let key in TASK_TEXT_MAPPING) {
if (lines[0].startsWith(TASK_TEXT_MAPPING[key] + ':')) {
if (lines[0].startsWith(TASK_TEXT_MAPPING[key] + ":")) {
knownKeyOnFirstLine = true
break
}
}
if (!knownKeyOnFirstLine) {
taskReqBody.prompt = lines[0]
console.log('Prompt:', taskReqBody.prompt)
console.log("Prompt:", taskReqBody.prompt)
}
for (const key in TASK_TEXT_MAPPING) {
@ -456,18 +547,18 @@ function parseTaskFromText(str) {
continue
}
const name = TASK_TEXT_MAPPING[key];
const name = TASK_TEXT_MAPPING[key]
let val = undefined
const reName = new RegExp(`${name}\\ *:\\ *(.*)(?:\\r\\n|\\r|\\n)*`, 'igm')
const match = reName.exec(str);
const reName = new RegExp(`${name}\\ *:\\ *(.*)(?:\\r\\n|\\r|\\n)*`, "igm")
const match = reName.exec(str)
if (match) {
str = str.slice(0, match.index) + str.slice(match.index + match[0].length)
val = match[1]
}
if (val !== undefined) {
taskReqBody[key] = TASK_MAPPING[key].parse(val.trim())
console.log(TASK_MAPPING[key].name + ':', taskReqBody[key])
console.log(TASK_MAPPING[key].name + ":", taskReqBody[key])
if (!str) {
break
}
@ -477,18 +568,19 @@ function parseTaskFromText(str) {
return undefined
}
const task = { reqBody: taskReqBody }
if ('seed' in taskReqBody) {
if ("seed" in taskReqBody) {
task.seed = taskReqBody.seed
}
return task
}
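parseTaskFromText works against the plain-text metadata written next to saved images: one "Label: value" pair per line, using the labels from TASK_TEXT_MAPPING, with an optional bare prompt on the first line. A small illustrative input (all values made up):

// Illustrative metadata text; the labels must match TASK_TEXT_MAPPING above.
const exampleText = [
    "a watercolor landscape",  // first line without a known label is treated as the prompt
    "Seed: 1234567",
    "Steps: 25",
    "Guidance Scale: 7.5",
    "Sampler: euler_a",
].join("\n")
const parsed = parseTaskFromText(exampleText)
// parsed.reqBody.prompt === "a watercolor landscape"; parsed.seed carries the parsed seed for the task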
async function parseContent(text) {
text = text.trim();
if (text.startsWith('{') && text.endsWith('}')) {
text = text.trim()
if (text.startsWith("{") && text.endsWith("}")) {
try {
const task = JSON.parse(text)
if (!('reqBody' in task)) { // support the format saved to the disk, by the UI
if (!("reqBody" in task)) {
// support the format saved to the disk, by the UI
task.reqBody = Object.assign({}, task)
}
restoreTaskToUI(task)
@ -500,7 +592,8 @@ async function parseContent(text) {
}
// Normal txt file.
const task = parseTaskFromText(text)
if (text.toLowerCase().includes('seed:') && task) { // only parse valid task content
if (text.toLowerCase().includes("seed:") && task) {
// only parse valid task content
restoreTaskToUI(task)
return true
} else {
@ -517,21 +610,25 @@ async function readFile(file, i) {
}
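parseContent accepts either the JSON the UI writes to disk (a flat request object, wrapped into reqBody on the fly) or the plain-text metadata shown above. A hedged example of the JSON path, with made-up values:

// Illustrative JSON as saved by the UI (metadata_output_format = "json"); fields abridged.
const exampleJson = JSON.stringify({
    prompt: "a watercolor landscape",
    seed: 1234567,
    num_inference_steps: 25,
    use_stable_diffusion_model: "sd-v1-4",
})
// Because there is no reqBody key, parseContent wraps the object itself as task.reqBody
// before restoring it into the UI (parseContent is async).
parseContent(exampleJson)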
function dropHandler(ev) {
console.log('Content dropped...')
console.log("Content dropped...")
let items = []
if (ev?.dataTransfer?.items) { // Use DataTransferItemList interface
if (ev?.dataTransfer?.items) {
// Use DataTransferItemList interface
items = Array.from(ev.dataTransfer.items)
items = items.filter(item => item.kind === 'file')
items = items.map(item => item.getAsFile())
} else if (ev?.dataTransfer?.files) { // Use DataTransfer interface
items = items.filter((item) => item.kind === "file")
items = items.map((item) => item.getAsFile())
} else if (ev?.dataTransfer?.files) {
// Use DataTransfer interface
items = Array.from(ev.dataTransfer.files)
}
items.forEach(item => {item.file_ext = EXT_REGEX.exec(item.name.toLowerCase())[1]})
items.forEach((item) => {
item.file_ext = EXT_REGEX.exec(item.name.toLowerCase())[1]
})
let text_items = items.filter(item => TEXT_EXTENSIONS.includes(item.file_ext))
let image_items = items.filter(item => IMAGE_EXTENSIONS.includes(item.file_ext))
let text_items = items.filter((item) => TEXT_EXTENSIONS.includes(item.file_ext))
let image_items = items.filter((item) => IMAGE_EXTENSIONS.includes(item.file_ext))
if (image_items.length > 0 && ev.target == initImageSelector) {
return // let the event bubble up, so that the Init Image filepicker can receive this
@ -541,7 +638,7 @@ function dropHandler(ev) {
text_items.forEach(readFile)
}
function dragOverHandler(ev) {
console.log('Content in drop zone')
console.log("Content in drop zone")
// Prevent default behavior (Prevent file/content from being opened)
ev.preventDefault()
@ -549,73 +646,72 @@ function dragOverHandler(ev) {
ev.dataTransfer.dropEffect = "copy"
let img = new Image()
img.src = '//' + location.host + '/media/images/favicon-32x32.png'
img.src = "//" + location.host + "/media/images/favicon-32x32.png"
ev.dataTransfer.setDragImage(img, 16, 16)
}
document.addEventListener("drop", dropHandler)
document.addEventListener("dragover", dragOverHandler)
const TASK_REQ_NO_EXPORT = [
"use_cpu",
"save_to_disk_path"
]
const resetSettings = document.getElementById('reset-image-settings')
const TASK_REQ_NO_EXPORT = ["use_cpu", "save_to_disk_path"]
const resetSettings = document.getElementById("reset-image-settings")
function checkReadTextClipboardPermission (result) {
function checkReadTextClipboardPermission(result) {
if (result.state != "granted" && result.state != "prompt") {
return
}
// PASTE ICON
const pasteIcon = document.createElement('i')
pasteIcon.className = 'fa-solid fa-paste section-button'
const pasteIcon = document.createElement("i")
pasteIcon.className = "fa-solid fa-paste section-button"
pasteIcon.innerHTML = `<span class="simple-tooltip top-left">Paste Image Settings</span>`
pasteIcon.addEventListener('click', async (event) => {
pasteIcon.addEventListener("click", async (event) => {
event.stopPropagation()
// Add css class 'active'
pasteIcon.classList.add('active')
pasteIcon.classList.add("active")
// In 350 ms remove the 'active' class
asyncDelay(350).then(() => pasteIcon.classList.remove('active'))
asyncDelay(350).then(() => pasteIcon.classList.remove("active"))
// Retrieve clipboard content and try to parse it
const text = await navigator.clipboard.readText();
const text = await navigator.clipboard.readText()
await parseContent(text)
})
resetSettings.parentNode.insertBefore(pasteIcon, resetSettings)
}
navigator.permissions.query({ name: "clipboard-read" }).then(checkReadTextClipboardPermission, (reason) => console.log('clipboard-read is not available. %o', reason))
navigator.permissions
.query({ name: "clipboard-read" })
.then(checkReadTextClipboardPermission, (reason) => console.log("clipboard-read is not available. %o", reason))
document.addEventListener('paste', async (event) => {
document.addEventListener("paste", async (event) => {
if (event.target) {
const targetTag = event.target.tagName.toLowerCase()
// Disable when targeting input elements.
if (targetTag === 'input' || targetTag === 'textarea') {
if (targetTag === "input" || targetTag === "textarea") {
return
}
}
const paste = (event.clipboardData || window.clipboardData).getData('text')
const paste = (event.clipboardData || window.clipboardData).getData("text")
const selection = window.getSelection()
if (selection.toString().trim().length <= 0 && await parseContent(paste)) {
if (paste != "" && selection.toString().trim().length <= 0 && (await parseContent(paste))) {
event.preventDefault()
return
}
})
// Adds a copy and a paste icon if the browser grants permission to write to clipboard.
function checkWriteToClipboardPermission (result) {
function checkWriteToClipboardPermission(result) {
if (result.state != "granted" && result.state != "prompt") {
return
}
// COPY ICON
const copyIcon = document.createElement('i')
copyIcon.className = 'fa-solid fa-clipboard section-button'
const copyIcon = document.createElement("i")
copyIcon.className = "fa-solid fa-clipboard section-button"
copyIcon.innerHTML = `<span class="simple-tooltip top-left">Copy Image Settings</span>`
copyIcon.addEventListener('click', (event) => {
copyIcon.addEventListener("click", (event) => {
event.stopPropagation()
// Add css class 'active'
copyIcon.classList.add('active')
copyIcon.classList.add("active")
// In 350 ms remove the 'active' class
asyncDelay(350).then(() => copyIcon.classList.remove('active'))
asyncDelay(350).then(() => copyIcon.classList.remove("active"))
const uiState = readUI()
TASK_REQ_NO_EXPORT.forEach((key) => delete uiState.reqBody[key])
if (uiState.reqBody.init_image && !IMAGE_REGEX.test(uiState.reqBody.init_image)) {
@ -628,8 +724,8 @@ function checkWriteToClipboardPermission (result) {
}
// Determine which access we have to the clipboard. Clipboard access is only available on localhost or via TLS.
navigator.permissions.query({ name: "clipboard-write" }).then(checkWriteToClipboardPermission, (e) => {
if (e instanceof TypeError && typeof navigator?.clipboard?.writeText === 'function') {
if (e instanceof TypeError && typeof navigator?.clipboard?.writeText === "function") {
// Fix for firefox https://bugzilla.mozilla.org/show_bug.cgi?id=1560373
checkWriteToClipboardPermission({state:"granted"})
checkWriteToClipboardPermission({ state: "granted" })
}
})
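What the copy icon places on the clipboard is essentially the readUI() snapshot with the non-exportable keys removed, which is the same JSON shape parseContent accepts back. Roughly (the exact serialisation inside the handler is elided in this diff, so treat this as a sketch):

// Illustrative clipboard payload produced by the copy icon.
const uiState = readUI()
TASK_REQ_NO_EXPORT.forEach((key) => delete uiState.reqBody[key]) // drops use_cpu, save_to_disk_path
navigator.clipboard.writeText(JSON.stringify(uiState, undefined, 4))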

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,44 +1,45 @@
"use strict"
/**
* @typedef {object} ImageModalRequest
* @property {string} src
* @property {ImageModalRequest | () => ImageModalRequest | undefined} previous
* @property {ImageModalRequest | () => ImageModalRequest | undefined} next
*/
/**
* @type {(() => (string | ImageModalRequest) | string | ImageModalRequest) => {}}
*/
const imageModal = (function() {
const zoomElem = createElement(
'i',
undefined,
['fa-solid', 'tertiaryButton'],
)
const backElem = createElement("i", undefined, ["fa-solid", "fa-arrow-left", "tertiaryButton"])
const closeElem = createElement(
'i',
undefined,
['fa-solid', 'fa-xmark', 'tertiaryButton'],
)
const forwardElem = createElement("i", undefined, ["fa-solid", "fa-arrow-right", "tertiaryButton"])
const menuBarElem = createElement('div', undefined, 'menu-bar', [zoomElem, closeElem])
const zoomElem = createElement("i", undefined, ["fa-solid", "tertiaryButton"])
const imageContainer = createElement('div', undefined, 'image-wrapper')
const closeElem = createElement("i", undefined, ["fa-solid", "fa-xmark", "tertiaryButton"])
const backdrop = createElement('div', undefined, 'backdrop')
const menuBarElem = createElement("div", undefined, "menu-bar", [backElem, forwardElem, zoomElem, closeElem])
const modalContainer = createElement('div', undefined, 'content', [menuBarElem, imageContainer])
const imageContainer = createElement("div", undefined, "image-wrapper")
const modalElem = createElement(
'div',
{ id: 'viewFullSizeImgModal' },
['popup'],
[backdrop, modalContainer],
)
const backdrop = createElement("div", undefined, "backdrop")
const modalContainer = createElement("div", undefined, "content", [menuBarElem, imageContainer])
const modalElem = createElement("div", { id: "viewFullSizeImgModal" }, ["popup"], [backdrop, modalContainer])
document.body.appendChild(modalElem)
const setZoomLevel = (value) => {
const img = imageContainer.querySelector('img')
const img = imageContainer.querySelector("img")
if (value) {
zoomElem.classList.remove('fa-magnifying-glass-plus')
zoomElem.classList.add('fa-magnifying-glass-minus')
zoomElem.classList.remove("fa-magnifying-glass-plus")
zoomElem.classList.add("fa-magnifying-glass-minus")
if (img) {
img.classList.remove('natural-zoom')
img.classList.remove("natural-zoom")
let zoomLevel = typeof value === 'number' ? value : img.dataset.zoomLevel
let zoomLevel = typeof value === "number" ? value : img.dataset.zoomLevel
if (!zoomLevel) {
zoomLevel = 100
}
@ -48,34 +49,164 @@ const imageModal = (function() {
img.height = img.naturalHeight * (+zoomLevel / 100)
}
} else {
zoomElem.classList.remove('fa-magnifying-glass-minus')
zoomElem.classList.add('fa-magnifying-glass-plus')
zoomElem.classList.remove("fa-magnifying-glass-minus")
zoomElem.classList.add("fa-magnifying-glass-plus")
if (img) {
img.classList.add('natural-zoom')
img.removeAttribute('width')
img.removeAttribute('height')
img.classList.add("natural-zoom")
img.removeAttribute("width")
img.removeAttribute("height")
}
}
}
zoomElem.addEventListener(
'click',
() => setZoomLevel(imageContainer.querySelector('img')?.classList?.contains('natural-zoom')),
zoomElem.addEventListener("click", () =>
setZoomLevel(imageContainer.querySelector("img")?.classList?.contains("natural-zoom"))
)
const close = () => {
imageContainer.innerHTML = ''
modalElem.classList.remove('active')
document.body.style.overflow = 'initial'
const initialState = () => ({
previous: undefined,
next: undefined,
start: {
x: 0,
y: 0,
},
scroll: {
x: 0,
y: 0,
},
})
const state = initialState()
// Allow grabbing the image to scroll
const stopGrabbing = (e) => {
if(imageContainer.classList.contains("grabbing")) {
imageContainer.classList.remove("grabbing")
e?.preventDefault()
console.log(`stopGrabbing()`, e)
}
}
window.addEventListener('keydown', (e) => {
if (e.key === 'Escape' && modalElem.classList.contains('active')) {
const addImageGrabbing = (image) => {
image?.addEventListener('mousedown', (e) => {
if (!image.classList.contains("natural-zoom")) {
e.stopPropagation()
e.stopImmediatePropagation()
e.preventDefault()
imageContainer.classList.add("grabbing")
state.start.x = e.pageX - imageContainer.offsetLeft
state.scroll.x = imageContainer.scrollLeft
state.start.y = e.pageY - imageContainer.offsetTop
state.scroll.y = imageContainer.scrollTop
}
})
image?.addEventListener('mouseup', stopGrabbing)
image?.addEventListener('mouseleave', stopGrabbing)
image?.addEventListener('mousemove', (e) => {
if(imageContainer.classList.contains("grabbing")) {
e.stopPropagation()
e.stopImmediatePropagation()
e.preventDefault()
// Might need to increase this multiplier based on the image size to window size ratio
// The default 1:1 is pretty slow
const multiplier = 1.0
const deltaX = e.pageX - imageContainer.offsetLeft - state.start.x
imageContainer.scrollLeft = state.scroll.x - (deltaX * multiplier)
const deltaY = e.pageY - imageContainer.offsetTop - state.start.y
imageContainer.scrollTop = state.scroll.y - (deltaY * multiplier)
}
})
}
const clear = () => {
imageContainer.innerHTML = ""
Object.entries(initialState()).forEach(([key, value]) => state[key] = value)
stopGrabbing()
}
const close = () => {
clear()
modalElem.classList.remove("active")
document.body.style.overflow = "initial"
}
/**
* @param {() => (string | ImageModalRequest) | string | ImageModalRequest} optionsFactory
*/
function init(optionsFactory) {
if (!optionsFactory) {
close()
return
}
clear()
const options = typeof optionsFactory === "function" ? optionsFactory() : optionsFactory
const src = typeof options === "string" ? options : options.src
const imgElem = createElement("img", { src }, "natural-zoom")
addImageGrabbing(imgElem)
imageContainer.appendChild(imgElem)
modalElem.classList.add("active")
document.body.style.overflow = "hidden"
setZoomLevel(false)
if (typeof options === "object" && options.previous) {
state.previous = options.previous
backElem.style.display = "unset"
} else {
backElem.style.display = "none"
}
if (typeof options === "object" && options.next) {
state.next = options.next
forwardElem.style.display = "unset"
} else {
forwardElem.style.display = "none"
}
}
const back = () => {
if (state.previous) {
init(state.previous)
} else {
backElem.style.display = "none"
}
}
const forward = () => {
if (state.next) {
init(state.next)
} else {
forwardElem.style.display = "none"
}
}
window.addEventListener("keydown", (e) => {
if (modalElem.classList.contains("active")) {
switch (e.key) {
case "Escape":
close()
break
case "ArrowLeft":
back()
break
case "ArrowRight":
forward()
break
}
}
})
window.addEventListener('click', (e) => {
if (modalElem.classList.contains('active')) {
window.addEventListener("click", (e) => {
if (modalElem.classList.contains("active")) {
if (e.target === backdrop || e.target === closeElem) {
close()
}
@ -86,15 +217,12 @@ const imageModal = (function() {
}
})
return (optionsFactory) => {
const options = typeof optionsFactory === 'function' ? optionsFactory() : optionsFactory
const src = typeof options === 'string' ? options : options.src
backElem.addEventListener("click", back)
// TODO center it if < window size
const imgElem = createElement('img', { src }, 'natural-zoom')
imageContainer.appendChild(imgElem)
modalElem.classList.add('active')
document.body.style.overflow = 'hidden'
setZoomLevel(false)
}
forwardElem.addEventListener("click", forward)
/**
* @param {() => (string | ImageModalRequest) | string | ImageModalRequest} optionsFactory
*/
return (optionsFactory) => init(optionsFactory)
})()
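Per the ImageModalRequest typedef at the top of this file, the exported function accepts a plain src string, a request object, or a factory returning either; previous/next enable the new arrow navigation. A usage sketch with made-up image paths:

// Illustrative only: open the modal on one image and let the arrows walk a result set.
const results = ["/image/outputs/1.png", "/image/outputs/2.png", "/image/outputs/3.png"]
function requestFor(i) {
    return {
        src: results[i],
        previous: i > 0 ? () => requestFor(i - 1) : undefined,
        next: i < results.length - 1 ? () => requestFor(i + 1) : undefined,
    }
}
imageModal(requestFor(1)) // Escape closes, ArrowLeft/ArrowRight move through the set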


@ -3,26 +3,26 @@ let modifiers = []
let customModifiersGroupElement = undefined
let customModifiersInitialContent
let editorModifierEntries = document.querySelector('#editor-modifiers-entries')
let editorModifierTagsList = document.querySelector('#editor-inputs-tags-list')
let editorTagsContainer = document.querySelector('#editor-inputs-tags-container')
let modifierCardSizeSlider = document.querySelector('#modifier-card-size-slider')
let previewImageField = document.querySelector('#preview-image')
let modifierSettingsBtn = document.querySelector('#modifier-settings-btn')
let modifierSettingsOverlay = document.querySelector('#modifier-settings-config')
let customModifiersTextBox = document.querySelector('#custom-modifiers-input')
let customModifierEntriesToolbar = document.querySelector('#editor-modifiers-entries-toolbar')
let editorModifierEntries = document.querySelector("#editor-modifiers-entries")
let editorModifierTagsList = document.querySelector("#editor-inputs-tags-list")
let editorTagsContainer = document.querySelector("#editor-inputs-tags-container")
let modifierCardSizeSlider = document.querySelector("#modifier-card-size-slider")
let previewImageField = document.querySelector("#preview-image")
let modifierSettingsBtn = document.querySelector("#modifier-settings-btn")
let modifierSettingsOverlay = document.querySelector("#modifier-settings-config")
let customModifiersTextBox = document.querySelector("#custom-modifiers-input")
let customModifierEntriesToolbar = document.querySelector("#editor-modifiers-entries-toolbar")
const modifierThumbnailPath = 'media/modifier-thumbnails'
const activeCardClass = 'modifier-card-active'
const modifierThumbnailPath = "media/modifier-thumbnails"
const activeCardClass = "modifier-card-active"
const CUSTOM_MODIFIERS_KEY = "customModifiers"
function createModifierCard(name, previews, removeBy) {
const modifierCard = document.createElement('div')
const modifierCard = document.createElement("div")
let style = previewImageField.value
let styleIndex = (style=='portrait') ? 0 : 1
let styleIndex = style == "portrait" ? 0 : 1
modifierCard.className = 'modifier-card'
modifierCard.className = "modifier-card"
modifierCard.innerHTML = `
<div class="modifier-card-overlay"></div>
<div class="modifier-card-image-container">
@ -34,35 +34,35 @@ function createModifierCard(name, previews, removeBy) {
<div class="modifier-card-label"><p></p></div>
</div>`
const image = modifierCard.querySelector('.modifier-card-image')
const errorText = modifierCard.querySelector('.modifier-card-error-label')
const label = modifierCard.querySelector('.modifier-card-label')
const image = modifierCard.querySelector(".modifier-card-image")
const errorText = modifierCard.querySelector(".modifier-card-error-label")
const label = modifierCard.querySelector(".modifier-card-label")
errorText.innerText = 'No Image'
errorText.innerText = "No Image"
if (typeof previews == 'object') {
image.src = previews[styleIndex]; // portrait
image.setAttribute('preview-type', style)
if (typeof previews == "object") {
image.src = previews[styleIndex] // portrait
image.setAttribute("preview-type", style)
} else {
image.remove()
}
const maxLabelLength = 30
const cardLabel = removeBy ? name.replace('by ', '') : name
const cardLabel = removeBy ? name.replace("by ", "") : name
if(cardLabel.length <= maxLabelLength) {
label.querySelector('p').innerText = cardLabel
if (cardLabel.length <= maxLabelLength) {
label.querySelector("p").innerText = cardLabel
} else {
const tooltipText = document.createElement('span')
tooltipText.className = 'tooltip-text'
const tooltipText = document.createElement("span")
tooltipText.className = "tooltip-text"
tooltipText.innerText = name
label.classList.add('tooltip')
label.classList.add("tooltip")
label.appendChild(tooltipText)
label.querySelector('p').innerText = cardLabel.substring(0, maxLabelLength) + '...'
label.querySelector("p").innerText = cardLabel.substring(0, maxLabelLength) + "..."
}
label.querySelector('p').dataset.fullName = name // preserve the full name
label.querySelector("p").dataset.fullName = name // preserve the full name
return modifierCard
}
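createModifierCard returns a detached card element; the caller decides where to append it and wires up the click handling (as createModifierGroup does below). A small usage sketch with made-up preview paths:

// Illustrative: build a card for an artist modifier with portrait/landscape previews.
// The preview paths are invented; real ones come from /get/modifiers.
const card = createModifierCard(
    "by Vincent van Gogh",
    ["artist/van_gogh-portrait.jpg", "artist/van_gogh-landscape.jpg"],
    true // removeBy: show the label without the leading "by "
)
document.querySelector("#editor-modifiers-entries")?.appendChild(card)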
@ -71,55 +71,58 @@ function createModifierGroup(modifierGroup, initiallyExpanded, removeBy) {
const title = modifierGroup.category
const modifiers = modifierGroup.modifiers
const titleEl = document.createElement('h5')
titleEl.className = 'collapsible'
const titleEl = document.createElement("h5")
titleEl.className = "collapsible"
titleEl.innerText = title
const modifiersEl = document.createElement('div')
modifiersEl.classList.add('collapsible-content', 'editor-modifiers-leaf')
const modifiersEl = document.createElement("div")
modifiersEl.classList.add("collapsible-content", "editor-modifiers-leaf")
if (initiallyExpanded === true) {
titleEl.className += ' active'
titleEl.className += " active"
}
modifiers.forEach(modObj => {
modifiers.forEach((modObj) => {
const modifierName = modObj.modifier
const modifierPreviews = modObj?.previews?.map(preview => `${IMAGE_REGEX.test(preview.image) ? preview.image : modifierThumbnailPath + '/' + preview.path}`)
const modifierPreviews = modObj?.previews?.map(
(preview) =>
`${IMAGE_REGEX.test(preview.image) ? preview.image : modifierThumbnailPath + "/" + preview.path}`
)
const modifierCard = createModifierCard(modifierName, modifierPreviews, removeBy)
if(typeof modifierCard == 'object') {
if (typeof modifierCard == "object") {
modifiersEl.appendChild(modifierCard)
const trimmedName = trimModifiers(modifierName)
modifierCard.addEventListener('click', () => {
if (activeTags.map(x => trimModifiers(x.name)).includes(trimmedName)) {
modifierCard.addEventListener("click", () => {
if (activeTags.map((x) => trimModifiers(x.name)).includes(trimmedName)) {
// remove modifier from active array
activeTags = activeTags.filter(x => trimModifiers(x.name) != trimmedName)
activeTags = activeTags.filter((x) => trimModifiers(x.name) != trimmedName)
toggleCardState(trimmedName, false)
} else {
// add modifier to active array
activeTags.push({
'name': modifierName,
'element': modifierCard.cloneNode(true),
'originElement': modifierCard,
'previews': modifierPreviews
name: modifierName,
element: modifierCard.cloneNode(true),
originElement: modifierCard,
previews: modifierPreviews,
})
toggleCardState(trimmedName, true)
}
refreshTagsList()
document.dispatchEvent(new Event('refreshImageModifiers'))
document.dispatchEvent(new Event("refreshImageModifiers"))
})
}
})
let brk = document.createElement('br')
brk.style.clear = 'both'
let brk = document.createElement("br")
brk.style.clear = "both"
modifiersEl.appendChild(brk)
let e = document.createElement('div')
e.className = 'modifier-category'
let e = document.createElement("div")
e.className = "modifier-category"
e.appendChild(titleEl)
e.appendChild(modifiersEl)
@ -130,87 +133,98 @@ function createModifierGroup(modifierGroup, initiallyExpanded, removeBy) {
function trimModifiers(tag) {
// Remove trailing '-' and/or '+'
tag = tag.replace(/[-+]+$/, '');
tag = tag.replace(/[-+]+$/, "")
// Remove parentheses at beginning and end
return tag.replace(/^[(]+|[\s)]+$/g, '');
return tag.replace(/^[(]+|[\s)]+$/g, "")
}
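trimModifiers strips emphasis syntax so that weighted and plain forms of a tag compare equal: trailing "+" or "-" repetitions and surrounding parentheses are removed. For illustration:

// Illustrative inputs and the normalised forms used for comparisons.
trimModifiers("Highly Detailed+++") // -> "Highly Detailed"
trimModifiers("(oil painting)++")   // -> "oil painting"
trimModifiers("by Van Gogh")        // -> "by Van Gogh" (unchanged)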
async function loadModifiers() {
try {
let res = await fetch('/get/modifiers')
let res = await fetch("/get/modifiers")
if (res.status === 200) {
res = await res.json()
modifiers = res; // update global variable
modifiers = res // update global variable
res.reverse()
res.forEach((modifierGroup, idx) => {
createModifierGroup(modifierGroup, idx === res.length - 1, modifierGroup === 'Artist' ? true : false) // only remove "By " for artists
createModifierGroup(modifierGroup, idx === res.length - 1, modifierGroup.category === "Artist" ? true : false) // only remove "By " for artists
})
createCollapsibles(editorModifierEntries)
}
} catch (e) {
console.error('error fetching modifiers', e)
console.error("error fetching modifiers", e)
}
loadCustomModifiers()
resizeModifierCards(modifierCardSizeSlider.value)
document.dispatchEvent(new Event('loadImageModifiers'))
document.dispatchEvent(new Event("loadImageModifiers"))
}
function refreshModifiersState(newTags, inactiveTags) {
// clear existing modifiers
document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(modifierCard => {
const modifierName = modifierCard.querySelector('.modifier-card-label p').dataset.fullName // pick the full modifier name
if (activeTags.map(x => x.name).includes(modifierName)) {
modifierCard.classList.remove(activeCardClass)
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
}
})
document
.querySelector("#editor-modifiers")
.querySelectorAll(".modifier-card")
.forEach((modifierCard) => {
const modifierName = modifierCard.querySelector(".modifier-card-label p").dataset.fullName // pick the full modifier name
if (activeTags.map((x) => x.name).includes(modifierName)) {
modifierCard.classList.remove(activeCardClass)
modifierCard.querySelector(".modifier-card-image-overlay").innerText = "+"
}
})
activeTags = []
// set new modifiers
newTags.forEach(tag => {
newTags.forEach((tag) => {
let found = false
document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(modifierCard => {
const modifierName = modifierCard.querySelector('.modifier-card-label p').dataset.fullName
const shortModifierName = modifierCard.querySelector('.modifier-card-label p').innerText
if (trimModifiers(tag) == trimModifiers(modifierName)) {
// add modifier to active array
if (!activeTags.map(x => x.name).includes(tag)) { // only add each tag once even if several custom modifier cards share the same tag
const imageModifierCard = modifierCard.cloneNode(true)
imageModifierCard.querySelector('.modifier-card-label p').innerText = tag.replace(modifierName, shortModifierName)
activeTags.push({
'name': tag,
'element': imageModifierCard,
'originElement': modifierCard
})
document
.querySelector("#editor-modifiers")
.querySelectorAll(".modifier-card")
.forEach((modifierCard) => {
const modifierName = modifierCard.querySelector(".modifier-card-label p").dataset.fullName
const shortModifierName = modifierCard.querySelector(".modifier-card-label p").innerText
if (trimModifiers(tag) == trimModifiers(modifierName)) {
// add modifier to active array
if (!activeTags.map((x) => x.name).includes(tag)) {
// only add each tag once even if several custom modifier cards share the same tag
const imageModifierCard = modifierCard.cloneNode(true)
imageModifierCard.querySelector(".modifier-card-label p").innerText = tag.replace(
modifierName,
shortModifierName
)
activeTags.push({
name: tag,
element: imageModifierCard,
originElement: modifierCard,
})
}
modifierCard.classList.add(activeCardClass)
modifierCard.querySelector(".modifier-card-image-overlay").innerText = "-"
found = true
}
modifierCard.classList.add(activeCardClass)
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '-'
found = true
}
})
if (found == false) { // custom tag went missing, create one here
})
if (found == false) {
// custom tag went missing, create one here
let modifierCard = createModifierCard(tag, undefined, false) // create a modifier card for the missing tag, no image
modifierCard.addEventListener('click', () => {
if (activeTags.map(x => x.name).includes(tag)) {
modifierCard.addEventListener("click", () => {
if (activeTags.map((x) => x.name).includes(tag)) {
// remove modifier from active array
activeTags = activeTags.filter(x => x.name != tag)
activeTags = activeTags.filter((x) => x.name != tag)
modifierCard.classList.remove(activeCardClass)
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
modifierCard.querySelector(".modifier-card-image-overlay").innerText = "+"
}
refreshTagsList()
})
activeTags.push({
'name': tag,
'element': modifierCard,
'originElement': undefined // no origin element for missing tags
name: tag,
element: modifierCard,
originElement: undefined, // no origin element for missing tags
})
}
})
@ -220,41 +234,44 @@ function refreshModifiersState(newTags, inactiveTags) {
function refreshInactiveTags(inactiveTags) {
// update inactive tags
if (inactiveTags !== undefined && inactiveTags.length > 0) {
activeTags.forEach (tag => {
if (inactiveTags.find(element => element === tag.name) !== undefined) {
activeTags.forEach((tag) => {
if (inactiveTags.find((element) => element === tag.name) !== undefined) {
tag.inactive = true
}
})
}
// update cards
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
if (inactiveTags?.find(element => element === modifierName) !== undefined) {
i.parentElement.classList.add('modifier-toggle-inactive')
let overlays = document.querySelector("#editor-inputs-tags-list").querySelectorAll(".modifier-card-overlay")
overlays.forEach((i) => {
let modifierName = i.parentElement.getElementsByClassName("modifier-card-label")[0].getElementsByTagName("p")[0]
.dataset.fullName
if (inactiveTags?.find((element) => trimModifiers(element) === modifierName) !== undefined) {
i.parentElement.classList.add("modifier-toggle-inactive")
}
})
}
function refreshTagsList(inactiveTags) {
editorModifierTagsList.innerHTML = ''
editorModifierTagsList.innerHTML = ""
if (activeTags.length == 0) {
editorTagsContainer.style.display = 'none'
editorTagsContainer.style.display = "none"
return
} else {
editorTagsContainer.style.display = 'block'
editorTagsContainer.style.display = "block"
}
activeTags.forEach((tag, index) => {
tag.element.querySelector('.modifier-card-image-overlay').innerText = '-'
tag.element.classList.add('modifier-card-tiny')
tag.element.querySelector(".modifier-card-image-overlay").innerText = "-"
tag.element.classList.add("modifier-card-tiny")
editorModifierTagsList.appendChild(tag.element)
tag.element.addEventListener('click', () => {
let idx = activeTags.findIndex(o => { return o.name === tag.name })
tag.element.addEventListener("click", () => {
let idx = activeTags.findIndex((o) => {
return o.name === tag.name
})
if (idx !== -1) {
toggleCardState(activeTags[idx].name, false)
@ -262,88 +279,91 @@ function refreshTagsList(inactiveTags) {
activeTags.splice(idx, 1)
refreshTagsList()
}
document.dispatchEvent(new Event('refreshImageModifiers'))
document.dispatchEvent(new Event("refreshImageModifiers"))
})
})
let brk = document.createElement('br')
brk.style.clear = 'both'
let brk = document.createElement("br")
brk.style.clear = "both"
editorModifierTagsList.appendChild(brk)
refreshInactiveTags(inactiveTags)
document.dispatchEvent(new Event('refreshImageModifiers')) // notify plugins that the image tags have been refreshed
document.dispatchEvent(new Event("refreshImageModifiers")) // notify plugins that the image tags have been refreshed
}
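The functions above all operate on the shared activeTags array; each entry pairs the tag text with the card element shown in the tag strip and, when available, the original card in the modifier browser. A sketch of that shape (the card lookup is only for illustration):

const someCard = document.querySelector("#editor-modifiers .modifier-card") // any existing card, for illustration
activeTags.push({
    name: "Highly Detailed",
    element: someCard?.cloneNode(true), // clone shown in the tags strip
    originElement: someCard,            // original card in the browser; undefined for custom tags
    previews: undefined,                // preview paths when the tag came from a card with images
})
refreshTagsList()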
function toggleCardState(modifierName, makeActive) {
document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(card => {
const name = card.querySelector('.modifier-card-label').innerText
if ( trimModifiers(modifierName) == trimModifiers(name)
|| trimModifiers(modifierName) == 'by ' + trimModifiers(name)) {
if(makeActive) {
card.classList.add(activeCardClass)
card.querySelector('.modifier-card-image-overlay').innerText = '-'
document
.querySelector("#editor-modifiers")
.querySelectorAll(".modifier-card")
.forEach((card) => {
const name = card.querySelector(".modifier-card-label").innerText
if (
trimModifiers(modifierName) == trimModifiers(name) ||
trimModifiers(modifierName) == "by " + trimModifiers(name)
) {
if (makeActive) {
card.classList.add(activeCardClass)
card.querySelector(".modifier-card-image-overlay").innerText = "-"
} else {
card.classList.remove(activeCardClass)
card.querySelector(".modifier-card-image-overlay").innerText = "+"
}
}
else{
card.classList.remove(activeCardClass)
card.querySelector('.modifier-card-image-overlay').innerText = '+'
}
}
})
})
}
function changePreviewImages(val) {
const previewImages = document.querySelectorAll('.modifier-card-image-container img')
const previewImages = document.querySelectorAll(".modifier-card-image-container img")
let previewArr = []
modifiers.map(x => x.modifiers).forEach(x => previewArr.push(...x.map(m => m.previews)))
previewArr = previewArr.map(x => {
modifiers.map((x) => x.modifiers).forEach((x) => previewArr.push(...x.map((m) => m.previews)))
previewArr = previewArr.map((x) => {
let obj = {}
x.forEach(preview => {
x.forEach((preview) => {
obj[preview.name] = preview.path
})
return obj
})
previewImages.forEach(previewImage => {
const currentPreviewType = previewImage.getAttribute('preview-type')
const relativePreviewPath = previewImage.src.split(modifierThumbnailPath + '/').pop()
previewImages.forEach((previewImage) => {
const currentPreviewType = previewImage.getAttribute("preview-type")
const relativePreviewPath = previewImage.src.split(modifierThumbnailPath + "/").pop()
const previews = previewArr.find(preview => relativePreviewPath == preview[currentPreviewType])
const previews = previewArr.find((preview) => relativePreviewPath == preview[currentPreviewType])
if(typeof previews == 'object') {
if (typeof previews == "object") {
let preview = null
if (val == 'portrait') {
if (val == "portrait") {
preview = previews.portrait
}
else if (val == 'landscape') {
} else if (val == "landscape") {
preview = previews.landscape
}
if(preview != null) {
if (preview != null) {
previewImage.src = `${modifierThumbnailPath}/${preview}`
previewImage.setAttribute('preview-type', val)
previewImage.setAttribute("preview-type", val)
}
}
})
}
function resizeModifierCards(val) {
const cardSizePrefix = 'modifier-card-size_'
const modifierCardClass = 'modifier-card'
const cardSizePrefix = "modifier-card-size_"
const modifierCardClass = "modifier-card"
const modifierCards = document.querySelectorAll(`.${modifierCardClass}`)
const cardSize = n => `${cardSizePrefix}${n}`
const cardSize = (n) => `${cardSizePrefix}${n}`
modifierCards.forEach(card => {
modifierCards.forEach((card) => {
// remove existing size classes
const classes = card.className.split(' ').filter(c => !c.startsWith(cardSizePrefix))
card.className = classes.join(' ').trim()
const classes = card.className.split(" ").filter((c) => !c.startsWith(cardSizePrefix))
card.className = classes.join(" ").trim()
if(val != 0) {
if (val != 0) {
card.classList.add(cardSize(val))
}
})
@ -352,7 +372,7 @@ function resizeModifierCards(val) {
modifierCardSizeSlider.onchange = () => resizeModifierCards(modifierCardSizeSlider.value)
previewImageField.onchange = () => changePreviewImages(previewImageField.value)
modifierSettingsBtn.addEventListener('click', function(e) {
modifierSettingsBtn.addEventListener("click", function(e) {
modifierSettingsOverlay.classList.add("active")
customModifiersTextBox.setSelectionRange(0, 0)
customModifiersTextBox.focus()
@ -360,7 +380,7 @@ modifierSettingsBtn.addEventListener('click', function(e) {
e.stopPropagation()
})
modifierSettingsOverlay.addEventListener('keydown', function(e) {
modifierSettingsOverlay.addEventListener("keydown", function(e) {
switch (e.key) {
case "Escape": // Escape to cancel
customModifiersTextBox.value = customModifiersInitialContent // undo the changes
@ -368,7 +388,8 @@ modifierSettingsOverlay.addEventListener('keydown', function(e) {
e.stopPropagation()
break
case "Enter":
if (e.ctrlKey) { // Ctrl+Enter to confirm
if (e.ctrlKey) {
// Ctrl+Enter to confirm
modifierSettingsOverlay.classList.remove("active")
e.stopPropagation()
break
@ -383,7 +404,7 @@ function saveCustomModifiers() {
}
function loadCustomModifiers() {
PLUGINS['MODIFIERS_LOAD'].forEach(fn=>fn.loader.call())
PLUGINS["MODIFIERS_LOAD"].forEach((fn) => fn.loader.call())
}
customModifiersTextBox.addEventListener('change', saveCustomModifiers)
customModifiersTextBox.addEventListener("change", saveCustomModifiers)

File diff suppressed because it is too large

@ -3,25 +3,27 @@
* @readonly
* @enum {string}
*/
var ParameterType = {
var ParameterType = {
checkbox: "checkbox",
select: "select",
select_multiple: "select_multiple",
slider: "slider",
custom: "custom",
};
}
/**
* JSDoc style
* @typedef {object} Parameter
* @property {string} id
* @property {ParameterType} type
* @property {string} label
* @property {?string} note
* @property {keyof ParameterType} type
* @property {string | (parameter: Parameter) => (HTMLElement | string)} label
* @property {string | (parameter: Parameter) => (HTMLElement | string) | undefined} note
* @property {(parameter: Parameter) => (HTMLElement | string) | undefined} render
* @property {string | undefined} icon
* @property {number|boolean|string} default
* @property {boolean?} saveInAppConfig
*/
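Under the widened typedef, label and note may now be plain strings or factories returning an element, and render is only consulted for ParameterType.custom. A hypothetical entry illustrating the fields (the id, wording, and icon are made up and not part of the built-in list):

// Hypothetical parameter definition, for illustration only.
const exampleParameter = {
    id: "example_feature",
    type: ParameterType.checkbox,
    label: "Example feature",
    note: () => "toggles a feature that only exists in this illustration",
    icon: "fa-flask",
    default: false,
    saveInAppConfig: true, // flag picked up when settings are written back to the app config
}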
/** @type {Array.<Parameter>} */
var PARAMETERS = [
{
@ -30,13 +32,14 @@ var PARAMETERS = [
label: "Theme",
default: "theme-default",
note: "customize the look and feel of the ui",
options: [ // Note: options expanded dynamically
options: [
// Note: options expanded dynamically
{
value: "theme-default",
label: "Default"
}
label: "Default",
},
],
icon: "fa-palette"
icon: "fa-palette",
},
{
id: "save_to_disk",
@ -52,7 +55,7 @@ var PARAMETERS = [
label: "Save Location",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
}
},
},
{
id: "metadata_output_format",
@ -63,19 +66,19 @@ var PARAMETERS = [
options: [
{
value: "none",
label: "none"
label: "none",
},
{
value: "txt",
label: "txt"
label: "txt",
},
{
value: "json",
label: "json"
label: "json",
},
{
value: "embed",
label: "embed"
label: "embed",
},
{
value: "embed,txt",
@ -118,21 +121,23 @@ var PARAMETERS = [
note: "starts the default browser on startup",
icon: "fa-window-restore",
default: true,
saveInAppConfig: true,
},
{
id: "vram_usage_level",
type: ParameterType.select,
label: "GPU Memory Usage",
note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
"<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
"<b>High:</b> fastest, maximum GPU memory usage</br>" +
"<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
note:
"Faster performance requires more GPU memory (VRAM)<br/><br/>" +
"<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
"<b>High:</b> fastest, maximum GPU memory usage</br>" +
"<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [
{value: "balanced", label: "Balanced"},
{value: "high", label: "High"},
{value: "low", label: "Low"}
{ value: "balanced", label: "Balanced" },
{ value: "high", label: "High" },
{ value: "low", label: "Low" },
],
},
{
@ -168,7 +173,8 @@ var PARAMETERS = [
id: "confirm_dangerous_actions",
type: ParameterType.checkbox,
label: "Confirm dangerous actions",
note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
note:
"Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
icon: "fa-check-double",
default: true,
},
@ -176,25 +182,29 @@ var PARAMETERS = [
id: "listen_to_network",
type: ParameterType.checkbox,
label: "Make Stable Diffusion available on your network",
note: "Other devices on your network can access this web page",
note: "Other devices on your network can access this web page. Please restart the program after changing this.",
icon: "fa-network-wired",
default: true,
saveInAppConfig: true,
},
{
id: "listen_port",
type: ParameterType.custom,
label: "Network port",
note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
note:
"Port that this server listens to. The '9000' part in 'http://localhost:9000'. Please restart the program after changing this.",
icon: "fa-anchor",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
}
},
saveInAppConfig: true,
},
{
id: "use_beta_channel",
type: ParameterType.checkbox,
label: "Beta channel",
note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
note:
"Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
@ -202,14 +212,16 @@ var PARAMETERS = [
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Test Diffusers",
note: "<b>Experimental! Can have bugs!</b> Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
note:
"<b>Experimental! Can have bugs!</b> Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: false,
saveInAppConfig: true,
},
];
]
function getParameterSettingsEntry(id) {
let parameter = PARAMETERS.filter(p => p.id === id)
let parameter = PARAMETERS.filter((p) => p.id === id)
if (parameter.length === 0) {
return
}
@ -217,63 +229,117 @@ function getParameterSettingsEntry(id) {
}
function sliderUpdate(event) {
if (event.srcElement.id.endsWith('-input')) {
let slider = document.getElementById(event.srcElement.id.slice(0,-6))
if (event.srcElement.id.endsWith("-input")) {
let slider = document.getElementById(event.srcElement.id.slice(0, -6))
slider.value = event.srcElement.value
slider.dispatchEvent(new Event("change"))
} else {
let field = document.getElementById(event.srcElement.id+'-input')
let field = document.getElementById(event.srcElement.id + "-input")
field.value = event.srcElement.value
field.dispatchEvent(new Event("change"))
}
}
/**
* @param {Parameter} parameter
* @returns {string | HTMLElement}
*/
function getParameterElement(parameter) {
switch (parameter.type) {
case ParameterType.checkbox:
var is_checked = parameter.default ? " checked" : "";
var is_checked = parameter.default ? " checked" : ""
return `<input id="${parameter.id}" name="${parameter.id}"${is_checked} type="checkbox">`
case ParameterType.select:
case ParameterType.select_multiple:
var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
var options = (parameter.options || [])
.map((option) => `<option value="${option.value}">${option.label}</option>`)
.join("")
var multiple = parameter.type == ParameterType.select_multiple ? "multiple" : ""
return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
case ParameterType.slider:
return `<input id="${parameter.id}" name="${parameter.id}" class="editor-slider" type="range" value="${parameter.default}" min="${parameter.slider_min}" max="${parameter.slider_max}" oninput="sliderUpdate(event)"> <input id="${parameter.id}-input" name="${parameter.id}-input" size="4" value="${parameter.default}" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)">&nbsp;${parameter.slider_unit}`
case ParameterType.custom:
return parameter.render(parameter)
default:
console.error(`Invalid type for parameter ${parameter.id}`);
console.error(`Invalid type ${parameter.type} for parameter ${parameter.id}`)
return "ERROR: Invalid Type"
}
}
let parametersTable = document.querySelector("#system-settings .parameters-table")
/* fill in the system settings popup table */
function initParameters() {
PARAMETERS.forEach(parameter => {
var element = getParameterElement(parameter)
var note = parameter.note ? `<small>${parameter.note}</small>` : "";
var icon = parameter.icon ? `<i class="fa ${parameter.icon}"></i>` : "";
var newrow = document.createElement('div')
newrow.innerHTML = `
<div>${icon}</div>
<div><label for="${parameter.id}">${parameter.label}</label>${note}</div>
<div>${element}</div>`
/**
* fill in the system settings popup table
* @param {Array<Parameter> | undefined} parameters
* */
function initParameters(parameters) {
parameters.forEach((parameter) => {
const element = getParameterElement(parameter)
const elementWrapper = createElement("div")
if (element instanceof Node) {
elementWrapper.appendChild(element)
} else {
elementWrapper.innerHTML = element
}
const note = typeof parameter.note === "function" ? parameter.note(parameter) : parameter.note
const noteElements = []
if (note) {
const noteElement = createElement("small")
if (note instanceof Node) {
noteElement.appendChild(note)
} else {
noteElement.innerHTML = note || ""
}
noteElements.push(noteElement)
}
const icon = parameter.icon ? [createElement("i", undefined, ["fa", parameter.icon])] : []
const label = typeof parameter.label === "function" ? parameter.label(parameter) : parameter.label
const labelElement = createElement("label", { for: parameter.id })
if (label instanceof Node) {
labelElement.appendChild(label)
} else {
labelElement.innerHTML = label
}
const newrow = createElement(
"div",
{ "data-setting-id": parameter.id, "data-save-in-app-config": parameter.saveInAppConfig },
undefined,
[
createElement("div", undefined, undefined, icon),
createElement("div", undefined, undefined, [labelElement, ...noteElements]),
elementWrapper,
]
)
parametersTable.appendChild(newrow)
parameter.settingsEntry = newrow
})
}
initParameters()
initParameters(PARAMETERS)
let vramUsageLevelField = document.querySelector('#vram_usage_level')
let useCPUField = document.querySelector('#use_cpu')
let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
let useGPUsField = document.querySelector('#use_gpus')
let saveToDiskField = document.querySelector('#save_to_disk')
let diskPathField = document.querySelector('#diskPath')
let metadataOutputFormatField = document.querySelector('#metadata_output_format')
// listen to parameters from plugins
PARAMETERS.addEventListener("push", (...items) => {
initParameters(items)
if (items.find((item) => item.saveInAppConfig)) {
console.log(
"Reloading app config for new parameters",
items.map((p) => p.id)
)
getAppConfig()
}
})
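With that listener in place, plugins can register settings simply by pushing onto PARAMETERS after load; pushes that carry saveInAppConfig also trigger a config reload so the stored value is applied to the new row. A hedged sketch of how a plugin might use it (the parameter is hypothetical, and this assumes the event-emitting wrapper around PARAMETERS that the listener above relies on):

// Hypothetical plugin-side registration.
PARAMETERS.push({
    id: "example_plugin_toggle",
    type: ParameterType.checkbox,
    label: "Example plugin toggle",
    note: "added by a plugin purely for illustration",
    icon: "fa-plug",
    default: false,
    saveInAppConfig: true, // the listener above calls getAppConfig() to pull the stored value
})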
let vramUsageLevelField = document.querySelector("#vram_usage_level")
let useCPUField = document.querySelector("#use_cpu")
let autoPickGPUsField = document.querySelector("#auto_pick_gpus")
let useGPUsField = document.querySelector("#use_gpus")
let saveToDiskField = document.querySelector("#save_to_disk")
let diskPathField = document.querySelector("#diskPath")
let metadataOutputFormatField = document.querySelector("#metadata_output_format")
let listenToNetworkField = document.querySelector("#listen_to_network")
let listenPortField = document.querySelector("#listen_port")
let useBetaChannelField = document.querySelector("#use_beta_channel")
@ -281,32 +347,34 @@ let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_star
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
let testDiffusers = document.querySelector("#test_diffusers")
let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
let saveSettingsBtn = document.querySelector("#save-system-settings-btn")
async function changeAppConfig(configDelta) {
try {
let res = await fetch('/app_config', {
method: 'POST',
let res = await fetch("/app_config", {
method: "POST",
headers: {
'Content-Type': 'application/json'
"Content-Type": "application/json",
},
body: JSON.stringify(configDelta)
body: JSON.stringify(configDelta),
})
res = await res.json()
console.log('set config status response', res)
console.log("set config status response", res)
} catch (e) {
console.log('set config status error', e)
console.log("set config status error", e)
}
}
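// Usage sketch: changeAppConfig() POSTs a partial config to /app_config; the server is
// assumed to merge the delta into its stored settings. "update_branch" is one of the keys
// the Save button handler below actually sends, e.g.:
//   changeAppConfig({ update_branch: "beta" })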
async function getAppConfig() {
try {
let res = await fetch('/get/app_config')
let res = await fetch("/get/app_config")
const config = await res.json()
if (config.update_branch === 'beta') {
applySettingsFromConfig(config)
// custom overrides
if (config.update_branch === "beta") {
useBetaChannelField.checked = true
document.querySelector("#updateBranchLabel").innerText = "(beta)"
} else {
@@ -321,90 +389,146 @@ async function getAppConfig() {
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
if (config.test_diffusers === undefined || config.update_branch === 'main') {
document.querySelector("#lora_model_container").style.display = 'none'
document.querySelector("#lora_alpha_container").style.display = 'none'
const testDiffusersEnabled = config.test_diffusers && config.update_branch !== "main"
testDiffusers.checked = testDiffusersEnabled
if (!testDiffusersEnabled) {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#lora_alpha_container").style.display = "none"
document.querySelector("#tiling_container").style.display = "none"
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
option.style.display = "none"
})
} else {
testDiffusers.checked = config.test_diffusers && config.update_branch !== 'main'
document.querySelector("#lora_model_container").style.display = (testDiffusers.checked ? '' : 'none')
document.querySelector("#lora_alpha_container").style.display = (testDiffusers.checked && loraModelField.value !== "" ? '' : 'none')
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
document.querySelector("#tiling_container").style.display = ""
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
option.disabled = true
})
document.querySelector("#clip_skip_config").classList.remove("displayNone")
}
console.log('get config status response', config)
console.log("get config status response", config)
return config
} catch (e) {
console.log('get config status error', e)
console.log("get config status error", e)
return {}
}
}
saveToDiskField.addEventListener('change', function(e) {
function applySettingsFromConfig(config) {
Array.from(parametersTable.children).forEach((parameterRow) => {
if (parameterRow.dataset.settingId in config && parameterRow.dataset.saveInAppConfig === "true") {
const configValue = config[parameterRow.dataset.settingId]
const parameterElement =
document.getElementById(parameterRow.dataset.settingId) ||
parameterRow.querySelector("input") ||
parameterRow.querySelector("select")
switch (parameterElement?.tagName) {
case "INPUT":
if (parameterElement.type === "checkbox") {
parameterElement.checked = configValue
} else {
parameterElement.value = configValue
}
parameterElement.dispatchEvent(new Event("change"))
break
case "SELECT":
if (Array.isArray(configValue)) {
Array.from(parameterElement.options).forEach((option) => {
if (configValue.includes(option.value || option.text)) {
option.selected = true
}
})
} else {
parameterElement.value = configValue
}
parameterElement.dispatchEvent(new Event("change"))
break
}
}
})
}
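// Sketch of the input applySettingsFromConfig() expects: a flat object keyed by each row's
// data-setting-id, applied only to rows marked data-save-in-app-config="true". The keys
// below are hypothetical placeholders, not an actual list of settings:
//   applySettingsFromConfig({ some_checkbox_setting: true, some_dropdown_setting: "value" })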
saveToDiskField.addEventListener("change", function(e) {
diskPathField.disabled = !this.checked
metadataOutputFormatField.disabled = !this.checked
})
function getCurrentRenderDeviceSelection() {
let selectedGPUs = $('#use_gpus').val()
let selectedGPUs = $("#use_gpus").val()
if (useCPUField.checked && !autoPickGPUsField.checked) {
return 'cpu'
return "cpu"
}
if (autoPickGPUsField.checked || selectedGPUs.length == 0) {
return 'auto'
return "auto"
}
return selectedGPUs.join(',')
return selectedGPUs.join(",")
}
useCPUField.addEventListener('click', function() {
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
useCPUField.addEventListener("click", function() {
let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
let autoPickGPUSettingEntry = getParameterSettingsEntry("auto_pick_gpus")
if (this.checked) {
gpuSettingEntry.style.display = 'none'
autoPickGPUSettingEntry.style.display = 'none'
autoPickGPUsField.setAttribute('data-old-value', autoPickGPUsField.checked)
gpuSettingEntry.style.display = "none"
autoPickGPUSettingEntry.style.display = "none"
autoPickGPUsField.setAttribute("data-old-value", autoPickGPUsField.checked)
autoPickGPUsField.checked = false
} else if (useGPUsField.options.length >= MIN_GPUS_TO_SHOW_SELECTION) {
gpuSettingEntry.style.display = ''
autoPickGPUSettingEntry.style.display = ''
let oldVal = autoPickGPUsField.getAttribute('data-old-value')
if (oldVal === null || oldVal === undefined) { // the UI started with CPU selected by default
gpuSettingEntry.style.display = ""
autoPickGPUSettingEntry.style.display = ""
let oldVal = autoPickGPUsField.getAttribute("data-old-value")
if (oldVal === null || oldVal === undefined) {
// the UI started with CPU selected by default
autoPickGPUsField.checked = true
} else {
autoPickGPUsField.checked = (oldVal === 'true')
autoPickGPUsField.checked = oldVal === "true"
}
gpuSettingEntry.style.display = (autoPickGPUsField.checked ? 'none' : '')
gpuSettingEntry.style.display = autoPickGPUsField.checked ? "none" : ""
}
})
useGPUsField.addEventListener('click', function() {
let selectedGPUs = $('#use_gpus').val()
autoPickGPUsField.checked = (selectedGPUs.length === 0)
useGPUsField.addEventListener("click", function() {
let selectedGPUs = $("#use_gpus").val()
autoPickGPUsField.checked = selectedGPUs.length === 0
})
autoPickGPUsField.addEventListener('click', function() {
autoPickGPUsField.addEventListener("click", function() {
if (this.checked) {
$('#use_gpus').val([])
$("#use_gpus").val([])
}
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
gpuSettingEntry.style.display = (this.checked ? 'none' : '')
let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
gpuSettingEntry.style.display = this.checked ? "none" : ""
})
async function setDiskPath(defaultDiskPath, force=false) {
async function setDiskPath(defaultDiskPath, force = false) {
var diskPath = getSetting("diskPath")
if (force || diskPath == '' || diskPath == undefined || diskPath == "undefined") {
if (force || diskPath == "" || diskPath == undefined || diskPath == "undefined") {
setSetting("diskPath", defaultDiskPath)
}
}
function setDeviceInfo(devices) {
let cpu = devices.all.cpu.name
let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
let allGPUs = Object.keys(devices.all).filter((d) => d != "cpu")
let activeGPUs = Object.keys(devices.active)
function ID_TO_TEXT(d) {
let info = devices.all[d]
if ("mem_free" in info && "mem_total" in info) {
return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(
1
)} Gb total)</small>`
} else {
return `${info.name} <small>(${d}) (no memory info)</small>`
}
@@ -413,95 +537,138 @@ function setDeviceInfo(devices) {
allGPUs = allGPUs.map(ID_TO_TEXT)
activeGPUs = activeGPUs.map(ID_TO_TEXT)
let systemInfoEl = document.querySelector('#system-info')
systemInfoEl.querySelector('#system-info-cpu').innerText = cpu
systemInfoEl.querySelector('#system-info-gpus-all').innerHTML = allGPUs.join('</br>')
systemInfoEl.querySelector('#system-info-rendering-devices').innerHTML = activeGPUs.join('</br>')
let systemInfoEl = document.querySelector("#system-info")
systemInfoEl.querySelector("#system-info-cpu").innerText = cpu
systemInfoEl.querySelector("#system-info-gpus-all").innerHTML = allGPUs.join("</br>")
systemInfoEl.querySelector("#system-info-rendering-devices").innerHTML = activeGPUs.join("</br>")
}
function setHostInfo(hosts) {
let port = listenPortField.value
hosts = hosts.map(addr => `http://${addr}:${port}/`).map(url => `<div><a href="${url}">${url}</a></div>`)
document.querySelector('#system-info-server-hosts').innerHTML = hosts.join('')
hosts = hosts.map((addr) => `http://${addr}:${port}/`).map((url) => `<div><a href="${url}">${url}</a></div>`)
document.querySelector("#system-info-server-hosts").innerHTML = hosts.join("")
}
async function getSystemInfo() {
try {
const res = await SD.getSystemInfo()
let devices = res['devices']
let devices = res["devices"]
let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')
let allDeviceIds = Object.keys(devices["all"]).filter((d) => d !== "cpu")
let activeDeviceIds = Object.keys(devices["active"]).filter((d) => d !== "cpu")
if (activeDeviceIds.length === 0) {
useCPUField.checked = true
}
if (allDeviceIds.length < MIN_GPUS_TO_SHOW_SELECTION || useCPUField.checked) {
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
gpuSettingEntry.style.display = 'none'
let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
autoPickGPUSettingEntry.style.display = 'none'
let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
gpuSettingEntry.style.display = "none"
let autoPickGPUSettingEntry = getParameterSettingsEntry("auto_pick_gpus")
autoPickGPUSettingEntry.style.display = "none"
}
if (allDeviceIds.length === 0) {
useCPUField.checked = true
useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
getParameterSettingsEntry("use_cpu").addEventListener("click", function() {
alert(
"Sorry, we could not find a compatible graphics card! Easy Diffusion supports graphics cards with minimum 2 GB of RAM. " +
"Only NVIDIA cards are supported on Windows. NVIDIA and AMD cards are supported on Linux.<br/><br/>" +
"If you have a compatible graphics card, please try updating to the latest drivers.<br/><br/>" +
"Only the CPU can be used for generating images, without a compatible graphics card.",
"No compatible graphics card found!"
)
})
}
autoPickGPUsField.checked = (devices['config'] === 'auto')
autoPickGPUsField.checked = devices["config"] === "auto"
useGPUsField.innerHTML = ''
allDeviceIds.forEach(device => {
let deviceName = devices['all'][device]['name']
useGPUsField.innerHTML = ""
allDeviceIds.forEach((device) => {
let deviceName = devices["all"][device]["name"]
let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
useGPUsField.insertAdjacentHTML("beforeend", deviceOption)
})
if (autoPickGPUsField.checked) {
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
gpuSettingEntry.style.display = 'none'
let gpuSettingEntry = getParameterSettingsEntry("use_gpus")
gpuSettingEntry.style.display = "none"
} else {
$('#use_gpus').val(activeDeviceIds)
$("#use_gpus").val(activeDeviceIds)
}
setDeviceInfo(devices)
setHostInfo(res['hosts'])
document.dispatchEvent(new CustomEvent("system_info_update", { detail: devices }))
setHostInfo(res["hosts"])
let force = false
if (res['enforce_output_dir'] !== undefined) {
force = res['enforce_output_dir']
if (res["enforce_output_dir"] !== undefined) {
force = res["enforce_output_dir"]
if (force == true) {
saveToDiskField.checked = true
metadataOutputFormatField.disabled = false
saveToDiskField.checked = true
metadataOutputFormatField.disabled = false
}
saveToDiskField.disabled = force
diskPathField.disabled = force
}
setDiskPath(res['default_output_dir'], force)
setDiskPath(res["default_output_dir"], force)
} catch (e) {
console.log('error fetching devices', e)
console.log("error fetching devices", e)
}
}
saveSettingsBtn.addEventListener('click', function() {
if (listenPortField.value == '') {
alert('The network port field must not be empty.')
saveSettingsBtn.addEventListener("click", function() {
if (listenPortField.value == "") {
alert("The network port field must not be empty.")
return
}
if (listenPortField.value < 1 || listenPortField.value > 65535) {
alert('The network port must be a number from 1 to 65535')
alert("The network port must be a number from 1 to 65535")
return
}
let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
changeAppConfig({
'render_devices': getCurrentRenderDeviceSelection(),
'update_branch': updateBranch,
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
'listen_to_network': listenToNetworkField.checked,
'listen_port': listenPortField.value,
'test_diffusers': testDiffusers.checked
const updateBranch = useBetaChannelField.checked ? "beta" : "main"
const updateAppConfigRequest = {
render_devices: getCurrentRenderDeviceSelection(),
update_branch: updateBranch,
}
Array.from(parametersTable.children).forEach((parameterRow) => {
if (parameterRow.dataset.saveInAppConfig === "true") {
const parameterElement =
document.getElementById(parameterRow.dataset.settingId) ||
parameterRow.querySelector("input") ||
parameterRow.querySelector("select")
switch (parameterElement?.tagName) {
case "INPUT":
if (parameterElement.type === "checkbox") {
updateAppConfigRequest[parameterRow.dataset.settingId] = parameterElement.checked
} else {
updateAppConfigRequest[parameterRow.dataset.settingId] = parameterElement.value
}
break
case "SELECT":
if (parameterElement.multiple) {
updateAppConfigRequest[parameterRow.dataset.settingId] = Array.from(parameterElement.options)
.filter((option) => option.selected)
.map((option) => option.value || option.text)
} else {
updateAppConfigRequest[parameterRow.dataset.settingId] = parameterElement.value
}
break
default:
console.error(
`Setting parameter ${parameterRow.dataset.settingId} couldn't be saved to app.config - element #${parameterRow.dataset.settingId} is a <${parameterElement?.tagName} /> instead of an <input /> or a <select />!`
)
break
}
}
})
saveSettingsBtn.classList.add('active')
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
const savePromise = changeAppConfig(updateAppConfigRequest)
saveSettingsBtn.classList.add("active")
Promise.all([savePromise, asyncDelay(300)]).then(() => saveSettingsBtn.classList.remove("active"))
})
document.addEventListener("system_info_update", (e) => setDeviceInfo(e.detail))

View File

@@ -3,7 +3,7 @@ const PLUGIN_API_VERSION = "1.0"
const PLUGINS = {
/**
* Register new buttons to show on each output image.
*
*
* Example:
* PLUGINS['IMAGE_INFO_BUTTONS'].push({
* text: 'Make a Similar Image',
@@ -29,14 +29,20 @@ const PLUGINS = {
MODIFIERS_LOAD: [],
TASK_CREATE: [],
OUTPUTS_FORMATS: new ServiceContainer(
function png() { return (reqBody) => new SD.RenderTask(reqBody) }
, function jpeg() { return (reqBody) => new SD.RenderTask(reqBody) }
, function webp() { return (reqBody) => new SD.RenderTask(reqBody) }
function png() {
return (reqBody) => new SD.RenderTask(reqBody)
},
function jpeg() {
return (reqBody) => new SD.RenderTask(reqBody)
},
function webp() {
return (reqBody) => new SD.RenderTask(reqBody)
}
),
}
PLUGINS.OUTPUTS_FORMATS.register = function(...args) {
const service = ServiceContainer.prototype.register.apply(this, args)
if (typeof outputFormatField !== 'undefined') {
if (typeof outputFormatField !== "undefined") {
const newOption = document.createElement("option")
newOption.setAttribute("value", service.name)
newOption.innerText = service.name
@@ -46,13 +52,13 @@ PLUGINS.OUTPUTS_FORMATS.register = function(...args) {
}
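// Illustrative sketch: a plugin could register an additional output format the same way
// png/jpeg/webp are registered above; the register() override then adds it to the output
// format dropdown. The "avif" name is a hypothetical example, not a format the renderer is
// known to support.
// PLUGINS.OUTPUTS_FORMATS.register(function avif() {
//     return (reqBody) => new SD.RenderTask(reqBody)
// })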
function loadScript(url) {
const script = document.createElement('script')
const script = document.createElement("script")
const promiseSrc = new PromiseSource()
script.addEventListener('error', () => promiseSrc.reject(new Error(`Script "${url}" couldn't be loaded.`)))
script.addEventListener('load', () => promiseSrc.resolve(url))
script.src = url + '?t=' + Date.now()
script.addEventListener("error", () => promiseSrc.reject(new Error(`Script "${url}" couldn't be loaded.`)))
script.addEventListener("load", () => promiseSrc.resolve(url))
script.src = url + "?t=" + Date.now()
console.log('loading script', url)
console.log("loading script", url)
document.head.appendChild(script)
return promiseSrc.promise
@@ -60,7 +66,7 @@ function loadScript(url) {
async function loadUIPlugins() {
try {
const res = await fetch('/get/ui_plugins')
const res = await fetch("/get/ui_plugins")
if (!res.ok) {
console.error(`Error HTTP${res.status} while loading plugins list. - ${res.statusText}`)
return
@@ -69,6 +75,6 @@ async function loadUIPlugins() {
const loadingPromises = plugins.map(loadScript)
return await Promise.allSettled(loadingPromises)
} catch (e) {
console.log('error fetching plugin paths', e)
console.log("error fetching plugin paths", e)
}
}

View File

@@ -21,14 +21,13 @@ let hypernetworkModelField = new ModelDropdown(document.querySelector('#hypernet
3) Model dropdowns will be refreshed automatically when the reload models button is invoked.
*/
class ModelDropdown
{
class ModelDropdown {
modelFilter //= document.querySelector("#model-filter")
modelFilterArrow //= document.querySelector("#model-filter-arrow")
modelList //= document.querySelector("#model-list")
modelResult //= document.querySelector("#model-result")
modelNoResult //= document.querySelector("#model-no-result")
currentSelection //= { elem: undefined, value: '', path: ''}
highlightedModelEntry //= undefined
activeModel //= undefined
@@ -59,11 +58,11 @@ class ModelDropdown
set disabled(state) {
this.modelFilter.disabled = state
if (this.modelFilterArrow) {
this.modelFilterArrow.style.color = state ? 'dimgray' : ''
this.modelFilterArrow.style.color = state ? "dimgray" : ""
}
}
get modelElements() {
return this.modelList.querySelectorAll('.model-file')
return this.modelList.querySelectorAll(".model-file")
}
addEventListener(type, listener, options) {
return this.modelFilter.addEventListener(type, listener, options)
@@ -82,21 +81,37 @@ class ModelDropdown
}
}
/* SEARCHABLE INPUT */
constructor (input, modelKey, noneEntry = '') {
/* SEARCHABLE INPUT */
constructor(input, modelKey, noneEntry = "") {
this.modelFilter = input
this.noneEntry = noneEntry
this.modelKey = modelKey
if (modelsOptions !== undefined) { // reuse models from cache (only useful for plugins, which are loaded after models)
this.inputModels = modelsOptions[this.modelKey]
if (modelsOptions !== undefined) {
// reuse models from cache (only useful for plugins, which are loaded after models)
this.inputModels = []
let modelKeys = Array.isArray(this.modelKey) ? this.modelKey : [this.modelKey]
for (let i = 0; i < modelKeys.length; i++) {
let key = modelKeys[i]
this.inputModels.push(...modelsOptions[key])
}
this.populateModels()
}
document.addEventListener("refreshModels", this.bind(function(e) {
// reload the models
this.inputModels = modelsOptions[this.modelKey]
this.populateModels()
}, this))
document.addEventListener(
"refreshModels",
this.bind(function(e) {
// reload the models
this.inputModels = modelsOptions[this.modelKey]
this.inputModels = []
let modelKeys = Array.isArray(this.modelKey) ? this.modelKey : [this.modelKey]
for (let i = 0; i < modelKeys.length; i++) {
let key = modelKeys[i]
this.inputModels.push(...modelsOptions[key])
}
this.populateModels()
}, this)
)
}
saveCurrentSelection(elem, value, path) {
@@ -105,13 +120,13 @@ class ModelDropdown
this.currentSelection.path = path
this.modelFilter.dataset.path = path
this.modelFilter.value = value
this.modelFilter.dispatchEvent(new Event('change'))
this.modelFilter.dispatchEvent(new Event("change"))
}
processClick(e) {
e.preventDefault()
if (e.srcElement.classList.contains('model-file') || e.srcElement.classList.contains('fa-file')) {
const elem = e.srcElement.classList.contains('model-file') ? e.srcElement : e.srcElement.parentElement
if (e.srcElement.classList.contains("model-file") || e.srcElement.classList.contains("fa-file")) {
const elem = e.srcElement.classList.contains("model-file") ? e.srcElement : e.srcElement.parentElement
this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
this.hideModelList()
this.modelFilter.focus()
@@ -126,66 +141,67 @@ class ModelDropdown
return undefined
}
return modelElements.slice(0, index).reverse().find(e => e.style.display === 'list-item')
return modelElements
.slice(0, index)
.reverse()
.find((e) => e.style.display === "list-item")
}
getLastVisibleChild(elem) {
let lastElementChild = elem.lastElementChild
if (lastElementChild.style.display == 'list-item') return lastElementChild
if (lastElementChild.style.display == "list-item") return lastElementChild
return this.getPreviousVisibleSibling(lastElementChild)
}
getNextVisibleSibling(elem) {
const modelElements = Array.from(this.modelElements)
const index = modelElements.indexOf(elem)
return modelElements.slice(index + 1).find(e => e.style.display === 'list-item')
return modelElements.slice(index + 1).find((e) => e.style.display === "list-item")
}
getFirstVisibleChild(elem) {
let firstElementChild = elem.firstElementChild
if (firstElementChild.style.display == 'list-item') return firstElementChild
if (firstElementChild.style.display == "list-item") return firstElementChild
return this.getNextVisibleSibling(firstElementChild)
}
selectModelEntry(elem) {
if (elem) {
if (this.highlightedModelEntry !== undefined) {
this.highlightedModelEntry.classList.remove('selected')
this.highlightedModelEntry.classList.remove("selected")
}
this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
elem.classList.add('selected')
elem.scrollIntoView({block: 'nearest'})
elem.classList.add("selected")
elem.scrollIntoView({ block: "nearest" })
this.highlightedModelEntry = elem
}
}
selectPreviousFile() {
const elem = this.getPreviousVisibleSibling(this.highlightedModelEntry)
if (elem) {
this.selectModelEntry(elem)
}
else
{
} else {
//this.highlightedModelEntry.parentElement.parentElement.scrollIntoView({block: 'nearest'})
this.highlightedModelEntry.closest('.model-list').scrollTop = 0
this.highlightedModelEntry.closest(".model-list").scrollTop = 0
}
this.modelFilter.select()
}
selectNextFile() {
this.selectModelEntry(this.getNextVisibleSibling(this.highlightedModelEntry))
this.modelFilter.select()
}
selectFirstFile() {
this.selectModelEntry(this.modelList.querySelector('.model-file'))
this.highlightedModelEntry.scrollIntoView({block: 'nearest'})
this.selectModelEntry(this.modelList.querySelector(".model-file"))
this.highlightedModelEntry.scrollIntoView({ block: "nearest" })
this.modelFilter.select()
}
selectLastFile() {
const elems = this.modelList.querySelectorAll('.model-file:last-child')
this.selectModelEntry(elems[elems.length -1])
const elems = this.modelList.querySelectorAll(".model-file:last-child")
this.selectModelEntry(elems[elems.length - 1])
this.modelFilter.select()
}
@@ -198,57 +214,57 @@ class ModelDropdown
}
validEntrySelected() {
return (this.modelNoResult.style.display === 'none')
return this.modelNoResult.style.display === "none"
}
processKey(e) {
switch (e.key) {
case 'Escape':
case "Escape":
e.preventDefault()
this.resetSelection()
break
case 'Enter':
case "Enter":
e.preventDefault()
if (this.validEntrySelected()) {
if (this.modelList.style.display != 'block') {
if (this.modelList.style.display != "block") {
this.showModelList()
}
else
{
this.saveCurrentSelection(this.highlightedModelEntry, this.highlightedModelEntry.innerText, this.highlightedModelEntry.dataset.path)
} else {
this.saveCurrentSelection(
this.highlightedModelEntry,
this.highlightedModelEntry.innerText,
this.highlightedModelEntry.dataset.path
)
this.hideModelList()
this.showAllEntries()
}
this.modelFilter.focus()
}
else
{
} else {
this.resetSelection()
}
break
case 'ArrowUp':
case "ArrowUp":
e.preventDefault()
if (this.validEntrySelected()) {
this.selectPreviousFile()
}
break
case 'ArrowDown':
case "ArrowDown":
e.preventDefault()
if (this.validEntrySelected()) {
this.selectNextFile()
}
break
case 'ArrowLeft':
if (this.modelList.style.display != 'block') {
case "ArrowLeft":
if (this.modelList.style.display != "block") {
e.preventDefault()
}
break
case 'ArrowRight':
if (this.modelList.style.display != 'block') {
case "ArrowRight":
if (this.modelList.style.display != "block") {
e.preventDefault()
}
break
case 'PageUp':
case "PageUp":
e.preventDefault()
if (this.validEntrySelected()) {
this.selectPreviousFile()
@@ -261,7 +277,7 @@ class ModelDropdown
this.selectPreviousFile()
}
break
case 'PageDown':
case "PageDown":
e.preventDefault()
if (this.validEntrySelected()) {
this.selectNextFile()
@@ -274,201 +290,195 @@ class ModelDropdown
this.selectNextFile()
}
break
case 'Home':
case "Home":
//if (this.modelList.style.display != 'block') {
e.preventDefault()
if (this.validEntrySelected()) {
this.selectFirstFile()
}
e.preventDefault()
if (this.validEntrySelected()) {
this.selectFirstFile()
}
//}
break
case 'End':
case "End":
//if (this.modelList.style.display != 'block') {
e.preventDefault()
if (this.validEntrySelected()) {
this.selectLastFile()
}
e.preventDefault()
if (this.validEntrySelected()) {
this.selectLastFile()
}
//}
break
default:
//console.log(e.key)
//console.log(e.key)
}
}
modelListFocus() {
this.selectEntry()
this.showAllEntries()
}
showModelList() {
this.modelList.style.display = 'block'
this.modelList.style.display = "block"
this.selectEntry()
this.showAllEntries()
//this.modelFilter.value = ''
this.modelFilter.select() // preselect the entire string so user can just start typing.
this.modelFilter.focus()
this.modelFilter.style.cursor = 'auto'
this.modelFilter.style.cursor = "auto"
}
hideModelList() {
this.modelList.style.display = 'none'
this.modelList.style.display = "none"
this.modelFilter.value = this.currentSelection.value
this.modelFilter.style.cursor = ''
this.modelFilter.style.cursor = ""
}
toggleModelList(e) {
e.preventDefault()
if (!this.modelFilter.disabled) {
if (this.modelList.style.display != 'block') {
if (this.modelList.style.display != "block") {
this.showModelList()
}
else
{
} else {
this.hideModelList()
this.modelFilter.select()
}
}
}
selectEntry(path) {
if (path !== undefined) {
const entries = this.modelElements;
const entries = this.modelElements
for (const elem of entries) {
if (elem.dataset.path == path) {
this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
this.highlightedModelEntry = elem
elem.scrollIntoView({block: 'nearest'})
elem.scrollIntoView({ block: "nearest" })
break
}
}
}
if (this.currentSelection.elem !== undefined) {
// select the previous element
if (this.highlightedModelEntry !== undefined && this.highlightedModelEntry != this.currentSelection.elem) {
this.highlightedModelEntry.classList.remove('selected')
this.highlightedModelEntry.classList.remove("selected")
}
this.currentSelection.elem.classList.add('selected')
this.currentSelection.elem.classList.add("selected")
this.highlightedModelEntry = this.currentSelection.elem
this.currentSelection.elem.scrollIntoView({block: 'nearest'})
}
else
{
this.currentSelection.elem.scrollIntoView({ block: "nearest" })
} else {
this.selectFirstFile()
}
}
highlightModelAtPosition(e) {
let elem = document.elementFromPoint(e.clientX, e.clientY)
if (elem.classList.contains('model-file')) {
if (elem.classList.contains("model-file")) {
this.highlightModel(elem)
}
}
highlightModel(elem) {
if (elem.classList.contains('model-file')) {
if (elem.classList.contains("model-file")) {
if (this.highlightedModelEntry !== undefined && this.highlightedModelEntry != elem) {
this.highlightedModelEntry.classList.remove('selected')
this.highlightedModelEntry.classList.remove("selected")
}
elem.classList.add('selected')
elem.classList.add("selected")
this.highlightedModelEntry = elem
}
}
showAllEntries() {
this.modelList.querySelectorAll('li').forEach(function(li) {
if (li.id !== 'model-no-result') {
li.style.display = 'list-item'
this.modelList.querySelectorAll("li").forEach(function(li) {
if (li.id !== "model-no-result") {
li.style.display = "list-item"
}
})
this.modelNoResult.style.display = 'none'
this.modelNoResult.style.display = "none"
}
filterList(e) {
const filter = this.modelFilter.value.toLowerCase()
let found = false
let showAllChildren = false
this.modelList.querySelectorAll('li').forEach(function(li) {
if (li.classList.contains('model-folder')) {
this.modelList.querySelectorAll("li").forEach(function(li) {
if (li.classList.contains("model-folder")) {
showAllChildren = false
}
if (filter == '') {
li.style.display = 'list-item'
if (filter == "") {
li.style.display = "list-item"
found = true
} else if (showAllChildren || li.textContent.toLowerCase().match(filter)) {
li.style.display = 'list-item'
if (li.classList.contains('model-folder') && li.firstChild.textContent.toLowerCase().match(filter)) {
li.style.display = "list-item"
if (li.classList.contains("model-folder") && li.firstChild.textContent.toLowerCase().match(filter)) {
showAllChildren = true
}
found = true
} else {
li.style.display = 'none'
li.style.display = "none"
}
})
if (found) {
this.modelResult.style.display = 'list-item'
this.modelNoResult.style.display = 'none'
const elem = this.getNextVisibleSibling(this.modelList.querySelector('.model-file'))
this.modelResult.style.display = "list-item"
this.modelNoResult.style.display = "none"
const elem = this.getNextVisibleSibling(this.modelList.querySelector(".model-file"))
this.highlightModel(elem)
elem.scrollIntoView({block: 'nearest'})
elem.scrollIntoView({ block: "nearest" })
} else {
this.modelResult.style.display = "none"
this.modelNoResult.style.display = "list-item"
}
else
{
this.modelResult.style.display = 'none'
this.modelNoResult.style.display = 'list-item'
}
this.modelList.style.display = 'block'
this.modelList.style.display = "block"
}
/* MODEL LOADER */
getElementDimensions(element) {
// Clone the element
const clone = element.cloneNode(true)
// Copy the styles of the original element to the cloned element
const originalStyles = window.getComputedStyle(element)
for (let i = 0; i < originalStyles.length; i++) {
const property = originalStyles[i]
clone.style[property] = originalStyles.getPropertyValue(property)
}
// Set its visibility to hidden and display to inline-block
clone.style.visibility = "hidden"
clone.style.display = "inline-block"
// Put the cloned element next to the original element
element.parentNode.insertBefore(clone, element.nextSibling)
// Get its width and height
const width = clone.offsetWidth
const height = clone.offsetHeight
// Remove it from the DOM
clone.remove()
// Return its width and height
return { width, height }
}
/**
* @param {Array<string>} models
* @param {Array<string>} models
*/
sortStringArray(models) {
models.sort((a, b) => a.localeCompare(b, undefined, { sensitivity: 'base' }))
models.sort((a, b) => a.localeCompare(b, undefined, { sensitivity: "base" }))
}
populateModels() {
this.activeModel = this.modelFilter.dataset.path
this.currentSelection = { elem: undefined, value: '', path: ''}
this.currentSelection = { elem: undefined, value: "", path: "" }
this.highlightedModelEntry = undefined
this.flatModelList = []
if(this.modelList !== undefined) {
if (this.modelList !== undefined) {
this.modelList.remove()
this.modelFilterArrow.remove()
}
@@ -478,39 +488,39 @@ class ModelDropdown
createDropdown() {
// create dropdown entries
let rootModelList = this.createRootModelList(this.inputModels)
this.modelFilter.insertAdjacentElement('afterend', rootModelList)
this.modelFilter.insertAdjacentElement("afterend", rootModelList)
this.modelFilter.insertAdjacentElement(
'afterend',
createElement(
'i',
{ id: `${this.modelFilter.id}-model-filter-arrow` },
['model-selector-arrow', 'fa-solid', 'fa-angle-down'],
),
"afterend",
createElement("i", { id: `${this.modelFilter.id}-model-filter-arrow` }, [
"model-selector-arrow",
"fa-solid",
"fa-angle-down",
])
)
this.modelFilter.classList.add('model-selector')
this.modelFilter.classList.add("model-selector")
this.modelFilterArrow = document.querySelector(`#${this.modelFilter.id}-model-filter-arrow`)
if (this.modelFilterArrow) {
this.modelFilterArrow.style.color = this.modelFilter.disabled ? 'dimgray' : ''
this.modelFilterArrow.style.color = this.modelFilter.disabled ? "dimgray" : ""
}
this.modelList = document.querySelector(`#${this.modelFilter.id}-model-list`)
this.modelResult = document.querySelector(`#${this.modelFilter.id}-model-result`)
this.modelNoResult = document.querySelector(`#${this.modelFilter.id}-model-no-result`)
if (this.modelFilterInitialized !== true) {
this.modelFilter.addEventListener('input', this.bind(this.filterList, this))
this.modelFilter.addEventListener('focus', this.bind(this.modelListFocus, this))
this.modelFilter.addEventListener('blur', this.bind(this.hideModelList, this))
this.modelFilter.addEventListener('click', this.bind(this.showModelList, this))
this.modelFilter.addEventListener('keydown', this.bind(this.processKey, this))
this.modelFilter.addEventListener("input", this.bind(this.filterList, this))
this.modelFilter.addEventListener("focus", this.bind(this.modelListFocus, this))
this.modelFilter.addEventListener("blur", this.bind(this.hideModelList, this))
this.modelFilter.addEventListener("click", this.bind(this.showModelList, this))
this.modelFilter.addEventListener("keydown", this.bind(this.processKey, this))
this.modelFilterInitialized = true
}
this.modelFilterArrow.addEventListener('mousedown', this.bind(this.toggleModelList, this))
this.modelList.addEventListener('mousemove', this.bind(this.highlightModelAtPosition, this))
this.modelList.addEventListener('mousedown', this.bind(this.processClick, this))
this.modelFilterArrow.addEventListener("mousedown", this.bind(this.toggleModelList, this))
this.modelList.addEventListener("mousemove", this.bind(this.highlightModelAtPosition, this))
this.modelList.addEventListener("mousedown", this.bind(this.processClick, this))
let mf = this.modelFilter
this.modelFilter.addEventListener('focus', function() {
this.modelFilter.addEventListener("focus", function() {
let modelFilterStyle = window.getComputedStyle(mf)
rootModelList.style.minWidth = modelFilterStyle.width
})
@@ -520,74 +530,62 @@ class ModelDropdown
/**
* @param {Array<string | object} modelTree
* @param {string} folderName
* @param {boolean} isRootFolder
* @param {string} folderName
* @param {boolean} isRootFolder
* @returns {HTMLElement}
*/
createModelNodeList(folderName, modelTree, isRootFolder) {
const listElement = createElement('ul')
const listElement = createElement("ul")
const foldersMap = new Map()
const modelsMap = new Map()
modelTree.forEach(model => {
modelTree.forEach((model) => {
if (Array.isArray(model)) {
const [childFolderName, childModels] = model
foldersMap.set(
childFolderName,
this.createModelNodeList(
`${folderName || ''}/${childFolderName}`,
childModels,
false,
),
this.createModelNodeList(`${folderName || ""}/${childFolderName}`, childModels, false)
)
} else {
const classes = ['model-file']
const classes = ["model-file"]
if (isRootFolder) {
classes.push('in-root-folder')
classes.push("in-root-folder")
}
// Remove the leading slash from the model path
const fullPath = folderName ? `${folderName.substring(1)}/${model}` : model
modelsMap.set(
model,
createElement(
'li',
{ 'data-path': fullPath },
classes,
[
createElement('i', undefined, ['fa-regular', 'fa-file', 'icon']),
model,
],
),
createElement("li", { "data-path": fullPath }, classes, [
createElement("i", undefined, ["fa-regular", "fa-file", "icon"]),
model,
])
)
}
})
const childFolderNames = Array.from(foldersMap.keys())
this.sortStringArray(childFolderNames)
const folderElements = childFolderNames.map(name => foldersMap.get(name))
const folderElements = childFolderNames.map((name) => foldersMap.get(name))
const modelNames = Array.from(modelsMap.keys())
this.sortStringArray(modelNames)
const modelElements = modelNames.map(name => modelsMap.get(name))
const modelElements = modelNames.map((name) => modelsMap.get(name))
if (modelElements.length && folderName) {
listElement.appendChild(
createElement(
'li',
"li",
undefined,
['model-folder'],
[
createElement('i', undefined, ['fa-regular', 'fa-folder-open', 'icon']),
folderName.substring(1),
],
["model-folder"],
[createElement("i", undefined, ["fa-regular", "fa-folder-open", "icon"]), folderName.substring(1)]
)
)
}
// const allModelElements = isRootFolder ? [...folderElements, ...modelElements] : [...modelElements, ...folderElements]
const allModelElements = [...modelElements, ...folderElements]
allModelElements.forEach(e => listElement.appendChild(e))
allModelElements.forEach((e) => listElement.appendChild(e))
return listElement
}
@@ -596,37 +594,21 @@ class ModelDropdown
* @returns {HTMLElement}
*/
createRootModelList(modelTree) {
const rootList = createElement(
'ul',
{ id: `${this.modelFilter.id}-model-list` },
['model-list'],
)
const rootList = createElement("ul", { id: `${this.modelFilter.id}-model-list` }, ["model-list"])
rootList.appendChild(
createElement(
'li',
{ id: `${this.modelFilter.id}-model-no-result` },
['model-no-result'],
'No result'
),
createElement("li", { id: `${this.modelFilter.id}-model-no-result` }, ["model-no-result"], "No result")
)
if (this.noneEntry) {
rootList.appendChild(
createElement(
'li',
{ 'data-path': '' },
['model-file', 'in-root-folder'],
this.noneEntry,
),
createElement("li", { "data-path": "" }, ["model-file", "in-root-folder"], this.noneEntry)
)
}
if (modelTree.length > 0) {
const containerListItem = createElement(
'li',
{ id: `${this.modelFilter.id}-model-result` },
['model-result'],
)
const containerListItem = createElement("li", { id: `${this.modelFilter.id}-model-result` }, [
"model-result",
])
//console.log(containerListItem)
containerListItem.appendChild(this.createModelNodeList(undefined, modelTree, true))
rootList.appendChild(containerListItem)
@@ -640,13 +622,16 @@ class ModelDropdown
async function getModels() {
try {
modelsCache = await SD.getModels()
modelsOptions = modelsCache['options']
modelsOptions = modelsCache["options"]
if ("scan-error" in modelsCache) {
// let previewPane = document.getElementById('tab-content-wrapper')
let previewPane = document.getElementById('preview')
previewPane.style.background="red"
previewPane.style.textAlign="center"
previewPane.innerHTML = '<H1>🔥Malware alert!🔥</H1><h2>The file <i>' + modelsCache['scan-error'] + '</i> in your <tt>models/stable-diffusion</tt> folder is probably malware infected.</h2><h2>Please delete this file from the folder before proceeding!</h2>After deleting the file, reload this page.<br><br><button onClick="window.location.reload();">Reload Page</button>'
let previewPane = document.getElementById("preview")
previewPane.style.background = "red"
previewPane.style.textAlign = "center"
previewPane.innerHTML =
"<H1>🔥Malware alert!🔥</H1><h2>The file <i>" +
modelsCache["scan-error"] +
'</i> in your <tt>models/stable-diffusion</tt> folder is probably malware infected.</h2><h2>Please delete this file from the folder before proceeding!</h2>After deleting the file, reload this page.<br><br><button onClick="window.location.reload();">Reload Page</button>'
makeImageBtn.disabled = true
}
@@ -667,11 +652,11 @@ async function getModels() {
*/
// notify ModelDropdown objects to refresh
document.dispatchEvent(new Event('refreshModels'))
document.dispatchEvent(new Event("refreshModels"))
} catch (e) {
console.log('get models error', e)
console.log("get models error", e)
}
}
// reload models button
document.querySelector('#reload-models').addEventListener('click', getModels)
document.querySelector("#reload-models").addEventListener("click", getModels)

View File

@@ -1,82 +1,85 @@
const themeField = document.getElementById("theme");
var DEFAULT_THEME = {};
var THEMES = []; // initialized in initTheme from data in css
const themeField = document.getElementById("theme")
var DEFAULT_THEME = {}
var THEMES = [] // initialized in initTheme from data in css
function getThemeName(theme) {
theme = theme.replace("theme-", "");
theme = theme.split("-").map(word => word.charAt(0).toUpperCase() + word.slice(1)).join(" ");
return theme;
theme = theme.replace("theme-", "")
theme = theme
.split("-")
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
.join(" ")
return theme
}
// init themefield
function initTheme() {
Array.from(document.styleSheets)
.filter(sheet => sheet.href?.startsWith(window.location.origin))
.flatMap(sheet => Array.from(sheet.cssRules))
.forEach(rule => {
var selector = rule.selectorText;
.filter((sheet) => sheet.href?.startsWith(window.location.origin))
.flatMap((sheet) => Array.from(sheet.cssRules))
.forEach((rule) => {
var selector = rule.selectorText
if (selector && selector.startsWith(".theme-") && !selector.includes(" ")) {
if (DEFAULT_THEME) { // re-add props that dont change (css needs this so they update correctly)
if (DEFAULT_THEME) {
// re-add props that dont change (css needs this so they update correctly)
Array.from(DEFAULT_THEME.rule.style)
.filter(cssVariable => !Array.from(rule.style).includes(cssVariable))
.forEach(cssVariable => {
rule.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
});
.filter((cssVariable) => !Array.from(rule.style).includes(cssVariable))
.forEach((cssVariable) => {
rule.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable))
})
}
var theme_key = selector.substring(1);
var theme_key = selector.substring(1)
THEMES.push({
key: theme_key,
name: getThemeName(theme_key),
rule: rule
rule: rule,
})
}
if (selector && selector == ":root") {
DEFAULT_THEME = {
key: "theme-default",
name: "Default",
rule: rule
};
rule: rule,
}
}
});
THEMES.forEach(theme => {
var new_option = document.createElement("option");
new_option.setAttribute("value", theme.key);
new_option.innerText = theme.name;
themeField.appendChild(new_option);
});
})
THEMES.forEach((theme) => {
var new_option = document.createElement("option")
new_option.setAttribute("value", theme.key)
new_option.innerText = theme.name
themeField.appendChild(new_option)
})
// setup the style transitions a second after app initializes, so initial style is instant
setTimeout(() => {
var body = document.querySelector("body");
var style = document.createElement('style');
style.innerHTML = "* { transition: background 0.5s, color 0.5s, background-color 0.5s; }";
body.appendChild(style);
}, 1000);
var body = document.querySelector("body")
var style = document.createElement("style")
style.innerHTML = "* { transition: background 0.5s, color 0.5s, background-color 0.5s; }"
body.appendChild(style)
}, 1000)
}
initTheme();
initTheme()
function themeFieldChanged() {
var theme_key = themeField.value;
var theme_key = themeField.value
var body = document.querySelector("body");
body.classList.remove(...THEMES.map(theme => theme.key));
body.classList.add(theme_key);
//
var body = document.querySelector("body")
body.classList.remove(...THEMES.map((theme) => theme.key))
body.classList.add(theme_key)
body.style = "";
var theme = THEMES.find(t => t.key == theme_key);
//
body.style = ""
var theme = THEMES.find((t) => t.key == theme_key)
let borderColor = undefined
if (theme) {
borderColor = theme.rule.style.getPropertyValue('--input-border-color').trim()
if (!borderColor.startsWith('#')) {
borderColor = theme.rule.style.getPropertyValue('--theme-color-fallback')
borderColor = theme.rule.style.getPropertyValue("--input-border-color").trim()
if (!borderColor.startsWith("#")) {
borderColor = theme.rule.style.getPropertyValue("--theme-color-fallback")
}
} else {
borderColor = DEFAULT_THEME.rule.style.getPropertyValue('--theme-color-fallback')
borderColor = DEFAULT_THEME.rule.style.getPropertyValue("--theme-color-fallback")
}
document.querySelector('meta[name="theme-color"]').setAttribute("content", borderColor)
}
themeField.addEventListener('change', themeFieldChanged);
themeField.addEventListener("change", themeFieldChanged)
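// Sketch: switching the theme programmatically goes through the same path as the dropdown,
// since themeFieldChanged() reads themeField.value. "theme-default" is the built-in key
// defined above; the other keys come from the .theme-* CSS rules collected by initTheme().
//   themeField.value = "theme-default"
//   themeField.dispatchEvent(new Event("change"))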

File diff suppressed because it is too large

View File

@@ -1,28 +1,32 @@
(function () {
;(function() {
"use strict"
let autoScroll = document.querySelector("#auto_scroll")
// observe for changes in the preview pane
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
if (mutation.target.className == 'img-batch') {
var observer = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.target.className == "img-batch") {
Autoscroll(mutation.target)
}
})
})
observer.observe(document.getElementById('preview'), {
childList: true,
subtree: true
observer.observe(document.getElementById("preview"), {
childList: true,
subtree: true,
})
function Autoscroll(target) {
if (autoScroll.checked && target !== null) {
const img = target.querySelector('img')
img.addEventListener('load', function() {
img.closest('.imageTaskContainer').scrollIntoView()
}, { once: true })
const img = target.querySelector("img")
img.addEventListener(
"load",
function() {
img?.closest(".imageTaskContainer").scrollIntoView()
},
{ once: true }
)
}
}
})()

View File

@@ -1,93 +1,116 @@
(function () { "use strict"
if (typeof editorModifierTagsList !== 'object') {
console.error('editorModifierTagsList missing...')
;(function() {
"use strict"
if (typeof editorModifierTagsList !== "object") {
console.error("editorModifierTagsList missing...")
return
}
const styleSheet = document.createElement("style");
const styleSheet = document.createElement("style")
styleSheet.textContent = `
.modifier-card-tiny.drag-sort-active {
background: transparent;
border: 2px dashed white;
opacity:0.2;
}
`;
document.head.appendChild(styleSheet);
`
document.head.appendChild(styleSheet)
// observe for changes in tag list
const observer = new MutationObserver(function (mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierDragAndDrop(editorModifierTagsList)
}
// })
const observer = new MutationObserver(function(mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierDragAndDrop(editorModifierTagsList)
}
// })
})
observer.observe(editorModifierTagsList, {
childList: true
childList: true,
})
let current
function ModifierDragAndDrop(target) {
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
i.parentElement.draggable = true;
let overlays = document.querySelector("#editor-inputs-tags-list").querySelectorAll(".modifier-card-overlay")
overlays.forEach((i) => {
i.parentElement.draggable = true
i.parentElement.ondragstart = (e) => {
current = i
i.parentElement.getElementsByClassName('modifier-card-image-overlay')[0].innerText = ''
i.parentElement.getElementsByClassName("modifier-card-image-overlay")[0].innerText = ""
i.parentElement.draggable = true
i.parentElement.classList.add('drag-sort-active')
for(let item of document.querySelector('#editor-inputs-tags-list').getElementsByClassName('modifier-card-image-overlay')) {
if (item.parentElement.parentElement.getElementsByClassName('modifier-card-overlay')[0] != current) {
item.parentElement.parentElement.getElementsByClassName('modifier-card-image-overlay')[0].style.opacity = 0
if(item.parentElement.getElementsByClassName('modifier-card-image').length > 0) {
item.parentElement.getElementsByClassName('modifier-card-image')[0].style.filter = 'none'
i.parentElement.classList.add("drag-sort-active")
for (let item of document
.querySelector("#editor-inputs-tags-list")
.getElementsByClassName("modifier-card-image-overlay")) {
if (
item.parentElement.parentElement.getElementsByClassName("modifier-card-overlay")[0] != current
) {
item.parentElement.parentElement.getElementsByClassName(
"modifier-card-image-overlay"
)[0].style.opacity = 0
if (item.parentElement.getElementsByClassName("modifier-card-image").length > 0) {
item.parentElement.getElementsByClassName("modifier-card-image")[0].style.filter = "none"
}
item.parentElement.parentElement.style.transform = 'none'
item.parentElement.parentElement.style.boxShadow = 'none'
item.parentElement.parentElement.style.transform = "none"
item.parentElement.parentElement.style.boxShadow = "none"
}
item.innerText = ''
item.innerText = ""
}
}
i.ondragenter = (e) => {
e.preventDefault()
if (i != current) {
let currentPos = 0, droppedPos = 0;
let currentPos = 0,
droppedPos = 0
for (let it = 0; it < overlays.length; it++) {
if (current == overlays[it]) { currentPos = it; }
if (i == overlays[it]) { droppedPos = it; }
if (current == overlays[it]) {
currentPos = it
}
if (i == overlays[it]) {
droppedPos = it
}
}
if (i.parentElement != current.parentElement) {
let currentPos = 0, droppedPos = 0
let currentPos = 0,
droppedPos = 0
for (let it = 0; it < overlays.length; it++) {
if (current == overlays[it]) { currentPos = it }
if (i == overlays[it]) { droppedPos = it }
if (current == overlays[it]) {
currentPos = it
}
if (i == overlays[it]) {
droppedPos = it
}
}
if (currentPos < droppedPos) {
current = i.parentElement.parentNode.insertBefore(current.parentElement, i.parentElement.nextSibling).getElementsByClassName('modifier-card-overlay')[0]
current = i.parentElement.parentNode
.insertBefore(current.parentElement, i.parentElement.nextSibling)
.getElementsByClassName("modifier-card-overlay")[0]
} else {
current = i.parentElement.parentNode.insertBefore(current.parentElement, i.parentElement).getElementsByClassName('modifier-card-overlay')[0]
current = i.parentElement.parentNode
.insertBefore(current.parentElement, i.parentElement)
.getElementsByClassName("modifier-card-overlay")[0]
}
// update activeTags
const tag = activeTags.splice(currentPos, 1)
activeTags.splice(droppedPos, 0, tag[0])
document.dispatchEvent(new Event('refreshImageModifiers'))
document.dispatchEvent(new Event("refreshImageModifiers"))
}
}
};
}
i.ondragover = (e) => {
e.preventDefault()
}
i.parentElement.ondragend = (e) => {
i.parentElement.classList.remove('drag-sort-active')
for(let item of document.querySelector('#editor-inputs-tags-list').getElementsByClassName('modifier-card-image-overlay')) {
item.style.opacity = ''
item.innerText = '-'
i.parentElement.classList.remove("drag-sort-active")
for (let item of document
.querySelector("#editor-inputs-tags-list")
.getElementsByClassName("modifier-card-image-overlay")) {
item.style.opacity = ""
item.innerText = "-"
}
}
})

View File

@@ -1,35 +1,37 @@
(function () {
;(function() {
"use strict"
const MAX_WEIGHT = 5
if (typeof editorModifierTagsList !== 'object') {
console.error('editorModifierTagsList missing...')
if (typeof editorModifierTagsList !== "object") {
console.error("editorModifierTagsList missing...")
return
}
// observe for changes in tag list
const observer = new MutationObserver(function (mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierMouseWheel(editorModifierTagsList)
}
// })
const observer = new MutationObserver(function(mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierMouseWheel(editorModifierTagsList)
}
// })
})
observer.observe(editorModifierTagsList, {
childList: true
childList: true,
})
function ModifierMouseWheel(target) {
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
let overlays = document.querySelector("#editor-inputs-tags-list").querySelectorAll(".modifier-card-overlay")
overlays.forEach((i) => {
i.onwheel = (e) => {
if (e.ctrlKey == true) {
e.preventDefault()
const delta = Math.sign(event.deltaY)
let s = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
let s = i.parentElement
.getElementsByClassName("modifier-card-label")[0]
.getElementsByTagName("p")[0].innerText
let t
// find the corresponding tag
for (let it = 0; it < overlays.length; it++) {
@@ -38,43 +40,40 @@
break
}
}
if (s.charAt(0) !== '(' && s.charAt(s.length - 1) !== ')' && s.trim().includes(' ')) {
s = '(' + s + ')'
t = '(' + t + ')'
if (s.charAt(0) !== "(" && s.charAt(s.length - 1) !== ")" && s.trim().includes(" ")) {
s = "(" + s + ")"
t = "(" + t + ")"
}
if (delta < 0) {
// wheel scrolling up
if (s.substring(s.length - 1) == '-') {
if (s.substring(s.length - 1) == "-") {
s = s.substring(0, s.length - 1)
t = t.substring(0, t.length - 1)
}
else
{
if (s.substring(s.length - MAX_WEIGHT) !== '+'.repeat(MAX_WEIGHT)) {
s = s + '+'
t = t + '+'
} else {
if (s.substring(s.length - MAX_WEIGHT) !== "+".repeat(MAX_WEIGHT)) {
s = s + "+"
t = t + "+"
}
}
}
else{
} else {
// wheel scrolling down
if (s.substring(s.length - 1) == '+') {
if (s.substring(s.length - 1) == "+") {
s = s.substring(0, s.length - 1)
t = t.substring(0, t.length - 1)
}
else
{
if (s.substring(s.length - MAX_WEIGHT) !== '-'.repeat(MAX_WEIGHT)) {
s = s + '-'
t = t + '-'
} else {
if (s.substring(s.length - MAX_WEIGHT) !== "-".repeat(MAX_WEIGHT)) {
s = s + "-"
t = t + "-"
}
}
}
if (s.charAt(0) === '(' && s.charAt(s.length - 1) === ')') {
if (s.charAt(0) === "(" && s.charAt(s.length - 1) === ")") {
s = s.substring(1, s.length - 1)
t = t.substring(1, t.length - 1)
}
i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText = s
i.parentElement
.getElementsByClassName("modifier-card-label")[0]
.getElementsByTagName("p")[0].innerText = s
// update activeTags
for (let it = 0; it < overlays.length; it++) {
if (i == overlays[it]) {
@@ -82,7 +81,7 @@
break
}
}
document.dispatchEvent(new Event('refreshImageModifiers'))
document.dispatchEvent(new Event("refreshImageModifiers"))
}
}
})

View File

@@ -1,31 +1,31 @@
(function() {
PLUGINS['MODIFIERS_LOAD'].push({
;(function() {
PLUGINS["MODIFIERS_LOAD"].push({
loader: function() {
let customModifiers = localStorage.getItem(CUSTOM_MODIFIERS_KEY, '')
let customModifiers = localStorage.getItem(CUSTOM_MODIFIERS_KEY, "")
customModifiersTextBox.value = customModifiers
if (customModifiersGroupElement !== undefined) {
customModifiersGroupElement.remove()
}
if (customModifiers && customModifiers.trim() !== '') {
customModifiers = customModifiers.split('\n')
customModifiers = customModifiers.filter(m => m.trim() !== '')
if (customModifiers && customModifiers.trim() !== "") {
customModifiers = customModifiers.split("\n")
customModifiers = customModifiers.filter((m) => m.trim() !== "")
customModifiers = customModifiers.map(function(m) {
return {
"modifier": m
modifier: m,
}
})
let customGroup = {
'category': 'Custom Modifiers',
'modifiers': customModifiers
category: "Custom Modifiers",
modifiers: customModifiers,
}
customModifiersGroupElement = createModifierGroup(customGroup, true)
createCollapsibles(customModifiersGroupElement)
}
}
},
})
})()

View File

@@ -26,39 +26,39 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
after `jasmine.js` and `jasmine_html.js`, but before `boot1.js` or any project
source files or spec files are loaded.
*/
(function() {
const jasmineRequire = window.jasmineRequire || require('./jasmine.js');
;(function() {
const jasmineRequire = window.jasmineRequire || require("./jasmine.js")
/**
* ## Require &amp; Instantiate
*
* Require Jasmine's core files. Specifically, this requires and attaches all of Jasmine's code to the `jasmine` reference.
*/
const jasmine = jasmineRequire.core(jasmineRequire),
global = jasmine.getGlobal();
global.jasmine = jasmine;
/**
* ## Require &amp; Instantiate
*
* Require Jasmine's core files. Specifically, this requires and attaches all of Jasmine's code to the `jasmine` reference.
*/
const jasmine = jasmineRequire.core(jasmineRequire),
global = jasmine.getGlobal()
global.jasmine = jasmine
/**
* Since this is being run in a browser and the results should populate to an HTML page, require the HTML-specific Jasmine code, injecting the same reference.
*/
jasmineRequire.html(jasmine);
/**
* Since this is being run in a browser and the results should populate to an HTML page, require the HTML-specific Jasmine code, injecting the same reference.
*/
jasmineRequire.html(jasmine)
/**
* Create the Jasmine environment. This is used to run all specs in a project.
*/
const env = jasmine.getEnv();
/**
* Create the Jasmine environment. This is used to run all specs in a project.
*/
const env = jasmine.getEnv()
/**
* ## The Global Interface
*
* Build up the functions that will be exposed as the Jasmine public interface. A project can customize, rename or alias any of these functions as desired, provided the implementation remains unchanged.
*/
const jasmineInterface = jasmineRequire.interface(jasmine, env);
/**
* ## The Global Interface
*
* Build up the functions that will be exposed as the Jasmine public interface. A project can customize, rename or alias any of these functions as desired, provided the implementation remains unchanged.
*/
const jasmineInterface = jasmineRequire.interface(jasmine, env)
/**
* Add all of the Jasmine global/public interface to the global scope, so a project can use the public interface directly. For example, calling `describe` in specs instead of `jasmine.getEnv().describe`.
*/
for (const property in jasmineInterface) {
global[property] = jasmineInterface[property];
}
})();
/**
* Add all of the Jasmine global/public interface to the global scope, so a project can use the public interface directly. For example, calling `describe` in specs instead of `jasmine.getEnv().describe`.
*/
for (const property in jasmineInterface) {
global[property] = jasmineInterface[property]
}
})()
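
In plainer terms, the final loop in this boot file copies every member of the Jasmine interface onto the global object, which is why the spec files later in this changeset can call describe, it and expect without the jasmine.getEnv() prefix. A minimal standalone sketch of that pattern, with a stand-in interface object rather than Jasmine's real one:

// Sketch of the "global interface" step: copy every property of an interface
// object onto a global container so callers can use the functions unqualified.
// The interface below is a stand-in used only for illustration.
const globalScope = {}
const interfaceSketch = {
    describe: (name, fn) => fn(),
    it: (name, fn) => fn(),
    expect: (actual) => ({ toBe: (expected) => actual === expected }),
}
for (const property in interfaceSketch) {
    globalScope[property] = interfaceSketch[property]
}
// globalScope.describe(...) now works directly, mirroring how window.describe
// becomes available to the spec files loaded after boot.
globalScope.describe("sketch", () => globalScope.it("runs", () => {}))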

View File

@@ -33,100 +33,98 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
after `boot0.js` is loaded and before this file is loaded.
*/
(function() {
const env = jasmine.getEnv();
;(function() {
const env = jasmine.getEnv()
/**
* ## Runner Parameters
*
* More browser specific code - wrap the query string in an object and to allow for getting/setting parameters from the runner user interface.
*/
/**
* ## Runner Parameters
*
* More browser specific code - wrap the query string in an object and to allow for getting/setting parameters from the runner user interface.
*/
const queryString = new jasmine.QueryString({
getWindowLocation: function() {
return window.location;
const queryString = new jasmine.QueryString({
getWindowLocation: function() {
return window.location
},
})
const filterSpecs = !!queryString.getParam("spec")
const config = {
stopOnSpecFailure: queryString.getParam("stopOnSpecFailure"),
stopSpecOnExpectationFailure: queryString.getParam("stopSpecOnExpectationFailure"),
hideDisabled: queryString.getParam("hideDisabled"),
}
});
const filterSpecs = !!queryString.getParam('spec');
const random = queryString.getParam("random")
const config = {
stopOnSpecFailure: queryString.getParam('stopOnSpecFailure'),
stopSpecOnExpectationFailure: queryString.getParam(
'stopSpecOnExpectationFailure'
),
hideDisabled: queryString.getParam('hideDisabled')
};
const random = queryString.getParam('random');
if (random !== undefined && random !== '') {
config.random = random;
}
const seed = queryString.getParam('seed');
if (seed) {
config.seed = seed;
}
/**
* ## Reporters
* The `HtmlReporter` builds all of the HTML UI for the runner page. This reporter paints the dots, stars, and x's for specs, as well as all spec names and all failures (if any).
*/
const htmlReporter = new jasmine.HtmlReporter({
env: env,
navigateWithNewParam: function(key, value) {
return queryString.navigateWithNewParam(key, value);
},
addToExistingQueryString: function(key, value) {
return queryString.fullStringWithNewParam(key, value);
},
getContainer: function() {
return document.body;
},
createElement: function() {
return document.createElement.apply(document, arguments);
},
createTextNode: function() {
return document.createTextNode.apply(document, arguments);
},
timer: new jasmine.Timer(),
filterSpecs: filterSpecs
});
/**
* The `jsApiReporter` also receives spec results, and is used by any environment that needs to extract the results from JavaScript.
*/
env.addReporter(jsApiReporter);
env.addReporter(htmlReporter);
/**
* Filter which specs will be run by matching the start of the full name against the `spec` query param.
*/
const specFilter = new jasmine.HtmlSpecFilter({
filterString: function() {
return queryString.getParam('spec');
if (random !== undefined && random !== "") {
config.random = random
}
});
config.specFilter = function(spec) {
return specFilter.matches(spec.getFullName());
};
env.configure(config);
/**
* ## Execution
*
* Replace the browser window's `onload`, ensure it's called, and then run all of the loaded specs. This includes initializing the `HtmlReporter` instance and then executing the loaded Jasmine environment. All of this will happen after all of the specs are loaded.
*/
const currentWindowOnload = window.onload;
window.onload = function() {
if (currentWindowOnload) {
currentWindowOnload();
const seed = queryString.getParam("seed")
if (seed) {
config.seed = seed
}
htmlReporter.initialize();
env.execute();
};
})();
/**
* ## Reporters
* The `HtmlReporter` builds all of the HTML UI for the runner page. This reporter paints the dots, stars, and x's for specs, as well as all spec names and all failures (if any).
*/
const htmlReporter = new jasmine.HtmlReporter({
env: env,
navigateWithNewParam: function(key, value) {
return queryString.navigateWithNewParam(key, value)
},
addToExistingQueryString: function(key, value) {
return queryString.fullStringWithNewParam(key, value)
},
getContainer: function() {
return document.body
},
createElement: function() {
return document.createElement.apply(document, arguments)
},
createTextNode: function() {
return document.createTextNode.apply(document, arguments)
},
timer: new jasmine.Timer(),
filterSpecs: filterSpecs,
})
/**
* The `jsApiReporter` also receives spec results, and is used by any environment that needs to extract the results from JavaScript.
*/
env.addReporter(jsApiReporter)
env.addReporter(htmlReporter)
/**
* Filter which specs will be run by matching the start of the full name against the `spec` query param.
*/
const specFilter = new jasmine.HtmlSpecFilter({
filterString: function() {
return queryString.getParam("spec")
},
})
config.specFilter = function(spec) {
return specFilter.matches(spec.getFullName())
}
env.configure(config)
/**
* ## Execution
*
* Replace the browser window's `onload`, ensure it's called, and then run all of the loaded specs. This includes initializing the `HtmlReporter` instance and then executing the loaded Jasmine environment. All of this will happen after all of the specs are loaded.
*/
const currentWindowOnload = window.onload
window.onload = function() {
if (currentWindowOnload) {
currentWindowOnload()
}
htmlReporter.initialize()
env.execute()
}
})()
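
All of the runner options above come from the page's query string. A rough, self-contained illustration of that mapping using URLSearchParams (the URL and seed are only examples, and jasmine.QueryString performs extra type coercion that this sketch skips):

// Rough illustration of how the boot file maps query parameters onto the
// Jasmine config. The example URL is hypothetical.
const exampleUrl = "SpecRunner.html?spec=stable-diffusion-ui&stopOnSpecFailure=false&random=false&seed=12345"
const params = new URLSearchParams(exampleUrl.split("?")[1])
const filterSpecs = !!params.get("spec") // true: only specs whose full name starts with "stable-diffusion-ui" will run
const config = {
    stopOnSpecFailure: params.get("stopOnSpecFailure"),
    stopSpecOnExpectationFailure: params.get("stopSpecOnExpectationFailure"),
    hideDisabled: params.get("hideDisabled"),
}
if (params.get("random") !== null && params.get("random") !== "") {
    config.random = params.get("random")
}
if (params.get("seed")) {
    config.seed = params.get("seed")
}
console.log(filterSpecs, config)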

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -2,34 +2,34 @@
const JASMINE_SESSION_ID = `jasmine-${String(Date.now()).slice(8)}`
beforeEach(function () {
beforeEach(function() {
jasmine.DEFAULT_TIMEOUT_INTERVAL = 15 * 60 * 1000 // Test timeout after 15 minutes
jasmine.addMatchers({
toBeOneOf: function () {
toBeOneOf: function() {
return {
compare: function (actual, expected) {
compare: function(actual, expected) {
return {
pass: expected.includes(actual)
pass: expected.includes(actual),
}
}
},
}
}
},
})
})
describe('stable-diffusion-ui', function() {
describe("stable-diffusion-ui", function() {
beforeEach(function() {
expect(typeof SD).toBe('object')
expect(typeof SD.serverState).toBe('object')
expect(typeof SD.serverState.status).toBe('string')
expect(typeof SD).toBe("object")
expect(typeof SD.serverState).toBe("object")
expect(typeof SD.serverState.status).toBe("string")
})
it('should be able to reach the backend', async function() {
it("should be able to reach the backend", async function() {
expect(SD.serverState.status).toBe(SD.ServerStates.unavailable)
SD.sessionId = JASMINE_SESSION_ID
await SD.init()
expect(SD.isServerAvailable()).toBeTrue()
})
it('enforce the current task state', function() {
it("enforce the current task state", function() {
const task = new SD.Task()
expect(task.status).toBe(SD.TaskStatus.init)
expect(task.isPending).toBeTrue()
@@ -65,149 +65,161 @@ describe('stable-diffusion-ui', function() {
task._setStatus(SD.TaskStatus.completed)
}).toThrowError()
})
it('should be able to run tasks', async function() {
expect(typeof SD.Task.run).toBe('function')
it("should be able to run tasks", async function() {
expect(typeof SD.Task.run).toBe("function")
const promiseGenerator = (function*(val) {
expect(val).toBe('start')
expect(val).toBe("start")
expect(yield 1 + 1).toBe(4)
expect(yield 2 + 2).toBe(8)
yield asyncDelay(500)
expect(yield 3 + 3).toBe(12)
expect(yield 4 + 4).toBe(16)
return 8 + 8
})('start')
const callback = function({value, done}) {
return {value: 2 * value, done}
})("start")
const callback = function({ value, done }) {
return { value: 2 * value, done }
}
expect(await SD.Task.run(promiseGenerator, {callback})).toBe(32)
expect(await SD.Task.run(promiseGenerator, { callback })).toBe(32)
})
it('should be able to queue tasks', async function() {
expect(typeof SD.Task.enqueue).toBe('function')
it("should be able to queue tasks", async function() {
expect(typeof SD.Task.enqueue).toBe("function")
const promiseGenerator = (function*(val) {
expect(val).toBe('start')
expect(val).toBe("start")
expect(yield 1 + 1).toBe(4)
expect(yield 2 + 2).toBe(8)
yield asyncDelay(500)
expect(yield 3 + 3).toBe(12)
expect(yield 4 + 4).toBe(16)
return 8 + 8
})('start')
const callback = function({value, done}) {
return {value: 2 * value, done}
})("start")
const callback = function({ value, done }) {
return { value: 2 * value, done }
}
const gen = SD.Task.asGenerator({generator: promiseGenerator, callback})
const gen = SD.Task.asGenerator({ generator: promiseGenerator, callback })
expect(await SD.Task.enqueue(gen)).toBe(32)
})
it('should be able to chain handlers', async function() {
expect(typeof SD.Task.enqueue).toBe('function')
it("should be able to chain handlers", async function() {
expect(typeof SD.Task.enqueue).toBe("function")
const promiseGenerator = (function*(val) {
expect(val).toBe('start')
expect(yield {test: '1'}).toEqual({test: '1', foo: 'bar'})
expect(val).toBe("start")
expect(yield { test: "1" }).toEqual({ test: "1", foo: "bar" })
expect(yield 2 + 2).toEqual(8)
yield asyncDelay(500)
expect(yield 3 + 3).toEqual(12)
expect(yield {test: 4}).toEqual({test: 8, foo: 'bar'})
return {test: 8}
})('start')
const gen1 = SD.Task.asGenerator({generator: promiseGenerator, callback: function({value, done}) {
if (typeof value === "object") {
value['foo'] = 'bar'
}
return {value, done}
}})
const gen2 = SD.Task.asGenerator({generator: gen1, callback: function({value, done}) {
if (typeof value === 'number') {
value = 2 * value
}
if (typeof value === 'object' && typeof value.test === 'number') {
value.test = 2 * value.test
}
return {value, done}
}})
expect(await SD.Task.enqueue(gen2)).toEqual({test:32, foo: 'bar'})
expect(yield { test: 4 }).toEqual({ test: 8, foo: "bar" })
return { test: 8 }
})("start")
const gen1 = SD.Task.asGenerator({
generator: promiseGenerator,
callback: function({ value, done }) {
if (typeof value === "object") {
value["foo"] = "bar"
}
return { value, done }
},
})
const gen2 = SD.Task.asGenerator({
generator: gen1,
callback: function({ value, done }) {
if (typeof value === "number") {
value = 2 * value
}
if (typeof value === "object" && typeof value.test === "number") {
value.test = 2 * value.test
}
return { value, done }
},
})
expect(await SD.Task.enqueue(gen2)).toEqual({ test: 32, foo: "bar" })
})
describe('ServiceContainer', function() {
it('should be able to register providers', function() {
describe("ServiceContainer", function() {
it("should be able to register providers", function() {
const cont = new ServiceContainer(
function foo() {
this.bar = ''
this.bar = ""
},
function bar() {
return () => 0
},
{ name: 'zero', definition: 0 },
{ name: 'ctx', definition: () => Object.create(null), singleton: true },
{ name: 'test',
{ name: "zero", definition: 0 },
{ name: "ctx", definition: () => Object.create(null), singleton: true },
{
name: "test",
definition: (ctx, missing, one, foo) => {
expect(ctx).toEqual({ran: true})
expect(ctx).toEqual({ ran: true })
expect(one).toBe(1)
expect(typeof foo).toBe('object')
expect(typeof foo).toBe("object")
expect(foo.bar).toBeDefined()
expect(typeof missing).toBe('undefined')
return {foo: 'bar'}
}, dependencies: ['ctx', 'missing', 'one', 'foo']
expect(typeof missing).toBe("undefined")
return { foo: "bar" }
},
dependencies: ["ctx", "missing", "one", "foo"],
}
)
const fooObj = cont.get('foo')
expect(typeof fooObj).toBe('object')
const fooObj = cont.get("foo")
expect(typeof fooObj).toBe("object")
fooObj.ran = true
const ctx = cont.get('ctx')
const ctx = cont.get("ctx")
expect(ctx).toEqual({})
ctx.ran = true
const bar = cont.get('bar')
expect(typeof bar).toBe('function')
const bar = cont.get("bar")
expect(typeof bar).toBe("function")
expect(bar()).toBe(0)
cont.register({name: 'one', definition: 1})
const test = cont.get('test')
expect(typeof test).toBe('object')
expect(test.foo).toBe('bar')
cont.register({ name: "one", definition: 1 })
const test = cont.get("test")
expect(typeof test).toBe("object")
expect(test.foo).toBe("bar")
})
})
it('should be able to stream data in chunks', async function() {
it("should be able to stream data in chunks", async function() {
expect(SD.isServerAvailable()).toBeTrue()
const nbr_steps = 15
let res = await fetch('/render', {
method: 'POST',
let res = await fetch("/render", {
method: "POST",
headers: {
'Content-Type': 'application/json'
"Content-Type": "application/json",
},
body: JSON.stringify({
"prompt": "a photograph of an astronaut riding a horse",
"negative_prompt": "",
"width": 128,
"height": 128,
"seed": Math.floor(Math.random() * 10000000),
prompt: "a photograph of an astronaut riding a horse",
negative_prompt: "",
width: 128,
height: 128,
seed: Math.floor(Math.random() * 10000000),
"sampler": "plms",
"use_stable_diffusion_model": "sd-v1-4",
"num_inference_steps": nbr_steps,
"guidance_scale": 7.5,
sampler: "plms",
use_stable_diffusion_model: "sd-v1-4",
num_inference_steps: nbr_steps,
guidance_scale: 7.5,
"numOutputsParallel": 1,
"stream_image_progress": true,
"show_only_filtered_image": true,
"output_format": "jpeg",
numOutputsParallel: 1,
stream_image_progress: true,
show_only_filtered_image: true,
output_format: "jpeg",
"session_id": JASMINE_SESSION_ID,
session_id: JASMINE_SESSION_ID,
}),
})
expect(res.ok).toBeTruthy()
const renderRequest = await res.json()
expect(typeof renderRequest.stream).toBe('string')
expect(typeof renderRequest.stream).toBe("string")
expect(renderRequest.task).toBeDefined()
// Wait for server status to update.
await SD.waitUntil(() => {
console.log('Waiting for %s to be received...', renderRequest.task)
return (!SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)])
}, 250, 10 * 60 * 1000)
await SD.waitUntil(
() => {
console.log("Waiting for %s to be received...", renderRequest.task)
return !SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)]
},
250,
10 * 60 * 1000
)
// Wait for task to start on server.
await SD.waitUntil(() => {
console.log('Waiting for %s to start...', renderRequest.task)
return !SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)] !== 'pending'
console.log("Waiting for %s to start...", renderRequest.task)
return !SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)] !== "pending"
}, 250)
const reader = new SD.ChunkedStreamReader(renderRequest.stream)
@@ -217,24 +229,24 @@ describe('stable-diffusion-ui', function() {
if (!value || value.length <= 0) {
return
}
return reader.readStreamAsJSON(value.join(''))
return reader.readStreamAsJSON(value.join(""))
}
reader.onNext = function({done, value}) {
reader.onNext = function({ done, value }) {
console.log(value)
if (typeof value === 'object' && 'status' in value) {
if (typeof value === "object" && "status" in value) {
done = true
}
return {done, value}
return { done, value }
}
let lastUpdate = undefined
let stepCount = 0
let complete = false
//for await (const stepUpdate of reader) {
for await (const stepUpdate of reader.open()) {
console.log('ChunkedStreamReader received ', stepUpdate)
console.log("ChunkedStreamReader received ", stepUpdate)
lastUpdate = stepUpdate
if (complete) {
expect(stepUpdate.status).toBe('succeeded')
expect(stepUpdate.status).toBe("succeeded")
expect(stepUpdate.output).toHaveSize(1)
} else {
expect(stepUpdate.total_steps).toBe(nbr_steps)
@@ -246,70 +258,76 @@ describe('stable-diffusion-ui', function() {
}
}
}
for(let i=1; i <= 5; ++i) {
for (let i = 1; i <= 5; ++i) {
res = await fetch(renderRequest.stream)
expect(res.ok).toBeTruthy()
const cachedResponse = await res.json()
console.log('Cache test %s received %o', i, cachedResponse)
console.log("Cache test %s received %o", i, cachedResponse)
expect(lastUpdate).toEqual(cachedResponse)
}
})
describe('should be able to make renders', function() {
describe("should be able to make renders", function() {
beforeEach(function() {
expect(SD.isServerAvailable()).toBeTrue()
})
it('basic inline request', async function() {
it("basic inline request", async function() {
let stepCount = 0
let complete = false
const result = await SD.render({
"prompt": "a photograph of an astronaut riding a horse",
"width": 128,
"height": 128,
"num_inference_steps": 10,
"show_only_filtered_image": false,
//"use_face_correction": 'GFPGANv1.3',
"use_upscale": "RealESRGAN_x4plus",
"session_id": JASMINE_SESSION_ID,
}, function(event) {
console.log(this, event)
if ('update' in event) {
const stepUpdate = event.update
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
expect(stepUpdate.status).toBe('succeeded')
expect(stepUpdate.output).toHaveSize(2)
} else {
expect(stepUpdate.step).toBe(stepCount)
if (stepUpdate.step === stepUpdate.total_steps) {
complete = true
const result = await SD.render(
{
prompt: "a photograph of an astronaut riding a horse",
width: 128,
height: 128,
num_inference_steps: 10,
show_only_filtered_image: false,
//"use_face_correction": 'GFPGANv1.3',
use_upscale: "RealESRGAN_x4plus",
session_id: JASMINE_SESSION_ID,
},
function(event) {
console.log(this, event)
if ("update" in event) {
const stepUpdate = event.update
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
expect(stepUpdate.status).toBe("succeeded")
expect(stepUpdate.output).toHaveSize(2)
} else {
stepCount++
expect(stepUpdate.step).toBe(stepCount)
if (stepUpdate.step === stepUpdate.total_steps) {
complete = true
} else {
stepCount++
}
}
}
}
})
)
console.log(result)
expect(result.status).toBe('succeeded')
expect(result.status).toBe("succeeded")
expect(result.output).toHaveSize(2)
})
it('post and reader request', async function() {
it("post and reader request", async function() {
const renderTask = new SD.RenderTask({
"prompt": "a photograph of an astronaut riding a horse",
"width": 128,
"height": 128,
"seed": SD.MAX_SEED_VALUE,
"num_inference_steps": 10,
"session_id": JASMINE_SESSION_ID,
prompt: "a photograph of an astronaut riding a horse",
width: 128,
height: 128,
seed: SD.MAX_SEED_VALUE,
num_inference_steps: 10,
session_id: JASMINE_SESSION_ID,
})
expect(renderTask.status).toBe(SD.TaskStatus.init)
const timeout = -1
const renderRequest = await renderTask.post(timeout)
expect(typeof renderRequest.stream).toBe('string')
expect(typeof renderRequest.stream).toBe("string")
expect(renderTask.status).toBe(SD.TaskStatus.waiting)
expect(renderTask.streamUrl).toBe(renderRequest.stream)
await renderTask.waitUntil({state: SD.TaskStatus.processing, callback: () => console.log('Waiting for render task to start...') })
await renderTask.waitUntil({
state: SD.TaskStatus.processing,
callback: () => console.log("Waiting for render task to start..."),
})
expect(renderTask.status).toBe(SD.TaskStatus.processing)
let stepCount = 0
@@ -318,7 +336,7 @@ describe('stable-diffusion-ui', function() {
for await (const stepUpdate of renderTask.reader.open()) {
console.log(stepUpdate)
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
expect(stepUpdate.status).toBe('succeeded')
expect(stepUpdate.status).toBe("succeeded")
expect(stepUpdate.output).toHaveSize(1)
} else {
expect(stepUpdate.step).toBe(stepCount)
@@ -330,28 +348,28 @@ describe('stable-diffusion-ui', function() {
}
}
expect(renderTask.status).toBe(SD.TaskStatus.completed)
expect(renderTask.result.status).toBe('succeeded')
expect(renderTask.result.status).toBe("succeeded")
expect(renderTask.result.output).toHaveSize(1)
})
it('queued request', async function() {
it("queued request", async function() {
let stepCount = 0
let complete = false
const renderTask = new SD.RenderTask({
"prompt": "a photograph of an astronaut riding a horse",
"width": 128,
"height": 128,
"num_inference_steps": 10,
"show_only_filtered_image": false,
prompt: "a photograph of an astronaut riding a horse",
width: 128,
height: 128,
num_inference_steps: 10,
show_only_filtered_image: false,
//"use_face_correction": 'GFPGANv1.3',
"use_upscale": "RealESRGAN_x4plus",
"session_id": JASMINE_SESSION_ID,
use_upscale: "RealESRGAN_x4plus",
session_id: JASMINE_SESSION_ID,
})
await renderTask.enqueue(function(event) {
console.log(this, event)
if ('update' in event) {
if ("update" in event) {
const stepUpdate = event.update
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
expect(stepUpdate.status).toBe('succeeded')
expect(stepUpdate.status).toBe("succeeded")
expect(stepUpdate.output).toHaveSize(2)
} else {
expect(stepUpdate.step).toBe(stepCount)
@@ -364,12 +382,12 @@ describe('stable-diffusion-ui', function() {
}
})
console.log(renderTask.result)
expect(renderTask.result.status).toBe('succeeded')
expect(renderTask.result.status).toBe("succeeded")
expect(renderTask.result.output).toHaveSize(2)
})
})
describe('# Special cases', function() {
it('should throw an exception on set for invalid sessionId', function() {
describe("# Special cases", function() {
it("should throw an exception on set for invalid sessionId", function() {
expect(function() {
SD.sessionId = undefined
}).toThrowError("Can't set sessionId to undefined.")
@@ -386,16 +404,17 @@ if (!PLUGINS.SELFTEST) {
PLUGINS.SELFTEST = {}
}
loadUIPlugins().then(function() {
console.log('loadCompleted', loadEvent)
describe('@Plugins', function() {
it('exposes hooks to override', function() {
expect(typeof PLUGINS.IMAGE_INFO_BUTTONS).toBe('object')
expect(typeof PLUGINS.TASK_CREATE).toBe('object')
console.log("loadCompleted", loadEvent)
describe("@Plugins", function() {
it("exposes hooks to overide", function() {
expect(typeof PLUGINS.IMAGE_INFO_BUTTONS).toBe("object")
expect(typeof PLUGINS.TASK_CREATE).toBe("object")
})
describe('supports selftests', function() { // Hook to allow plugins to define tests.
describe("supports selftests", function() {
// Hook to allow plugins to define tests.
const pluginsTests = Object.keys(PLUGINS.SELFTEST).filter((key) => PLUGINS.SELFTEST.hasOwnProperty(key))
if (!pluginsTests || pluginsTests.length <= 0) {
it('but nothing loaded...', function() {
it("but nothing loaded...", function() {
expect(true).toBeTruthy()
})
return
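
Taken together, the specs above exercise the whole client flow: POST /render, wait for the task to show up in SD.serverState, then consume progress updates until a final "succeeded" update that carries the output array. Below is a trimmed-down sketch of the high-level call used by the "basic inline request" test; the prompt and session_id are placeholders, the real test passes more fields, and the SD client object must already be loaded with a reachable server:

// Trimmed-down sketch of the SD.render() usage exercised by the specs above.
async function renderSketch() {
    const result = await SD.render(
        {
            prompt: "a photograph of an astronaut riding a horse",
            width: 128,
            height: 128,
            num_inference_steps: 10,
            session_id: "sketch-session", // placeholder session id
        },
        function(event) {
            // Progress events arrive as { update: { step, total_steps, ... } };
            // the final update carries status "succeeded" and the output array.
            if ("update" in event) {
                console.log("step", event.update.step, "of", event.update.total_steps)
            }
        }
    )
    console.log(result.status, result.output)
}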

View File

@@ -1,4 +1,4 @@
(function() {
;(function() {
"use strict"
///////////////////// Function section
@@ -18,146 +18,133 @@
return y
}
function getCurrentTime() {
const now = new Date();
let hours = now.getHours();
let minutes = now.getMinutes();
let seconds = now.getSeconds();
const now = new Date()
let hours = now.getHours()
let minutes = now.getMinutes()
let seconds = now.getSeconds()
hours = hours < 10 ? `0${hours}` : hours;
minutes = minutes < 10 ? `0${minutes}` : minutes;
seconds = seconds < 10 ? `0${seconds}` : seconds;
hours = hours < 10 ? `0${hours}` : hours
minutes = minutes < 10 ? `0${minutes}` : minutes
seconds = seconds < 10 ? `0${seconds}` : seconds
return `${hours}:${minutes}:${seconds}`;
return `${hours}:${minutes}:${seconds}`
}
function addLogMessage(message) {
const logContainer = document.getElementById('merge-log');
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`;
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`
// Scroll to the bottom of the log
logContainer.scrollTop = logContainer.scrollHeight;
logContainer.scrollTop = logContainer.scrollHeight
document.querySelector('#merge-log-container').style.display = 'block'
}
document.querySelector("#merge-log-container").style.display = "block"
}
function addLogSeparator() {
const logContainer = document.getElementById('merge-log');
logContainer.innerHTML += '<hr>'
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += "<hr>"
logContainer.scrollTop = logContainer.scrollHeight;
logContainer.scrollTop = logContainer.scrollHeight
}
function drawDiagram(fn) {
const SIZE = 300
const canvas = document.getElementById('merge-canvas');
const canvas = document.getElementById("merge-canvas")
canvas.height = canvas.width = SIZE
const ctx = canvas.getContext('2d');
const ctx = canvas.getContext("2d")
// Draw coordinate system
ctx.scale(1, -1);
ctx.translate(0, -canvas.height);
ctx.lineWidth = 1;
ctx.beginPath();
ctx.scale(1, -1)
ctx.translate(0, -canvas.height)
ctx.lineWidth = 1
ctx.beginPath()
ctx.strokeStyle = 'white'
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
ctx.strokeStyle = "white"
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([1,2])
ctx.setLineDash([1, 2])
const n = SIZE / 10
for (let i=n; i<SIZE; i+=n) {
ctx.moveTo(0,i)
ctx.lineTo(SIZE,i)
ctx.moveTo(i,0)
ctx.lineTo(i,SIZE)
for (let i = n; i < SIZE; i += n) {
ctx.moveTo(0, i)
ctx.lineTo(SIZE, i)
ctx.moveTo(i, 0)
ctx.lineTo(i, SIZE)
}
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([])
ctx.beginPath();
ctx.strokeStyle = 'black'
ctx.lineWidth = 3;
ctx.beginPath()
ctx.strokeStyle = "black"
ctx.lineWidth = 3
// Plot function
const numSamples = 20;
const numSamples = 20
for (let i = 0; i <= numSamples; i++) {
const x = i / numSamples;
const y = fn(x);
const canvasX = x * SIZE;
const canvasY = y * SIZE;
const x = i / numSamples
const y = fn(x)
const canvasX = x * SIZE
const canvasY = y * SIZE
if (i === 0) {
ctx.moveTo(canvasX, canvasY);
ctx.moveTo(canvasX, canvasY)
} else {
ctx.lineTo(canvasX, canvasY);
ctx.lineTo(canvasX, canvasY)
}
}
ctx.stroke()
// Plot alpha values (yellow boxes)
let start = parseFloat( document.querySelector('#merge-start').value )
let step = parseFloat( document.querySelector('#merge-step').value )
let iterations = document.querySelector('#merge-count').value>>0
let start = parseFloat(document.querySelector("#merge-start").value)
let step = parseFloat(document.querySelector("#merge-step").value)
let iterations = document.querySelector("#merge-count").value >> 0
ctx.beginPath()
ctx.fillStyle = "yellow"
for (let i=0; i< iterations; i++) {
const alpha = ( start + i * step ) / 100
const x = alpha*SIZE
for (let i = 0; i < iterations; i++) {
const alpha = (start + i * step) / 100
const x = alpha * SIZE
const y = fn(alpha) * SIZE
if (x <= SIZE) {
ctx.rect(x-3,y-3,6,6)
ctx.rect(x - 3, y - 3, 6, 6)
ctx.fill()
} else {
ctx.strokeStyle = 'red'
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
ctx.strokeStyle = "red"
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
addLogMessage('<i>Warning: maximum ratio is &#8805; 100%</i>')
addLogMessage("<i>Warning: maximum ratio is &#8805; 100%</i>")
}
}
}
function updateChart() {
let fn = (x) => x
switch (document.querySelector('#merge-interpolation').value) {
case 'SmoothStep':
switch (document.querySelector("#merge-interpolation").value) {
case "SmoothStep":
fn = smoothstep
break
case 'SmootherStep':
case "SmootherStep":
fn = smootherstep
break
case 'SmoothestStep':
case "SmoothestStep":
fn = smootheststep
break
}
drawDiagram(fn)
}
/////////////////////// Tab implementation
document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
<span id="tab-merge" class="tab">
<span><i class="fa fa-code-merge icon"></i> Merge models</span>
</span>
`)
document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
<div id="tab-content-merge" class="tab-content">
<div id="merge" class="tab-content-inner">
Loading..
</div>
</div>
`)
const tabMerge = document.querySelector('#tab-merge')
if (tabMerge) {
linkTabContents(tabMerge)
}
const merge = document.querySelector('#merge')
if (!merge) {
// merge tab not found, dont exec plugin code.
return
}
document.querySelector('body').insertAdjacentHTML('beforeend', `
<style>
createTab({
id: "merge",
icon: "fa-code-merge",
label: "Merge models",
css: `
#tab-content-merge .tab-content-inner {
max-width: 100%;
padding: 10pt;
@@ -233,226 +220,235 @@
}
.merge-container #merge-warning {
color: rgb(153, 153, 153);
}
</style>
`)
merge.innerHTML = `
<div class="merge-container panel-box">
<div class="merge-input">
<p><label for="#mergeModelA">Select Model A:</label></p>
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<p><label for="#mergeModelB">Select Model B:</label></p>
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/><br/>
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
<br/>
<table>
<tr>
<td><label for="#merge-filename">Output file name:</label></td>
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
</tr>
<tr>
<td><label for="#merge-fp">Output precision:</label></td>
<td><select id="merge-fp">
<option value="fp16">fp16 (smaller file size)</option>
<option value="fp32">fp32 (larger file size)</option>
</select>
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
</td>
</tr>
<tr>
<td><label for="#merge-format">Output file format:</label></td>
<td><select id="merge-format">
<option value="safetensors">Safetensors (recommended)</option>
<option value="ckpt">CKPT/Pickle (legacy format)</option>
</select>
</td>
</tr>
</table>
<br/>
<div id="merge-log-container">
<p><label for="#merge-log">Log messages:</label></p>
<div id="merge-log"></div>
</div>
</div>
<div class="merge-config">
<div class="tab-container">
<span id="tab-merge-opts-single" class="tab active">
<span>Make a single file</span>
</span>
<span id="tab-merge-opts-batch" class="tab">
<span>Make multiple variations</span>
</span>
</div>
<div>
<div id="tab-content-merge-opts-single" class="tab-content active">
<div class="tab-content-inner">
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
<label for="#single-merge-ratio-slider">Merge ratio:</label>
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
<input id="single-merge-ratio" size=2 value="5">%
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
}`,
content: `
<div class="merge-container panel-box">
<div class="merge-input">
<p><label for="#mergeModelA">Select Model A:</label></p>
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<p><label for="#mergeModelB">Select Model B:</label></p>
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/><br/>
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
<br/>
<table>
<tr>
<td><label for="#merge-filename">Output file name:</label></td>
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
</tr>
<tr>
<td><label for="#merge-fp">Output precision:</label></td>
<td><select id="merge-fp">
<option value="fp16">fp16 (smaller file size)</option>
<option value="fp32">fp32 (larger file size)</option>
</select>
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
</td>
</tr>
<tr>
<td><label for="#merge-format">Output file format:</label></td>
<td><select id="merge-format">
<option value="safetensors">Safetensors (recommended)</option>
<option value="ckpt">CKPT/Pickle (legacy format)</option>
</select>
</td>
</tr>
</table>
<br/>
<div id="merge-log-container">
<p><label for="#merge-log">Log messages:</label></p>
<div id="merge-log"></div>
</div>
</div>
<div class="merge-config">
<div class="tab-container">
<span id="tab-merge-opts-single" class="tab active">
<span>Make a single file</span>
</span>
<span id="tab-merge-opts-batch" class="tab">
<span>Make multiple variations</span>
</span>
</div>
<div>
<div id="tab-content-merge-opts-single" class="tab-content active">
<div class="tab-content-inner">
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
<label for="#single-merge-ratio-slider">Merge ratio:</label>
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
<input id="single-merge-ratio" size=2 value="5">%
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
</div>
</div>
</div>
<div id="tab-content-merge-opts-batch" class="tab-content">
<div class="tab-content-inner">
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
<table>
<tr><td><label for="#merge-count">Number of variations:</label></td>
<td> <input id="merge-count" size=2 value="5"></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
<td> <input id="merge-start" size=2 value="5">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
<tr><td><label for="#merge-step">Increment each step:</label></td>
<td> <input id="merge-step" size=2 value="10">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
<td> <select id="merge-interpolation">
<option>Exact</option>
<option>SmoothStep</option>
<option>SmootherStep</option>
<option>SmoothestStep</option>
</select></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
</table>
<br/>
<small>Preview of variation ratios:</small><br/>
<canvas id="merge-canvas" width="400" height="400"></canvas>
<div id="tab-content-merge-opts-batch" class="tab-content">
<div class="tab-content-inner">
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
<table>
<tr><td><label for="#merge-count">Number of variations:</label></td>
<td> <input id="merge-count" size=2 value="5"></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
<td> <input id="merge-start" size=2 value="5">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
<tr><td><label for="#merge-step">Increment each step:</label></td>
<td> <input id="merge-step" size=2 value="10">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
<td> <select id="merge-interpolation">
<option>Exact</option>
<option>SmoothStep</option>
<option>SmootherStep</option>
<option>SmoothestStep</option>
</select></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
</table>
<br/>
<small>Preview of variation ratios:</small><br/>
<canvas id="merge-canvas" width="400" height="400"></canvas>
</div>
</div>
</div>
</div>
</div>
<div class="merge-buttons">
<button id="merge-button" class="primaryButton">Merge models</button>
</div>
</div>`
const tabSettingsSingle = document.querySelector('#tab-merge-opts-single')
const tabSettingsBatch = document.querySelector('#tab-merge-opts-batch')
linkTabContents(tabSettingsSingle)
linkTabContents(tabSettingsBatch)
console.log('Activate')
let mergeModelAField = new ModelDropdown(document.querySelector('#mergeModelA'), 'stable-diffusion')
let mergeModelBField = new ModelDropdown(document.querySelector('#mergeModelB'), 'stable-diffusion')
updateChart()
// slider
const singleMergeRatioField = document.querySelector('#single-merge-ratio')
const singleMergeRatioSlider = document.querySelector('#single-merge-ratio-slider')
function updateSingleMergeRatio() {
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
singleMergeRatioField.dispatchEvent(new Event("change"))
}
function updateSingleMergeRatioSlider() {
if (singleMergeRatioField.value < 0) {
singleMergeRatioField.value = 0
} else if (singleMergeRatioField.value > 100) {
singleMergeRatioField.value = 100
}
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
singleMergeRatioSlider.dispatchEvent(new Event("change"))
}
singleMergeRatioSlider.addEventListener('input', updateSingleMergeRatio)
singleMergeRatioField.addEventListener('input', updateSingleMergeRatioSlider)
updateSingleMergeRatio()
document.querySelector('.merge-config').addEventListener('change', updateChart)
document.querySelector('#merge-button').addEventListener('click', async function(e) {
// Build request template
let model0 = mergeModelAField.value
let model1 = mergeModelBField.value
let request = { model0: model0, model1: model1 }
request['use_fp16'] = document.querySelector('#merge-fp').value == 'fp16'
let iterations = document.querySelector('#merge-count').value>>0
let start = parseFloat( document.querySelector('#merge-start').value )
let step = parseFloat( document.querySelector('#merge-step').value )
if (isTabActive(tabSettingsSingle)) {
start = parseFloat(singleMergeRatioField.value)
step = 0
iterations = 1
addLogMessage(`merge ratio = ${start}%`)
} else {
addLogMessage(`start = ${start}%`)
addLogMessage(`step = ${step}%`)
}
if (start + (iterations-1) * step >= 100) {
addLogMessage('<i>Aborting: maximum ratio is &#8805; 100%</i>')
addLogMessage('Reduce the number of variations or the step size')
addLogSeparator()
document.querySelector('#merge-count').focus()
return
}
if (document.querySelector('#merge-filename').value == "") {
addLogMessage('<i>Aborting: No output file name specified</i>')
addLogSeparator()
document.querySelector('#merge-filename').focus()
return
}
// Disable merge button
e.target.disabled=true
e.target.classList.add('disabled')
let cursor = $("body").css("cursor");
let label = document.querySelector('#merge-button').innerHTML
$("body").css("cursor", "progress");
document.querySelector('#merge-button').innerHTML = 'Merging models ...'
addLogMessage("Merging models")
addLogMessage("Model A: "+model0)
addLogMessage("Model B: "+model1)
// Batch main loop
for (let i=0; i<iterations; i++) {
let alpha = ( start + i * step ) / 100
switch (document.querySelector('#merge-interpolation').value) {
case 'SmoothStep':
alpha = smoothstep(alpha)
break
case 'SmootherStep':
alpha = smootherstep(alpha)
break
case 'SmoothestStep':
alpha = smootheststep(alpha)
break
</div>
</div>
<div class="merge-buttons">
<button id="merge-button" class="primaryButton">Merge models</button>
</div>
</div>`,
onOpen: ({ firstOpen }) => {
if (!firstOpen) {
return
}
addLogMessage(`merging batch job ${i+1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
request['out_path'] = document.querySelector('#merge-filename').value
request['out_path'] += '-' + alpha.toFixed(5) + '.' + document.querySelector('#merge-format').value
addLogMessage(`&nbsp;&nbsp;filename: ${request['out_path']}`)
const tabSettingsSingle = document.querySelector("#tab-merge-opts-single")
const tabSettingsBatch = document.querySelector("#tab-merge-opts-batch")
linkTabContents(tabSettingsSingle)
linkTabContents(tabSettingsBatch)
request['ratio'] = alpha
let res = await fetch('/model/merge', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request) })
const data = await res.json();
addLogMessage(JSON.stringify(data))
}
addLogMessage("<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder.")
addLogSeparator()
// Re-enable merge button
$("body").css("cursor", cursor);
document.querySelector('#merge-button').innerHTML = label
e.target.disabled=false
e.target.classList.remove('disabled')
console.log("Activate")
let mergeModelAField = new ModelDropdown(document.querySelector("#mergeModelA"), "stable-diffusion")
let mergeModelBField = new ModelDropdown(document.querySelector("#mergeModelB"), "stable-diffusion")
updateChart()
// Update model list
stableDiffusionModelField.innerHTML = ''
vaeModelField.innerHTML = ''
hypernetworkModelField.innerHTML = ''
await getModels()
// slider
const singleMergeRatioField = document.querySelector("#single-merge-ratio")
const singleMergeRatioSlider = document.querySelector("#single-merge-ratio-slider")
function updateSingleMergeRatio() {
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
singleMergeRatioField.dispatchEvent(new Event("change"))
}
function updateSingleMergeRatioSlider() {
if (singleMergeRatioField.value < 0) {
singleMergeRatioField.value = 0
} else if (singleMergeRatioField.value > 100) {
singleMergeRatioField.value = 100
}
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
singleMergeRatioSlider.dispatchEvent(new Event("change"))
}
singleMergeRatioSlider.addEventListener("input", updateSingleMergeRatio)
singleMergeRatioField.addEventListener("input", updateSingleMergeRatioSlider)
updateSingleMergeRatio()
document.querySelector(".merge-config").addEventListener("change", updateChart)
document.querySelector("#merge-button").addEventListener("click", async function(e) {
// Build request template
let model0 = mergeModelAField.value
let model1 = mergeModelBField.value
let request = { model0: model0, model1: model1 }
request["use_fp16"] = document.querySelector("#merge-fp").value == "fp16"
let iterations = document.querySelector("#merge-count").value >> 0
let start = parseFloat(document.querySelector("#merge-start").value)
let step = parseFloat(document.querySelector("#merge-step").value)
if (isTabActive(tabSettingsSingle)) {
start = parseFloat(singleMergeRatioField.value)
step = 0
iterations = 1
addLogMessage(`merge ratio = ${start}%`)
} else {
addLogMessage(`start = ${start}%`)
addLogMessage(`step = ${step}%`)
}
if (start + (iterations - 1) * step >= 100) {
addLogMessage("<i>Aborting: maximum ratio is &#8805; 100%</i>")
addLogMessage("Reduce the number of variations or the step size")
addLogSeparator()
document.querySelector("#merge-count").focus()
return
}
if (document.querySelector("#merge-filename").value == "") {
addLogMessage("<i>Aborting: No output file name specified</i>")
addLogSeparator()
document.querySelector("#merge-filename").focus()
return
}
// Disable merge button
e.target.disabled = true
e.target.classList.add("disabled")
let cursor = $("body").css("cursor")
let label = document.querySelector("#merge-button").innerHTML
$("body").css("cursor", "progress")
document.querySelector("#merge-button").innerHTML = "Merging models ..."
addLogMessage("Merging models")
addLogMessage("Model A: " + model0)
addLogMessage("Model B: " + model1)
// Batch main loop
for (let i = 0; i < iterations; i++) {
let alpha = (start + i * step) / 100
if (isTabActive(tabSettingsBatch)) {
switch (document.querySelector("#merge-interpolation").value) {
case "SmoothStep":
alpha = smoothstep(alpha)
break
case "SmootherStep":
alpha = smootherstep(alpha)
break
case "SmoothestStep":
alpha = smootheststep(alpha)
break
}
}
addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
request["out_path"] = document.querySelector("#merge-filename").value
request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
addLogMessage(`&nbsp;&nbsp;filename: ${request["out_path"]}`)
// sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
request["ratio"] = 1-alpha
let res = await fetch("/model/merge", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(request),
})
const data = await res.json()
addLogMessage(JSON.stringify(data))
}
addLogMessage(
"<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder."
)
addLogSeparator()
// Re-enable merge button
$("body").css("cursor", cursor)
document.querySelector("#merge-button").innerHTML = label
e.target.disabled = false
e.target.classList.remove("disabled")
// Update model list
stableDiffusionModelField.innerHTML = ""
vaeModelField.innerHTML = ""
hypernetworkModelField.innerHTML = ""
await getModels()
})
},
})
})()
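
Among the behavioural changes in this file, the ratio handling near the end is the important one: the UI expresses the merge ratio as Model A's share of the mix, while (per the code comment above) sdkit's ratio parameter is the share of the second model, so the request now sends 1 - alpha. A tiny worked example with an illustrative value:

// Worked example of the ratio conversion above (the 25% value is illustrative).
const modelAShare = 0.25              // user asks for 25% Model A, 75% Model B
const ratioSentToSdkit = 1 - modelAShare
console.log(ratioSentToSdkit)         // 0.75 -> sdkit mixes in 75% of model1 (Model B)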

View File

@@ -1,52 +1,52 @@
(function () {
;(function() {
"use strict"
var styleSheet = document.createElement("style");
var styleSheet = document.createElement("style")
styleSheet.textContent = `
.modifier-card-tiny.modifier-toggle-inactive {
background: transparent;
border: 2px dashed red;
opacity:0.2;
}
`;
document.head.appendChild(styleSheet);
`
document.head.appendChild(styleSheet)
// observe for changes in tag list
var observer = new MutationObserver(function (mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierToggle()
}
// })
var observer = new MutationObserver(function(mutations) {
// mutations.forEach(function (mutation) {
if (editorModifierTagsList.childNodes.length > 0) {
ModifierToggle()
}
// })
})
observer.observe(editorModifierTagsList, {
childList: true
childList: true,
})
function ModifierToggle() {
let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
overlays.forEach (i => {
let overlays = document.querySelector("#editor-inputs-tags-list").querySelectorAll(".modifier-card-overlay")
overlays.forEach((i) => {
i.oncontextmenu = (e) => {
e.preventDefault()
if (i.parentElement.classList.contains('modifier-toggle-inactive')) {
i.parentElement.classList.remove('modifier-toggle-inactive')
}
else
{
i.parentElement.classList.add('modifier-toggle-inactive')
if (i.parentElement.classList.contains("modifier-toggle-inactive")) {
i.parentElement.classList.remove("modifier-toggle-inactive")
} else {
i.parentElement.classList.add("modifier-toggle-inactive")
}
// refresh activeTags
let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].dataset.fullName
activeTags = activeTags.map(obj => {
let modifierName = i.parentElement
.getElementsByClassName("modifier-card-label")[0]
.getElementsByTagName("p")[0].dataset.fullName
activeTags = activeTags.map((obj) => {
if (trimModifiers(obj.name) === trimModifiers(modifierName)) {
return {...obj, inactive: (obj.element.classList.contains('modifier-toggle-inactive'))};
return { ...obj, inactive: obj.element.classList.contains("modifier-toggle-inactive") }
}
return obj;
});
document.dispatchEvent(new Event('refreshImageModifiers'))
return obj
})
document.dispatchEvent(new Event("refreshImageModifiers"))
}
})
}
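
For context, the right-click handler above only toggles a CSS class on the modifier card and then mirrors that class into the matching activeTags entry as an inactive flag before broadcasting refreshImageModifiers. A rough standalone sketch of that state change; the tag names and elements are placeholders, and the trimModifiers() normalization is skipped:

// Rough sketch of the activeTags update performed in the handler above.
let activeTagsSketch = [
    { name: "cinematic lighting", element: document.createElement("div") },
    { name: "oil painting", element: document.createElement("div") },
]
const toggledName = "oil painting"
activeTagsSketch[1].element.classList.add("modifier-toggle-inactive") // simulate the right-click toggle
activeTagsSketch = activeTagsSketch.map((obj) =>
    obj.name === toggledName
        ? { ...obj, inactive: obj.element.classList.contains("modifier-toggle-inactive") }
        : obj
)
document.dispatchEvent(new Event("refreshImageModifiers")) // let other plugins react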

View File

@@ -1,64 +1,53 @@
(function() {
;(function() {
// Register selftests when loaded by jasmine.
if (typeof PLUGINS?.SELFTEST === 'object') {
if (typeof PLUGINS?.SELFTEST === "object") {
PLUGINS.SELFTEST["release-notes"] = function() {
it('should be able to fetch CHANGES.md', async function() {
let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/main/CHANGES.md`)
it("should be able to fetch CHANGES.md", async function() {
let releaseNotes = await fetch(
`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/main/CHANGES.md`
)
expect(releaseNotes.status).toBe(200)
})
}
}
document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
<span id="tab-news" class="tab">
<span><i class="fa fa-bolt icon"></i> What's new?</span>
</span>
`)
document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
<div id="tab-content-news" class="tab-content">
<div id="news" class="tab-content-inner">
Loading..
</div>
</div>
`)
const tabNews = document.querySelector('#tab-news')
if (tabNews) {
linkTabContents(tabNews)
}
const news = document.querySelector('#news')
if (!news) {
// news tab not found, dont exec plugin code.
return
}
document.querySelector('body').insertAdjacentHTML('beforeend', `
<style>
createTab({
id: "news",
icon: "fa-bolt",
label: "What's new",
css: `
#tab-content-news .tab-content-inner {
max-width: 100%;
text-align: left;
padding: 10pt;
}
</style>
`)
`,
onOpen: async ({ firstOpen }) => {
if (firstOpen) {
const loadMarkedScriptPromise = loadScript("/media/js/marked.min.js")
loadScript('/media/js/marked.min.js').then(async function() {
let appConfig = await fetch('/get/app_config')
if (!appConfig.ok) {
console.error('[release-notes] Failed to get app_config.')
return
}
appConfig = await appConfig.json()
let appConfig = await fetch("/get/app_config")
if (!appConfig.ok) {
console.error("[release-notes] Failed to get app_config.")
return
}
appConfig = await appConfig.json()
const updateBranch = appConfig.update_branch || 'main'
const updateBranch = appConfig.update_branch || "main"
let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/${updateBranch}/CHANGES.md`)
if (!releaseNotes.ok) {
console.error('[release-notes] Failed to get CHANGES.md.')
return
}
releaseNotes = await releaseNotes.text()
news.innerHTML = marked.parse(releaseNotes)
let releaseNotes = await fetch(
`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/${updateBranch}/CHANGES.md`
)
if (!releaseNotes.ok) {
console.error("[release-notes] Failed to get CHANGES.md.")
return
}
releaseNotes = await releaseNotes.text()
await loadMarkedScriptPromise
return marked.parse(releaseNotes)
}
},
})
})()
})()

View File

@@ -1,6 +1,7 @@
/* SD-UI Selftest Plugin.js
*/
(function() { "use strict"
;(function() {
"use strict"
const ID_PREFIX = "selftest-plugin"
const links = document.getElementById("community-links")
@@ -10,16 +11,18 @@
}
// Add link to Jasmine SpecRunner
const pluginLink = document.createElement('li')
const pluginLink = document.createElement("li")
const options = {
'stopSpecOnExpectationFailure': "true",
'stopOnSpecFailure': 'false',
'random': 'false',
'hideDisabled': 'false'
stopSpecOnExpectationFailure: "true",
stopOnSpecFailure: "false",
random: "false",
hideDisabled: "false",
}
const optStr = Object.entries(options).map(([key, val]) => `${key}=${val}`).join('&')
const optStr = Object.entries(options)
.map(([key, val]) => `${key}=${val}`)
.join("&")
pluginLink.innerHTML = `<a id="${ID_PREFIX}-starttest" href="${location.protocol}/plugins/core/SpecRunner.html?${optStr}" target="_blank"><i class="fa-solid fa-vial-circle-check"></i> Start SelfTest</a>`
links.appendChild(pluginLink)
console.log('%s loaded!', ID_PREFIX)
console.log("%s loaded!", ID_PREFIX)
})()
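
For reference, the options object above flattens into the query string that the test runner's boot file (earlier in this changeset) reads back through queryString.getParam(); the snippet below just reproduces that serialization:

// Reproduces the query-string serialization used for the SpecRunner link above.
const options = {
    stopSpecOnExpectationFailure: "true",
    stopOnSpecFailure: "false",
    random: "false",
    hideDisabled: "false",
}
const optStr = Object.entries(options)
    .map(([key, val]) => `${key}=${val}`)
    .join("&")
console.log(optStr)
// -> stopSpecOnExpectationFailure=true&stopOnSpecFailure=false&random=false&hideDisabled=false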