Compare commits

...

245 Commits

Author SHA1 Message Date
5d0e5e96d6 Include wbem in env PATH (to find wmic) 2024-10-17 10:28:53 +05:30
7f9394b621 Refactor force-fp32 to not crash on PCs without cuda 2024-10-12 12:45:48 +05:30
76c8e18fcf [webui] Force full-precision on NVIDIA 16xx graphics cards 2024-10-12 12:23:45 +05:30
f514fe6c11 Different way to use python path in dev shell 2024-10-11 19:46:36 +05:30
c947004ec9 Don't use shell=True in dev console 2024-10-11 19:36:05 +05:30
154b550e0e Handle connection error during ping 2024-10-11 19:18:35 +05:30
739ad3a964 Remove fixes for condabin, since we copy that for linux/mac now 2024-10-11 19:14:07 +05:30
648187d2aa use condabin's conda instead of bin/conda 2024-10-11 19:13:02 +05:30
79cfee0447 debugging 2024-10-11 19:05:37 +05:30
7e00e3c260 typo 2024-10-11 19:04:01 +05:30
4ab9d7aebb Expose the CONDA_BASEPATH env variable 2024-10-11 19:03:10 +05:30
3ff9d9c3bb Attempt 2 at fixing conda basepath 2024-10-11 18:57:49 +05:30
02a2dce049 Use condabin's conda binary on linux and mac. Works around a bug in conda 4.14 (very old version) 2024-10-11 18:48:48 +05:30
72095a6d97 Copy webui_console.py in on_sd_start script 2024-10-11 17:34:23 +05:30
3580fa5e32 update changelog for 3.5 2024-10-11 17:21:08 +05:30
926c55cf69 update changelog for 3.5 2024-10-11 17:19:58 +05:30
8bf8d0b1a1 update changelog for 3.5 2024-10-11 17:18:26 +05:30
b3f714923f changelog 2024-10-11 15:13:42 +05:30
7e208fb682 Remove temp hack for debugging 2024-10-11 13:38:45 +05:30
d0cd340cbd Fix bug with post-install browser open 2024-10-11 13:37:26 +05:30
696b65049f Merge branch 'forge' of github.com:cmdr2/stable-diffusion-ui into forge 2024-10-11 13:34:45 +05:30
cabf4a4f07 Error reporting while starting the post-installation browser; Temp hack 2024-10-11 13:34:23 +05:30
f6e6b1ae5f Copy webui_console.py in sh 2024-10-11 12:58:28 +05:30
ec18bae5e4 Copy webui_console.py in bat 2024-10-11 12:57:58 +05:30
e804247acb WebUI dev console script 2024-10-11 12:55:35 +05:30
07483891b6 Tweak message for 'still installing' 2024-10-11 12:55:24 +05:30
dfa552585e Don't open the browser while webui is still installing. wait until the webui actually starts up. 2024-10-11 12:28:20 +05:30
05cf4be89b Check for context attr 2024-10-11 12:05:29 +05:30
e8ee9275bc Don't parse if no filters are applied 2024-10-11 11:39:13 +05:30
c46f29abb0 Include scheduler in drag-and-drop 2024-10-09 21:18:15 +05:30
8bd5eb5ce4 Remove unnecessary patch for sdkit's model downloader. ED now handles both kinds of model folder names 2024-10-09 18:19:21 +05:30
78dcc7cb03 Bring back the earlier model scanning logic 2024-10-09 18:16:15 +05:30
e32b34e2f4 Show version 3.0.10 for ED 2 and 3.0 users, and 3.5.0 for webui users 2024-10-09 14:33:04 +05:30
16463431dd case 2024-10-09 14:19:28 +05:30
9761b172de Use a monitor thread for restarting the webui process if it has crashed. Sometimes it would crash but the process would be a zombie. An external monitor ensures that it gets killed and restarted 2024-10-09 13:47:06 +05:30
d6adb17746 Fix a bug where the vae wouldn't be unloaded in webui 2024-10-09 13:41:56 +05:30
3327244da2 Scheduler selection in the UI; Store Scheduler and Distilled Guidance in metadata 2024-10-09 11:43:45 +05:30
d283fb0776 Option to select Distilled Guidance Scale; Show warnings for Euler Ancestral sampler with Flux 2024-10-09 11:12:28 +05:30
90bc1456c9 Suggest guidance value for Flux and non-Flux models 2024-10-08 18:54:06 +05:30
84c8284a90 Proxy webui api 2024-10-08 18:35:08 +05:30
c1193377b6 Use vram_usage_level while starting webui 2024-10-07 13:33:25 +05:30
5a5d37ba52 Enable rendering only the CPU 2024-10-07 12:59:19 +05:30
b6ba782c35 Support both WebUI and ED folder names for models 2024-10-07 12:48:27 +05:30
9abc76482c Wait until the webui backend responds with a 200 OK to ping requests 2024-10-07 12:39:07 +05:30
6f4e2017f4 Install Forge automatically by creating a conda environment and cloning the forge repo 2024-10-07 11:28:28 +05:30
6ea7dd36da Fix a bug where the config file wasn't actually read on linux/mac 2024-10-02 15:30:29 +05:30
391e12e20d Require onnxruntime for nsfw checking 2024-10-01 16:41:26 +05:30
4e3a5cb6d9 Automatically restart webui if it stops/crashes 2024-10-01 16:25:56 +05:30
f51ab909ff Reset VAE upon restart 2024-10-01 16:25:24 +05:30
754a5f5e52 Case-insensitive model directories 2024-10-01 13:55:35 +05:30
9a12a8618c First working version of dynamic backends, with Forge and ed_diffusers (v3) and ed_classic (v2). Does not auto-install Forge yet 2024-10-01 10:54:58 +05:30
2eb0c9106a Temporarily disable the auto-selection of the appropriate controlnet model 2024-09-24 17:43:34 +05:30
a0de0b5814 Minor refactoring of SD dir variables 2024-09-24 17:40:00 +05:30
6559c41b2e minor log import change 2024-09-24 17:27:50 +05:30
5fe3acd44b Use 1.4 by default, instead of 1.5 2024-09-09 18:34:32 +05:30
716f30fecb Merge pull request #1810 from easydiffusion/beta
Beta
2024-06-14 09:49:27 +05:30
364902f8a1 Ignore text in the version string when comparing them 2024-06-14 09:48:49 +05:30
a261a2d47d Fix #1779 - add to PATH only if it isn't present, to avoid exploding the PATH variable each time the function is called 2024-06-14 09:43:30 +05:30
dea962dc89 Merge pull request #1808 from easydiffusion/beta
temp hotfix for rocm torch
2024-06-13 14:06:14 +05:30
d062c2149a temp hotfix for rocm torch 2024-06-13 14:05:11 +05:30
7d49dc105e Merge pull request #1806 from easydiffusion/beta
Don't crash if psutils fails to get cpu or memory usage
2024-06-11 18:28:07 +05:30
fcdc3f2dd0 Don't crash if psutils fails to get cpu or memory usage 2024-06-11 18:27:21 +05:30
d17b167a81 Merge pull request #1803 from easydiffusion/beta
Support legacy installations with torch 1.11, as well as an option for people to upgrade to the latest sdkit+diffusers
2024-06-07 10:33:56 +05:30
1fa83eda0e Merge pull request #1800 from siakc/uvicorn-run-programmatically
Enhancement - using uvicorn.run() instead of os.system()
2024-06-06 17:51:06 +05:30
969751a195 Use uvicorn.run since it's clearer to read 2024-06-06 17:50:41 +05:30
1ae8675487 typo 2024-06-06 16:20:04 +05:30
05f0bfebba Upgrade torch if using the newer sdkit versions 2024-06-06 16:18:11 +05:30
91ad53cd94 Enhancement - using uvicorn.run() instead of os.system() 2024-06-06 10:28:01 +03:30
de680dfd09 Print diffusers' version 2024-06-05 18:53:45 +05:30
4edeb14e94 Allow a user to opt-in to the latest sdkit+diffusers version, while keeping existing 2.0.15.x users restricted to diffusers 0.21.4. This avoids a lengthy upgrade for existing users, while allowing users to opt-in to the latest version. More to come. 2024-06-05 18:46:22 +05:30
e64cf9c9eb Merge pull request #1796 from easydiffusion/beta
Another typo
2024-06-01 09:02:08 +05:30
66d0c4726e Another typo 2024-06-01 09:01:35 +05:30
c923b44f56 Merge pull request #1795 from easydiffusion/beta
typo
2024-05-31 19:30:50 +05:30
b9c343195b typo 2024-05-31 19:30:29 +05:30
4427e8d3dd Merge pull request #1794 from easydiffusion/beta
Generalize the hotfix for missing sdkit dependencies. This is still a…
2024-05-31 19:28:32 +05:30
87c8fe2758 Generalize the hotfix for missing sdkit dependencies. This is still a temporary hotfix, but will ensure that missing packages are installed, not assume that having picklescan means everything's good 2024-05-31 19:27:30 +05:30
70acde7809 Merge pull request #1792 from easydiffusion/beta
Hotfix - sdkit's dependencies aren't getting pulled for some reason
2024-05-31 10:57:43 +05:30
c4b938f132 Hotfix - sdkit's dependencies aren't getting pulled for some reason 2024-05-31 10:56:43 +05:30
d6fdb8d5a9 Merge pull request #1788 from easydiffusion/beta
Hotfix for older accelerate version in the Windows installer
2024-05-30 17:51:32 +05:30
54ac1f7169 Hotfix for older accelerate version in the Windows installer 2024-05-30 17:50:36 +05:30
deebfc6850 Merge pull request #1787 from easydiffusion/beta
Controlnet Strength and SDXL Controlnet support for img2img and inpainting
2024-05-30 13:22:12 +05:30
21644adbe1 sdkit 2.0.15.6 - typo that prevented 0 controlnet strength 2024-05-29 10:01:04 +05:30
fe3c648a24 sdkit 2.0.15.5 - minor null check 2024-05-28 19:46:59 +05:30
05f3523364 Set the controlnet alpha correctly from older exports; Fix a bug with null lora model in exports 2024-05-28 19:16:48 +05:30
4d9b023378 changelog 2024-05-28 18:48:23 +05:30
44789bf16b sdkit 2.0.15.4 - Controlnet strength slider 2024-05-28 18:45:08 +05:30
ad649a8050 sdkit 2.0.15.3 - disable watermarking on SDXL ControlNets to avoid visual artifacts 2024-05-28 09:00:57 +05:30
723304204e diffusers 0.21.4 2024-05-27 15:26:09 +05:30
ddf54d589e v3.0.8 - use sdkit 2.0.15.1, to enable SDXL Controlnets for img2img and inpainting, using diffusers 0.21.4 2024-05-27 15:18:14 +05:30
a5c9c44e53 Merge pull request #1784 from easydiffusion/beta
Another hotfix for setuptools version on Windows and Linux/mac
2024-05-27 10:58:06 +05:30
4d28c78fcc Another hotfix for setuptools version on Windows and Linux/mac 2024-05-27 10:57:20 +05:30
7dc01370ea Merge pull request #1783 from easydiffusion/beta
Pin setuptools to 0.59
2024-05-27 10:45:49 +05:30
21ff109632 Pin setuptools to 0.59 2024-05-27 10:45:19 +05:30
9b0a654d32 Merge pull request #1782 from easydiffusion/beta
Hotfix to pin setuptools to 0.69 - for #1781
2024-05-27 10:38:41 +05:30
fb749dbe24 Potential hotfix for #1781 - pin setuptools to a specific version, until clip is upgraded 2024-05-27 10:37:09 +05:30
17ef1e04f7 Roll back sdkit 2.0.18 (again) 2024-03-19 20:06:49 +05:30
a5b9eefcf9 v3.0.8 - update diffusers to v0.26.3 2024-03-19 19:17:10 +05:30
e5519cda37 sdkit 2.0.18 (diffusers 0.26.3) 2024-03-19 19:12:00 +05:30
d1bd9e2a16 Prev version 2024-03-13 19:53:14 +05:30
5924d01789 Temporarily revert to sdkit 2.0.15 2024-03-13 19:05:55 +05:30
47432fe54e diffusers 0.26.3 2024-03-13 19:01:09 +05:30
8660a79ccd v3.0.8 - update diffusers to v0.26.3 2024-03-13 18:44:17 +05:30
dfb26ed781 Merge pull request #1702 from easydiffusion/beta
Beta
2023-12-12 18:10:46 +05:30
547febafba Autosave the VAE tiling setting 2023-12-12 18:10:04 +05:30
85eaa305cc Hotfix for #1701 - run disable VAE tiling only on pipelines that support it 2023-12-12 18:07:56 +05:30
25272ce083 Kofi only 2023-12-12 12:48:13 +05:30
212fa77b47 Merge pull request #1700 from easydiffusion/beta
Beta
2023-12-12 12:47:24 +05:30
e77629c525 Version and changelog 2023-12-11 22:31:12 +05:30
097780be26 Setting to enable/disable VAE tiling 2023-12-11 22:28:19 +05:30
6489cd785d Merge pull request #1648 from michaelachrisco/main
Fix Sampler learn more link
2023-11-05 19:16:14 +05:30
a4e651e27e Click to learn more about samplers should go to wiki page 2023-10-28 23:40:20 -07:00
bedf176e62 Merge pull request #1630 from easydiffusion/beta
Beta
2023-10-12 10:06:42 +05:30
398a0509d7 Banner change 2023-10-12 10:05:43 +05:30
52cc99bf1f Revert "Revert the support banner experiment"
This reverts commit 45a14a9be9.
2023-10-12 09:58:29 +05:30
824e057d7b Merge pull request #1624 from easydiffusion/beta
sdkit 2.0.15 - fix for gfpgan/realesrgan in parallel threads
2023-10-06 09:54:40 +05:30
9bd4b3a6d0 sdkit 2.0.15 - fix for gfpgan/realesrgan in parallel threads with Stable Diffusion 2023-10-05 19:04:19 +05:30
307b00cc05 Merge pull request #1622 from easydiffusion/beta
Beta
2023-10-03 19:38:11 +05:30
8a98df4673 Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-10-03 19:35:36 +05:30
45a14a9be9 Revert the support banner experiment 2023-10-03 19:35:23 +05:30
e419276e34 Merge pull request #1621 from easydiffusion/main
Main
2023-10-03 12:45:36 +05:30
0a92b7b1d5 Merge pull request #1620 from easydiffusion/beta
Use sd 2.1.5
2023-10-03 12:42:11 +05:30
f110168366 Use sd 2.1.5 2023-10-03 12:41:51 +05:30
ce24a05909 Merge pull request #1619 from easydiffusion/beta
Beta
2023-10-03 12:39:29 +05:30
45facf64e5 sdkit 2.0.14 - pin transformers 4.33.2 (via sd 2.1.5) and accelerate 0.23.0, and k-diffusion to 0.0.12 2023-10-02 12:08:58 +05:30
e999832c26 Prevent the user from changing the metadata format if the server has set force_save_metadata 2023-09-30 20:11:28 +05:30
4c8d5a7077 Allow setting the metadata field in the server settings, instead of forcing json whenever force_save_path is set 2023-09-29 20:23:24 +05:30
81643cb3af Merge pull request #1611 from easydiffusion/beta
Fix error if a user doesn't have any LoRA models in the folder
2023-09-28 10:06:23 +05:30
7a9bc883df Fix error if a user doesn't have any LoRA models in the folder 2023-09-27 19:32:21 +05:30
6280a80129 Merge pull request #1608 from easydiffusion/beta
LoRA Manager and Upload Thumbnails
2023-09-27 19:24:41 +05:30
a33908b6de Changelog for LoRA manager and 'Upload thumbnails' 2023-09-27 19:22:35 +05:30
0ea5620413 Multi-gpu GFPGAN not fixed yet 2023-09-27 19:03:45 +05:30
e23eb1fea8 Save metadata as json if using force_save_path 2023-09-26 20:58:27 +05:30
41f2c82eaf Save metadata if force_save_path is enabled. We can make this more flexible later 2023-09-26 20:54:16 +05:30
91e3bfe58f Merge pull request #1604 from flavioislima/fix/rocm_url
FIX: ROCM download URL
2023-09-25 14:06:45 +05:30
83d5519a31 Merge pull request #1605 from JeLuF/hover
Fix 'Swap width&height' tooltip
2023-09-25 14:06:11 +05:30
cc2666b9d6 Fix 'Swap w&h' tooltip 2023-09-24 22:06:30 +02:00
954493fef5 FIX: ROCM download URL 2023-09-23 20:08:19 +01:00
967c3681cd Merge pull request #1598 from JeLuF/loraman3
LoraManager: Remove old plugin file
2023-09-19 11:18:41 +05:30
87c9df5c0d Remove old plugin file 2023-09-18 18:55:34 +02:00
62136768d2 typo 2023-09-18 21:25:39 +05:30
b71b7804fc Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-09-18 21:25:22 +05:30
e8b7751374 typo 2023-09-18 21:25:08 +05:30
54d4433141 Merge pull request #1596 from JeLuF/loraman2
LoraManager: Implement 'Upload thumbnail' button
2023-09-18 10:42:25 +05:30
14dbebbc35 LoraManager: Implement 'Upload thumbnail' button 2023-09-17 22:37:17 +02:00
d6a02a31a7 LoraManager: Implement 'Upload thumbnail' button 2023-09-17 22:36:50 +02:00
86e2ac40ae changelog 2023-09-15 19:09:45 +05:30
a12ed7533b Fix broken embeddings dialog when the lora info couldn't be fetched 2023-09-15 19:09:14 +05:30
9fb0ee2d1b Merge pull request #1588 from JeLuF/loraman1
Loramanager fixes
2023-09-15 19:02:17 +05:30
6311b80474 Loramanager fixes
- avoid console errors in python and JS code
- suppress localhost:9000/null links
2023-09-14 23:15:27 +02:00
c13d1093ee sdkit 2.0.12 - actually use the gfpgan fix. 2.0.11 was bad 2023-09-14 20:01:53 +05:30
dd7deeba53 v3.0.6 2023-09-14 19:53:44 +05:30
338aef3e95 sdkit 2.0.11 - fix for gfpgan when using multiple GPUs in parallel 2023-09-14 19:52:50 +05:30
134c98ccb5 Merge pull request #1565 from JeLuF/loramanager
Lora Manager
2023-09-14 19:05:17 +05:30
d12877987f Merge pull request #1584 from easydiffusion/beta
Beta
2023-09-13 18:14:26 +05:30
676316e5e4 Merge pull request #1583 from JeLuF/poor
🔥 FIX Linux installer: Don't use rich
2023-09-13 18:12:56 +05:30
52761ad88c Update check_modules.py 2023-09-13 13:45:21 +02:00
f5e489ba87 Don't use rich
During the first installation, rich is not yet installed
2023-09-13 13:39:34 +02:00
982af1fff3 Merge pull request #1581 from easydiffusion/main
Main
2023-09-13 13:12:32 +05:30
1cff398c20 Merge pull request #1580 from easydiffusion/beta
Beta
2023-09-13 13:12:14 +05:30
a6271d2c4e Merge pull request #1563 from JeLuF/amdperm
AMD/Linux: Warn about file permissions
2023-09-05 16:32:13 +05:30
60f8cc6883 Merge pull request #1567 from easydiffusion/beta
Beta
2023-09-05 16:31:34 +05:30
ffb8feba6b Merge pull request #1564 from JeLuF/wmic
Windows: Show GPU list and driver versions in log
2023-09-05 16:18:42 +05:30
4aca3c4639 Lora Manager 2023-09-04 01:36:32 +02:00
120f9e567c Windows: Show GPU list and driver versions in log 2023-09-03 13:49:30 +02:00
c0492511df AMD/Linux: Warn about file permissions 2023-09-03 13:44:06 +02:00
1075a5ed93 changelog 2023-09-02 19:30:56 +05:30
58d3507155 sdkit 2.0.10 - SDXL ControlNet support; upgrade to diffusers 0.20.2 2023-09-02 19:30:27 +05:30
ae0c9b6a6b Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-09-02 18:34:33 +05:30
ad1374af1d bring back config print 2023-09-02 18:34:12 +05:30
8436e8a71e Merge pull request #1560 from JeLuF/mdir-err
Error handling for models_dir
2023-09-02 08:23:54 +05:30
ea07483465 Error handling for models_dir 2023-09-01 22:54:03 +02:00
51f857c3f3 Merge pull request #1559 from easydiffusion/beta
Beta
2023-09-01 20:30:54 +05:30
74c0ca0902 changelog 2023-09-01 19:53:51 +05:30
ad5641fa3e Fix incorrect metadata generation of embeddings, by removing duplicated logic. The UI already handles this 2023-09-01 19:52:20 +05:30
b0294f8cbd Support banner 2023-09-01 19:31:46 +05:30
5d4498ff85 changelog 2023-09-01 19:29:54 +05:30
d52fb15746 Merge pull request #1558 from easydiffusion/beta
Revert "Continue using uvicorn directly on windows"
2023-09-01 18:29:43 +05:30
ee6be74e72 Revert "Continue using uvicorn directly on windows"
This reverts commit 3a5e0cb2d2.
2023-09-01 18:29:19 +05:30
4cbc86f945 Merge pull request #1557 from easydiffusion/beta
Continue using uvicorn directly on windows
2023-09-01 17:52:19 +05:30
3a5e0cb2d2 Continue using uvicorn directly on windows 2023-09-01 17:51:15 +05:30
7916b8d26a Merge pull request #1556 from easydiffusion/beta
Ignore unknown AMD GPUs
2023-09-01 17:05:04 +05:30
a0842b4659 Ignore unknown AMD GPUs 2023-09-01 17:04:38 +05:30
14ee87ca80 Merge pull request #1555 from easydiffusion/beta
AMD on Linux
2023-09-01 15:58:35 +05:30
cec1d7d6c9 hide debug log 2023-09-01 13:30:46 +05:30
9aeae4d16e note to self 2023-09-01 13:25:00 +05:30
9c1b741d89 Relative path for src 2023-09-01 13:17:50 +05:30
c71a74f857 Merge pull request #1491 from JeLuF/launcher
Pythonize the uvicorn startup
2023-09-01 13:10:45 +05:30
524612cee5 Different PYTHONPATH for Windows and Linux/Mac 2023-09-01 13:09:01 +05:30
11e47b3871 Merge pull request #1554 from easydiffusion/main
Main
2023-09-01 12:51:47 +05:30
4a1b2be45c Merge pull request #1553 from easydiffusion/beta
Beta
2023-09-01 12:51:23 +05:30
d641aa2f6e Fix ordering of help topics 2023-09-01 11:08:11 +05:30
237c7a5348 3.0.4 2023-09-01 10:42:29 +05:30
19f37907d9 Allow changing the models directory via a setting, to share models with other locations on the disk 2023-09-01 10:40:18 +05:30
b8706da990 Merge pull request #1548 from easydiffusion/beta
Beta
2023-08-31 22:28:03 +05:30
b458d57355 Keep the old test_diffusers id around to prevent broken plugins 2023-08-31 22:24:27 +05:30
a5962dae33 Allow underscore in embeddings path 2023-08-31 22:19:04 +05:30
670768e5b3 Allow hyphens in embeddings 2023-08-31 22:16:48 +05:30
f02b915cd0 Fix typo when using force_save_path 2023-08-31 22:11:42 +05:30
71bbbeb936 Update help topics 2023-08-31 21:25:29 +05:30
e084b78b53 Update README.md 2023-08-31 20:14:06 +05:30
013860e3c0 Merge pull request #1546 from easydiffusion/beta
Use v3 for everyone
2023-08-31 20:03:57 +05:30
7a118eeb15 Rename the test_diffusers config key to upgrade all the existing users to the v3 engine. Users can now opt to disable v3. This upgrades existing users who had maybe tried diffusers many months ago (when it was still unstable) and decided against it (at that time). 2023-08-31 19:20:26 +05:30
df408b25e5 changelog 2023-08-31 15:59:23 +05:30
536082c1a6 Save filtered images to disk if required by the API, for e.g. when clicking 'Upscale' or 'Fix Faces' on the image 2023-08-31 15:57:53 +05:30
b986ca3059 Update README.md 2023-08-31 12:44:52 +05:30
4bf9e577b9 Merge pull request #1541 from easydiffusion/beta
Beta
2023-08-31 09:57:11 +05:30
a7c12e61d8 Fix incorrect tiling message in the task info 2023-08-30 19:32:29 +05:30
847d27bffb sdkit 2.0.9 - another fix for torch 2.0 and onnx export 2023-08-30 19:32:09 +05:30
781e812f22 sdkit 2.0.8 - temp hack for allowing onnx export on pytorch 2.0 2023-08-30 18:51:34 +05:30
e49b5e0e6b changelog 2023-08-30 18:24:46 +05:30
8f1c1b128e sdkit 2.0.7 - Allow loading NovelAI-based models 2023-08-30 18:24:23 +05:30
04cbb052d7 bump version 2023-08-30 17:54:19 +05:30
16f0950ebd sdkit 2.0.6 - Fix broken VAE tiling 2023-08-30 17:42:50 +05:30
e959a3d7ab ui 2023-08-30 17:42:32 +05:30
fc9941abaa Merge pull request #1539 from easydiffusion/beta
Server-side setting to block_nsfw
2023-08-30 16:25:04 +05:30
f177011395 changelog 2023-08-30 16:22:08 +05:30
80e47be5a5 Prevent block_nsfw from getting edited via the HTTP api 2023-08-30 16:22:05 +05:30
9a9f6e3559 Server-side config to allow force-blocking of NSFW images 2023-08-30 16:13:10 +05:30
1a6e0234b3 Merge pull request #1538 from easydiffusion/beta
Beta
2023-08-30 15:35:21 +05:30
56bea46e3a Use absolute config path 2023-08-30 15:34:55 +05:30
a09441b2c8 Change the tensorrt installation commands to what NVIDIA suggested over chat 2023-08-30 15:14:05 +05:30
105994d96d Merge pull request #1536 from easydiffusion/beta
Beta
2023-08-30 14:58:24 +05:30
d641647b1e Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-08-30 14:57:53 +05:30
672574d278 sdkit 2.0.5 - don't download the safety checker unless necessary 2023-08-30 14:57:37 +05:30
f1ded17399 Merge pull request #1535 from easydiffusion/beta
Beta
2023-08-30 14:44:41 +05:30
d254e3e2fd Merge pull request #1534 from easydiffusion/main
Main
2023-08-30 14:44:09 +05:30
ab5450bb27 Don't download codeformer and controlnet if not being used 2023-08-30 14:39:43 +05:30
a2e9e5eb57 Remove old files 2023-08-30 13:18:03 +05:30
8965f11ab4 Update CONTRIBUTING.md 2023-08-30 13:15:19 +05:30
1dd5644e7a Update build.bat and build.sh to create the installers for Windows and Mac/Linux (respectively) 2023-08-30 13:09:12 +05:30
37f813506e Merge pull request #1533 from easydiffusion/beta
Beta
2023-08-29 20:11:44 +05:30
a5d5ed90e6 Merge pull request #1532 from easydiffusion/main
Main
2023-08-29 20:10:26 +05:30
3792a1bc0d sdkit 2.0.4 - use sd 1.5 fp16 by default, if no model is present 2023-08-29 20:06:54 +05:30
fbafa56ecb Use torch 2.0.1 and torchvision 0.15.2 by default on Windows 2023-08-29 18:52:06 +05:30
2f910c69b8 unused file 2023-08-29 17:54:23 +05:30
bf06cc48bb Merge branch 'beta' of github.com:cmdr2/stable-diffusion-ui into beta 2023-08-29 17:52:11 +05:30
3ef67ebc73 NSIS - v3, create from an existing installation 2023-08-29 17:51:58 +05:30
0c4318fb31 Update README.md 2023-08-29 17:49:50 +05:30
c55ced93db Update FUNDING.yml 2023-08-29 17:48:50 +05:30
807d940001 Merge pull request #1528 from JeLuF/inputmode
inputmode=numeric/decimal for <input> fields
2023-08-29 15:09:13 +05:30
8c27fa136c inputmode=numeric/decimal for <input> fields 2023-08-29 10:02:06 +02:00
8e7a6077e5 Make it work on Windows 2023-08-13 15:01:32 +02:00
53a79c1a81 Automatically detect whether NAVI1/2 or NAVI3 ROCm versions are needed 2023-08-11 22:36:20 +02:00
e9f54c8bae Launch uvicorn from check_modules.py 2023-08-11 21:31:45 +02:00
c978863e5f Add uvicorn-launch to check_modules.py 2023-08-08 22:16:57 +02:00
52 changed files with 4979 additions and 1254 deletions

4
.gitignore vendored
View File

@ -3,4 +3,6 @@ installer
installer.tar
dist
.idea/*
node_modules/*
node_modules/*
.tmp1
.tmp2

View File

@ -1,5 +1,21 @@
# What's new?
## v3.5 (preview)
### Major Changes
- **Flux** - full support for the Flux model, including quantized bnb and nf4 models.
- **LyCORIS** - including `LoCon`, `Hada`, `IA3` and `Lokr`.
- **11 new samplers** - `DDIM CFG++`, `DPM Fast`, `DPM++ 2m SDE Heun`, `DPM++ 3M SDE`, `Restart`, `Heun PP2`, `IPNDM`, `IPNDM_V`, `LCM`, `[Forge] Flux Realistic`, `[Forge] Flux Realistic (Slow)`.
- **15 new schedulers** - `Uniform`, `Karras`, `Exponential`, `Polyexponential`, `SGM Uniform`, `KL Optimal`, `Align Your Steps`, `Normal`, `DDIM`, `Beta`, `Turbo`, `Align Your Steps GITS`, `Align Your Steps 11`, `Align Your Steps 32`.
- **42 new Controlnet filters, and support for lots of new ControlNet models** (including QR ControlNets).
- **5 upscalers** - `SwinIR`, `ScuNET`, `Nearest`, `Lanczos`, `ESRGAN`.
- **Faster than v3.0**
- **Major rewrite of the code** - We've switched to `Forge WebUI` under the hood, which brings a lot of new features, faster image generation, and support for all the extensions in the Forge/Automatic1111 community. This allows Easy Diffusion to stay up-to-date with the latest features, and focus on making the UI and installation experience even easier.
v3.5 is currently an optional upgrade, and you can switch between the v3.0 (diffusers) engine and the v3.5 (webui) engine using the `Settings` tab in the UI.
### Detailed changelog
* 3.5.0 - 11 Oct 2024 - **Preview release** of the new v3.5 engine, powered by Forge WebUI (a fork of Automatic1111). This enables Flux, SD3, LyCORIS and lots of new features, while using the same familiar Easy Diffusion interface.
## v3.0
### Major Changes
- **ControlNet** - Full support for ControlNet, with native integration of the common ControlNet models. Just select a control image, then choose the ControlNet filter/model and run. No additional configuration or download necessary. Supports custom ControlNets as well.
@ -17,6 +33,21 @@
- **Major rewrite of the code** - We've switched to using diffusers under-the-hood, which allows us to release new features faster, and focus on making the UI and installer even easier to use.
### Detailed changelog
* 3.0.10 - 11 Oct 2024 - **Major Update** - An option to upgrade to v3.5, which enables Flux, Stable Diffusion 3, LyCORIS models and lots more.
* 3.0.9 - 28 May 2024 - Slider for controlling the strength of controlnets.
* 3.0.8 - 27 May 2024 - SDXL ControlNets for Img2Img and Inpainting.
* 3.0.7 - 11 Dec 2023 - Setting to enable/disable VAE tiling (in the Image Settings panel). Sometimes VAE tiling reduces the quality of the image, so this setting will help control that.
* 3.0.6 - 18 Sep 2023 - Add thumbnails to embeddings from the UI, using the new `Upload Thumbnail` button in the Embeddings popup. Thanks @JeLuf.
* 3.0.6 - 15 Sep 2023 - Fix broken embeddings dialog when LoRA information couldn't be fetched.
* 3.0.6 - 14 Sep 2023 - UI for adding notes to LoRA files (to help you remember which prompts to use). Also added a button to automatically fetch prompts from Civitai for a LoRA file, using the `Import from Civitai` button. Thanks @JeLuf.
* 3.0.5 - 2 Sep 2023 - Support SDXL ControlNets.
* 3.0.4 - 1 Sep 2023 - Fix incorrect metadata generated for embeddings, when the exact word doesn't match the case, or is part of a larger word.
* 3.0.4 - 1 Sep 2023 - Simplify the installation for AMD users on Linux. Thanks @JeLuf.
* 3.0.4 - 1 Sep 2023 - Allow using a different folder for models. This is useful if you want to share a models folder across different software, or on a different drive. You can change this path in the Settings tab.
* 3.0.3 - 31 Aug 2023 - Auto-save images to disk (if enabled by the user) when upscaling/fixing using the buttons on the image.
* 3.0.3 - 30 Aug 2023 - Allow loading NovelAI-based custom models.
* 3.0.3 - 30 Aug 2023 - Fix broken VAE tiling. This allows you to create larger images with lesser VRAM usage.
* 3.0.3 - 30 Aug 2023 - Allow blocking NSFW images using a server-side config. This prevents the browser from generating NSFW images or changing the config. Open `config.yaml` in a text editor (e.g. Notepad), and add `block_nsfw: true` at the end, and save the file.
* 3.0.2 - 29 Aug 2023 - Fixed incorrect matching of embeddings from prompts.
* 3.0.2 - 24 Aug 2023 - Fix broken seamless tiling.
* 3.0.2 - 23 Aug 2023 - Fix styling on mobile devices.

View File

@ -47,3 +47,5 @@ Build the Windows installer using Windows, and the Linux installer using Linux.
1. Run `build.bat` or `./build.sh` depending on whether you're in Windows or Linux.
2. Make a new GitHub release and upload the Windows and Linux installer builds created inside the `dist` folder.
For NSIS (on Windows), you need to have these plugins in the `nsis/Plugins` folder: `amd64-unicode`, `x86-ansi`, `x86-unicode`

Binary file not shown.

Before

Width:  |  Height:  |  Size: 288 KiB

View File

@ -1 +0,0 @@
!define EXISTING_INSTALLATION_DIR "D:\path\to\installed\easy-diffusion"

Binary file not shown.

Before

Width:  |  Height:  |  Size: 200 KiB

View File

@ -7,9 +7,9 @@ RequestExecutionLevel user
!AddPluginDir /amd64-unicode "."
; HM NIS Edit Wizard helper defines
!define PRODUCT_NAME "Easy Diffusion"
!define PRODUCT_VERSION "2.5"
!define PRODUCT_VERSION "3.0"
!define PRODUCT_PUBLISHER "cmdr2 and contributors"
!define PRODUCT_WEB_SITE "https://stable-diffusion-ui.github.io"
!define PRODUCT_WEB_SITE "https://easydiffusion.github.io"
!define PRODUCT_DIR_REGKEY "Software\Microsoft\Easy Diffusion\App Paths\installer.exe"
; MUI 1.67 compatible ------
@ -165,9 +165,9 @@ FunctionEnd
; MUI Settings
;---------------------------------------------------------------------------------------------------------
!define MUI_ABORTWARNING
!define MUI_ICON "cyborg_flower_girl.ico"
!define MUI_ICON "${EXISTING_INSTALLATION_DIR}\installer_files\cyborg_flower_girl.ico"
!define MUI_WELCOMEFINISHPAGE_BITMAP "cyborg_flower_girl.bmp"
!define MUI_WELCOMEFINISHPAGE_BITMAP "${EXISTING_INSTALLATION_DIR}\installer_files\cyborg_flower_girl.bmp"
; Welcome page
!define MUI_WELCOMEPAGE_TEXT "This installer will guide you through the installation of Easy Diffusion.$\n$\n\
@ -176,8 +176,8 @@ Click Next to continue."
Page custom MediaPackDialog
; License page
!insertmacro MUI_PAGE_LICENSE "..\LICENSE"
!insertmacro MUI_PAGE_LICENSE "..\CreativeML Open RAIL-M License"
!insertmacro MUI_PAGE_LICENSE "${EXISTING_INSTALLATION_DIR}\LICENSE"
!insertmacro MUI_PAGE_LICENSE "${EXISTING_INSTALLATION_DIR}\CreativeML Open RAIL-M License"
; Directory page
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE "DirectoryLeave"
!insertmacro MUI_PAGE_DIRECTORY
@ -210,29 +210,33 @@ ShowInstDetails show
; List of files to be installed
Section "MainSection" SEC01
SetOutPath "$INSTDIR"
File "..\CreativeML Open RAIL-M License"
File "..\How to install and run.txt"
File "..\LICENSE"
File "..\scripts\Start Stable Diffusion UI.cmd"
File "${EXISTING_INSTALLATION_DIR}\CreativeML Open RAIL-M License"
File "${EXISTING_INSTALLATION_DIR}\How to install and run.txt"
File "${EXISTING_INSTALLATION_DIR}\LICENSE"
File "${EXISTING_INSTALLATION_DIR}\Start Stable Diffusion UI.cmd"
File /r "${EXISTING_INSTALLATION_DIR}\installer_files"
File /r "${EXISTING_INSTALLATION_DIR}\profile"
File /r "${EXISTING_INSTALLATION_DIR}\sd-ui-files"
SetOutPath "$INSTDIR\installer_files"
File "cyborg_flower_girl.ico"
SetOutPath "$INSTDIR\scripts"
File "${EXISTING_INSTALLATION_DIR}\scripts\install_status.txt"
File "..\scripts\on_env_start.bat"
File "${EXISTING_INSTALLATION_DIR}\scripts\on_env_start.bat"
File "C:\windows\system32\curl.exe"
CreateDirectory "$INSTDIR\models"
File "${EXISTING_INSTALLATION_DIR}\scripts\config.yaml.sample"
CreateDirectory "$INSTDIR\models\stable-diffusion"
CreateDirectory "$INSTDIR\models\gfpgan"
CreateDirectory "$INSTDIR\models\realesrgan"
CreateDirectory "$INSTDIR\models\vae"
CreateDirectory "$INSTDIR\profile\.cache\huggingface\hub"
SetOutPath "$INSTDIR\profile\.cache\huggingface\hub"
File /r /x pytorch_model.bin "${EXISTING_INSTALLATION_DIR}\profile\.cache\huggingface\hub\models--openai--clip-vit-large-patch14"
CreateDirectory "$SMPROGRAMS\Easy Diffusion"
CreateShortCut "$SMPROGRAMS\Easy Diffusion\Easy Diffusion.lnk" "$INSTDIR\Start Stable Diffusion UI.cmd" "" "$INSTDIR\installer_files\cyborg_flower_girl.ico"
DetailPrint 'Downloading the Stable Diffusion 1.4 model...'
NScurl::http get "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt" "$INSTDIR\models\stable-diffusion\sd-v1-4.ckpt" /CANCEL /INSIST /END
DetailPrint 'Downloading the Stable Diffusion 1.5 model...'
NScurl::http get "https://github.com/easydiffusion/sdkit-test-data/releases/download/assets/sd-v1-5.safetensors" "$INSTDIR\models\stable-diffusion\sd-v1-5.safetensors" /CANCEL /INSIST /END
DetailPrint 'Downloading the GFPGAN model...'
NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" "$INSTDIR\models\gfpgan\GFPGANv1.4.pth" /CANCEL /INSIST /END

View File

@ -3,7 +3,9 @@
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
[Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | <sub>[![Discord Server](https://img.shields.io/discord/1014774730907209781?label=Discord)](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
🔥🎉 **New!** Support for SDXL, ControlNet, multiple LoRA files, embeddings (and a lot more) has been added!
[Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | [User guide](https://github.com/easydiffusion/easydiffusion/wiki) | <sub>[![Discord Server](https://img.shields.io/discord/1014774730907209781?label=Discord)](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
---
![262597678-11089485-2514-4a11-88fb-c3acc81fc9ec](https://github.com/easydiffusion/easydiffusion/assets/844287/050b5e15-e909-45bf-8162-a38234830e38)
@ -102,7 +104,7 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
- **Safetensors support**: Support loading models in the safetensor format, for improved safety.
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, modify packages, and edit the conda environment.
**(and a lot more)**

View File

@ -1,48 +1,78 @@
@echo off
setlocal enabledelayedexpansion
@echo "Hi there, what you are running is meant for the developers of this project, not for users." & echo.
@echo "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file."
@echo "If you only want to use Easy Diffusion, you've downloaded the wrong file."
@echo "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation" & echo.
@echo "If you are actually a developer of this project, please type Y and press enter" & echo.
set /p answer=Are you a developer of this project (Y/N)?
if /i "%answer:~,1%" NEQ "Y" exit /b
mkdir dist\win\stable-diffusion-ui\scripts
@REM mkdir dist\linux-mac\stable-diffusion-ui\scripts
@rem verify dependencies
call makensis /VERSION >.tmp1 2>.tmp2
if "!ERRORLEVEL!" NEQ "0" (
echo makensis.exe not found! Download it from https://sourceforge.net/projects/nsisbi/files/ and set it on the PATH variable.
pause
exit
)
@rem copy the installer files for Windows
set /p OUT_DIR=Output folder path (will create the installer files inside this, e.g. F:\EasyDiffusion):
copy scripts\on_env_start.bat dist\win\stable-diffusion-ui\scripts\
copy scripts\bootstrap.bat dist\win\stable-diffusion-ui\scripts\
copy scripts\config.yaml.sample dist\win\stable-diffusion-ui\scripts\config.yaml
copy "scripts\Start Stable Diffusion UI.cmd" dist\win\stable-diffusion-ui\
copy LICENSE dist\win\stable-diffusion-ui\
copy "CreativeML Open RAIL-M License" dist\win\stable-diffusion-ui\
copy "How to install and run.txt" dist\win\stable-diffusion-ui\
echo. > dist\win\stable-diffusion-ui\scripts\install_status.txt
mkdir "%OUT_DIR%\scripts"
mkdir "%OUT_DIR%\installer_files"
@rem copy the installer files for Linux and Mac
set BASE_DIR=%cd%
@REM copy scripts\on_env_start.sh dist\linux-mac\stable-diffusion-ui\scripts\
@REM copy scripts\bootstrap.sh dist\linux-mac\stable-diffusion-ui\scripts\
@REM copy scripts\start.sh dist\linux-mac\stable-diffusion-ui\
@REM copy LICENSE dist\linux-mac\stable-diffusion-ui\
@REM copy "CreativeML Open RAIL-M License" dist\linux-mac\stable-diffusion-ui\
@REM copy "How to install and run.txt" dist\linux-mac\stable-diffusion-ui\
@REM echo. > dist\linux-mac\stable-diffusion-ui\scripts\install_status.txt
@rem STEP 1: copy the installer files for Windows
@rem make the zip
cd dist\win
call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-windows.zip
cd ..\..
@REM cd dist\linux-mac
@REM call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-linux.zip
@REM call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-mac.zip
@REM cd ..\..
echo "Build ready. Upload the zip files inside the 'dist' folder."
copy "%BASE_DIR%\scripts\on_env_start.bat" "%OUT_DIR%\scripts\"
copy "%BASE_DIR%\scripts\config.yaml.sample" "%OUT_DIR%\scripts\config.yaml.sample"
copy "%BASE_DIR%\scripts\Start Stable Diffusion UI.cmd" "%OUT_DIR%\"
copy "%BASE_DIR%\LICENSE" "%OUT_DIR%\"
copy "%BASE_DIR%\CreativeML Open RAIL-M License" "%OUT_DIR%\"
copy "%BASE_DIR%\How to install and run.txt" "%OUT_DIR%\"
copy "%BASE_DIR%\NSIS\cyborg_flower_girl.ico" "%OUT_DIR%\installer_files\"
copy "%BASE_DIR%\NSIS\cyborg_flower_girl.bmp" "%OUT_DIR%\installer_files\"
echo. > "%OUT_DIR%\scripts\install_status.txt"
echo ----
echo Basic files ready. Verify the files in %OUT_DIR%, then press Enter to initialize the environment, or close to quit.
echo ----
pause
@rem STEP 2: Initialize the environment with git, python and conda
cd /d "%OUT_DIR%\"
call "%BASE_DIR%\scripts\bootstrap.bat"
echo ----
echo Environment ready. Verify the environment, then press Enter to download the necessary packages, or close to quit.
echo ----
pause
@rem STEP 3: Download the packages and create a working installation
cd /d "%OUT_DIR%\"
start "Install Easy Diffusion" /D "%OUT_DIR%" "Start Stable Diffusion UI.cmd"
echo ----
echo Installation in progress (in a new window). Once complete, verify the installation, then press Enter to create an installer from these files, or close to quit.
echo ----
pause
@rem STEP 4: Build the installer from a working installation
cd /d "%OUT_DIR%\"
echo ^^!define EXISTING_INSTALLATION_DIR "%OUT_DIR%" > nsisconf.nsh
call makensis /NOCD /V4 "%BASE_DIR%\NSIS\sdui.nsi"
echo ----
if "!ERRORLEVEL!" EQU "0" (
echo Installer built successfully at %OUT_DIR%
) else (
echo Installer failed to build at %OUT_DIR%
)
echo ----
pause

View File

@ -1,7 +1,7 @@
#!/bin/bash
printf "Hi there, what you are running is meant for the developers of this project, not for users.\n\n"
printf "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file.\n"
printf "If you only want to use Easy Diffusion, you've downloaded the wrong file.\n"
printf "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation \n\n"
printf "If you are actually a developer of this project, please type Y and press enter\n\n"
@ -11,40 +11,30 @@ case $yn in
* ) exit;;
esac
# mkdir -p dist/win/stable-diffusion-ui/scripts
mkdir -p dist/linux-mac/stable-diffusion-ui/scripts
# copy the installer files for Windows
# cp scripts/on_env_start.bat dist/win/stable-diffusion-ui/scripts/
# cp scripts/bootstrap.bat dist/win/stable-diffusion-ui/scripts/
# cp "scripts/Start Stable Diffusion UI.cmd" dist/win/stable-diffusion-ui/
# cp LICENSE dist/win/stable-diffusion-ui/
# cp "CreativeML Open RAIL-M License" dist/win/stable-diffusion-ui/
# cp "How to install and run.txt" dist/win/stable-diffusion-ui/
# echo "" > dist/win/stable-diffusion-ui/scripts/install_status.txt
mkdir -p dist/linux-mac/easy-diffusion/scripts
# copy the installer files for Linux and Mac
cp scripts/on_env_start.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/bootstrap.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/functions.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/config.yaml.sample dist/linux-mac/stable-diffusion-ui/scripts/config.yaml
cp scripts/start.sh dist/linux-mac/stable-diffusion-ui/
cp LICENSE dist/linux-mac/stable-diffusion-ui/
cp "CreativeML Open RAIL-M License" dist/linux-mac/stable-diffusion-ui/
cp "How to install and run.txt" dist/linux-mac/stable-diffusion-ui/
echo "" > dist/linux-mac/stable-diffusion-ui/scripts/install_status.txt
cp scripts/on_env_start.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/bootstrap.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/functions.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/config.yaml.sample dist/linux-mac/easy-diffusion/scripts/config.yaml.sample
cp scripts/start.sh dist/linux-mac/easy-diffusion/
cp LICENSE dist/linux-mac/easy-diffusion/
cp "CreativeML Open RAIL-M License" dist/linux-mac/easy-diffusion/
cp "How to install and run.txt" dist/linux-mac/easy-diffusion/
echo "" > dist/linux-mac/easy-diffusion/scripts/install_status.txt
# set the permissions
chmod u+x dist/linux-mac/easy-diffusion/scripts/on_env_start.sh
chmod u+x dist/linux-mac/easy-diffusion/scripts/bootstrap.sh
chmod u+x dist/linux-mac/easy-diffusion/start.sh
# make the zip
# cd dist/win
# zip -r ../stable-diffusion-ui-windows.zip stable-diffusion-ui
# cd ../..
cd dist/linux-mac
zip -r ../stable-diffusion-ui-linux.zip stable-diffusion-ui
zip -r ../stable-diffusion-ui-mac.zip stable-diffusion-ui
zip -r ../Easy-Diffusion-Linux.zip easy-diffusion
zip -r ../Easy-Diffusion-Mac.zip easy-diffusion
cd ../..
echo "Build ready. Upload the zip files inside the 'dist' folder."

Binary file not shown.

View File

@ -3,7 +3,7 @@
cd /d %~dp0
echo Install dir: %~dp0
set PATH=C:\Windows\System32;%PATH%
set PATH=C:\Windows\System32;C:\Windows\System32\wbem;%PATH%
set PYTHONHOME=
if exist "on_sd_start.bat" (
@ -15,7 +15,7 @@ if exist "on_sd_start.bat" (
echo download. This will not work.
echo.
echo Recommended: Please close this window and download the installer from
echo https://stable-diffusion-ui.github.io/docs/installation/
echo https://easydiffusion.github.io/docs/installation/
echo.
echo ================================================================================
echo.
@ -39,6 +39,7 @@ call where conda
call conda --version
echo .
echo COMSPEC=%COMSPEC%
wmic path win32_VideoController get name,AdapterRAM,DriverDate,DriverVersion
@rem Download the rest of the installer and UI
call scripts\on_env_start.bat

View File

@ -14,6 +14,8 @@ set LEGACY_INSTALL_ENV_DIR=%cd%\installer
set MICROMAMBA_DOWNLOAD_URL=https://github.com/easydiffusion/easydiffusion/releases/download/v1.1/micromamba.exe
set umamba_exists=F
set PYTHONHOME=
set OLD_APPDATA=%APPDATA%
set OLD_USERPROFILE=%USERPROFILE%
set APPDATA=%cd%\installer_files\appdata
@ -22,15 +24,12 @@ set USERPROFILE=%cd%\profile
@rem figure out whether git and conda needs to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
set PACKAGES_TO_INSTALL=
set PACKAGES_TO_INSTALL=git python=3.8.5
if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda python=3.8.5
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
)
call git --version >.tmp1 2>.tmp2
if "!ERRORLEVEL!" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "!ERRORLEVEL!" EQU "0" set umamba_exists=T

File diff suppressed because it is too large Load Diff

View File

@ -6,6 +6,7 @@ import shutil
# The config file is in the same directory as this script
config_directory = os.path.dirname(__file__)
config_yaml = os.path.join(config_directory, "..", "config.yaml")
config_yaml = os.path.abspath(config_yaml)
config_json = os.path.join(config_directory, "config.json")
parser = argparse.ArgumentParser(description='Get values from config file')

View File

@ -71,6 +71,7 @@ if "%update_branch%"=="" (
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy sd-ui-files\scripts\config.yaml.sample scripts\ /Y
@copy sd-ui-files\scripts\webui_console.py scripts\ /Y
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y

View File

@ -54,6 +54,7 @@ cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/config.yaml.sample scripts/
cp sd-ui-files/scripts/webui_console.py scripts/
cp sd-ui-files/scripts/start.sh .
cp sd-ui-files/scripts/developer_console.sh .
cp sd-ui-files/scripts/functions.sh scripts/

View File

@ -7,6 +7,7 @@
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy sd-ui-files\scripts\config.yaml.sample scripts\ /Y
@copy sd-ui-files\scripts\webui_console.py scripts\ /Y
if exist "%cd%\profile" (
set HF_HOME=%cd%\profile\.cache\huggingface
@ -34,6 +35,7 @@ call conda activate
@REM remove the old version of the dev console script, if it's still present
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
if exist "ui\plugins\ui\merge.plugin.js" del "ui\plugins\ui\merge.plugin.js"
@rem create the stable-diffusion folder, to work with legacy installations
if not exist "stable-diffusion" mkdir stable-diffusion
@ -52,73 +54,24 @@ if exist ldm rename ldm ldm-old
if not exist "%INSTALL_ENV_DIR%\DLLs\libssl-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libssl-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
if not exist "%INSTALL_ENV_DIR%\DLLs\libcrypto-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libcrypto-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
cd ..
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
@rem install or upgrade the required modules
set PATH=C:\Windows\System32;%PATH%
@REM prevent from using packages from the user's home directory, to avoid conflicts
set PYTHONNOUSERSITE=1
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
@rem Download the required packages
call python ..\scripts\check_modules.py
if "%ERRORLEVEL%" NEQ "0" (
pause
exit /b
)
call WHERE uvicorn > .tmp
@>nul findstr /m "uvicorn" .tmp
@if "%ERRORLEVEL%" NEQ "0" (
@echo. & echo "UI packages not found! Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues" & echo "Thanks!" & echo.
pause
exit /b
)
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" NEQ "0" (
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
)
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" NEQ "0" (
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
@echo sd_install_complete >> ..\scripts\install_status.txt
)
@echo. & echo "Easy Diffusion installation complete! Starting the server!" & echo.
@set SD_DIR=%cd%
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
echo PYTHONPATH=%PYTHONPATH%
@rem Download the required packages
call where python
call python --version
@cd ..
@set SD_UI_PATH=%cd%\ui
call python scripts\check_modules.py --launch-uvicorn
pause
exit /b
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=9000 net listen_port`) DO (
@SET ED_BIND_PORT=%%F
)
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
if "%%F" EQU "True" (
@FOR /F "tokens=* USEBACKQ" %%G IN (`python scripts\get_config.py --default=0.0.0.0 net bind_ip`) DO (
@SET ED_BIND_IP=%%G
)
) else (
@SET ED_BIND_IP=127.0.0.1
)
)
@cd stable-diffusion
@rem set any overrides
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
@python -m uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
@pause

View File

@ -6,20 +6,29 @@ cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/config.yaml.sample scripts/
cp sd-ui-files/scripts/webui_console.py scripts/
source ./scripts/functions.sh
# activate the installer env
CONDA_BASEPATH=$(conda info --base)
export CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # avoids the 'shell not initialized' error
conda activate || fail "Failed to activate conda"
# hack to fix conda 4.14 on older installations
cp $CONDA_BASEPATH/condabin/conda $CONDA_BASEPATH/bin/conda
# remove the old version of the dev console script, if it's still present
if [ -e "open_dev_console.sh" ]; then
rm "open_dev_console.sh"
fi
if [ -e "ui/plugins/ui/merge.plugin.js" ]; then
rm "ui/plugins/ui/merge.plugin.js"
fi
# set the correct installer path (current vs legacy)
if [ -e "installer_files/env" ]; then
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
@ -41,45 +50,8 @@ fi
if [ -e "src" ]; then mv src src-old; fi
if [ -e "ldm" ]; then mv ldm ldm-old; fi
# Download the required packages
if ! python ../scripts/check_modules.py; then
read -p "Press any key to continue"
exit 1
fi
if ! command -v uvicorn &> /dev/null; then
fail "UI packages not found!"
fi
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
echo sd_weights_downloaded >> ../scripts/install_status.txt
echo sd_install_complete >> ../scripts/install_status.txt
fi
printf "\n\nEasy Diffusion installation complete, starting the server!\n\n"
SD_PATH=`pwd`
export PYTORCH_ENABLE_MPS_FALLBACK=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
echo "PYTHONPATH=$PYTHONPATH"
which python
python --version
cd ..
export SD_UI_PATH=`pwd`/ui
export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
case "$( python scripts/get_config.py --default=False net listen_to_network )" in
"True")
export ED_BIND_IP=$( python scripts/get_config.py --default=0.0.0.0 net bind_ip)
;;
"False")
export ED_BIND_IP=127.0.0.1
;;
esac
cd stable-diffusion
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port "$ED_BIND_PORT" --host "$ED_BIND_IP" --log-level error
# Download the required packages
python scripts/check_modules.py --launch-uvicorn
read -p "Press any key to continue"

View File

@ -11,7 +11,7 @@ if [ -f "on_sd_start.bat" ]; then
echo download. This will not work.
echo
echo Recommended: Please close this window and download the installer from
echo https://stable-diffusion-ui.github.io/docs/installation/
echo https://easydiffusion.github.io/docs/installation/
echo
echo ================================================================================
echo

101
scripts/webui_console.py Normal file
View File

@ -0,0 +1,101 @@
import os
import platform
import subprocess
def configure_env(dir):
    """
    Point the process environment at the bundled environment in `dir`.

    Overwrites PATH, PYTHONPATH, PYTHONHOME, PY_LIBS and PY_PIP (plus
    PYTHONNOUSERSITE, PYTHON and GIT) in os.environ, so that subprocesses
    pick up the python/git binaries inside `dir` instead of system-wide ones.

    Returns a dict of the exact key -> value strings written to os.environ.
    (Bug fix: the previous implementation built `env = {}` but never
    populated it, so it always returned an empty dict.)
    """
    env_entries = {
        "PATH": [
            f"{dir}",
            f"{dir}/bin",
            f"{dir}/Library/bin",
            f"{dir}/Scripts",
            f"{dir}/usr/bin",
        ],
        "PYTHONPATH": [
            f"{dir}",
            f"{dir}/lib/site-packages",
            f"{dir}/lib/python3.10/site-packages",
        ],
        # PYTHONHOME is deliberately cleared (set to "") so a system-wide
        # python install cannot leak its stdlib into this environment.
        "PYTHONHOME": [],
        "PY_LIBS": [
            f"{dir}/Scripts/Lib",
            f"{dir}/Scripts/Lib/site-packages",
            f"{dir}/lib",
            f"{dir}/lib/python3.10/site-packages",
        ],
        "PY_PIP": [f"{dir}/Scripts", f"{dir}/bin"],
    }

    if platform.system() == "Windows":
        # Keep core OS tools reachable; wbem is needed so `wmic` resolves.
        env_entries["PATH"].append("C:/Windows/System32")
        env_entries["PATH"].append("C:/Windows/System32/wbem")
        env_entries["PYTHONNOUSERSITE"] = ["1"]
        env_entries["PYTHON"] = [f"{dir}/python"]
        env_entries["GIT"] = [f"{dir}/Library/bin/git"]
    else:
        env_entries["PATH"].append("/bin")
        env_entries["PATH"].append("/usr/bin")
        env_entries["PATH"].append("/usr/sbin")
        env_entries["PYTHONNOUSERSITE"] = ["y"]
        env_entries["PYTHON"] = [f"{dir}/bin/python"]
        env_entries["GIT"] = [f"{dir}/bin/git"]

    env = {}
    for key, paths in env_entries.items():
        paths = [p.replace("/", os.path.sep) for p in paths]
        joined = os.pathsep.join(paths)
        env[key] = joined  # bug fix: previously `env` was never populated
        os.environ[key] = joined

    return env
def print_env_info():
    """Print diagnostic details about the active git/python environment."""
    on_windows = platform.system() == "Windows"
    which_cmd = "where" if on_windows else "which"

    # Resolve the python binary the current PATH points at (first hit only).
    python = subprocess.getoutput(f"{which_cmd} python").split("\n")[0].strip()
    print("python: ", python)

    def run(cmd):
        # Run a command and stream its output to this console.
        with subprocess.Popen(cmd) as proc:
            proc.wait()

    for cmd in ([which_cmd, "git"], ["git", "--version"], [which_cmd, "python"], [python, "--version"]):
        run(cmd)

    print(f"PATH={os.environ['PATH']}")

    if on_windows:
        print(f"COMSPEC={os.environ['COMSPEC']}")
        print("")
        # GPU inventory via wmic (requires System32\wbem on PATH)
        run("wmic path win32_VideoController get name,AdapterRAM,DriverDate,DriverVersion".split(" "))

    print(f"PYTHONPATH={os.environ['PYTHONPATH']}")
    print("")
def open_dev_shell():
    """Drop the user into an interactive shell (cmd on Windows, bash elsewhere)."""
    shell = "cmd" if platform.system() == "Windows" else "bash"
    subprocess.Popen(shell).communicate()
if __name__ == "__main__":
    # The bundled WebUI environment lives at ./webui/system relative to CWD.
    system_dir = os.path.abspath(os.path.join("webui", "system"))
    configure_env(system_dir)
    print_env_info()
    open_dev_shell()

View File

@ -11,7 +11,7 @@ from ruamel.yaml import YAML
import urllib
import warnings
from easydiffusion import task_manager
from easydiffusion import task_manager, backend_manager
from easydiffusion.utils import log
from rich.logging import RichHandler
from rich.console import Console
@ -36,11 +36,10 @@ ROOT_DIR = os.path.abspath(os.path.join(SD_DIR, ".."))
SD_UI_DIR = os.getenv("SD_UI_PATH", None)
CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "..", "scripts"))
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "models"))
BUCKET_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "bucket"))
CONFIG_DIR = os.path.abspath(os.path.join(ROOT_DIR, "scripts"))
BUCKET_DIR = os.path.abspath(os.path.join(ROOT_DIR, "bucket"))
USER_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "plugins"))
USER_PLUGINS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "plugins"))
CORE_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "plugins"))
USER_UI_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "ui")
@ -61,7 +60,7 @@ APP_CONFIG_DEFAULTS = {
"ui": {
"open_browser_on_start": True,
},
"test_diffusers": True,
"backend": "ed_diffusers",
}
IMAGE_EXTENSIONS = [
@ -78,7 +77,7 @@ IMAGE_EXTENSIONS = [
".avif",
".svg",
]
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "modifiers"))
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [
".portrait",
"_portrait",
@ -92,14 +91,25 @@ CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [
"-landscape",
]
MODELS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "models"))
def init():
global MODELS_DIR
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
# https://pytorch.org/docs/stable/storage.html
warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
config = getConfig()
config_models_dir = config.get("models_dir", None)
if config_models_dir is not None and config_models_dir != "":
MODELS_DIR = config_models_dir
backend_manager.start_backend()
def init_render_threads():
load_server_plugins()
@ -109,6 +119,7 @@ def init_render_threads():
def getConfig(default_val=APP_CONFIG_DEFAULTS):
config_yaml_path = os.path.join(CONFIG_DIR, "..", "config.yaml")
config_yaml_path = os.path.abspath(config_yaml_path)
# migrate the old config yaml location
config_legacy_yaml = os.path.join(CONFIG_DIR, "config.yaml")
@ -116,9 +127,9 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
shutil.move(config_legacy_yaml, config_yaml_path)
def set_config_on_startup(config: dict):
if getConfig.__test_diffusers_on_startup is None:
getConfig.__test_diffusers_on_startup = config.get("test_diffusers", True)
config["config_on_startup"] = {"test_diffusers": getConfig.__test_diffusers_on_startup}
if getConfig.__use_backend_on_startup is None:
getConfig.__use_backend_on_startup = config.get("backend", "ed_diffusers")
config["config_on_startup"] = {"backend": getConfig.__use_backend_on_startup}
if os.path.isfile(config_yaml_path):
try:
@ -136,6 +147,15 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
else:
config["net"]["listen_to_network"] = True
if "backend" not in config:
if "use_v3_engine" in config:
config["backend"] = "ed_diffusers" if config["use_v3_engine"] else "ed_classic"
else:
config["backend"] = "ed_diffusers"
# this default will need to be smarter when WebUI becomes the main backend, but needs to maintain backwards
# compatibility with existing ED 3.0 installations that haven't opted into the WebUI backend, and haven't
# set a "use_v3_engine" flag in their config
set_config_on_startup(config)
return config
@ -166,12 +186,15 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
return default_val
getConfig.__test_diffusers_on_startup = None
getConfig.__use_backend_on_startup = None
def setConfig(config):
global MODELS_DIR
try: # config.yaml
config_yaml_path = os.path.join(CONFIG_DIR, "..", "config.yaml")
config_yaml_path = os.path.abspath(config_yaml_path)
yaml = YAML()
if not hasattr(config, "_yaml_comment"):
@ -205,6 +228,9 @@ def setConfig(config):
except:
log.error(traceback.format_exc())
if config.get("models_dir"):
MODELS_DIR = config["models_dir"]
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
config = getConfig()
@ -293,28 +319,43 @@ def getIPConfig():
def open_browser():
from easydiffusion.backend_manager import backend
config = getConfig()
ui = config.get("ui", {})
net = config.get("net", {})
port = net.get("listen_port", 9000)
if ui.get("open_browser_on_start", True):
import webbrowser
if backend.is_installed():
if ui.get("open_browser_on_start", True):
import webbrowser
log.info("Opening browser..")
log.info("Opening browser..")
webbrowser.open(f"http://localhost:{port}")
webbrowser.open(f"http://localhost:{port}")
Console().print(
Panel(
"\n"
+ "[white]Easy Diffusion is ready to serve requests.\n\n"
+ "A new browser tab should have been opened by now.\n"
+ f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
title="Easy Diffusion is ready",
style="bold yellow on blue",
Console().print(
Panel(
"\n"
+ "[white]Easy Diffusion is ready to serve requests.\n\n"
+ "A new browser tab should have been opened by now.\n"
+ f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
title="Easy Diffusion is ready",
style="bold yellow on blue",
)
)
else:
backend_name = config["backend"]
Console().print(
Panel(
"\n"
+ f"[white]Backend: {backend_name} is still installing..\n\n"
+ "A new browser tab will open automatically after it finishes.\n"
+ f"If it does not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
title=f"Backend engine is installing",
style="bold yellow on blue",
)
)
)
def fail_and_die(fail_type: str, data: str):

View File

@ -0,0 +1,105 @@
import os
import ast
import sys
import importlib.util
import traceback
from easydiffusion.utils import log
backend = None
curr_backend_name = None
def is_valid_backend(file_path):
    """
    Return True if the python file at `file_path` assigns a module-level
    variable named `ed_info` (the marker identifying an ED backend).

    The file is parsed with `ast` rather than imported, so no backend
    code is executed during discovery.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        tree = ast.parse(f.read())

    return any(
        isinstance(stmt, ast.Assign)
        and any(isinstance(target, ast.Name) and target.id == "ed_info" for target in stmt.targets)
        for stmt in tree.body
    )


def find_valid_backends(root_dir) -> dict:
    """
    Scan `<root_dir>/backends` for backend modules and packages.

    A sub-directory counts as a backend if its __init__.py declares
    `ed_info`; a bare .py file counts if the file itself declares it.

    Returns a mapping of backend name -> filesystem path (empty dict when
    the backends folder does not exist).
    """
    backends_path = os.path.join(root_dir, "backends")
    found = {}

    if not os.path.exists(backends_path):
        return found

    for entry in os.listdir(backends_path):
        entry_path = os.path.join(backends_path, entry)
        if os.path.isdir(entry_path):
            init_file = os.path.join(entry_path, "__init__.py")
            if os.path.exists(init_file) and is_valid_backend(init_file):
                found[entry] = entry_path
        elif entry.endswith(".py") and is_valid_backend(entry_path):
            # strip the .py extension to get the backend's name
            found[os.path.splitext(entry)[0]] = entry_path

    return found
def load_backend_module(backend_name, backend_dict):
    """
    Import the backend named `backend_name` from the path recorded in
    `backend_dict` (as produced by `find_valid_backends`).

    The module's directory is temporarily prepended to sys.path so the
    backend can resolve sibling imports while its top-level code runs.

    Raises ValueError for an unknown name; re-raises anything the
    backend's own import-time code raises.
    """
    if backend_name not in backend_dict:
        raise ValueError(f"Backend '{backend_name}' not found.")

    module_path = backend_dict[backend_name]
    mod_dir = os.path.dirname(module_path)

    # If it's a package (directory), load it via its __init__.py
    if os.path.isdir(module_path):
        module_path = os.path.join(module_path, "__init__.py")

    sys.path.insert(0, mod_dir)
    try:
        spec = importlib.util.spec_from_file_location(backend_name, module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    finally:
        # Bug fix: a failing backend import used to leave mod_dir on
        # sys.path permanently. Always restore sys.path, even on error.
        if mod_dir in sys.path:
            sys.path.remove(mod_dir)

    log.info(f"Loaded backend: {module}")
    return module
def start_backend():
    """
    Load (or switch to) the backend module named in config.yaml ("backend")
    and call its `start_backend()` entry point.

    Backends are discovered both next to this file (built-in) and in the
    installation root (plugins); a plugin with the same name overrides the
    built-in one. The loaded module is kept in the module-global `backend`.

    Raises RuntimeError when the configured backend name is unknown.
    """
    global backend, curr_backend_name

    from easydiffusion.app import getConfig, ROOT_DIR

    curr_dir = os.path.dirname(__file__)

    backends = find_valid_backends(curr_dir)
    plugin_backends = find_valid_backends(ROOT_DIR)
    backends.update(plugin_backends)

    config = getConfig()
    backend_name = config["backend"]

    if backend_name not in backends:
        raise RuntimeError(
            f"Couldn't find the backend configured in config.yaml: {backend_name}. Please check the name!"
        )

    # When switching backends, stop the previous one first (best-effort).
    if backend is not None and backend_name != curr_backend_name:
        try:
            backend.stop_backend()
        except:
            log.exception(traceback.format_exc())

    log.info(f"Loading backend: {backend_name}")
    backend = load_backend_module(backend_name, backends)
    # Bug fix: record which backend is active. `curr_backend_name` was
    # declared global but never assigned, so the "stop the previous
    # backend before switching" branch above could never fire.
    curr_backend_name = backend_name

    try:
        backend.start_backend()
    except:
        log.exception(traceback.format_exc())

View File

@ -0,0 +1,28 @@
# Classic (v2) backend: everything is shared with the diffusers backend via
# sdkit_common; only the context factory below differs.
from sdkit_common import (
    start_backend,
    stop_backend,
    install_backend,
    uninstall_backend,
    is_installed,
    create_sdkit_context,
    ping,
    load_model,
    unload_model,
    set_options,
    generate_images,
    filter_images,
    get_url,
    stop_rendering,
    refresh_models,
    list_controlnet_filters,
)

# Marker dict that identifies this module as an Easy Diffusion backend
# (discovered by backend_manager via ast parsing).
ed_info = {
    "name": "Classic backend for Easy Diffusion v2",
    "version": (1, 0, 0),
    "type": "backend",
}


def create_context():
    # The classic backend runs sdkit without the diffusers pipeline.
    return create_sdkit_context(use_diffusers=False)

View File

@ -0,0 +1,28 @@
# Diffusers (v3) backend: everything is shared with the classic backend via
# sdkit_common; only the context factory below differs.
from sdkit_common import (
    start_backend,
    stop_backend,
    install_backend,
    uninstall_backend,
    is_installed,
    create_sdkit_context,
    ping,
    load_model,
    unload_model,
    set_options,
    generate_images,
    filter_images,
    get_url,
    stop_rendering,
    refresh_models,
    list_controlnet_filters,
)

# Marker dict that identifies this module as an Easy Diffusion backend
# (discovered by backend_manager via ast parsing).
ed_info = {
    "name": "Diffusers Backend for Easy Diffusion v3",
    "version": (1, 0, 0),
    "type": "backend",
}


def create_context():
    # The v3 backend runs sdkit with the diffusers pipeline enabled.
    return create_sdkit_context(use_diffusers=True)

View File

@ -0,0 +1,246 @@
from sdkit import Context
from easydiffusion.types import UserInitiatedStop
from sdkit.utils import (
diffusers_latent_samples_to_images,
gc,
img_to_base64_str,
latent_samples_to_images,
)
opts = {}
def install_backend():
    """No-op: the in-process sdkit backend ships pre-installed."""
    pass


def start_backend():
    """Announce that the in-process sdkit backend is up."""
    print("Started sdkit backend")


def stop_backend():
    """No-op: nothing to shut down for the in-process backend."""
    pass


def uninstall_backend():
    """No-op: nothing to remove."""
    pass


def is_installed():
    """The in-process sdkit backend is always available."""
    return True
def create_sdkit_context(use_diffusers):
    """Build an sdkit Context flagged for the diffusers or classic pipeline."""
    context = Context()
    context.test_diffusers = use_diffusers
    return context
def ping(timeout=1):
    """In-process backend is always reachable; `timeout` kept for interface parity."""
    return True
def load_model(context, model_type, **kwargs):
    """Delegate model loading to sdkit (imported lazily to defer heavy deps)."""
    from sdkit.models import load_model as sdkit_load_model

    sdkit_load_model(context, model_type, **kwargs)


def unload_model(context, model_type, **kwargs):
    """Delegate model unloading to sdkit (imported lazily to defer heavy deps)."""
    from sdkit.models import unload_model as sdkit_unload_model

    sdkit_unload_model(context, model_type, **kwargs)
def set_options(context, **kwargs):
    """
    Apply runtime options to the backend.

    `vae_tiling` is forwarded to the active diffusers pipeline (if it
    supports it); the output/streaming options are stashed in the
    module-level `opts` dict for later generate/filter calls.
    """
    if "vae_tiling" in kwargs and context.test_diffusers:
        pipe = context.models["stable-diffusion"]["default"]
        if kwargs["vae_tiling"]:
            if hasattr(pipe, "enable_vae_tiling"):
                pipe.enable_vae_tiling()
        elif hasattr(pipe, "disable_vae_tiling"):
            pipe.disable_vae_tiling()

    passthrough_keys = (
        "output_format",
        "output_quality",
        "output_lossless",
        "stream_image_progress",
        "stream_image_progress_interval",
    )
    for key in passthrough_keys:
        if key in kwargs:
            opts[key] = kwargs[key]
# Generate images via sdkit, with support for user-initiated cancellation,
# ControlNet pre-filtering, and optional base64 output encoding.
# NOTE(review): `distilled_guidance_scale` and `scheduler_name` are accepted
# but not referenced in this body — presumably consumed elsewhere; confirm.
# Assumes req contains "init_image" and "control_image" keys (supplied by
# the caller) — TODO confirm against ED's task layer.
def generate_images(
context: Context,
callback=None,
controlnet_filter=None,
distilled_guidance_scale: float = 3.5,
scheduler_name: str = "simple",
output_type="pil",
**req,
):
# local import shadows this function's name with sdkit's generator
from sdkit.generate import generate_images
# classic (non-diffusers) engine only supports img2img with ddim
if req["init_image"] is not None and not context.test_diffusers:
req["sampler_name"] = "ddim"
gc(context)
context.stop_processing = False
# pre-process the control image with the requested controlnet filter
if req["control_image"] and controlnet_filter:
controlnet_filter = convert_ED_controlnet_filter_name(controlnet_filter)
req["control_image"] = filter_images(context, req["control_image"], controlnet_filter)[0]
callback = make_step_callback(context, callback)
try:
images = generate_images(context, callback=callback, **req)
except UserInitiatedStop:
# user cancelled: salvage whatever partial samples exist
images = []
if context.partial_x_samples is not None:
if context.test_diffusers:
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
else:
images = latent_samples_to_images(context, context.partial_x_samples)
finally:
# always release the partial samples to free VRAM
if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
if not context.test_diffusers:
del context.partial_x_samples
context.partial_x_samples = None
gc(context)
if output_type == "base64":
# encoding options come from the module-level opts dict (see set_options)
output_format = opts.get("output_format", "jpeg")
output_quality = opts.get("output_quality", 75)
output_lossless = opts.get("output_lossless", False)
images = [img_to_base64_str(img, output_format, output_quality, output_lossless) for img in images]
return images
def filter_images(context: Context, images, filters, filter_params={}, input_type="pil"):
    """Apply post-processing filters to images via sdkit.

    * images: image(s) to filter (PIL or base64 str, per input_type)
    * filters: filter name (str) or list of filter names
    * filter_params: dict of per-filter parameter dicts
    returns the filtered images (base64-encoded when input_type == "base64")
    """
    gc(context)

    # Normalize to a fresh list: a bare string filter previously crashed on
    # list.remove(), and removing "nsfw_checker" mutated the caller's list.
    filters = list(filters) if isinstance(filters, list) else [filters]

    if "nsfw_checker" in filters:
        filters.remove("nsfw_checker")  # handled by ED directly

    if len(filters) == 0:
        return images

    images = _filter_images(context, images, filters, filter_params)

    if input_type == "base64":
        # "jpeg" for consistency with generate_images (was "jpg")
        output_format = opts.get("output_format", "jpeg")
        output_quality = opts.get("output_quality", 75)
        output_lossless = opts.get("output_lossless", False)
        images = [img_to_base64_str(img, output_format, output_quality, output_lossless) for img in images]

    return images
def _filter_images(context, images, filters, filter_params={}):
    """Apply each filter in sequence, wrapping every filter with its
    before/after hooks (used e.g. to temporarily swap realesrgan models
    for codeformer's face upscaling)."""
    from sdkit.filter import apply_filters

    filters = filters if isinstance(filters, list) else [filters]
    filters = convert_ED_controlnet_filter_name(filters)

    for filter_name in filters:
        params = filter_params.get(filter_name, {})

        previous_state = before_filter(context, filter_name, params)

        try:
            images = apply_filters(context, filter_name, images, **params)
        finally:
            # always restore whatever before_filter changed
            after_filter(context, filter_name, params, previous_state)

    return images
def before_filter(context, filter_name, filter_params):
    """Pre-filter hook. For codeformer with upscale_faces enabled, ensure
    the default realesrgan model is loaded (codeformer uses it to upscale
    faces). Returns the previous realesrgan path so after_filter can
    restore it, or None if nothing was changed."""
    if filter_name == "codeformer":
        from easydiffusion.model_manager import DEFAULT_MODELS, resolve_model_to_use

        default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
        prev_realesrgan_path = None

        upscale_faces = filter_params.get("upscale_faces", False)
        if upscale_faces and default_realesrgan not in context.model_paths["realesrgan"]:
            prev_realesrgan_path = context.model_paths.get("realesrgan")
            context.model_paths["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
            load_model(context, "realesrgan")

        return prev_realesrgan_path
def after_filter(context, filter_name, filter_params, previous_state):
    """Post-filter hook: undo before_filter's model swap (restore the
    previously-loaded realesrgan model, if one was replaced)."""
    if filter_name == "codeformer":
        prev_realesrgan_path = previous_state
        if prev_realesrgan_path:
            context.model_paths["realesrgan"] = prev_realesrgan_path
            load_model(context, "realesrgan")
def get_url():
    """No admin URL for the in-process backend (returns None)."""
    pass
def stop_rendering(context):
    """Signal the generation loop to stop; the step callback checks this
    flag and raises UserInitiatedStop on the next step."""
    setattr(context, "stop_processing", True)
def refresh_models():
    """No-op: sdkit reads model paths directly from disk on load."""
    pass
def list_controlnet_filters():
    """Return the controlnet pre-processor (filter) names supported by sdkit."""
    from sdkit.models.model_loader.controlnet_filters import filters as cn_filters

    return cn_filters
def make_step_callback(context, callback):
    """Wrap `callback` for sdkit's per-step hook.

    The wrapper stores the latest partial samples on the context (so a
    preview can be recovered if the user stops mid-render), decodes preview
    images every `stream_image_progress_interval` steps when streaming is
    on, and raises UserInitiatedStop when context.stop_processing is set.
    """
    def on_step(x_samples, i, *args):
        stream_image_progress = opts.get("stream_image_progress", False)
        stream_image_progress_interval = opts.get("stream_image_progress_interval", 3)

        if context.test_diffusers:
            # the diffusers pipeline passes an extra first arg, kept alongside the samples
            context.partial_x_samples = (x_samples, args[0])
        else:
            context.partial_x_samples = x_samples

        if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
            if context.test_diffusers:
                images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
            else:
                images = latent_samples_to_images(context, context.partial_x_samples)
        else:
            images = None

        if callback:
            callback(images, i, *args)

        if context.stop_processing:
            raise UserInitiatedStop("User requested that we stop processing")

    return on_step
def convert_ED_controlnet_filter_name(filter):
    """Strip Easy Diffusion's "controlnet_" prefix from a filter name.

    Accepts a single name, a list of names, or None (returned unchanged).
    The None case previously raised AttributeError; the WebUI backend's
    twin of this helper already handles it.
    """
    if filter is None:
        return None

    def cn(n):
        if n.startswith("controlnet_"):
            return n[len("controlnet_") :]
        return n

    if isinstance(filter, list):
        return [cn(f) for f in filter]

    return cn(filter)

View File

@ -0,0 +1,450 @@
import os
import platform
import subprocess
import threading
from threading import local
import psutil
import time
import shutil
from easydiffusion.app import ROOT_DIR, getConfig
from easydiffusion.model_manager import get_model_dirs
from easydiffusion.utils import log
from . import impl
from .impl import (
ping,
load_model,
unload_model,
set_options,
generate_images,
filter_images,
get_url,
stop_rendering,
refresh_models,
list_controlnet_filters,
)
# Backend metadata read by Easy Diffusion's backend registry.
ed_info = {
    "name": "WebUI backend for Easy Diffusion",
    "version": (1, 0, 0),
    "type": "backend",
}

# Forge fork of the Stable Diffusion WebUI, pinned to a known-good commit.
WEBUI_REPO = "https://github.com/lllyasviel/stable-diffusion-webui-forge.git"
WEBUI_COMMIT = "f4d5e8cac16a42fa939e78a0956b4c30e2b47bb5"

BACKEND_DIR = os.path.abspath(os.path.join(ROOT_DIR, "webui"))
SYSTEM_DIR = os.path.join(BACKEND_DIR, "system")  # conda env that hosts python/git for WebUI
WEBUI_DIR = os.path.join(BACKEND_DIR, "webui")  # WebUI git checkout

OS_NAME = platform.system()

# ED model type -> WebUI command-line flag, used to point WebUI at ED's model folders.
MODELS_TO_OVERRIDE = {
    "stable-diffusion": "--ckpt-dir",
    "vae": "--vae-dir",
    "hypernetwork": "--hypernetwork-dir",
    "gfpgan": "--gfpgan-models-path",
    "realesrgan": "--realesrgan-models-path",
    "lora": "--lora-dir",
    "codeformer": "--codeformer-models-path",
    "embeddings": "--embeddings-dir",
    "controlnet": "--controlnet-dir",
}

# handle to the running WebUI process (None when stopped)
backend_process = None

# path to the conda executable; resolved by locate_conda() below
conda = "conda"
def locate_conda():
    """Locate the conda executable on PATH and store it in the module-level
    `conda` variable.

    Uses shutil.which (the portable stdlib equivalent of shelling out to
    `where`/`which`), which returns None when conda is missing instead of
    an error message — the previous subprocess approach could leave the
    shell's error text in `conda`.
    """
    global conda

    found = shutil.which("conda")
    conda = found or "conda"  # keep the bare name as a fallback
    print("conda: ", conda)


locate_conda()
def install_backend():
    """Install the WebUI backend: create a private conda env, install
    python 3.10 + git into it, clone the WebUI repo, and (on PCs without a
    discrete GPU) pre-install CPU-only torch."""
    print("Installing the WebUI backend..")

    # create the conda env
    run([conda, "create", "-y", "--prefix", SYSTEM_DIR], cwd=ROOT_DIR)

    print("Installing packages..")

    # install python 3.10 and git in the conda env
    run([conda, "install", "-y", "--prefix", SYSTEM_DIR, "-c", "conda-forge", "python=3.10", "git"], cwd=ROOT_DIR)

    # print info
    run_in_conda(["git", "--version"], cwd=ROOT_DIR)
    run_in_conda(["python", "--version"], cwd=ROOT_DIR)

    # clone webui
    run_in_conda(["git", "clone", WEBUI_REPO, WEBUI_DIR], cwd=ROOT_DIR)

    # install cpu-only torch if the PC doesn't have a graphics card (for Windows and Linux).
    # this avoids WebUI installing a CUDA version and trying to activate it
    if OS_NAME in ("Windows", "Linux") and not has_discrete_graphics_card():
        run_in_conda(["python", "-m", "pip", "install", "torch", "torchvision"], cwd=WEBUI_DIR)
def start_backend():
    """Start the WebUI process in a background thread (installing/updating
    it first if needed), watch it for crashes (auto-restart), and mount the
    /webui reverse proxy on ED's server."""
    config = getConfig()
    backend_config = config.get("backend_config", {})

    if not os.path.exists(BACKEND_DIR):
        install_backend()

    # captured by the watcher thread below: if the install was still in
    # progress, the browser is opened once WebUI first responds
    was_still_installing = not is_installed()

    if backend_config.get("auto_update", True):
        # discard local changes, then pin the checkout to WEBUI_COMMIT
        run_in_conda(["git", "add", "-A", "."], cwd=WEBUI_DIR)
        run_in_conda(["git", "stash"], cwd=WEBUI_DIR)
        run_in_conda(["git", "reset", "--hard"], cwd=WEBUI_DIR)
        run_in_conda(["git", "fetch"], cwd=WEBUI_DIR)
        run_in_conda(["git", "-c", "advice.detachedHead=false", "checkout", WEBUI_COMMIT], cwd=WEBUI_DIR)

    # hack to prevent webui-macos-env.sh from overwriting the COMMANDLINE_ARGS env variable
    mac_webui_file = os.path.join(WEBUI_DIR, "webui-macos-env.sh")
    if os.path.exists(mac_webui_file):
        os.remove(mac_webui_file)

    impl.WEBUI_HOST = backend_config.get("host", "localhost")
    impl.WEBUI_PORT = backend_config.get("port", "7860")

    env = dict(os.environ)
    env.update(get_env())

    def restart_if_webui_dies_after_starting():
        # Poll WebUI once per second. A ping failure *after* a successful
        # ping means the process died, so restart it.
        has_started = False

        while True:
            try:
                impl.ping(timeout=1)

                is_first_start = not has_started
                has_started = True

                if was_still_installing and is_first_start:
                    # first successful start after a fresh install: open the browser
                    ui = config.get("ui", {})
                    net = config.get("net", {})
                    port = net.get("listen_port", 9000)

                    if ui.get("open_browser_on_start", True):
                        import webbrowser

                        log.info("Opening browser..")
                        webbrowser.open(f"http://localhost:{port}")
            except (TimeoutError, ConnectionError):
                if has_started:  # process probably died
                    print("######################## WebUI probably died. Restarting...")
                    stop_backend()
                    backend_thread = threading.Thread(target=target)
                    backend_thread.start()
                    break
            except Exception:
                import traceback

                log.exception(traceback.format_exc())

            time.sleep(1)

    def target():
        # runs WebUI (blocking) and spawns the crash-watcher thread
        global backend_process

        cmd = "webui.bat" if OS_NAME == "Windows" else "./webui.sh"

        print("starting", cmd, WEBUI_DIR)
        backend_process = run_in_conda([cmd], cwd=WEBUI_DIR, env=env, wait=False, output_prefix="[WebUI] ")

        restart_if_dead_thread = threading.Thread(target=restart_if_webui_dies_after_starting)
        restart_if_dead_thread.start()

        backend_process.wait()

    backend_thread = threading.Thread(target=target)
    backend_thread.start()

    start_proxy()
def start_proxy():
    """Mount a FastAPI sub-app on ED's server that reverse-proxies
    /webui/* requests to the WebUI process, rewriting the openapi spec so
    Swagger docs work behind the prefix."""
    # proxy
    from easydiffusion.server import server_api
    from fastapi import FastAPI, Request
    from fastapi.responses import Response
    import json

    URI_PREFIX = "/webui"

    webui_proxy = FastAPI(root_path=f"{URI_PREFIX}", docs_url="/swagger")

    @webui_proxy.get("{uri:path}")
    def proxy_get(uri: str, req: Request):
        # serve WebUI's openapi spec under a proxy-local alias
        if uri == "/openapi-proxy.json":
            uri = "/openapi.json"

        res = impl.webui_get(uri, headers=req.headers)

        content = res.content
        headers = dict(res.headers)

        if uri == "/docs":
            # point the docs page at the rewritten spec
            content = res.text.replace("url: '/openapi.json'", f"url: '{URI_PREFIX}/openapi-proxy.json'")
        elif uri == "/openapi.json":
            # prefix every path in the spec with /webui
            content = res.json()
            content["paths"] = {f"{URI_PREFIX}{k}": v for k, v in content["paths"].items()}
            content = json.dumps(content)

        if isinstance(content, str):
            content = bytes(content, encoding="utf-8")
            headers["content-length"] = str(len(content))

        # Return the same response back to the client
        return Response(content=content, status_code=res.status_code, headers=headers)

    @webui_proxy.post("{uri:path}")
    async def proxy_post(uri: str, req: Request):
        body = await req.body()
        res = impl.webui_post(uri, data=body, headers=req.headers)

        # Return the same response back to the client
        return Response(content=res.content, status_code=res.status_code, headers=dict(res.headers))

    server_api.mount(f"{URI_PREFIX}", webui_proxy)
def stop_backend():
    """Kill the WebUI process tree (if running) and clear the handle.

    Killing is best-effort: the process may already be gone. A bare
    `except:` here previously also swallowed KeyboardInterrupt/SystemExit;
    catching Exception (and logging it) keeps the best-effort behavior
    without hiding interpreter-level signals.
    """
    global backend_process

    if backend_process:
        try:
            kill(backend_process.pid)
        except Exception as e:
            print(f"Error stopping the WebUI process: {e}")

    backend_process = None
def uninstall_backend():
    """Delete the entire WebUI backend folder (conda env + git checkout)."""
    shutil.rmtree(BACKEND_DIR)
def is_installed():
    """Check whether the WebUI backend install completed, i.e. torch is
    importable inside the backend's conda env.

    NOTE(review): when the backend folders don't exist at all this returns
    True, not False. start_backend uses `not is_installed()` as a
    "still mid-install" signal, but the inverted-looking early return is
    worth confirming against the intended semantics.
    """
    if not os.path.exists(BACKEND_DIR) or not os.path.exists(SYSTEM_DIR) or not os.path.exists(WEBUI_DIR):
        return True

    env = dict(os.environ)
    env.update(get_env())

    try:
        out = check_output_in_conda(["python", "-m", "pip", "show", "torch"], env=env)
        return "Version" in out.decode()
    except subprocess.CalledProcessError:
        pass

    return False
def read_output(pipe, prefix=""):
    """Forward a subprocess's output to our stdout, line by line, until the
    pipe closes. Each line is prefixed with `prefix` (e.g. "[WebUI] ").

    Decodes with errors="replace" so a stray invalid UTF-8 byte in the
    child's output can't raise UnicodeDecodeError and kill the reader
    thread (which would silently stop log forwarding).
    """
    while True:
        output = pipe.readline()
        if not output:
            break  # Pipe is closed, subprocess has likely exited
        print(f"{prefix}{output.decode('utf-8', errors='replace')}", end="")
def run(cmds: list, cwd=None, env=None, stream_output=True, wait=True, output_prefix=""):
    """Run a subprocess with stderr merged into stdout.

    When stream_output is True, a reader thread forwards the child's output
    to our stdout with `output_prefix`. Returns the Popen handle (already
    waited on when wait=True).

    NOTE(review): with stream_output=False and wait=True the stdout pipe is
    never drained, which can deadlock if the child fills the pipe buffer —
    confirm callers never use that combination.
    """
    p = subprocess.Popen(cmds, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    if stream_output:
        output_thread = threading.Thread(target=read_output, args=(p.stdout, output_prefix))
        output_thread.start()

    if wait:
        p.wait()

    return p
def run_in_conda(cmds: list, *args, **kwargs):
    """Run a command inside the backend's conda env (via `conda run`).
    Extra args/kwargs are passed through to run()."""
    cmds = [conda, "run", "--no-capture-output", "--prefix", SYSTEM_DIR] + cmds
    return run(cmds, *args, **kwargs)


def check_output_in_conda(cmds: list, cwd=None, env=None):
    """Like subprocess.check_output, but inside the backend's conda env."""
    cmds = [conda, "run", "--no-capture-output", "--prefix", SYSTEM_DIR] + cmds
    return subprocess.check_output(cmds, cwd=cwd, env=env, stderr=subprocess.PIPE)
def create_context():
    """Create a thread-local stand-in for sdkit's Context.

    The WebUI backend renders via HTTP, but ED's core still reads these
    attributes, so they are stubbed here (temp hack — attribute lookups
    raise AttributeError otherwise). Removed the earlier duplicate
    `vram_usage_level = None` assignment, which was a dead store
    immediately overwritten by "balanced".
    """
    context = local()

    context.device = "cuda:0"
    context.half_precision = True
    context.models = {}
    context.model_paths = {}
    context.model_configs = {}
    context.device_name = None
    context.vram_optimizations = set()
    context.vram_usage_level = "balanced"
    context.test_diffusers = False
    context.enable_codeformer = False

    return context
def get_env():
    """Build the environment variables for launching WebUI.

    Points WebUI at the backend's private conda env (PATH/PYTHON/GIT),
    assembles COMMANDLINE_ARGS (API mode, ED's model folders, and
    platform/GPU-specific flags), and flattens everything into a
    str -> str dict suitable for subprocess `env`.
    """
    dir = os.path.abspath(SYSTEM_DIR)

    if not os.path.exists(dir):
        raise RuntimeError("The system folder is missing!")

    config = getConfig()
    models_dir = config.get("models_dir", os.path.join(ROOT_DIR, "models"))

    model_path_args = get_model_path_args()

    # each value is a list of path-like entries, joined with os.pathsep below
    env_entries = {
        "PATH": [
            f"{dir}",
            f"{dir}/bin",
            f"{dir}/Library/bin",
            f"{dir}/Scripts",
            f"{dir}/usr/bin",
        ],
        "PYTHONPATH": [
            f"{dir}",
            f"{dir}/lib/site-packages",
            f"{dir}/lib/python3.10/site-packages",
        ],
        "PYTHONHOME": [],
        "PY_LIBS": [
            f"{dir}/Scripts/Lib",
            f"{dir}/Scripts/Lib/site-packages",
            f"{dir}/lib",
            f"{dir}/lib/python3.10/site-packages",
        ],
        "PY_PIP": [f"{dir}/Scripts", f"{dir}/bin"],
        "PIP_INSTALLER_LOCATION": [],  # [f"{dir}/python/get-pip.py"],
        "TRANSFORMERS_CACHE": [f"{dir}/transformers-cache"],
        "HF_HUB_DISABLE_SYMLINKS_WARNING": ["true"],
        "COMMANDLINE_ARGS": [f'--api --models-dir "{models_dir}" {model_path_args} --skip-torch-cuda-test'],
        "SKIP_VENV": ["1"],
        "SD_WEBUI_RESTARTING": ["1"],
    }

    if OS_NAME == "Windows":
        # include wbem so WebUI can find wmic
        env_entries["PATH"].append("C:/Windows/System32")
        env_entries["PATH"].append("C:/Windows/System32/wbem")
        env_entries["PYTHONNOUSERSITE"] = ["1"]
        env_entries["PYTHON"] = [f"{dir}/python"]
        env_entries["GIT"] = [f"{dir}/Library/bin/git"]
    else:
        env_entries["PATH"].append("/bin")
        env_entries["PATH"].append("/usr/bin")
        env_entries["PATH"].append("/usr/sbin")
        env_entries["PYTHONNOUSERSITE"] = ["y"]
        env_entries["PYTHON"] = [f"{dir}/bin/python"]
        env_entries["GIT"] = [f"{dir}/bin/git"]
        env_entries["venv_dir"] = ["-"]

    if OS_NAME == "Darwin":
        # based on https://github.com/lllyasviel/stable-diffusion-webui-forge/blob/e26abf87ecd1eefd9ab0a198eee56f9c643e4001/webui-macos-env.sh
        # hack - have to define these here, otherwise webui-macos-env.sh will overwrite COMMANDLINE_ARGS
        env_entries["COMMANDLINE_ARGS"][0] += " --upcast-sampling --no-half-vae --use-cpu interrogate"
        env_entries["PYTORCH_ENABLE_MPS_FALLBACK"] = ["1"]

        cpu_name = str(subprocess.check_output(["sysctl", "-n", "machdep.cpu.brand_string"]))
        if "Intel" in cpu_name:
            env_entries["TORCH_COMMAND"] = ["pip install torch==2.1.2 torchvision==0.16.2"]
        else:
            env_entries["TORCH_COMMAND"] = ["pip install torch==2.3.1 torchvision==0.18.1"]
    else:
        import torch
        from easydiffusion.device_manager import needs_to_force_full_precision, is_cuda_available

        vram_usage_level = config.get("vram_usage_level", "balanced")

        if config.get("render_devices", "auto") == "cpu" or not has_discrete_graphics_card() or not is_cuda_available():
            env_entries["COMMANDLINE_ARGS"][0] += " --always-cpu"
        else:
            # needs_to_force_full_precision expects a context-like object with device_name
            c = local()
            c.device_name = torch.cuda.get_device_name()
            if needs_to_force_full_precision(c):
                env_entries["COMMANDLINE_ARGS"][0] += " --no-half --precision full"

            if vram_usage_level == "low":
                env_entries["COMMANDLINE_ARGS"][0] += " --always-low-vram"
            elif vram_usage_level == "high":
                env_entries["COMMANDLINE_ARGS"][0] += " --always-high-vram"

    # flatten each entry list into a single os.pathsep-joined string
    env = {}
    for key, paths in env_entries.items():
        paths = [p.replace("/", os.path.sep) for p in paths]
        paths = os.pathsep.join(paths)
        env[key] = paths

    return env
def has_discrete_graphics_card():
    """Best-effort check for a discrete GPU (NVIDIA/AMD/ATI), per-OS.

    Returns False when the probing tool fails *or is missing*, instead of
    letting FileNotFoundError escape — e.g. `lspci` is not installed on
    every Linux distro, and `wmic` is absent on recent Windows builds.
    """
    system = platform.system()  # same value as module-level OS_NAME

    try:
        if system == "Windows":
            output = subprocess.check_output(
                ["wmic", "path", "win32_videocontroller", "get", "name"], stderr=subprocess.STDOUT
            )
            # Filter for discrete graphics cards (NVIDIA, AMD, etc.)
            discrete_gpus = ["NVIDIA", "AMD", "ATI"]
            return any(gpu in output.decode() for gpu in discrete_gpus)
        elif system == "Linux":
            output = subprocess.check_output(["lspci"], stderr=subprocess.STDOUT)
            # Check for discrete GPUs (NVIDIA, AMD)
            discrete_gpus = ["NVIDIA", "AMD", "Advanced Micro Devices"]
            return any(gpu in line for line in output.decode().splitlines() for gpu in discrete_gpus)
        elif system == "Darwin":  # macOS
            output = subprocess.check_output(["system_profiler", "SPDisplaysDataType"], stderr=subprocess.STDOUT)
            # Check for discrete GPU in the output
            return "NVIDIA" in output.decode() or "AMD" in output.decode()
    except (subprocess.CalledProcessError, OSError):
        # OSError covers FileNotFoundError when the tool isn't installed
        return False

    return False
# https://stackoverflow.com/a/25134985
def kill(proc_pid):
    """Kill a process and all of its children (the conda `run` wrapper
    spawns the actual webui process as a child).

    Children can exit between enumeration and kill, so NoSuchProcess is
    tolerated per-child instead of aborting the whole tree kill.
    """
    process = psutil.Process(proc_pid)
    for proc in process.children(recursive=True):
        try:
            proc.kill()
        except psutil.NoSuchProcess:
            pass  # child exited on its own; nothing to do
    process.kill()
def get_model_path_args():
    """Build the CLI argument string that points WebUI's model folders at
    ED's first model dir for each overridable model type."""
    args = []
    for model_type, flag in MODELS_TO_OVERRIDE.items():
        model_dir = get_model_dirs(model_type)[0]
        args.append(f'{flag} "{model_dir}"')

    return " ".join(args)

View File

@ -0,0 +1,654 @@
import os
import requests
from requests.exceptions import ConnectTimeout, ConnectionError
from typing import Union, List
from threading import local as Context
from threading import Thread
import uuid
import time
from copy import deepcopy
from sdkit.utils import base64_str_to_img, img_to_base64_str
# Host/port of the WebUI server; overwritten by the backend's start_backend().
WEBUI_HOST = "localhost"
WEBUI_PORT = "7860"

# Options pushed to WebUI the first time it responds to a ping.
DEFAULT_WEBUI_OPTIONS = {
    "show_progress_every_n_steps": 3,
    "show_progress_grid": True,
    "live_previews_enable": False,
    "forge_additional_modules": [],
}

# Cached copy of WebUI's options; None until the first successful ping().
webui_opts: dict = None

# Models currently active in WebUI, keyed by ED model type.
curr_models = {
    "stable-diffusion": None,
    "vae": None,
}
def set_options(context, **kwargs):
    """Translate ED option names to WebUI option names and push only the
    values that actually changed (vs the cached webui_opts) to WebUI's
    /sdapi/v1/options endpoint."""
    changed_opts = {}

    # ED option key -> (WebUI option key, coercion type)
    opts_mapping = {
        "stream_image_progress": ("live_previews_enable", bool),
        "stream_image_progress_interval": ("show_progress_every_n_steps", int),
        "clip_skip": ("CLIP_stop_at_last_layers", int),
        "clip_skip_sdxl": ("sdxl_clip_l_skip", bool),
        "output_format": ("samples_format", str),
    }
    for ed_key, webui_key in opts_mapping.items():
        webui_key, webui_type = webui_key
        if ed_key in kwargs and (webui_opts is None or webui_opts.get(webui_key, False) != webui_type(kwargs[ed_key])):
            changed_opts[webui_key] = webui_type(kwargs[ed_key])

    if changed_opts:
        # resend the current checkpoint so WebUI doesn't reset it
        changed_opts["sd_model_checkpoint"] = curr_models["stable-diffusion"]

        print(f"Got options: {kwargs}. Sending options: {changed_opts}")

        try:
            res = webui_post("/sdapi/v1/options", json=changed_opts)
            if res.status_code != 200:
                raise Exception(res.text)
            # NOTE(review): webui_opts can still be None here (pre-ping);
            # the resulting AttributeError is caught and logged below.
            webui_opts.update(changed_opts)
        except Exception as e:
            print(f"Error setting options: {e}")
def ping(timeout=1):
    """Ping the WebUI server; `timeout` is in seconds.

    On the first successful ping, pushes DEFAULT_WEBUI_OPTIONS to WebUI and
    caches its full options in the module-level `webui_opts`. Raises
    TimeoutError when WebUI is unreachable (connection errors included).
    """
    global webui_opts

    try:
        res = webui_get("/internal/ping", timeout=timeout)
        if res.status_code != 200:
            raise ConnectTimeout(res.text)

        if webui_opts is None:
            try:
                res = webui_post("/sdapi/v1/options", json=DEFAULT_WEBUI_OPTIONS)
                if res.status_code != 200:
                    raise Exception(res.text)
            except Exception as e:
                print(f"Error setting options: {e}")

            try:
                res = webui_get("/sdapi/v1/options")
                if res.status_code != 200:
                    raise Exception(res.text)
                webui_opts = res.json()
            except Exception as e:
                print(f"Error getting options: {e}")

        return True
    except (ConnectTimeout, ConnectionError) as e:
        # normalize both "slow" and "refused" into the TimeoutError callers expect
        raise TimeoutError(e)
def load_model(context, model_type, **kwargs):
    """Activate the model at context.model_paths[model_type] in WebUI.

    Only "stable-diffusion" (by checkpoint name) and "vae" (via forge's
    additional-modules list) are handled; other types are managed by WebUI
    itself. No-ops when the requested model is already active.
    """
    model_path = context.model_paths[model_type]

    if webui_opts is None:
        print("Server not ready, can't set the model")
        return

    if model_type == "stable-diffusion":
        model_name = os.path.basename(model_path)
        model_name = os.path.splitext(model_name)[0]
        print(f"setting sd model: {model_name}")
        if curr_models[model_type] != model_name:
            try:
                res = webui_post("/sdapi/v1/options", json={"sd_model_checkpoint": model_name})
                if res.status_code != 200:
                    raise Exception(res.text)
            except Exception as e:
                raise RuntimeError(
                    f"The engine failed to set the required options. Please check the logs in the command line window for more details."
                )

            curr_models[model_type] = model_name
    elif model_type == "vae":
        if curr_models[model_type] != model_path:
            # an empty list clears the VAE override
            vae_model = [model_path] if model_path else []
            opts = {"sd_model_checkpoint": curr_models["stable-diffusion"], "forge_additional_modules": vae_model}
            print("setting opts 2", opts)
            try:
                res = webui_post("/sdapi/v1/options", json=opts)
                if res.status_code != 200:
                    raise Exception(res.text)
            except Exception as e:
                raise RuntimeError(
                    f"The engine failed to set the required options. Please check the logs in the command line window for more details."
                )

            curr_models[model_type] = model_path
def unload_model(context, model_type, **kwargs):
    """Unload a model. Only VAEs are unloadable via WebUI: clearing the
    stored path and re-issuing load_model resets the additional-modules
    list; other model types are ignored."""
    if model_type != "vae":
        return
    context.model_paths[model_type] = None
    load_model(context, model_type)
def generate_images(
    context: Context,
    prompt: str = "",
    negative_prompt: str = "",
    seed: int = 42,
    width: int = 512,
    height: int = 512,
    num_outputs: int = 1,
    num_inference_steps: int = 25,
    guidance_scale: float = 7.5,
    distilled_guidance_scale: float = 3.5,
    init_image=None,
    init_image_mask=None,
    control_image=None,
    control_alpha=1.0,
    controlnet_filter=None,
    prompt_strength: float = 0.8,
    preserve_init_image_color_profile=False,
    strict_mask_border=False,
    sampler_name: str = "euler_a",
    scheduler_name: str = "simple",
    hypernetwork_strength: float = 0,
    tiling=None,
    lora_alpha: Union[float, List[float]] = 0,
    sampler_params={},
    callback=None,
    output_type="pil",
):
    """Render images via WebUI's txt2img/img2img API.

    Builds the request payload from ED's parameters (loras are injected
    into the prompt as <lora:name:alpha> tags; controlnet goes through the
    alwayson_scripts extension args), starts a progress-polling thread for
    `callback`, and returns the images as PIL or base64 per `output_type`.
    """
    task_id = str(uuid.uuid4())

    sampler_name = convert_ED_sampler_names(sampler_name)
    controlnet_filter = convert_ED_controlnet_filter_name(controlnet_filter)

    cmd = {
        "force_task_id": task_id,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "sampler_name": sampler_name,
        "scheduler": scheduler_name,
        "steps": num_inference_steps,
        "seed": seed,
        "cfg_scale": guidance_scale,
        "distilled_cfg_scale": distilled_guidance_scale,
        "batch_size": num_outputs,
        "width": width,
        "height": height,
    }
    if init_image:
        cmd["init_images"] = [init_image]
        cmd["denoising_strength"] = prompt_strength
    if init_image_mask:
        cmd["mask"] = init_image_mask
        cmd["include_init_images"] = True
        cmd["inpainting_fill"] = 1
        cmd["initial_noise_multiplier"] = 1
        cmd["inpaint_full_res"] = 1

    if context.model_paths.get("lora"):
        # WebUI applies loras via prompt tags rather than a separate option
        lora_model = context.model_paths["lora"]
        lora_model = lora_model if isinstance(lora_model, list) else [lora_model]
        lora_alpha = lora_alpha if isinstance(lora_alpha, list) else [lora_alpha]

        for lora, alpha in zip(lora_model, lora_alpha):
            lora = os.path.basename(lora)
            lora = os.path.splitext(lora)[0]
            cmd["prompt"] += f" <lora:{lora}:{alpha}>"

    if controlnet_filter and control_image and context.model_paths.get("controlnet"):
        controlnet_model = context.model_paths["controlnet"]
        # WebUI identifies controlnet models as "name [hash]"
        model_hash = auto1111_hash(controlnet_model)
        controlnet_model = os.path.basename(controlnet_model)
        controlnet_model = os.path.splitext(controlnet_model)[0]
        print(f"setting controlnet model: {controlnet_model}")
        controlnet_model = f"{controlnet_model} [{model_hash}]"

        cmd["alwayson_scripts"] = {
            "controlnet": {
                "args": [
                    {
                        "image": control_image,
                        "weight": control_alpha,
                        "module": controlnet_filter,
                        "model": controlnet_model,
                        "resize_mode": "Crop and Resize",
                        "threshold_a": 50,
                        "threshold_b": 130,
                    }
                ]
            }
        }

    operation_to_apply = "img2img" if init_image else "txt2img"

    stream_image_progress = webui_opts.get("live_previews_enable", False)

    # polls /internal/progress and forwards step/preview updates to callback
    progress_thread = Thread(
        target=image_progress_thread, args=(task_id, callback, stream_image_progress, num_outputs, num_inference_steps)
    )
    progress_thread.start()

    print(f"task id: {task_id}")
    print_request(operation_to_apply, cmd)

    res = webui_post(f"/sdapi/v1/{operation_to_apply}", json=cmd)
    if res.status_code == 200:
        res = res.json()
    else:
        raise Exception(
            "The engine failed while generating this image. Please check the logs in the command-line window for more details."
        )

    import json

    print(json.loads(res["info"])["infotexts"])

    images = res["images"]
    if output_type == "pil":
        images = [base64_str_to_img(img) for img in images]
    elif output_type == "base64":
        images = [base64_buffer_to_base64_img(img) for img in images]

    return images
def filter_images(context: Context, images, filters, filter_params={}, input_type="pil"):
    """
    * context: Context
    * images: str or PIL.Image or list of str/PIL.Image - image to filter. if a string is passed, it needs to be a base64-encoded image
    * filters: filter_type (string) or list of strings
    * filter_params: dict

    returns: [PIL.Image] - list of filtered images

    Face/upscale filters are sent as one extra-batch-images request;
    controlnet_* filters are applied afterwards via the controlnet
    detect endpoint.
    """
    images = images if isinstance(images, list) else [images]
    filters = filters if isinstance(filters, list) else [filters]

    if "nsfw_checker" in filters:
        filters.remove("nsfw_checker")  # handled by ED directly

    args = {}
    controlnet_filters = []

    print(filter_params)
    for filter_name in filters:
        params = filter_params.get(filter_name, {})
        if filter_name == "gfpgan":
            args["gfpgan_visibility"] = 1
        if filter_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
            args["upscaler_1"] = params.get("upscaler", "RealESRGAN_x4plus")
            args["upscaling_resize"] = params.get("scale", 4)
            # translate ED's model names to WebUI's upscaler labels
            if args["upscaler_1"] == "RealESRGAN_x4plus":
                args["upscaler_1"] = "R-ESRGAN 4x+"
            elif args["upscaler_1"] == "RealESRGAN_x4plus_anime_6B":
                args["upscaler_1"] = "R-ESRGAN 4x+ Anime6B"
        if filter_name == "codeformer":
            args["codeformer_visibility"] = 1
            args["codeformer_weight"] = params.get("codeformer_fidelity", 0.5)
        if filter_name.startswith("controlnet_"):
            filter_name = convert_ED_controlnet_filter_name(filter_name)
            controlnet_filters.append(filter_name)

    print(f"filtering {len(images)} images with {args}. {controlnet_filters=}")

    # only call extras if at least one non-controlnet filter was requested
    if len(filters) > len(controlnet_filters):
        filtered_images = extra_batch_images(images, input_type=input_type, **args)
    else:
        filtered_images = images

    for filter_name in controlnet_filters:
        filtered_images = controlnet_filter(filtered_images, module=filter_name, input_type=input_type)

    return filtered_images
def get_url():
    """Return the (protocol-relative) URL of the WebUI admin interface."""
    return f"//{WEBUI_HOST}:{WEBUI_PORT}/?__theme=dark"
def stop_rendering(context):
    """Ask WebUI to interrupt the current render (best-effort)."""
    try:
        res = webui_post("/sdapi/v1/interrupt")
        if res.status_code != 200:
            raise Exception(res.text)
    except Exception as e:
        print(f"Error interrupting webui: {e}")
def refresh_models():
    """Ask WebUI to rescan its model folders (checkpoints and VAEs).

    The refresh calls are fired on background threads, best-effort: errors
    inside the threads are swallowed by make_refresh_call itself.
    """
    def make_refresh_call(type):
        try:
            webui_post(f"/sdapi/v1/refresh-{type}")
        except:
            pass

    try:
        for type in ("checkpoints", "vae"):
            t = Thread(target=make_refresh_call, args=(type,))
            t.start()
    except Exception as e:
        print(f"Error refreshing models: {e}")
def list_controlnet_filters():
    """Return the controlnet pre-processor (module) names exposed by WebUI.

    The strings (including spellings like "pruple") must match WebUI's
    module names exactly — presumably these mirror the controlnet
    extension's registry; verify against the WebUI version in use.
    NOTE(review): "InsightFace+CLIP-H (IPAdapter)" appears twice in this list.
    """
    return [
        "openpose",
        "openpose_face",
        "openpose_faceonly",
        "openpose_hand",
        "openpose_full",
        "animal_openpose",
        "densepose_parula (black bg & blue torso)",
        "densepose (pruple bg & purple torso)",
        "dw_openpose_full",
        "mediapipe_face",
        "instant_id_face_keypoints",
        "InsightFace+CLIP-H (IPAdapter)",
        "InsightFace (InstantID)",
        "canny",
        "mlsd",
        "scribble_hed",
        "scribble_hedsafe",
        "scribble_pidinet",
        "scribble_pidsafe",
        "scribble_xdog",
        "softedge_hed",
        "softedge_hedsafe",
        "softedge_pidinet",
        "softedge_pidsafe",
        "softedge_teed",
        "normal_bae",
        "depth_midas",
        "normal_midas",
        "depth_zoe",
        "depth_leres",
        "depth_leres++",
        "depth_anything_v2",
        "depth_anything",
        "depth_hand_refiner",
        "depth_marigold",
        "lineart_coarse",
        "lineart_realistic",
        "lineart_anime",
        "lineart_standard (from white bg & black line)",
        "lineart_anime_denoise",
        "reference_adain",
        "reference_only",
        "reference_adain+attn",
        "tile_colorfix",
        "tile_resample",
        "tile_colorfix+sharp",
        "CLIP-ViT-H (IPAdapter)",
        "CLIP-G (Revision)",
        "CLIP-G (Revision ignore prompt)",
        "CLIP-ViT-bigG (IPAdapter)",
        "InsightFace+CLIP-H (IPAdapter)",
        "inpaint_only",
        "inpaint_only+lama",
        "inpaint_global_harmonious",
        "seg_ufade20k",
        "seg_ofade20k",
        "seg_anime_face",
        "seg_ofcoco",
        "shuffle",
        "segment",
        "invert (from white bg & black line)",
        "threshold",
        "t2ia_sketch_pidi",
        "t2ia_color_grid",
        "recolor_intensity",
        "recolor_luminance",
        "blur_gaussian",
    ]
def controlnet_filter(images, module="none", processor_res=512, threshold_a=64, threshold_b=64, input_type="pil"):
    """Run a controlnet pre-processor (`module`) on images via WebUI's
    /controlnet/detect endpoint. Images go in/out as PIL or base64 per
    input_type (converted to base64 for the wire either way)."""
    if input_type == "pil":
        images = [img_to_base64_str(x) for x in images]

    payload = {
        "controlnet_module": module,
        "controlnet_input_images": images,
        "controlnet_processor_res": processor_res,
        "controlnet_threshold_a": threshold_a,
        "controlnet_threshold_b": threshold_b,
    }
    res = webui_post("/controlnet/detect", json=payload)
    res = res.json()
    filtered_images = res["images"]

    if input_type == "pil":
        filtered_images = [base64_str_to_img(img) for img in filtered_images]
    elif input_type == "base64":
        filtered_images = [base64_buffer_to_base64_img(img) for img in filtered_images]

    return filtered_images
def image_progress_thread(task_id, callback, stream_image_progress, total_images, total_steps):
    """Poll WebUI's /internal/progress endpoint for `task_id` (every 0.5s)
    and forward step counts — and live-preview images, when streaming is
    enabled — to `callback`, until the task reports completion.

    `callback` may be None (generate_images defaults it to None); it was
    previously called unconditionally, which crashed this thread.
    """
    from PIL import Image

    last_preview_id = -1
    EMPTY_IMAGE = Image.new("RGB", (1, 1))

    while True:
        res = webui_post(
            "/internal/progress",
            json={"id_task": task_id, "live_preview": stream_image_progress, "id_live_preview": last_preview_id},
        )
        if res.status_code == 200:
            res = res.json()
        else:
            raise RuntimeError(f"Unexpected progress response. Status code: {res.status_code}. Res: {res.text}")

        last_preview_id = res["id_live_preview"]

        if res["progress"] is not None:
            step_num = int(res["progress"] * total_steps)
            if res["live_preview"] is not None:
                # WebUI only previews the first image of a batch; pad the rest
                img = res["live_preview"]
                img = base64_str_to_img(img)
                images = [EMPTY_IMAGE] * total_images
                images[0] = img
            else:
                images = None
            if callback:
                callback(images, step_num)

        if res["completed"] == True:
            print("Complete!")
            break

        time.sleep(0.5)
def webui_get(uri, *args, **kwargs):
    """HTTP GET against the WebUI server, e.g. webui_get("/sdapi/v1/options")."""
    url = f"http://{WEBUI_HOST}:{WEBUI_PORT}{uri}"
    return requests.get(url, *args, **kwargs)


def webui_post(uri, *args, **kwargs):
    """HTTP POST against the WebUI server; extra args go to requests.post."""
    url = f"http://{WEBUI_HOST}:{WEBUI_PORT}{uri}"
    return requests.post(url, *args, **kwargs)
def print_request(operation_to_apply, args):
    """Log an outgoing WebUI request with the bulky base64 image payloads
    replaced by short placeholders (works on a deep copy, so the real
    request dict is untouched)."""
    safe_args = deepcopy(args)

    if "init_images" in safe_args:
        safe_args["init_images"] = ["img"] * len(safe_args["init_images"])
    if "mask" in safe_args:
        safe_args["mask"] = "mask_img"

    cn_args = safe_args.get("alwayson_scripts", {}).get("controlnet", {}).get("args", [])
    if cn_args:
        cn_args[0]["image"] = "control_image"

    print(f"operation: {operation_to_apply}, args: {safe_args}")
def auto1111_hash(file_path):
    """Compute an AUTOMATIC1111-style short model hash: the sha256 of the
    0x10000 bytes starting at offset 0x100000, truncated to 8 hex chars."""
    import hashlib

    with open(file_path, "rb") as model_file:
        model_file.seek(0x100000)
        chunk = model_file.read(0x10000)

    return hashlib.sha256(chunk).hexdigest()[:8]
def extra_batch_images(
    images,  # list of PIL images
    name_list=None,  # list of image names
    resize_mode=0,
    show_extras_results=True,
    gfpgan_visibility=0,
    codeformer_visibility=0,
    codeformer_weight=0,
    upscaling_resize=2,
    upscaling_resize_w=512,
    upscaling_resize_h=512,
    upscaling_crop=True,
    upscaler_1="None",
    upscaler_2="None",
    extras_upscaler_2_visibility=0,
    upscale_first=False,
    use_async=False,
    input_type="pil",
):
    """Run WebUI's /sdapi/v1/extra-batch-images endpoint (upscalers,
    gfpgan, codeformer) on a batch of images.

    Images go in as PIL or base64 per input_type; names default to
    "image00001", "image00002", ... Raises when the request fails or when
    name_list doesn't match the number of images.
    """
    if name_list is not None:
        if len(name_list) != len(images):
            raise RuntimeError("len(images) != len(name_list)")
    else:
        name_list = [f"image{i + 1:05}" for i in range(len(images))]

    if input_type == "pil":
        images = [img_to_base64_str(x) for x in images]

    image_list = []
    for name, image in zip(name_list, images):
        image_list.append({"data": image, "name": name})

    payload = {
        "resize_mode": resize_mode,
        "show_extras_results": show_extras_results,
        "gfpgan_visibility": gfpgan_visibility,
        "codeformer_visibility": codeformer_visibility,
        "codeformer_weight": codeformer_weight,
        "upscaling_resize": upscaling_resize,
        "upscaling_resize_w": upscaling_resize_w,
        "upscaling_resize_h": upscaling_resize_h,
        "upscaling_crop": upscaling_crop,
        "upscaler_1": upscaler_1,
        "upscaler_2": upscaler_2,
        "extras_upscaler_2_visibility": extras_upscaler_2_visibility,
        "upscale_first": upscale_first,
        "imageList": image_list,
    }

    res = webui_post("/sdapi/v1/extra-batch-images", json=payload)
    if res.status_code == 200:
        res = res.json()
    else:
        raise Exception(
            "The engine failed while filtering this image. Please check the logs in the command-line window for more details."
        )

    images = res["images"]
    if input_type == "pil":
        images = [base64_str_to_img(img) for img in images]
    elif input_type == "base64":
        images = [base64_buffer_to_base64_img(img) for img in images]

    return images
def base64_buffer_to_base64_img(img):
    """Prepend a data-URI header to a raw base64 image buffer, using the
    output format WebUI is configured with.

    NOTE(review): assumes webui_opts was populated by ping(); calling this
    before the first successful ping would raise AttributeError.
    """
    output_format = webui_opts.get("samples_format", "jpeg")
    mime_type = f"image/{output_format.lower()}"
    return f"data:{mime_type};base64," + img
def convert_ED_sampler_names(sampler_name):
    """Translate an Easy Diffusion sampler id into the WebUI display name.

    Returns None for samplers deprecated in 3.5 and for any unknown sampler id.
    """
    ed_to_webui = {
        "dpmpp_2m": "DPM++ 2M",
        "dpmpp_sde": "DPM++ SDE",
        "dpmpp_2m_sde": "DPM++ 2M SDE",
        "dpmpp_2m_sde_heun": "DPM++ 2M SDE Heun",
        "dpmpp_2s_a": "DPM++ 2S a",
        "dpmpp_3m_sde": "DPM++ 3M SDE",
        "euler_a": "Euler a",
        "euler": "Euler",
        "lms": "LMS",
        "heun": "Heun",
        "dpm2": "DPM2",
        "dpm2_a": "DPM2 a",
        "dpm_fast": "DPM fast",
        "dpm_adaptive": "DPM adaptive",
        "restart": "Restart",
        "heun_pp2": "HeunPP2",
        "ipndm": "IPNDM",
        "ipndm_v": "IPNDM_V",
        "deis": "DEIS",
        "ddim": "DDIM",
        "ddim_cfgpp": "DDIM CFG++",
        "plms": "PLMS",
        "unipc": "UniPC",
        "lcm": "LCM",
        "ddpm": "DDPM",
        "forge_flux_realistic": "[Forge] Flux Realistic",
        "forge_flux_realistic_slow": "[Forge] Flux Realistic (Slow)",
    }
    # samplers deprecated in 3.5 — explicitly map to None (same result as an
    # unknown name, but kept here for documentation purposes)
    for deprecated in ("dpm_solver_stability", "unipc_snr", "unipc_tu", "unipc_snr_2", "unipc_tu_2", "unipc_tq"):
        ed_to_webui[deprecated] = None
    return ed_to_webui.get(sampler_name)
def convert_ED_controlnet_filter_name(filter):
    """Translate Easy Diffusion controlnet filter name(s) to their WebUI equivalents.

    Accepts a single filter name, a list of names, or None. Unknown names pass
    through unchanged (minus any "controlnet_" prefix). Filters with no WebUI
    equivalent (e.g. "controlnet_segment") map to None.

    Bug fix: the mapping can yield None for unsupported filters, and the previous
    implementation then called None.startswith(...), raising AttributeError.
    `cn()` now returns None for a None input instead of crashing.
    """
    if filter is None:
        return None

    def cn(n):
        # n is None when the mapping marks a filter as unsupported
        if n is None:
            return None
        if n.startswith("controlnet_"):
            return n[len("controlnet_") :]
        return n

    mapping = {
        "controlnet_scribble_hedsafe": None,
        "controlnet_scribble_pidsafe": None,
        "controlnet_softedge_pidsafe": "controlnet_softedge_pidisafe",
        "controlnet_normal_bae": "controlnet_normalbae",
        "controlnet_segment": None,
    }

    if isinstance(filter, list):
        return [cn(mapping.get(f, f)) for f in filter]
    return cn(mapping.get(filter, filter))

View File

@ -55,8 +55,13 @@ def init():
return bucketfiles
else:
bucket_id = crud.get_bucket_by_path(db, path).id
bucket = crud.get_bucket_by_path(db, path)
if bucket == None:
raise HTTPException(status_code=404, detail="Bucket not found")
bucket_id = bucket.id
bucketfile = db.query(models.BucketFile).filter(models.BucketFile.bucket_id == bucket_id, models.BucketFile.filename == filename).first()
if bucketfile == None:
raise HTTPException(status_code=404, detail="File not found")
suffix = get_suffix_from_filename(filename)

View File

@ -243,7 +243,8 @@ def get_processor_name():
if platform.system() == "Windows":
return platform.processor()
elif platform.system() == "Darwin":
os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
if "/usr/sbin" not in os.environ["PATH"].split(os.pathsep):
os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
command = "sysctl -n machdep.cpu.brand_string"
return subprocess.check_output(command, shell=True).decode().strip()
elif platform.system() == "Linux":

View File

@ -33,4 +33,3 @@ class Bucket(BucketBase):
class Config:
orm_mode = True

View File

@ -8,7 +8,7 @@ from easydiffusion import app
from easydiffusion.types import ModelsData
from easydiffusion.utils import log
from sdkit import Context
from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db
from sdkit.models import scan_model, download_model, get_model_info_from_db
from sdkit.models.model_loader.controlnet_filters import filters as cn_filters
from sdkit.utils import hash_file_quick
from sdkit.models.model_loader.embeddings import get_embedding_token
@ -25,15 +25,15 @@ KNOWN_MODEL_TYPES = [
"controlnet",
]
MODEL_EXTENSIONS = {
"stable-diffusion": [".ckpt", ".safetensors"],
"vae": [".vae.pt", ".ckpt", ".safetensors"],
"hypernetwork": [".pt", ".safetensors"],
"stable-diffusion": [".ckpt", ".safetensors", ".sft", ".gguf"],
"vae": [".vae.pt", ".ckpt", ".safetensors", ".sft"],
"hypernetwork": [".pt", ".safetensors", ".sft"],
"gfpgan": [".pth"],
"realesrgan": [".pth"],
"lora": [".ckpt", ".safetensors", ".pt"],
"lora": [".ckpt", ".safetensors", ".sft", ".pt"],
"codeformer": [".pth"],
"embeddings": [".pt", ".bin", ".safetensors"],
"controlnet": [".pth", ".safetensors"],
"embeddings": [".pt", ".bin", ".safetensors", ".sft"],
"controlnet": [".pth", ".safetensors", ".sft"],
}
DEFAULT_MODELS = {
"stable-diffusion": [
@ -51,6 +51,16 @@ DEFAULT_MODELS = {
],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
ALTERNATE_FOLDER_NAMES = { # for WebUI compatibility
"stable-diffusion": "Stable-diffusion",
"vae": "VAE",
"hypernetwork": "hypernetworks",
"codeformer": "Codeformer",
"gfpgan": "GFPGAN",
"realesrgan": "RealESRGAN",
"lora": "Lora",
"controlnet": "ControlNet",
}
known_models = {}
@ -63,6 +73,7 @@ def init():
def load_default_models(context: Context):
from easydiffusion import runtime
from easydiffusion.backend_manager import backend
runtime.set_vram_optimizations(context)
@ -70,7 +81,7 @@ def load_default_models(context: Context):
for model_type in MODELS_TO_LOAD_ON_START:
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type, fail_if_not_found=False)
try:
load_model(
backend.load_model(
context,
model_type,
scan_model=context.model_paths[model_type] != None
@ -92,9 +103,11 @@ def load_default_models(context: Context):
def unload_all(context: Context):
from easydiffusion.backend_manager import backend
for model_type in KNOWN_MODEL_TYPES:
unload_model(context, model_type)
if model_type in context.model_load_errors:
backend.unload_model(context, model_type)
if hasattr(context, "model_load_errors") and model_type in context.model_load_errors:
del context.model_load_errors[model_type]
@ -119,33 +132,33 @@ def resolve_model_to_use_single(model_name: str = None, model_type: str = None,
default_models = DEFAULT_MODELS.get(model_type, [])
config = app.getConfig()
model_dir = os.path.join(app.MODELS_DIR, model_type)
if not model_name: # When None try user configured model.
# config = getConfig()
if "model" in config and model_type in config["model"]:
model_name = config["model"][model_type]
if model_name:
# Check models directory
model_path = os.path.join(model_dir, model_name)
if os.path.exists(model_path):
return model_path
for model_extension in model_extensions:
if os.path.exists(model_path + model_extension):
return model_path + model_extension
if os.path.exists(model_name + model_extension):
return os.path.abspath(model_name + model_extension)
for model_dir in get_model_dirs(model_type):
if model_name:
# Check models directory
model_path = os.path.join(model_dir, model_name)
if os.path.exists(model_path):
return model_path
for model_extension in model_extensions:
if os.path.exists(model_path + model_extension):
return model_path + model_extension
if os.path.exists(model_name + model_extension):
return os.path.abspath(model_name + model_extension)
# Can't find requested model, check the default paths.
if model_type == "stable-diffusion" and not fail_if_not_found:
for default_model in default_models:
default_model_path = os.path.join(model_dir, default_model["file_name"])
if os.path.exists(default_model_path):
if model_name is not None:
log.warn(
f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
)
return default_model_path
# Can't find requested model, check the default paths.
if model_type == "stable-diffusion" and not fail_if_not_found:
for default_model in default_models:
default_model_path = os.path.join(model_dir, default_model["file_name"])
if os.path.exists(default_model_path):
if model_name is not None:
log.warn(
f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
)
return default_model_path
if model_name and fail_if_not_found:
raise FileNotFoundError(
@ -154,6 +167,8 @@ def resolve_model_to_use_single(model_name: str = None, model_type: str = None,
def reload_models_if_necessary(context: Context, models_data: ModelsData, models_to_force_reload: list = []):
from easydiffusion.backend_manager import backend
models_to_reload = {
model_type: path
for model_type, path in models_data.model_paths.items()
@ -175,7 +190,7 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
for model_type, model_path_in_req in models_to_reload.items():
context.model_paths[model_type] = model_path_in_req
action_fn = unload_model if context.model_paths[model_type] is None else load_model
action_fn = backend.unload_model if context.model_paths[model_type] is None else backend.load_model
extra_params = models_data.model_params.get(model_type, {})
try:
action_fn(context, model_type, scan_model=False, **extra_params) # we've scanned them already
@ -183,19 +198,28 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
del context.model_load_errors[model_type]
except Exception as e:
log.exception(e)
if action_fn == load_model:
if action_fn == backend.load_model:
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def resolve_model_paths(models_data: ModelsData):
model_paths = models_data.model_paths
skip_models = cn_filters + [
"latent_upscaler",
"nsfw_checker",
"esrgan_4x",
"lanczos",
"nearest",
"scunet",
"swinir",
]
for model_type in model_paths:
skip_models = cn_filters + ["latent_upscaler", "nsfw_checker"]
if model_type in skip_models: # doesn't use model paths
continue
if model_type == "codeformer":
if model_type == "codeformer" and model_paths[model_type]:
download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0")
elif model_type == "controlnet":
elif model_type == "controlnet" and model_paths[model_type]:
model_id = model_paths[model_type]
model_info = get_model_info_from_db(model_type=model_type, model_id=model_id)
if model_info:
@ -225,7 +249,8 @@ def download_default_models_if_necessary():
def download_if_necessary(model_type: str, file_name: str, model_id: str, skip_if_others_exist=True):
model_path = os.path.join(app.MODELS_DIR, model_type, file_name)
model_dir = get_model_dirs(model_type)[0]
model_path = os.path.join(model_dir, file_name)
expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
other_models_exist = any_model_exists(model_type) and skip_if_others_exist
@ -245,23 +270,42 @@ def migrate_legacy_model_location():
file_name = model["file_name"]
legacy_path = os.path.join(app.SD_DIR, file_name)
if os.path.exists(legacy_path):
shutil.move(legacy_path, os.path.join(app.MODELS_DIR, model_type, file_name))
model_dir = get_model_dirs(model_type)[0]
shutil.move(legacy_path, os.path.join(model_dir, file_name))
def any_model_exists(model_type: str) -> bool:
extensions = MODEL_EXTENSIONS.get(model_type, [])
for ext in extensions:
if any(glob(f"{app.MODELS_DIR}/{model_type}/**/*{ext}", recursive=True)):
return True
for model_dir in get_model_dirs(model_type):
for ext in extensions:
if any(glob(f"{model_dir}/**/*{ext}", recursive=True)):
return True
return False
def make_model_folders():
for model_type in KNOWN_MODEL_TYPES:
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
model_dir_path = get_model_dirs(model_type)[0]
os.makedirs(model_dir_path, exist_ok=True)
try:
os.makedirs(model_dir_path, exist_ok=True)
except Exception as e:
from rich.console import Console
from rich.panel import Panel
Console().print(
Panel(
"\n"
+ f"Error while creating the models directory: '{model_dir_path}'\n"
+ f"Error: {e}\n\n"
+ f"[white]Check the 'models_dir:' line in the file '{os.path.join(app.ROOT_DIR, 'config.yaml')}'.[/white]\n",
title="Fatal Error starting Easy Diffusion",
style="bold yellow on red",
)
)
input("Press Enter to terminate...")
exit(1)
help_file_name = f"Place your {model_type} model files here.txt"
help_file_contents = f'Supported extensions: {" or ".join(MODEL_EXTENSIONS.get(model_type))}'
@ -303,28 +347,32 @@ def is_malicious_model(file_path):
def getModels(scan_for_malicious: bool = True):
from easydiffusion.backend_manager import backend
backend.refresh_models()
models = {
"options": {
"stable-diffusion": [{"sd-v1-4": "SD 1.4"}],
"stable-diffusion": [],
"vae": [],
"hypernetwork": [],
"lora": [],
"codeformer": [{"codeformer": "CodeFormer"}],
"embeddings": [],
"controlnet": [
{"control_v11p_sd15_canny": "Canny (*)"},
{"control_v11p_sd15_openpose": "OpenPose (*)"},
{"control_v11p_sd15_normalbae": "Normal BAE (*)"},
{"control_v11f1p_sd15_depth": "Depth (*)"},
{"control_v11p_sd15_scribble": "Scribble"},
{"control_v11p_sd15_softedge": "Soft Edge"},
{"control_v11p_sd15_inpaint": "Inpaint"},
{"control_v11p_sd15_lineart": "Line Art"},
{"control_v11p_sd15s2_lineart_anime": "Line Art Anime"},
{"control_v11p_sd15_mlsd": "Straight Lines"},
{"control_v11p_sd15_seg": "Segment"},
{"control_v11e_sd15_shuffle": "Shuffle"},
{"control_v11f1e_sd15_tile": "Tile"},
# {"control_v11p_sd15_canny": "Canny (*)"},
# {"control_v11p_sd15_openpose": "OpenPose (*)"},
# {"control_v11p_sd15_normalbae": "Normal BAE (*)"},
# {"control_v11f1p_sd15_depth": "Depth (*)"},
# {"control_v11p_sd15_scribble": "Scribble"},
# {"control_v11p_sd15_softedge": "Soft Edge"},
# {"control_v11p_sd15_inpaint": "Inpaint"},
# {"control_v11p_sd15_lineart": "Line Art"},
# {"control_v11p_sd15s2_lineart_anime": "Line Art Anime"},
# {"control_v11p_sd15_mlsd": "Straight Lines"},
# {"control_v11p_sd15_seg": "Segment"},
# {"control_v11e_sd15_shuffle": "Shuffle"},
# {"control_v11f1e_sd15_tile": "Tile"},
],
},
}
@ -339,6 +387,9 @@ def getModels(scan_for_malicious: bool = True):
tree = list(default_entries)
if not os.path.exists(directory):
return tree
for entry in sorted(
os.scandir(directory),
key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()),
@ -361,6 +412,8 @@ def getModels(scan_for_malicious: bool = True):
model_id = entry.name[: -len(matching_suffix)]
if callable(nameFilter):
model_id = nameFilter(model_id)
if model_id is None:
continue
model_exists = False
for m in tree: # allows default "named" models, like CodeFormer and known ControlNet models
@ -381,17 +434,18 @@ def getModels(scan_for_malicious: bool = True):
nonlocal models_scanned
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
models_dir = os.path.join(app.MODELS_DIR, model_type)
if not os.path.exists(models_dir):
os.makedirs(models_dir)
models_dirs = get_model_dirs(model_type)
if not os.path.exists(models_dirs[0]):
os.makedirs(models_dirs[0])
try:
default_tree = models["options"].get(model_type, [])
models["options"][model_type] = scan_directory(
models_dir, model_extensions, default_entries=default_tree, nameFilter=nameFilter
)
except MaliciousModelException as e:
models["scan-error"] = str(e)
for model_dir in models_dirs:
try:
default_tree = models["options"].get(model_type, [])
models["options"][model_type] = scan_directory(
model_dir, model_extensions, default_entries=default_tree, nameFilter=nameFilter
)
except MaliciousModelException as e:
models["scan-error"] = str(e)
if scan_for_malicious:
log.info(f"[green]Scanning all model folders for models...[/]")
@ -399,7 +453,7 @@ def getModels(scan_for_malicious: bool = True):
listModels(model_type="stable-diffusion")
listModels(model_type="vae")
listModels(model_type="hypernetwork")
listModels(model_type="gfpgan")
listModels(model_type="gfpgan", nameFilter=lambda x: (x if "gfpgan" in x.lower() else None))
listModels(model_type="lora")
listModels(model_type="embeddings", nameFilter=get_embedding_token)
listModels(model_type="controlnet")
@ -408,3 +462,20 @@ def getModels(scan_for_malicious: bool = True):
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")
return models
def get_model_dirs(model_type: str, base_dir=None):
    "Returns the possible model directory paths for the given model type. Mainly used for WebUI compatibility"
    # Default to the application-wide models directory when no base is given.
    if base_dir is None:
        base_dir = app.MODELS_DIR
    # The canonical Easy Diffusion folder (models/<model_type>) always comes first.
    dirs = [os.path.join(base_dir, model_type)]
    # Also include the WebUI-style folder name (e.g. "Stable-diffusion", "Lora"),
    # but only when that folder already exists on disk as a directory.
    if model_type in ALTERNATE_FOLDER_NAMES:
        alt_dir = ALTERNATE_FOLDER_NAMES[model_type]
        alt_dir = os.path.join(base_dir, alt_dir)
        if os.path.exists(alt_dir) and os.path.isdir(alt_dir):
            dirs.append(alt_dir)
    return dirs

View File

@ -3,8 +3,6 @@ import os
import platform
from importlib.metadata import version as pkg_version
from sdkit.utils import log
from easydiffusion import app
# future home of scripts/check_modules.py
@ -12,9 +10,9 @@ from easydiffusion import app
manifest = {
"tensorrt": {
"install": [
"nvidia-cudnn --pre --extra-index-url=https://pypi.nvidia.com --trusted-host pypi.nvidia.com",
"tensorrt-libs --pre --extra-index-url=https://pypi.nvidia.com --trusted-host pypi.nvidia.com",
"tensorrt --pre --extra-index-url=https://pypi.nvidia.com --trusted-host pypi.nvidia.com",
"wheel",
"nvidia-cudnn-cu11==8.9.4.25",
"tensorrt==9.0.0.post11.dev1 --pre --extra-index-url=https://pypi.nvidia.com --trusted-host pypi.nvidia.com",
],
"uninstall": ["tensorrt"],
# TODO also uninstall tensorrt-libs and nvidia-cudnn, but do it upon restarting (avoid 'file in use' error)
@ -50,6 +48,8 @@ def is_installed(module_name) -> bool:
def install(module_name):
from easydiffusion.utils import log
if is_installed(module_name):
log.info(f"{module_name} has already been installed!")
return
@ -79,6 +79,8 @@ def install(module_name):
def uninstall(module_name):
from easydiffusion.utils import log
if not is_installed(module_name):
log.info(f"{module_name} hasn't been installed!")
return

View File

@ -1,4 +1,5 @@
"""
(OUTDATED DOC)
A runtime that runs on a specific device (in a thread).
It can run various tasks like image generation, image filtering, model merge etc by using that thread-local context.
@ -6,42 +7,35 @@ It can run various tasks like image generation, image filtering, model merge etc
This creates an `sdkit.Context` that's bound to the device specified while calling the `init()` function.
"""
from easydiffusion import device_manager
from easydiffusion.utils import log
from sdkit import Context
from sdkit.utils import get_device_usage
context = Context() # thread-local
"""
runtime data (bound locally to this thread), for e.g. device, references to loaded models, optimization flags etc
"""
context = None
def init(device):
"""
Initializes the fields that will be bound to this runtime's context, and sets the current torch device
"""
global context
from easydiffusion import device_manager
from easydiffusion.backend_manager import backend
from easydiffusion.app import getConfig
context = backend.create_context()
context.stop_processing = False
context.temp_images = {}
context.partial_x_samples = None
context.model_load_errors = {}
context.enable_codeformer = True
from easydiffusion import app
app_config = app.getConfig()
context.test_diffusers = app_config.get("test_diffusers", True)
log.info("Device usage during initialization:")
get_device_usage(device, log_info=True, process_usage_only=False)
device_manager.device_init(context, device)
def set_vram_optimizations(context: Context):
from easydiffusion import app
def set_vram_optimizations(context):
from easydiffusion.app import getConfig
config = app.getConfig()
config = getConfig()
vram_usage_level = config.get("vram_usage_level", "balanced")
if vram_usage_level != context.vram_usage_level:

View File

@ -2,6 +2,7 @@
Notes:
async endpoints always run on the main thread. Without they run on the thread pool.
"""
import datetime
import mimetypes
import os
@ -15,9 +16,12 @@ from easydiffusion.types import (
FilterImageRequest,
MergeRequest,
TaskData,
RenderTaskData,
ModelsData,
OutputFormatData,
SaveToDiskData,
convert_legacy_render_req_to_new,
convert_legacy_controlnet_filter_name,
)
from easydiffusion.utils import log
from fastapi import FastAPI, HTTPException
@ -36,6 +40,7 @@ NOCACHE_HEADERS = {
"Pragma": "no-cache",
"Expires": "0",
}
PROTECTED_CONFIG_KEYS = ("block_nsfw",) # can't change these via the HTTP API
class NoCacheStaticFiles(StaticFiles):
@ -63,7 +68,10 @@ class SetAppConfigRequest(BaseModel, extra=Extra.allow):
ui_open_browser_on_start: bool = None
listen_to_network: bool = None
listen_port: int = None
test_diffusers: bool = True
use_v3_engine: bool = True
backend: str = "ed_diffusers"
models_dir: str = None
vram_usage_level: str = "balanced"
def init():
@ -151,6 +159,12 @@ def init():
def shutdown_event(): # Signal render thread to close on shutdown
task_manager.current_state_error = SystemExit("Application shutting down.")
@server_api.on_event("startup")
def start_event():
from easydiffusion.app import open_browser
open_browser()
# API implementations
def set_app_config_internal(req: SetAppConfigRequest):
@ -172,10 +186,13 @@ def set_app_config_internal(req: SetAppConfigRequest):
config["net"] = {}
config["net"]["listen_port"] = int(req.listen_port)
config["test_diffusers"] = req.test_diffusers
config["use_v3_engine"] = req.backend == "ed_diffusers"
config["backend"] = req.backend
config["models_dir"] = req.models_dir
config["vram_usage_level"] = req.vram_usage_level
for property, property_value in req.dict().items():
if property_value is not None and property not in req.__fields__:
if property_value is not None and property not in req.__fields__ and property not in PROTECTED_CONFIG_KEYS:
config[property] = property_value
try:
@ -204,8 +221,15 @@ def read_web_data_internal(key: str = None, **kwargs):
if not key: # /get without parameters, stable-diffusion easter egg.
raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot
elif key == "app_config":
return JSONResponse(app.getConfig(), headers=NOCACHE_HEADERS)
config = app.getConfig()
if "models_dir" not in config:
config["models_dir"] = app.MODELS_DIR
return JSONResponse(config, headers=NOCACHE_HEADERS)
elif key == "system_info":
from easydiffusion.backend_manager import backend
config = app.getConfig()
output_dir = config.get("force_save_path", os.path.join(os.path.expanduser("~"), app.OUTPUT_DIRNAME))
@ -215,6 +239,8 @@ def read_web_data_internal(key: str = None, **kwargs):
"hosts": app.getIPConfig(),
"default_output_dir": output_dir,
"enforce_output_dir": ("force_save_path" in config),
"enforce_output_metadata": ("force_save_metadata" in config),
"backend_url": backend.get_url(),
}
system_info["devices"]["config"] = config.get("render_devices", "auto")
return JSONResponse(system_info, headers=NOCACHE_HEADERS)
@ -261,14 +287,15 @@ def render_internal(req: dict):
# separate out the request data into rendering and task-specific data
render_req: GenerateImageRequest = GenerateImageRequest.parse_obj(req)
task_data: TaskData = TaskData.parse_obj(req)
task_data: RenderTaskData = RenderTaskData.parse_obj(req)
models_data: ModelsData = ModelsData.parse_obj(req)
output_format: OutputFormatData = OutputFormatData.parse_obj(req)
save_data: SaveToDiskData = SaveToDiskData.parse_obj(req)
# Overwrite user specified save path
config = app.getConfig()
if "force_save_path" in config:
task_data.save_to_disk_path = config["force_save_path"]
save_data.save_to_disk_path = config["force_save_path"]
render_req.init_image_mask = req.get("mask") # hack: will rename this in the HTTP API in a future revision
@ -280,7 +307,7 @@ def render_internal(req: dict):
)
# enqueue the task
task = RenderTask(render_req, task_data, models_data, output_format)
task = RenderTask(render_req, task_data, models_data, output_format, save_data)
return enqueue_task(task)
except HTTPException as e:
raise e
@ -291,13 +318,23 @@ def render_internal(req: dict):
def filter_internal(req: dict):
try:
session_id = req.get("session_id", "session")
filter_req: FilterImageRequest = FilterImageRequest.parse_obj(req)
task_data: TaskData = TaskData.parse_obj(req)
models_data: ModelsData = ModelsData.parse_obj(req)
output_format: OutputFormatData = OutputFormatData.parse_obj(req)
save_data: SaveToDiskData = SaveToDiskData.parse_obj(req)
filter_req.filter = convert_legacy_controlnet_filter_name(filter_req.filter)
for model_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
if models_data.model_paths.get(model_name):
if model_name not in filter_req.filter_params:
filter_req.filter_params[model_name] = {}
filter_req.filter_params[model_name]["upscaler"] = models_data.model_paths[model_name]
# enqueue the task
task = FilterTask(filter_req, session_id, models_data, output_format)
task = FilterTask(filter_req, task_data, models_data, output_format, save_data)
return enqueue_task(task)
except HTTPException as e:
raise e
@ -329,15 +366,13 @@ def model_merge_internal(req: dict):
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
sd_model_dir = model_manager.get_model_dir("stable-diffusion")[0]
merge_models(
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
mergeReq.ratio,
os.path.join(
app.MODELS_DIR,
"stable-diffusion",
filename_regex.sub("_", mergeReq.out_path),
),
os.path.join(sd_model_dir, filename_regex.sub("_", mergeReq.out_path)),
mergeReq.use_fp16,
)
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
@ -456,8 +491,8 @@ def modify_package_internal(package_name: str, req: dict):
log.error(traceback.format_exc())
return HTTPException(status_code=500, detail=str(e))
def get_sha256_internal(obj_path):
import hashlib
from easydiffusion.utils import sha256sum
path = obj_path.split("/")
@ -477,4 +512,3 @@ def get_sha256_internal(obj_path):
log.error(str(e))
log.error(traceback.format_exc())
return HTTPException(status_code=500, detail=str(e))

View File

@ -4,6 +4,7 @@ Notes:
Use weak_thread_data to store all other data using weak keys.
This will allow for garbage collection after the thread dies.
"""
import json
import traceback
@ -19,7 +20,6 @@ import torch
from easydiffusion import device_manager
from easydiffusion.tasks import Task
from easydiffusion.utils import log
from sdkit.utils import gc
THREAD_NAME_PREFIX = ""
ERR_LOCK_FAILED = " failed to acquire lock within timeout."
@ -233,6 +233,8 @@ def thread_render(device):
global current_state, current_state_error
from easydiffusion import model_manager, runtime
from easydiffusion.backend_manager import backend
from requests import ConnectionError
try:
runtime.init(device)
@ -244,8 +246,17 @@ def thread_render(device):
}
current_state = ServerStates.LoadingModel
model_manager.load_default_models(runtime.context)
while True:
try:
if backend.ping(timeout=1):
break
time.sleep(1)
except (TimeoutError, ConnectionError):
time.sleep(1)
model_manager.load_default_models(runtime.context)
current_state = ServerStates.Online
except Exception as e:
log.error(traceback.format_exc())
@ -291,7 +302,6 @@ def thread_render(device):
task.buffer_queue.put(json.dumps(task.response))
log.error(traceback.format_exc())
finally:
gc(runtime.context)
task.lock.release()
keep_task_alive(task)

View File

@ -1,12 +1,24 @@
import os
import json
import pprint
import time
from sdkit.filter import apply_filters
from sdkit.models import load_model
from sdkit.utils import img_to_base64_str, get_image, log
from numpy import base_repr
from sdkit.utils import img_to_base64_str, log, save_images, base64_str_to_img
from easydiffusion import model_manager, runtime
from easydiffusion.types import FilterImageRequest, FilterImageResponse, ModelsData, OutputFormatData
from easydiffusion.types import (
FilterImageRequest,
FilterImageResponse,
ModelsData,
OutputFormatData,
SaveToDiskData,
TaskData,
GenerateImageRequest,
)
from easydiffusion.utils import filter_nsfw
from easydiffusion.utils.save_utils import format_folder_name
from .task import Task
@ -15,17 +27,28 @@ class FilterTask(Task):
"For applying filters to input images"
def __init__(
self, req: FilterImageRequest, session_id: str, models_data: ModelsData, output_format: OutputFormatData
self,
req: FilterImageRequest,
task_data: TaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
super().__init__(session_id)
super().__init__(task_data.session_id)
task_data.request_id = self.id
self.request = req
self.task_data = task_data
self.models_data = models_data
self.output_format = output_format
self.save_data = save_data
# convert to multi-filter format, if necessary
if isinstance(req.filter, str):
req.filter_params = {req.filter: req.filter_params}
if req.filter not in req.filter_params:
req.filter_params = {req.filter: req.filter_params}
req.filter = [req.filter]
if not isinstance(req.image, list):
@ -34,82 +57,73 @@ class FilterTask(Task):
def run(self):
"Runs the image filtering task on the assigned thread"
from easydiffusion import app
from easydiffusion.backend_manager import backend
context = runtime.context
model_manager.resolve_model_paths(self.models_data)
model_manager.reload_models_if_necessary(context, self.models_data)
model_manager.fail_if_models_did_not_load(context)
print_task_info(self.request, self.models_data, self.output_format)
print_task_info(self.request, self.models_data, self.output_format, self.save_data)
if isinstance(self.request.image, list):
images = [get_image(img) for img in self.request.image]
else:
images = get_image(self.request.image)
images = filter_images(context, images, self.request.filter, self.request.filter_params)
has_nsfw_filter = "nsfw_filter" in self.request.filter
output_format = self.output_format
images = [
img_to_base64_str(
img, output_format.output_format, output_format.output_quality, output_format.output_lossless
backend.set_options(
context,
output_format=output_format.output_format,
output_quality=output_format.output_quality,
output_lossless=output_format.output_lossless,
)
images = backend.filter_images(
context, self.request.image, self.request.filter, self.request.filter_params, input_type="base64"
)
if has_nsfw_filter:
images = filter_nsfw(images)
if self.save_data.save_to_disk_path is not None:
app_config = app.getConfig()
folder_format = app_config.get("folder_format", "$id")
dummy_req = GenerateImageRequest()
img_id = base_repr(int(time.time() * 10000), 36)[-7:] # Base 36 conversion, 0-9, A-Z
save_dir_path = os.path.join(
self.save_data.save_to_disk_path, format_folder_name(folder_format, dummy_req, self.task_data)
)
images_pil = [base64_str_to_img(img) for img in images]
save_images(
images_pil,
save_dir_path,
file_name=img_id,
output_format=output_format.output_format,
output_quality=output_format.output_quality,
output_lossless=output_format.output_lossless,
)
for img in images
]
res = FilterImageResponse(self.request, self.models_data, images=images)
res = res.json()
self.buffer_queue.put(json.dumps(res))
log.info("Filter task completed")
self.response = res
def filter_images(context, images, filters, filter_params={}):
filters = filters if isinstance(filters, list) else [filters]
for filter_name in filters:
params = filter_params.get(filter_name, {})
previous_state = before_filter(context, filter_name, params)
try:
images = apply_filters(context, filter_name, images, **params)
finally:
after_filter(context, filter_name, params, previous_state)
return images
def before_filter(context, filter_name, filter_params):
if filter_name == "codeformer":
from easydiffusion.model_manager import DEFAULT_MODELS, resolve_model_to_use
default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
prev_realesrgan_path = None
upscale_faces = filter_params.get("upscale_faces", False)
if upscale_faces and default_realesrgan not in context.model_paths["realesrgan"]:
prev_realesrgan_path = context.model_paths.get("realesrgan")
context.model_paths["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
load_model(context, "realesrgan")
return prev_realesrgan_path
def after_filter(context, filter_name, filter_params, previous_state):
if filter_name == "codeformer":
prev_realesrgan_path = previous_state
if prev_realesrgan_path:
context.model_paths["realesrgan"] = prev_realesrgan_path
load_model(context, "realesrgan")
def print_task_info(req: FilterImageRequest, models_data: ModelsData, output_format: OutputFormatData):
def print_task_info(
req: FilterImageRequest, models_data: ModelsData, output_format: OutputFormatData, save_data: SaveToDiskData
):
req_str = pprint.pformat({"filter": req.filter, "filter_params": req.filter_params}).replace("[", "\[")
models_data = pprint.pformat(models_data.dict()).replace("[", "\[")
output_format = pprint.pformat(output_format.dict()).replace("[", "\[")
save_data = pprint.pformat(save_data.dict()).replace("[", "\[")
log.info(f"request: {req_str}")
log.info(f"models data: {models_data}")
log.info(f"output format: {output_format}")
log.info(f"save data: {save_data}")

View File

@ -2,49 +2,59 @@ import json
import pprint
import queue
import time
from PIL import Image
from easydiffusion import model_manager, runtime
from easydiffusion.types import GenerateImageRequest, ModelsData, OutputFormatData
from easydiffusion.types import GenerateImageRequest, ModelsData, OutputFormatData, SaveToDiskData
from easydiffusion.types import Image as ResponseImage
from easydiffusion.types import GenerateImageResponse, TaskData, UserInitiatedStop
from easydiffusion.utils import get_printable_request, log, save_images_to_disk
from sdkit.generate import generate_images
from easydiffusion.types import GenerateImageResponse, RenderTaskData
from easydiffusion.utils import get_printable_request, log, save_images_to_disk, filter_nsfw
from sdkit.utils import (
diffusers_latent_samples_to_images,
gc,
img_to_base64_str,
base64_str_to_img,
img_to_buffer,
latent_samples_to_images,
resize_img,
get_image,
log,
)
from .task import Task
from .filter_images import filter_images
class RenderTask(Task):
"For image generation"
def __init__(
self, req: GenerateImageRequest, task_data: TaskData, models_data: ModelsData, output_format: OutputFormatData
self,
req: GenerateImageRequest,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
super().__init__(task_data.session_id)
task_data.request_id = self.id
self.render_request: GenerateImageRequest = req # Initial Request
self.task_data: TaskData = task_data
self.render_request = req # Initial Request
self.task_data = task_data
self.models_data = models_data
self.output_format = output_format
self.save_data = save_data
self.temp_images: list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
def run(self):
"Runs the image generation task on the assigned thread"
from easydiffusion import task_manager
from easydiffusion import task_manager, app
from easydiffusion.backend_manager import backend
context = runtime.context
config = app.getConfig()
if config.get("block_nsfw", False): # override if set on the server
self.task_data.block_nsfw = True
def step_callback():
task_manager.keep_task_alive(self)
@ -53,7 +63,7 @@ class RenderTask(Task):
if isinstance(task_manager.current_state_error, (SystemExit, StopAsyncIteration)) or isinstance(
self.error, StopAsyncIteration
):
context.stop_processing = True
backend.stop_rendering(context)
if isinstance(task_manager.current_state_error, StopAsyncIteration):
self.error = task_manager.current_state_error
task_manager.current_state_error = None
@ -63,11 +73,7 @@ class RenderTask(Task):
model_manager.resolve_model_paths(self.models_data)
models_to_force_reload = []
if (
runtime.set_vram_optimizations(context)
or self.has_param_changed(context, "clip_skip")
or self.trt_needs_reload(context)
):
if runtime.set_vram_optimizations(context) or self.has_param_changed(context, "clip_skip"):
models_to_force_reload.append("stable-diffusion")
model_manager.reload_models_if_necessary(context, self.models_data, models_to_force_reload)
@ -80,13 +86,15 @@ class RenderTask(Task):
self.task_data,
self.models_data,
self.output_format,
self.save_data,
self.buffer_queue,
self.temp_images,
step_callback,
self,
)
def has_param_changed(self, context, param_name):
if not context.test_diffusers:
if not getattr(context, "test_diffusers", False):
return False
if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
return True
@ -95,49 +103,36 @@ class RenderTask(Task):
new_val = self.models_data.model_params.get("stable-diffusion", {}).get(param_name, False)
return model["params"].get(param_name) != new_val
def trt_needs_reload(self, context):
if not context.test_diffusers:
return False
if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
return True
model = context.models["stable-diffusion"]
# curr_convert_to_trt = model["params"].get("convert_to_tensorrt")
new_convert_to_trt = self.models_data.model_params.get("stable-diffusion", {}).get("convert_to_tensorrt", False)
pipe = model["default"]
is_trt_loaded = hasattr(pipe.unet, "_allocate_trt_buffers") or hasattr(
pipe.unet, "_allocate_trt_buffers_backup"
)
if new_convert_to_trt and not is_trt_loaded:
return True
curr_build_config = model["params"].get("trt_build_config")
new_build_config = self.models_data.model_params.get("stable-diffusion", {}).get("trt_build_config", {})
return new_convert_to_trt and curr_build_config != new_build_config
def make_images(
context,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
task,
):
context.stop_processing = False
print_task_info(req, task_data, models_data, output_format)
print_task_info(req, task_data, models_data, output_format, save_data)
images, seeds = make_images_internal(
context, req, task_data, models_data, output_format, data_queue, task_temp_images, step_callback
context,
req,
task_data,
models_data,
output_format,
save_data,
data_queue,
task_temp_images,
step_callback,
task,
)
res = GenerateImageResponse(
req, task_data, models_data, output_format, images=construct_response(images, seeds, output_format)
req, task_data, models_data, output_format, save_data, images=construct_response(images, seeds, output_format)
)
res = res.json()
data_queue.put(json.dumps(res))
@ -147,48 +142,72 @@ def make_images(
def print_task_info(
req: GenerateImageRequest, task_data: TaskData, models_data: ModelsData, output_format: OutputFormatData
req: GenerateImageRequest,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
req_str = pprint.pformat(get_printable_request(req, task_data, output_format)).replace("[", "\[")
req_str = pprint.pformat(get_printable_request(req, task_data, models_data, output_format, save_data)).replace(
"[", "\["
)
task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
models_data = pprint.pformat(models_data.dict()).replace("[", "\[")
output_format = pprint.pformat(output_format.dict()).replace("[", "\[")
save_data = pprint.pformat(save_data.dict()).replace("[", "\[")
log.info(f"request: {req_str}")
log.info(f"task data: {task_str}")
# log.info(f"models data: {models_data}")
log.info(f"models data: {models_data}")
log.info(f"output format: {output_format}")
log.info(f"save data: {save_data}")
def make_images_internal(
context,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
task,
):
images, user_stopped = generate_images_internal(
from easydiffusion.backend_manager import backend
# prep the nsfw_filter
if task_data.block_nsfw:
filter_nsfw([Image.new("RGB", (1, 1))]) # hack - ensures that the model is available
images = generate_images_internal(
context,
req,
task_data,
models_data,
output_format,
data_queue,
task_temp_images,
step_callback,
task_data.stream_image_progress,
task_data.stream_image_progress_interval,
)
gc(context)
user_stopped = isinstance(task.error, StopAsyncIteration)
filters, filter_params = task_data.filters, task_data.filter_params
filtered_images = filter_images(context, images, filters, filter_params) if not user_stopped else images
if len(filters) > 0 and not user_stopped:
filtered_images = backend.filter_images(context, images, filters, filter_params, input_type="base64")
else:
filtered_images = images
if task_data.save_to_disk_path is not None:
save_images_to_disk(images, filtered_images, req, task_data, output_format)
if task_data.block_nsfw:
filtered_images = filter_nsfw(filtered_images)
if save_data.save_to_disk_path is not None:
images_pil = [base64_str_to_img(img) for img in images]
filtered_images_pil = [base64_str_to_img(img) for img in filtered_images]
save_images_to_disk(images_pil, filtered_images_pil, req, task_data, models_data, output_format, save_data)
seeds = [*range(req.seed, req.seed + len(images))]
if task_data.show_only_filtered_image or filtered_images is images:
@ -200,148 +219,92 @@ def make_images_internal(
def generate_images_internal(
context,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
stream_image_progress: bool,
stream_image_progress_interval: int,
):
context.temp_images.clear()
from easydiffusion.backend_manager import backend
callback = make_step_callback(
callback = make_step_callback(context, req, task_data, data_queue, task_temp_images, step_callback)
req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8
if req.control_image and task_data.control_filter_to_apply:
req.controlnet_filter = task_data.control_filter_to_apply
if req.init_image is not None and int(req.num_inference_steps * req.prompt_strength) == 0:
req.prompt_strength = 1 / req.num_inference_steps if req.num_inference_steps > 0 else 1
backend.set_options(
context,
req,
task_data,
data_queue,
task_temp_images,
step_callback,
stream_image_progress,
stream_image_progress_interval,
output_format=output_format.output_format,
output_quality=output_format.output_quality,
output_lossless=output_format.output_lossless,
vae_tiling=task_data.enable_vae_tiling,
stream_image_progress=stream_image_progress,
stream_image_progress_interval=stream_image_progress_interval,
clip_skip=2 if task_data.clip_skip else 1,
)
try:
if req.init_image is not None and not context.test_diffusers:
req.sampler_name = "ddim"
images = backend.generate_images(context, callback=callback, output_type="base64", **req.dict())
req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8
if req.control_image and task_data.control_filter_to_apply:
req.control_image = get_image(req.control_image)
req.control_image = resize_img(req.control_image.convert("RGB"), req.width, req.height, clamp_to_8=True)
req.control_image = filter_images(context, req.control_image, task_data.control_filter_to_apply)[0]
if req.init_image is not None and int(req.num_inference_steps * req.prompt_strength) == 0:
req.prompt_strength = 1 / req.num_inference_steps if req.num_inference_steps > 0 else 1
if context.test_diffusers:
pipe = context.models["stable-diffusion"]["default"]
if hasattr(pipe.unet, "_allocate_trt_buffers_backup"):
setattr(pipe.unet, "_allocate_trt_buffers", pipe.unet._allocate_trt_buffers_backup)
delattr(pipe.unet, "_allocate_trt_buffers_backup")
if hasattr(pipe.unet, "_allocate_trt_buffers"):
convert_to_trt = models_data.model_params["stable-diffusion"].get("convert_to_tensorrt", False)
if convert_to_trt:
pipe.unet.forward = pipe.unet._trt_forward
# pipe.vae.decoder.forward = pipe.vae.decoder._trt_forward
log.info(f"Setting unet.forward to TensorRT")
else:
log.info(f"Not using TensorRT for unet.forward")
pipe.unet.forward = pipe.unet._non_trt_forward
# pipe.vae.decoder.forward = pipe.vae.decoder._non_trt_forward
setattr(pipe.unet, "_allocate_trt_buffers_backup", pipe.unet._allocate_trt_buffers)
delattr(pipe.unet, "_allocate_trt_buffers")
images = generate_images(context, callback=callback, **req.dict())
user_stopped = False
except UserInitiatedStop:
images = []
user_stopped = True
if context.partial_x_samples is not None:
if context.test_diffusers:
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
else:
images = latent_samples_to_images(context, context.partial_x_samples)
finally:
if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
if not context.test_diffusers:
del context.partial_x_samples
context.partial_x_samples = None
return images, user_stopped
return images
def construct_response(images: list, seeds: list, output_format: OutputFormatData):
return [
ResponseImage(
data=img_to_base64_str(
img,
output_format.output_format,
output_format.output_quality,
output_format.output_lossless,
),
seed=seed,
)
for img, seed in zip(images, seeds)
]
return [ResponseImage(data=img, seed=seed) for img, seed in zip(images, seeds)]
def make_step_callback(
context,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
data_queue: queue.Queue,
task_temp_images: list,
step_callback,
stream_image_progress: bool,
stream_image_progress_interval: int,
):
from easydiffusion.backend_manager import backend
n_steps = req.num_inference_steps if req.init_image is None else int(req.num_inference_steps * req.prompt_strength)
last_callback_time = -1
def update_temp_img(x_samples, task_temp_images: list):
def update_temp_img(images, task_temp_images: list):
partial_images = []
if context.test_diffusers:
images = diffusers_latent_samples_to_images(context, x_samples)
else:
images = latent_samples_to_images(context, x_samples)
if images is None:
return []
if task_data.block_nsfw:
images = filter_images(context, images, "nsfw_checker")
images = filter_nsfw(images, print_log=False)
for i, img in enumerate(images):
img = img.convert("RGB")
img = resize_img(img, req.width, req.height)
buf = img_to_buffer(img, output_format="JPEG")
context.temp_images[f"{task_data.request_id}/{i}"] = buf
task_temp_images[i] = buf
partial_images.append({"path": f"/image/tmp/{task_data.request_id}/{i}"})
del images
return partial_images
def on_image_step(x_samples, i, *args):
def on_image_step(images, i, *args):
nonlocal last_callback_time
if context.test_diffusers:
context.partial_x_samples = (x_samples, args[0])
else:
context.partial_x_samples = x_samples
step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
last_callback_time = time.time()
progress = {"step": i, "step_time": step_time, "total_steps": n_steps}
if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
progress["output"] = update_temp_img(context.partial_x_samples, task_temp_images)
if images is not None:
progress["output"] = update_temp_img(images, task_temp_images)
data_queue.put(json.dumps(progress))
step_callback()
if context.stop_processing:
raise UserInitiatedStop("User requested that we stop processing")
return on_image_step

View File

@ -14,19 +14,22 @@ class GenerateImageRequest(BaseModel):
num_outputs: int = 1
num_inference_steps: int = 50
guidance_scale: float = 7.5
distilled_guidance_scale: float = 3.5
init_image: Any = None
init_image_mask: Any = None
control_image: Any = None
control_alpha: Union[float, List[float]] = None
controlnet_filter: str = None
prompt_strength: float = 0.8
preserve_init_image_color_profile = False
strict_mask_border = False
preserve_init_image_color_profile: bool = False
strict_mask_border: bool = False
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
scheduler_name: str = None
hypernetwork_strength: float = 0
lora_alpha: Union[float, List[float]] = 0
tiling: str = "none" # "none", "x", "y", "xy"
tiling: str = None # None, "x", "y", "xy"
class FilterImageRequest(BaseModel):
@ -58,10 +61,17 @@ class OutputFormatData(BaseModel):
output_lossless: bool = False
class SaveToDiskData(BaseModel):
save_to_disk_path: str = None
metadata_output_format: str = "txt" # or "json"
class TaskData(BaseModel):
request_id: str = None
session_id: str = "session"
save_to_disk_path: str = None
class RenderTaskData(TaskData):
vram_usage_level: str = "balanced" # or "low" or "medium"
use_face_correction: Union[str, List[str]] = None # or "GFPGANv1.3"
@ -77,10 +87,10 @@ class TaskData(BaseModel):
filters: List[str] = []
filter_params: Dict[str, Dict[str, Any]] = {}
control_filter_to_apply: Union[str, List[str]] = None
enable_vae_tiling: bool = True
show_only_filtered_image: bool = False
block_nsfw: bool = False
metadata_output_format: str = "txt" # or "json"
stream_image_progress: bool = False
stream_image_progress_interval: int = 5
clip_skip: bool = False
@ -93,7 +103,7 @@ class MergeRequest(BaseModel):
model1: str = None
ratio: float = None
out_path: str = "mix"
use_fp16 = True
use_fp16: bool = True
class Image:
@ -126,12 +136,14 @@ class GenerateImageResponse:
task_data: TaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
images: list,
):
self.render_request = render_request
self.task_data = task_data
self.models_data = models_data
self.output_format = output_format
self.save_data = save_data
self.images = images
def json(self):
@ -141,6 +153,7 @@ class GenerateImageResponse:
task_data = self.task_data.dict()
task_data.update(self.output_format.dict())
task_data.update(self.save_data.dict())
res = {
"status": "succeeded",
@ -203,22 +216,19 @@ def convert_legacy_render_req_to_new(old_req: dict):
model_paths["controlnet"] = old_req.get("use_controlnet_model")
model_paths["embeddings"] = old_req.get("use_embeddings_model")
model_paths["gfpgan"] = old_req.get("use_face_correction", "")
model_paths["gfpgan"] = model_paths["gfpgan"] if "gfpgan" in model_paths["gfpgan"].lower() else None
## ensure that the model name is in the model path
for model_name in ("gfpgan", "codeformer"):
model_paths[model_name] = old_req.get("use_face_correction", "")
model_paths[model_name] = model_paths[model_name] if model_name in model_paths[model_name].lower() else None
model_paths["codeformer"] = old_req.get("use_face_correction", "")
model_paths["codeformer"] = model_paths["codeformer"] if "codeformer" in model_paths["codeformer"].lower() else None
for model_name in ("realesrgan", "latent_upscaler", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
model_paths[model_name] = old_req.get("use_upscale", "")
model_paths[model_name] = model_paths[model_name] if model_name in model_paths[model_name].lower() else None
model_paths["realesrgan"] = old_req.get("use_upscale", "")
model_paths["realesrgan"] = model_paths["realesrgan"] if "realesrgan" in model_paths["realesrgan"].lower() else None
model_paths["latent_upscaler"] = old_req.get("use_upscale", "")
model_paths["latent_upscaler"] = (
model_paths["latent_upscaler"] if "latent_upscaler" in model_paths["latent_upscaler"].lower() else None
)
if "control_filter_to_apply" in old_req:
filter_model = old_req["control_filter_to_apply"]
model_paths[filter_model] = filter_model
old_req["control_filter_to_apply"] = convert_legacy_controlnet_filter_name(old_req["control_filter_to_apply"])
if old_req.get("block_nsfw"):
model_paths["nsfw_checker"] = "nsfw_checker"
@ -234,8 +244,12 @@ def convert_legacy_render_req_to_new(old_req: dict):
}
# move the filter params
if model_paths["realesrgan"]:
filter_params["realesrgan"] = {"scale": int(old_req.get("upscale_amount", 4))}
for model_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
if model_paths[model_name]:
filter_params[model_name] = {
"upscaler": model_paths[model_name],
"scale": int(old_req.get("upscale_amount", 4)),
}
if model_paths["latent_upscaler"]:
filter_params["latent_upscaler"] = {
"prompt": old_req["prompt"],
@ -254,14 +268,31 @@ def convert_legacy_render_req_to_new(old_req: dict):
if old_req.get("block_nsfw"):
filters.append("nsfw_checker")
if model_paths["codeformer"]:
filters.append("codeformer")
elif model_paths["gfpgan"]:
filters.append("gfpgan")
for model_name in ("gfpgan", "codeformer"):
if model_paths[model_name]:
filters.append(model_name)
break
if model_paths["realesrgan"]:
filters.append("realesrgan")
elif model_paths["latent_upscaler"]:
filters.append("latent_upscaler")
for model_name in ("realesrgan", "latent_upscaler", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
if model_paths[model_name]:
filters.append(model_name)
break
return new_req
def convert_legacy_controlnet_filter_name(filter):
from easydiffusion.backend_manager import backend
if filter is None:
return None
controlnet_filter_names = backend.list_controlnet_filters()
def apply(f):
return f"controlnet_{f}" if f in controlnet_filter_names else f
if isinstance(filter, list):
return [apply(f) for f in filter]
return apply(filter)

View File

@ -1,4 +1,5 @@
import logging
import hashlib
log = logging.getLogger("easydiffusion")
@ -6,6 +7,8 @@ from .save_utils import (
save_images_to_disk,
get_printable_request,
)
from .nsfw_checker import filter_nsfw
def sha256sum(filename):
sha256 = hashlib.sha256()
@ -17,4 +20,3 @@ def sha256sum(filename):
sha256.update(data)
return sha256.hexdigest()

View File

@ -0,0 +1,80 @@
# possibly move this to sdkit in the future
import os
# mirror of https://huggingface.co/AdamCodd/vit-base-nsfw-detector/blob/main/onnx/model_quantized.onnx
NSFW_MODEL_URL = (
"https://github.com/easydiffusion/sdkit-test-data/releases/download/assets/vit-base-nsfw-detector-quantized.onnx"
)
MODEL_HASH_QUICK = "220123559305b1b07b7a0894c3471e34dccd090d71cdf337dd8012f9e40d6c28"
nsfw_check_model = None
def filter_nsfw(images, blur_radius: float = 75, print_log=True):
global nsfw_check_model
from easydiffusion.model_manager import get_model_dirs
from sdkit.utils import base64_str_to_img, img_to_base64_str, download_file, log, hash_file_quick
import onnxruntime as ort
from PIL import ImageFilter
import numpy as np
if nsfw_check_model is None:
model_dir = get_model_dirs("nsfw-checker")[0]
model_path = os.path.join(model_dir, "vit-base-nsfw-detector-quantized.onnx")
os.makedirs(model_dir, exist_ok=True)
if not os.path.exists(model_path) or hash_file_quick(model_path) != MODEL_HASH_QUICK:
download_file(NSFW_MODEL_URL, model_path)
nsfw_check_model = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
# Preprocess the input image
def preprocess_image(img):
img = img.convert("RGB")
# config based on based on https://huggingface.co/AdamCodd/vit-base-nsfw-detector/blob/main/onnx/preprocessor_config.json
# Resize the image
img = img.resize((384, 384))
# Normalize the image
img = np.array(img) / 255.0 # Scale pixel values to [0, 1]
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])
img = (img - mean) / std
# Transpose to match input shape (batch_size, channels, height, width)
img = np.transpose(img, (2, 0, 1)).astype(np.float32)
# Add batch dimension
img = np.expand_dims(img, axis=0)
return img
# Run inference
input_name = nsfw_check_model.get_inputs()[0].name
output_name = nsfw_check_model.get_outputs()[0].name
if print_log:
log.info("Running NSFW checker (onnx)")
results = []
for img in images:
is_base64 = isinstance(img, str)
input_img = base64_str_to_img(img) if is_base64 else img
result = nsfw_check_model.run([output_name], {input_name: preprocess_image(input_img)})
is_nsfw = [np.argmax(arr) == 1 for arr in result][0]
if is_nsfw:
output_img = input_img.filter(ImageFilter.GaussianBlur(blur_radius))
output_img = img_to_base64_str(output_img) if is_base64 else output_img
else:
output_img = img
results.append(output_img)
return results

View File

@ -7,7 +7,14 @@ from datetime import datetime
from functools import reduce
from easydiffusion import app
from easydiffusion.types import GenerateImageRequest, TaskData, OutputFormatData
from easydiffusion.types import (
GenerateImageRequest,
TaskData,
RenderTaskData,
OutputFormatData,
SaveToDiskData,
ModelsData,
)
from numpy import base_repr
from sdkit.utils import save_dicts, save_images
from sdkit.models.model_loader.embeddings import get_embedding_token
@ -24,12 +31,15 @@ TASK_TEXT_MAPPING = {
"clip_skip": "Clip Skip",
"use_controlnet_model": "ControlNet model",
"control_filter_to_apply": "ControlNet Filter",
"control_alpha": "ControlNet Strength",
"use_vae_model": "VAE model",
"sampler_name": "Sampler",
"scheduler_name": "Scheduler",
"width": "Width",
"height": "Height",
"num_inference_steps": "Steps",
"guidance_scale": "Guidance Scale",
"distilled_guidance_scale": "Distilled Guidance",
"prompt_strength": "Prompt Strength",
"use_lora_model": "LoRA model",
"lora_alpha": "LoRA Strength",
@ -95,7 +105,7 @@ def format_folder_name(format: str, req: GenerateImageRequest, task_data: TaskDa
def format_file_name(
format: str,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
now: float,
batch_file_number: int,
folder_img_number: ImageNumber,
@ -118,13 +128,19 @@ def format_file_name(
def save_images_to_disk(
images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData
images: list,
filtered_images: list,
req: GenerateImageRequest,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
now = time.time()
app_config = app.getConfig()
folder_format = app_config.get("folder_format", "$id")
save_dir_path = os.path.join(task_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
metadata_entries = get_metadata_entries_for_request(req, task_data, output_format)
save_dir_path = os.path.join(save_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
metadata_entries = get_metadata_entries_for_request(req, task_data, models_data, output_format, save_data)
file_number = calculate_img_number(save_dir_path, task_data)
make_filename = make_filename_callback(
app_config.get("filename_format", "$p_$tsb64"),
@ -143,8 +159,8 @@ def save_images_to_disk(
output_quality=output_format.output_quality,
output_lossless=output_format.output_lossless,
)
if task_data.metadata_output_format:
for metadata_output_format in task_data.metadata_output_format.split(","):
if save_data.metadata_output_format:
for metadata_output_format in save_data.metadata_output_format.split(","):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
@ -179,8 +195,8 @@ def save_images_to_disk(
output_quality=output_format.output_quality,
output_lossless=output_format.output_lossless,
)
if task_data.metadata_output_format:
for metadata_output_format in task_data.metadata_output_format.split(","):
if save_data.metadata_output_format:
for metadata_output_format in save_data.metadata_output_format.split(","):
if metadata_output_format.lower() in ["json", "txt", "embed"]:
save_dicts(
metadata_entries,
@ -191,11 +207,17 @@ def save_images_to_disk(
)
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData):
metadata = get_printable_request(req, task_data, output_format)
def get_metadata_entries_for_request(
req: GenerateImageRequest,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
metadata = get_printable_request(req, task_data, models_data, output_format, save_data)
# if text, format it in the text format expected by the UI
is_txt_format = task_data.metadata_output_format and "txt" in task_data.metadata_output_format.lower().split(",")
is_txt_format = save_data.metadata_output_format and "txt" in save_data.metadata_output_format.lower().split(",")
if is_txt_format:
def format_value(value):
@ -214,13 +236,20 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
return entries
def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData):
def get_printable_request(
req: GenerateImageRequest,
task_data: RenderTaskData,
models_data: ModelsData,
output_format: OutputFormatData,
save_data: SaveToDiskData,
):
req_metadata = req.dict()
task_data_metadata = task_data.dict()
task_data_metadata.update(output_format.dict())
task_data_metadata.update(save_data.dict())
app_config = app.getConfig()
using_diffusers = app_config.get("test_diffusers", True)
using_diffusers = app_config.get("backend", "ed_diffusers") in ("ed_diffusers", "webui")
# Save the metadata in the order defined in TASK_TEXT_MAPPING
metadata = {}
@ -230,25 +259,11 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
elif key in task_data_metadata:
metadata[key] = task_data_metadata[key]
if key == "use_embeddings_model" and using_diffusers:
embeddings_extensions = {".pt", ".bin", ".safetensors"}
if key == "use_embeddings_model" and task_data_metadata[key] and using_diffusers:
embeddings_used = models_data.model_paths["embeddings"]
embeddings_used = embeddings_used if isinstance(embeddings_used, list) else [embeddings_used]
def scan_directory(directory_path: str):
used_embeddings = []
for entry in os.scandir(directory_path):
if entry.is_file():
# Check if the filename has the right extension
if not any(map(lambda ext: entry.name.endswith(ext), embeddings_extensions)):
continue
embedding_name_regex = regex.compile(r"(^|[\s,])" + regex.escape(get_embedding_token(entry.name)) + r"([+-]*$|[\s,]|[+-]+[\s,])")
if embedding_name_regex.search(req.prompt) or embedding_name_regex.search(req.negative_prompt):
used_embeddings.append(entry.path)
elif entry.is_dir():
used_embeddings.extend(scan_directory(entry.path))
return used_embeddings
used_embeddings = scan_directory(os.path.join(app.MODELS_DIR, "embeddings"))
metadata["use_embeddings_model"] = used_embeddings if len(used_embeddings) > 0 else None
metadata["use_embeddings_model"] = embeddings_used if len(embeddings_used) > 0 else None
# Clean up the metadata
if req.init_image is None and "prompt_strength" in metadata:
@ -269,7 +284,17 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
del metadata[key]
else:
for key in (
x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps", "use_controlnet_model", "control_filter_to_apply"] if x in metadata
x
for x in [
"use_lora_model",
"lora_alpha",
"clip_skip",
"tiling",
"latent_upscaler_steps",
"use_controlnet_model",
"control_filter_to_apply",
]
if x in metadata
):
del metadata[key]
@ -279,7 +304,7 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
def make_filename_callback(
filename_format: str,
req: GenerateImageRequest,
task_data: TaskData,
task_data: RenderTaskData,
folder_img_number: int,
suffix=None,
now=None,
@ -296,7 +321,7 @@ def make_filename_callback(
return make_filename
def _calculate_img_number(save_dir_path: str, task_data: TaskData):
def _calculate_img_number(save_dir_path: str, task_data: RenderTaskData):
def get_highest_img_number(accumulator: int, file: os.DirEntry) -> int:
if not file.is_file:
return accumulator
@ -340,5 +365,5 @@ def _calculate_img_number(save_dir_path: str, task_data: TaskData):
_calculate_img_number.session_img_numbers = {}
def calculate_img_number(save_dir_path: str, task_data: TaskData):
def calculate_img_number(save_dir_path: str, task_data: RenderTaskData):
return ImageNumber(lambda: _calculate_img_number(save_dir_path, task_data))

View File

@ -35,7 +35,13 @@
<h1>
<img id="logo_img" src="/media/images/icon-512x512.png" >
Easy Diffusion
<small><span id="version">v3.0.2</span> <span id="updateBranchLabel"></span></small>
<small>
<span id="version">
<span class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">v3.0.10</span>
<span class="gated-feature" data-feature-keys="backend_webui">v3.5.0</span>
</span> <span id="updateBranchLabel"></span>
<div id="engine-logo" class="gated-feature" data-feature-keys="backend_webui">(Powered by <a id="backend-url" href="https://github.com/lllyasviel/stable-diffusion-webui-forge" target="_blank">Stable Diffusion WebUI Forge</a>)</div>
</small>
</h1>
</div>
<div id="server-status">
@ -73,7 +79,7 @@
</div>
<div id="prompt-toolbar-right" class="toolbar-right">
<button id="image-modifier-dropdown" class="tertiaryButton smallButton">+ Image Modifiers</button>
<button id="embeddings-button" class="tertiaryButton smallButton displayNone">+ Embedding</button>
<button id="embeddings-button" class="tertiaryButton smallButton gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">+ Embedding</button>
</div>
</div>
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
@ -83,7 +89,7 @@
<a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">Click to learn more about Negative Prompts</span></i></a>
<small>(optional)</small>
</label>
<button id="negative-embeddings-button" class="tertiaryButton smallButton displayNone">+ Negative Embedding</button>
<button id="negative-embeddings-button" class="tertiaryButton smallButton gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">+ Negative Embedding</button>
<div class="collapsible-content">
<textarea id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)"></textarea>
</div>
@ -155,11 +161,11 @@
<div id="editor-settings-entries" class="collapsible-content">
<div><table>
<tr><b class="settings-subheader">Image Settings</b></tr>
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td>
<td><input id="num_outputs_total" name="num_outputs_total" value="1" type="number" value="1" min="1" step="1" onkeypres"="preventNonNumericalInput(event)">
<td><input id="num_outputs_total" name="num_outputs_total" value="1" type="number" min="1" step="1" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
<label><small>(total)</small></label>
<input id="num_outputs_parallel" name="num_outputs_parallel" value="1" type="number" value="1" min="1" step="1" onkeypress="preventNonNumericalInput(event)">
<input id="num_outputs_parallel" name="num_outputs_parallel" value="1" type="number" value="1" min="1" step="1" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
<label id="num_outputs_parallel_label" for="num_outputs_parallel"><small>(in parallel)</small></label></td>
</tr>
<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td class="model-input">
@ -174,14 +180,14 @@
<!-- <label><small>Takes upto 20 mins the first time</small></label> -->
</td>
</tr>
<tr class="pl-5 displayNone" id="clip_skip_config">
<tr class="pl-5 gated-feature" id="clip_skip_config" data-feature-keys="backend_ed_diffusers backend_webui">
<td><label for="clip_skip">Clip Skip:</label></td>
<td class="diffusers-restart-needed">
<input id="clip_skip" name="clip_skip" type="checkbox">
<a href="https://github.com/easydiffusion/easydiffusion/wiki/Clip-Skip" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Clip Skip</span></i></a>
</td>
</tr>
<tr id="controlnet_model_container" class="pl-5">
<tr id="controlnet_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">
<td><label for="controlnet_model">ControlNet Image:</label></td>
<td class="diffusers-restart-needed">
<div id="control_image_wrapper" class="preview_image_wrapper">
@ -201,40 +207,94 @@
<option value="openpose_faceonly">OpenPose face-only</option>
<option value="openpose_hand">OpenPose hand</option>
<option value="openpose_full">OpenPose full</option>
<option value="animal_openpose" class="gated-feature" data-feature-keys="backend_webui">animal_openpose</option>
<option value="densepose_parula (black bg & blue torso)" class="gated-feature" data-feature-keys="backend_webui">densepose_parula (black bg & blue torso)</option>
<option value="densepose (pruple bg & purple torso)" class="gated-feature" data-feature-keys="backend_webui">densepose (pruple bg & purple torso)</option>
<option value="dw_openpose_full" class="gated-feature" data-feature-keys="backend_webui">dw_openpose_full</option>
<option value="mediapipe_face" class="gated-feature" data-feature-keys="backend_webui">mediapipe_face</option>
<option value="instant_id_face_keypoints" class="gated-feature" data-feature-keys="backend_webui">instant_id_face_keypoints</option>
<option value="InsightFace+CLIP-H (IPAdapter)" class="gated-feature" data-feature-keys="backend_webui">InsightFace+CLIP-H (IPAdapter)</option>
<option value="InsightFace (InstantID)" class="gated-feature" data-feature-keys="backend_webui">InsightFace (InstantID)</option>
</optgroup>
<optgroup label="Outline">
<option value="canny">Canny (*)</option>
<option value="mlsd">Straight lines</option>
<option value="scribble_hed">Scribble hed (*)</option>
<option value="scribble_hedsafe">Scribble hedsafe</option>
<option value="scribble_hedsafe" class="gated-feature" data-feature-keys="backend_ed_diffusers">Scribble hedsafe</option>
<option value="scribble_pidinet">Scribble pidinet</option>
<option value="scribble_pidsafe">Scribble pidsafe</option>
<option value="scribble_pidsafe" class="gated-feature" data-feature-keys="backend_ed_diffusers">Scribble pidsafe</option>
<option value="scribble_xdog" class="gated-feature" data-feature-keys="backend_webui">scribble_xdog</option>
<option value="softedge_hed">Softedge hed</option>
<option value="softedge_hedsafe">Softedge hedsafe</option>
<option value="softedge_pidinet">Softedge pidinet</option>
<option value="softedge_pidsafe">Softedge pidsafe</option>
<option value="softedge_teed" class="gated-feature" data-feature-keys="backend_webui">softedge_teed</option>
</optgroup>
<optgroup label="Depth">
<option value="normal_bae">Normal bae (*)</option>
<option value="depth_midas">Depth midas</option>
<option value="normal_midas" class="gated-feature" data-feature-keys="backend_webui">normal_midas</option>
<option value="depth_zoe">Depth zoe</option>
<option value="depth_leres">Depth leres</option>
<option value="depth_leres++">Depth leres++</option>
<option value="depth_anything_v2" class="gated-feature" data-feature-keys="backend_webui">depth_anything_v2</option>
<option value="depth_anything" class="gated-feature" data-feature-keys="backend_webui">depth_anything</option>
<option value="depth_hand_refiner" class="gated-feature" data-feature-keys="backend_webui">depth_hand_refiner</option>
<option value="depth_marigold" class="gated-feature" data-feature-keys="backend_webui">depth_marigold</option>
</optgroup>
<optgroup label="Line art">
<option value="lineart_coarse">Lineart coarse</option>
<option value="lineart_realistic">Lineart realistic</option>
<option value="lineart_anime">Lineart anime</option>
<option value="lineart_standard (from white bg & black line)" class="gated-feature" data-feature-keys="backend_webui">lineart_standard (from white bg & black line)</option>
<option value="lineart_anime_denoise" class="gated-feature" data-feature-keys="backend_webui">lineart_anime_denoise</option>
</optgroup>
<optgroup label="Reference" class="gated-feature" data-feature-keys="backend_webui">
<option value="reference_adain">reference_adain</option>
<option value="reference_only">reference_only</option>
<option value="reference_adain+attn">reference_adain+attn</option>
</optgroup>
<optgroup label="Tile" class="gated-feature" data-feature-keys="backend_webui">
<option value="tile_colorfix">tile_colorfix</option>
<option value="tile_resample">tile_resample</option>
<option value="tile_colorfix+sharp">tile_colorfix+sharp</option>
</optgroup>
<optgroup label="CLIP (IPAdapter)" class="gated-feature" data-feature-keys="backend_webui">
<option value="CLIP-ViT-H (IPAdapter)">CLIP-ViT-H (IPAdapter)</option>
<option value="CLIP-G (Revision)">CLIP-G (Revision)</option>
<option value="CLIP-G (Revision ignore prompt)">CLIP-G (Revision ignore prompt)</option>
<option value="CLIP-ViT-bigG (IPAdapter)">CLIP-ViT-bigG (IPAdapter)</option>
<option value="InsightFace+CLIP-H (IPAdapter)">InsightFace+CLIP-H (IPAdapter)</option>
</optgroup>
<optgroup label="Inpaint" class="gated-feature" data-feature-keys="backend_webui">
<option value="inpaint_only">inpaint_only</option>
<option value="inpaint_only+lama">inpaint_only+lama</option>
<option value="inpaint_global_harmonious">inpaint_global_harmonious</option>
</optgroup>
<optgroup label="Segment" class="gated-feature" data-feature-keys="backend_webui">
<option value="seg_ufade20k">seg_ufade20k</option>
<option value="seg_ofade20k">seg_ofade20k</option>
<option value="seg_anime_face">seg_anime_face</option>
<option value="seg_ofcoco">seg_ofcoco</option>
</optgroup>
<optgroup label="Misc">
<option value="shuffle">Shuffle</option>
<option value="segment">Segment</option>
<option value="segment" class="gated-feature" data-feature-keys="backend_ed_diffusers">Segment</option>
<option value="invert (from white bg & black line)" class="gated-feature" data-feature-keys="backend_webui">invert (from white bg & black line)</option>
<option value="threshold" class="gated-feature" data-feature-keys="backend_webui">threshold</option>
<option value="t2ia_sketch_pidi" class="gated-feature" data-feature-keys="backend_webui">t2ia_sketch_pidi</option>
<option value="t2ia_color_grid" class="gated-feature" data-feature-keys="backend_webui">t2ia_color_grid</option>
<option value="recolor_intensity" class="gated-feature" data-feature-keys="backend_webui">recolor_intensity</option>
<option value="recolor_luminance" class="gated-feature" data-feature-keys="backend_webui">recolor_luminance</option>
<option value="blur_gaussian" class="gated-feature" data-feature-keys="backend_webui">blur_gaussian</option>
</optgroup>
</select>
<br/>
<label for="controlnet_model"><small>Model:</small></label> <input id="controlnet_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<!-- <br/>
<label><small>Will download the necessary models, the first time.</small></label> -->
<br/>
<label><small>Will download the necessary models, the first time.</small></label>
<label for="controlnet_alpha_slider"><small>Strength:</small></label> <input id="controlnet_alpha_slider" name="controlnet_alpha_slider" class="editor-slider" value="10" type="range" min="0" max="10"> <input id="controlnet_alpha" name="controlnet_alpha" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal">
</div>
</td>
</tr>
@ -246,28 +306,60 @@
<select id="sampler_name" name="sampler_name">
<option value="plms">PLMS</option>
<option value="ddim">DDIM</option>
<option value="ddim_cfgpp" class="gated-feature" data-feature-keys="backend_webui">DDIM CFG++</option>
<option value="heun">Heun</option>
<option value="euler">Euler</option>
<option value="euler_a" selected>Euler Ancestral</option>
<option value="dpm2">DPM2</option>
<option value="dpm2_a">DPM2 Ancestral</option>
<option value="dpm_fast" class="gated-feature" data-feature-keys="backend_webui">DPM Fast</option>
<option value="dpm_adaptive" class="gated-feature" data-feature-keys="backend_ed_classic backend_webui">DPM Adaptive</option>
<option value="lms">LMS</option>
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
<option value="dpm_solver_stability" class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">DPM Solver (Stability AI)</option>
<option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
<option value="dpmpp_2m">DPM++ 2m (Karras)</option>
<option value="dpmpp_2m_sde" class="diffusers-only">DPM++ 2m SDE (Karras)</option>
<option value="dpmpp_2m_sde" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DPM++ 2m SDE</option>
<option value="dpmpp_2m_sde_heun" class="gated-feature" data-feature-keys="backend_webui">DPM++ 2m SDE Heun</option>
<option value="dpmpp_3m_sde" class="gated-feature" data-feature-keys="backend_webui">DPM++ 3M SDE</option>
<option value="dpmpp_sde">DPM++ SDE (Karras)</option>
<option value="dpm_adaptive" class="k_diffusion-only">DPM Adaptive (Karras)</option>
<option value="ddpm" class="diffusers-only">DDPM</option>
<option value="deis" class="diffusers-only">DEIS</option>
<option value="unipc_snr" class="k_diffusion-only">UniPC SNR</option>
<option value="unipc_tu">UniPC TU</option>
<option value="unipc_snr_2" class="k_diffusion-only">UniPC SNR 2</option>
<option value="unipc_tu_2" class="k_diffusion-only">UniPC TU 2</option>
<option value="unipc_tq" class="k_diffusion-only">UniPC TQ</option>
<option value="restart" class="gated-feature" data-feature-keys="backend_webui">Restart</option>
<option value="heun_pp2" class="gated-feature" data-feature-keys="backend_webui">Heun PP2</option>
<option value="ipndm" class="gated-feature" data-feature-keys="backend_webui">IPNDM</option>
<option value="ipndm_v" class="gated-feature" data-feature-keys="backend_webui">IPNDM_V</option>
<option value="ddpm" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DDPM</option>
<option value="deis" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DEIS</option>
<option value="lcm" class="gated-feature" data-feature-keys="backend_webui">LCM</option>
<option value="forge_flux_realistic" class="gated-feature" data-feature-keys="backend_webui">[Forge] Flux Realistic</option>
<option value="forge_flux_realistic_slow" class="gated-feature" data-feature-keys="backend_webui">[Forge] Flux Realistic (Slow)</option>
<option value="unipc_snr" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC SNR</option>
<option value="unipc_tu" class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">UniPC TU</option>
<option value="unipc_snr_2" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC SNR 2</option>
<option value="unipc_tu_2" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC TU 2</option>
<option value="unipc_tq" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC TQ</option>
</select>
<a href="https://github.com/easydiffusion/easydiffusion/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
</td></tr>
<tr class="pl-5 warning-label displayNone" id="fluxSamplerWarning"><td></td><td>Please avoid 'Euler Ancestral' with Flux!</td></tr>
<tr id="schedulerSelection" class="pl-5 gated-feature" data-feature-keys="backend_webui"><td><label for="scheduler_name">Scheduler:</label></td><td>
<select id="scheduler_name" name="scheduler_name">
<option value="automatic">Automatic</option>
<option value="uniform">Uniform</option>
<option value="karras">Karras</option>
<option value="exponential">Exponential</option>
<option value="polyexponential">Polyexponential</option>
<option value="sgm_uniform">SGM Uniform</option>
<option value="kl_optimal">KL Optimal</option>
<option value="align_your_steps">Align Your Steps</option>
<option value="simple" selected>Simple</option>
<option value="normal">Normal</option>
<option value="ddim">DDIM</option>
<option value="beta">Beta</option>
<option value="turbo">Turbo</option>
<option value="align_your_steps_GITS">Align Your Steps GITS</option>
<option value="align_your_steps_11">Align Your Steps 11</option>
<option value="align_your_steps_32">Align Your Steps 32</option>
</select>
</td></tr>
<tr class="pl-5"><td><label>Image Size: </label></td><td id="image-size-options">
<select id="width" name="width" value="512">
<option value="128">128</option>
@ -291,7 +383,9 @@
<option value="2048">2048</option>
</select>
<label id="widthLabel" for="width"><small><span>(width)</span></small></label>
<span id="swap-width-height" class="clickable smallButton" style="margin-left: 2px; margin-right:2px;"><i class="fa-solid fa-right-left"><span class="simple-tooltip top-left"> Swap width and height </span></i></span>
<div class="tooltip-container">
<span id="swap-width-height" class="clickable smallButton" style="margin-left: 2px; margin-right:2px;"><i class="fa-solid fa-right-left"><span class="simple-tooltip top-left"> Swap width and height </span></i></span>
</div>
<select id="height" name="height" value="512">
<option value="128">128</option>
<option value="192">192</option>
@ -318,9 +412,9 @@
<span id="recent-resolutions-button" class="clickable"><i class="fa-solid fa-sliders"><span class="simple-tooltip top-left"> Advanced sizes </span></i></span>
<div id="recent-resolutions-popup" class="displayNone">
<small>Custom size:</small><br>
<input id="custom-width" name="custom-width" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)">
<input id="custom-width" name="custom-width" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
&times;
<input id="custom-height" name="custom-height" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)"><br>
<input id="custom-height" name="custom-height" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"><br>
<small>Resize:</small><br>
<input id="resize-slider" name="resize-slider" class="editor-slider" value="1" type="range" min="0.4" max="2" step="0.005" style="width:100%;"><br>
<div id="enlarge-buttons"><button data-factor="0.5" class="tertiaryButton smallButton">×0.5</button>&nbsp;<button data-factor="1.2" class="tertiaryButton smallButton">×1.2</button>&nbsp;<button data-factor="1.5" class="tertiaryButton smallButton">×1.5</button>&nbsp;<button data-factor="2" class="tertiaryButton smallButton">×2</button>&nbsp;<button data-factor="3" class="tertiaryButton smallButton">×3</button></div>
@ -340,12 +434,14 @@
</div>
</div>
</div>
<div id="small_image_warning" class="displayNone">Small image sizes can cause bad image quality</div>
<div id="small_image_warning" class="displayNone warning-label">Small image sizes can cause bad image quality</div>
</td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
<tr id="lora_model_container" class="pl-5">
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"></td></tr>
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
<tr class="pl-5 displayNone warning-label" id="guidanceWarning"><td></td><td id="guidanceWarningText"></td></tr>
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"><br/></td></tr>
<tr id="distilled_guidance_scale_container" class="pl-5 displayNone"><td><label for="distilled_guidance_scale_slider">Distilled Guidance:</label></td><td> <input id="distilled_guidance_scale_slider" name="distilled_guidance_scale_slider" class="editor-slider" value="35" type="range" min="11" max="500"> <input id="distilled_guidance_scale" name="distilled_guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
<tr id="lora_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">
<td>
<label for="lora_model">LoRA:</label>
</td>
@ -353,14 +449,14 @@
<div id="lora_model" data-path=""></div>
</td>
</tr>
<tr id="hypernetwork_model_container" class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<tr id="hypernetwork_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</td></tr>
<tr id="hypernetwork_strength_container" class="pl-5">
<tr id="hypernetwork_strength_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic">
<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"><br/></td>
</tr>
<tr id="tiling_container" class="pl-5">
<tr id="tiling_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers">
<td><label for="tiling">Seamless Tiling:</label></td>
<td class="diffusers-restart-needed">
<select id="tiling" name="tiling">
@ -383,8 +479,15 @@
</span>
</td></tr>
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
</td></tr>
<tr class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers">
<td><label for="tiling">Enable VAE Tiling:</label></td>
<td class="diffusers-restart-needed">
<input id="enable_vae_tiling" name="enable_vae_tiling" type="checkbox" checked>
<label><small>Optimizes memory for larger images</small></label>
</td>
</tr>
</table></div>
<div><ul>
@ -393,8 +496,8 @@
<li class="pl-5" id="use_face_correction_container">
<input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div>
<table id="codeformer_settings" class="displayNone sub-settings">
<tr class="pl-5"><td><label for="codeformer_fidelity_slider">Strength:</label></td><td><input id="codeformer_fidelity_slider" name="codeformer_fidelity_slider" class="editor-slider" value="5" type="range" min="0" max="10"> <input id="codeformer_fidelity" name="codeformer_fidelity" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="codeformer_upscale_faces">Upscale Faces:</label></td><td><input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox" checked> <label><small>(improves the resolution of faces)</small></label></td></tr>
<tr class="pl-5"><td><label for="codeformer_fidelity_slider">Strength:</label></td><td><input id="codeformer_fidelity_slider" name="codeformer_fidelity_slider" class="editor-slider" value="5" type="range" min="0" max="10"> <input id="codeformer_fidelity" name="codeformer_fidelity" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
<tr class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers"><td><label for="codeformer_upscale_faces">Upscale Faces:</label></td><td><input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox" checked> <label><small>(improves the resolution of faces)</small></label></td></tr>
</table>
</li>
<li class="pl-5">
@ -407,10 +510,16 @@
<select id="upscale_model" name="upscale_model">
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
<option value="latent_upscaler">Latent Upscaler 2x</option>
<option value="ESRGAN_4x" class="pl-5 gated-feature" data-feature-keys="backend_webui">ESRGAN_4x</option>
<option value="Lanczos" class="pl-5 gated-feature" data-feature-keys="backend_webui">Lanczos</option>
<option value="Nearest" class="pl-5 gated-feature" data-feature-keys="backend_webui">Nearest</option>
<option value="ScuNET" class="pl-5 gated-feature" data-feature-keys="backend_webui">ScuNET GAN</option>
<option value="ScuNET PSNR" class="pl-5 gated-feature" data-feature-keys="backend_webui">ScuNET PSNR</option>
<option value="SwinIR_4x" class="pl-5 gated-feature" data-feature-keys="backend_webui">SwinIR 4x</option>
<option value="latent_upscaler" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">Latent Upscaler 2x</option>
</select>
<table id="latent_upscaler_settings" class="displayNone sub-settings">
<tr class="pl-5"><td><label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td><input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
<tr class="pl-5"><td><label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td><input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"></td></tr>
</table>
</li>
<li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
@ -455,14 +564,14 @@
<div class="dropdown-content">
<div class="dropdown-item">
<input id="thumbnail_size" name="thumbnail_size" class="editor-slider" type="range" value="70" min="5" max="200" oninput="sliderUpdate(event)">
<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)">&nbsp;%
<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)" inputmode="numeric">&nbsp;%
</div>
</div>
</div>
<div class="clearfix" style="clear: both;"></div>
</div>
<div id="supportBanner" class="displayNone">
If you found this project useful and want to help keep it alive, please consider <a href="https://ko-fi.com/easydiffusion" target="_blank">buying me a coffee</a> or <a href="https://www.patreon.com/EasyDiffusion" target="_blank">supporting me on Patreon</a> to help cover the cost of development and maintenance! Or even better, <a href="https://cmdr2.itch.io/easydiffusion" target="_blank">purchasing it at the full price</a>. Thank you for your support!
If you found this project useful and want to help keep it alive, please consider <a href="https://ko-fi.com/easydiffusion" target="_blank">buying me a coffee</a> to help cover the cost of development and maintenance! Thanks for your support!
</div>
</div>
</div>
@ -508,28 +617,44 @@
<div class="float-container">
<div class="float-child">
<h1>Help</h1>
<ul id="help-links">
<li><span class="help-section">Using the software</span>
<div id="help-links">
<h4><span class="help-section"><b>Basics</b></span></h4>
<ul>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/How-To-Use" target="_blank"><i class="fa-solid fa-book fa-fw"></i> How to use</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Overview" target="_blank"><i class="fa-solid fa-list fa-fw"></i> UI Overview</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-Prompts" target="_blank"><i class="fa-solid fa-pen-to-square fa-fw"></i> Writing prompts</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Inpainting" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Inpainting</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Run-on-Multiple-GPUs" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Run on Multiple GPUs</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/How-To-Use" target="_blank">How to use</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-Prompts" target="_blank">Writing prompts</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Image-Modifiers" target="_blank">Image Modifiers</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Inpainting" target="_blank">Inpainting</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Samplers" target="_blank">Samplers</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Overview" target="_blank">Summary of every UI option</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" target="_blank">Common error messages (and solutions)</a></li>
</ul>
<li><span class="help-section">Installation</span>
<h4><span class="help-section"><b>Intermediate</b></span></h4>
<ul>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" target="_blank"><i class="fa-solid fa-circle-question fa-fw"></i> Troubleshooting</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Models" target="_blank">Custom Models</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Prompt-Syntax" target="_blank">Prompt Syntax (weights, emphasis etc)</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins" target="_blank">UI Plugins</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Embeddings" target="_blank">Embeddings</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/LoRA" target="_blank">LoRA</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/SDXL" target="_blank">SDXL</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/ControlNet" target="_blank">ControlNet</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Seamless-Tiling" target="_blank">Seamless Tiling</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/xFormers" target="_blank">xFormers</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/The-beta-channel" target="_blank">The beta channel</a></li>
</ul>
<li><span class="help-section">Downloadable Content</span>
<h4><span class="help-section"><b>Advanced topics</b></span></h4>
<ul>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-images fa-fw"></i> Custom Models</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins" target="_blank"><i class="fa-solid fa-puzzle-piece fa-fw"></i> UI Plugins</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-hand-sparkles fa-fw"></i> VAE Variational Auto Encoder</a>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Run-on-Multiple-GPUs" target="_blank">Run on Multiple GPUs</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Model-Merging" target="_blank">Model Merging</a></li>
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Modifiers" target="_blank">Custom Modifiers</a></li>
</ul>
</ul>
<h4><span class="help-section"><b>Misc</b></span></h4>
<ul>
<li> <a href="https://theally.notion.site/The-Definitive-Stable-Diffusion-Glossary-1d1e6d15059c41e6a6b4306b4ecd9df9" target="_blank">Glossary of Stable Diffusion related terms</a></li>
</ul>
</div>
</div>
<div class="float-child">
@ -712,7 +837,7 @@
<span class="embeddings-action-text">Expand Categories</span>
</button>
<i class="fa-solid fa-magnifying-glass"></i>
<input id="embeddings-search-box" type="text" spellcheck="false" autocomplete="off" placeholder="Search...">
<input id="embeddings-search-box" type="text" spellcheck="false" autocomplete="off" placeholder="Search..." inputmode="search">
<label for="embedding-card-size-selector"><small>Thumbnail Size:</small></label>
<select id="embedding-card-size-selector" name="embedding-card-size-selector">
<option value="-2">0</option>
@ -798,6 +923,8 @@
<p>This license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, <br/>spread misinformation and target vulnerable groups. For the full list of restrictions please read <a href="https://github.com/easydiffusion/easydiffusion/blob/main/LICENSE" target="_blank">the license</a>.</p>
<p>By using this software, you consent to the terms and conditions of the license.</p>
</div>
<input id="test_diffusers" type="checkbox" style="display: none" checked /> <!-- for backwards compatibility -->
<input id="use_v3_engine" type="checkbox" style="display: none" checked /> <!-- for backwards compatibility -->
</div>
</div>
</body>

View File

@ -9,6 +9,3 @@ server.init()
model_manager.init()
app.init_render_threads()
bucket_manager.init()
# start the browser ui
app.open_browser()

View File

@ -79,6 +79,7 @@
}
.parameters-table .fa-fire,
.parameters-table .fa-bolt {
.parameters-table .fa-bolt,
.parameters-table .fa-robot {
color: #F7630C;
}

View File

@ -36,6 +36,15 @@ code {
transform: translateY(4px);
cursor: pointer;
}
#engine-logo {
font-size: 8pt;
padding-left: 10pt;
color: var(--small-label-color);
}
#engine-logo a {
text-decoration: none;
/* color: var(--small-label-color); */
}
#prompt {
width: 100%;
height: 65pt;
@ -541,7 +550,7 @@ div.img-preview img {
position: relative;
background: var(--background-color4);
display: flex;
padding: 12px 0 0;
padding: 6px 0 0;
}
.tab .icon {
padding-right: 4pt;
@ -609,11 +618,18 @@ div.img-preview img {
margin: auto;
padding: 0px;
}
#help-links ul {
list-style-type: disc;
padding-left: 12pt;
}
#help-links li {
padding-bottom: 12pt;
padding-bottom: 6pt;
display: block;
font-size: 10pt;
}
#help-links ul li {
display: list-item;
}
#help-links li .fa-fw {
padding-right: 2pt;
}
@ -650,6 +666,15 @@ div.img-preview img {
display: block;
}
.gated-feature {
display: none;
}
.warning-label {
font-size: smaller;
color: var(--status-orange);
}
.display-settings {
float: right;
position: relative;
@ -1207,6 +1232,12 @@ input::file-selector-button {
visibility: visible;
}
}
.tooltip-container {
display: inline-block;
position: relative;
}
.simple-tooltip.right {
right: 0px;
top: 50%;
@ -1446,11 +1477,6 @@ div.top-right {
margin-top: 6px;
}
#small_image_warning {
font-size: smaller;
color: var(--status-orange);
}
button#save-system-settings-btn {
padding: 4pt 8pt;
}
@ -2026,4 +2052,4 @@ div#enlarge-buttons {
border-radius: 4pt;
padding-top: 6pt;
color: var(--small-label-color);
}
}

View File

@ -16,10 +16,12 @@ const SETTINGS_IDS_LIST = [
"clip_skip",
"vae_model",
"sampler_name",
"scheduler_name",
"width",
"height",
"num_inference_steps",
"guidance_scale",
"distilled_guidance_scale",
"prompt_strength",
"tiling",
"output_format",
@ -56,6 +58,8 @@ const SETTINGS_IDS_LIST = [
"extract_lora_from_prompt",
"embedding-card-size-selector",
"lora_model",
"enable_vae_tiling",
"controlnet_alpha",
]
const IGNORE_BY_DEFAULT = ["prompt"]

View File

@ -131,6 +131,15 @@ const TASK_MAPPING = {
readUI: () => parseFloat(guidanceScaleField.value),
parse: (val) => parseFloat(val),
},
distilled_guidance_scale: {
name: "Distilled Guidance",
setUI: (distilled_guidance_scale) => {
distilledGuidanceScaleField.value = distilled_guidance_scale
updateDistilledGuidanceScaleSlider()
},
readUI: () => parseFloat(distilledGuidanceScaleField.value),
parse: (val) => parseFloat(val),
},
prompt_strength: {
name: "Prompt Strength",
setUI: (prompt_strength) => {
@ -242,6 +251,14 @@ const TASK_MAPPING = {
readUI: () => samplerField.value,
parse: (val) => val,
},
scheduler_name: {
name: "Scheduler",
setUI: (scheduler_name) => {
schedulerField.value = scheduler_name
},
readUI: () => schedulerField.value,
parse: (val) => val,
},
use_stable_diffusion_model: {
name: "Stable Diffusion model",
setUI: (use_stable_diffusion_model) => {
@ -268,7 +285,11 @@ const TASK_MAPPING = {
tiling: {
name: "Tiling",
setUI: (val) => {
tilingField.value = val
if (val === null || val === "None") {
tilingField.value = "none"
} else {
tilingField.value = val
}
},
readUI: () => tilingField.value,
parse: (val) => val,
@ -305,10 +326,21 @@ const TASK_MAPPING = {
readUI: () => controlImageFilterField.value,
parse: (val) => val,
},
control_alpha: {
name: "ControlNet Strength",
setUI: (control_alpha) => {
control_alpha = control_alpha || 1.0
controlAlphaField.value = control_alpha
updateControlAlphaSlider()
},
readUI: () => parseFloat(controlAlphaField.value),
parse: (val) => val === null ? 1.0 : parseFloat(val),
},
use_lora_model: {
name: "LoRA model",
setUI: (use_lora_model) => {
let modelPaths = []
use_lora_model = use_lora_model === null ? "" : use_lora_model
use_lora_model = Array.isArray(use_lora_model) ? use_lora_model : [use_lora_model]
use_lora_model.forEach((m) => {
if (m.includes("models\\lora\\")) {
@ -525,6 +557,11 @@ function restoreTaskToUI(task, fieldsToSkip) {
// listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpai
controlImagePreview.src = task.reqBody.control_image
}
if ("use_controlnet_model" in task.reqBody && task.reqBody.use_controlnet_model && !("control_alpha" in task.reqBody)) {
controlAlphaField.value = 1.0
updateControlAlphaSlider()
}
}
function readUI() {
const reqBody = {}
@ -570,11 +607,13 @@ const TASK_TEXT_MAPPING = {
seed: "Seed",
num_inference_steps: "Steps",
guidance_scale: "Guidance Scale",
distilled_guidance_scale: "Distilled Guidance",
prompt_strength: "Prompt Strength",
use_face_correction: "Use Face Correction",
use_upscale: "Use Upscaling",
upscale_amount: "Upscale By",
sampler_name: "Sampler",
scheduler_name: "Scheduler",
negative_prompt: "Negative Prompt",
use_stable_diffusion_model: "Stable Diffusion model",
use_hypernetwork_model: "Hypernetwork model",
@ -583,6 +622,8 @@ const TASK_TEXT_MAPPING = {
lora_alpha: "LoRA Strength",
use_controlnet_model: "ControlNet model",
control_filter_to_apply: "ControlNet Filter",
control_alpha: "ControlNet Strength",
tiling: "Seamless Tiling",
}
function parseTaskFromText(str) {
const taskReqBody = {}

View File

@ -12,8 +12,16 @@ const taskConfigSetup = {
seed: { value: ({ seed }) => seed, label: "Seed" },
dimensions: { value: ({ reqBody }) => `${reqBody?.width}x${reqBody?.height}`, label: "Dimensions" },
sampler_name: "Sampler",
scheduler_name: {
label: "Scheduler",
visible: ({ reqBody }) => reqBody?.scheduler_name,
},
num_inference_steps: "Inference Steps",
guidance_scale: "Guidance Scale",
distilled_guidance_scale: {
label: "Distilled Guidance Scale",
visible: ({ reqBody }) => reqBody?.distilled_guidance_scale,
},
use_stable_diffusion_model: "Model",
clip_skip: {
label: "Clip Skip",
@ -22,7 +30,8 @@ const taskConfigSetup = {
},
tiling: {
label: "Tiling",
visible: ({ reqBody }) => reqBody?.tiling != "none",
visible: ({ reqBody }) =>
reqBody?.tiling != "none" && reqBody?.tiling !== null && reqBody?.tiling !== undefined,
value: ({ reqBody }) => reqBody?.tiling,
},
use_vae_model: {
@ -50,6 +59,10 @@ const taskConfigSetup = {
preserve_init_image_color_profile: "Preserve Color Profile",
strict_mask_border: "Strict Mask Border",
use_controlnet_model: "ControlNet Model",
control_alpha: {
label: "ControlNet Strength",
visible: ({ reqBody }) => !!reqBody?.use_controlnet_model,
},
},
pluginTaskConfig: {},
getCSSKey: (key) =>
@ -71,6 +84,8 @@ let numOutputsParallelField = document.querySelector("#num_outputs_parallel")
let numInferenceStepsField = document.querySelector("#num_inference_steps")
let guidanceScaleSlider = document.querySelector("#guidance_scale_slider")
let guidanceScaleField = document.querySelector("#guidance_scale")
let distilledGuidanceScaleSlider = document.querySelector("#distilled_guidance_scale_slider")
let distilledGuidanceScaleField = document.querySelector("#distilled_guidance_scale")
let outputQualitySlider = document.querySelector("#output_quality_slider")
let outputQualityField = document.querySelector("#output_quality")
let outputQualityRow = document.querySelector("#output_quality_row")
@ -98,6 +113,8 @@ let controlImagePreview = document.querySelector("#control_image_preview")
let controlImageClearBtn = document.querySelector(".control_image_clear")
let controlImageContainer = document.querySelector("#control_image_wrapper")
let controlImageFilterField = document.querySelector("#control_image_filter")
let controlAlphaSlider = document.querySelector("#controlnet_alpha_slider")
let controlAlphaField = document.querySelector("#controlnet_alpha")
let applyColorCorrectionField = document.querySelector("#apply_color_correction")
let strictMaskBorderField = document.querySelector("#strict_mask_border")
let colorCorrectionSetting = document.querySelector("#apply_color_correction_setting")
@ -106,6 +123,8 @@ let promptStrengthSlider = document.querySelector("#prompt_strength_slider")
let promptStrengthField = document.querySelector("#prompt_strength")
let samplerField = document.querySelector("#sampler_name")
let samplerSelectionContainer = document.querySelector("#samplerSelection")
let schedulerField = document.querySelector("#scheduler_name")
let schedulerSelectionContainer = document.querySelector("#schedulerSelection")
let useFaceCorrectionField = document.querySelector("#use_face_correction")
let gfpganModelField = new ModelDropdown(document.querySelector("#gfpgan_model"), ["gfpgan", "codeformer"], "", false)
let useUpscalingField = document.querySelector("#use_upscale")
@ -128,6 +147,7 @@ let hypernetworkStrengthField = document.querySelector("#hypernetwork_strength")
let outputFormatField = document.querySelector("#output_format")
let outputLosslessField = document.querySelector("#output_lossless")
let outputLosslessContainer = document.querySelector("#output_lossless_container")
let enableVAETilingField = document.querySelector("#enable_vae_tiling")
let blockNSFWField = document.querySelector("#block_nsfw")
let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
let updateBranchLabel = document.querySelector("#updateBranchLabel")
@ -510,10 +530,10 @@ function showImages(reqBody, res, outputContainer, livePreview) {
{ text: "Upscale", on_click: onUpscaleClick },
{ text: "Fix Faces", on_click: onFixFacesClick },
],
{
{
text: "Use as Thumbnail",
on_click: onUseAsThumbnailClick,
filter: (req, img) => "use_embeddings_model" in req,
filter: (req, img) => "use_embeddings_model" in req || "use_lora_model" in req
},
]
@ -680,7 +700,7 @@ function getAllModelNames(type) {
// gets a flattened list of all models of a certain type. e.g. "path/subpath/modelname"
// use the filter to search for all models having a certain name.
function getAllModelPathes(type,filter="") {
function getAllModelPathes(type, filter = "") {
function f(tree, prefix) {
if (tree == undefined) {
return []
@ -690,7 +710,7 @@ function getAllModelPathes(type,filter="") {
if (typeof e == "object") {
result = result.concat(f(e[1], prefix + e[0] + "/"))
} else {
if (filter=="" || e==filter) {
if (filter == "" || e == filter) {
result.push(prefix + e)
}
}
@ -700,7 +720,6 @@ function getAllModelPathes(type,filter="") {
return f(modelsOptions[type], "")
}
function onUseAsThumbnailClick(req, img) {
let scale = 1
let targetWidth = img.naturalWidth
@ -748,25 +767,45 @@ function onUseAsThumbnailClick(req, img) {
onUseAsThumbnailClick.croppr.setImage(img.src)
}
let embeddings = req.use_embeddings_model.map((e) => e.split("/").pop())
let LORA = []
useAsThumbSelect.innerHTML=""
if ("use_lora_model" in req) {
LORA = req.use_lora_model
if ("use_embeddings_model" in req) {
let embeddings = req.use_embeddings_model.map((e) => e.split("/").pop())
let embOptions = document.createElement("optgroup")
embOptions.label = "Embeddings"
embOptions.replaceChildren(
...embeddings.map((e) => {
let option = document.createElement("option")
option.innerText = e
option.dataset["type"] = "embeddings"
return option
})
)
useAsThumbSelect.appendChild(embOptions)
}
let optgroup = document.createElement("optgroup")
optgroup.label = "Embeddings"
optgroup.replaceChildren(
...embeddings.map((e) => {
let option = document.createElement("option")
option.innerText = e
option.dataset["type"] = "embeddings"
return option
})
)
useAsThumbSelect.replaceChildren(optgroup)
if ("use_lora_model" in req) {
let LORA = req.use_lora_model
if (typeof LORA == "string") {
LORA = [LORA]
}
LORA = LORA.map((e) => e.split("/").pop())
let loraOptions = document.createElement("optgroup")
loraOptions.label = "LORA"
loraOptions.replaceChildren(
...LORA.map((e) => {
let option = document.createElement("option")
option.innerText = e
option.dataset["type"] = "lora"
return option
})
)
useAsThumbSelect.appendChild(loraOptions)
}
useAsThumbDialog.showModal()
onUseAsThumbnailClick.scale = scale
}
@ -782,6 +821,50 @@ useAsThumbCancelBtn.addEventListener("click", () => {
useAsThumbDialog.close()
})
const Bucket = {
upload(path, blob) {
const formData = new FormData()
formData.append("file", blob)
return fetch(`bucket/${path}`, {
method: "POST",
body: formData,
})
},
getImageAsDataURL(path) {
return fetch(`bucket/${path}`)
.then((response) => {
if (response.status == 200) {
return response.blob()
} else {
throw new Error("Bucket error")
}
})
.then((blob) => {
return new Promise((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => resolve(reader.result)
reader.onerror = reject
reader.readAsDataURL(blob)
})
})
},
getList(path) {
return fetch(`bucket/${path}`)
.then((response) => (response.status == 200 ? response.json() : []))
},
store(path, data) {
return Bucket.upload(`${path}.json`, JSON.stringify(data))
},
retrieve(path) {
return fetch(`bucket/${path}.json`)
.then((response) => (response.status == 200 ? response.json() : null))
},
}
useAsThumbSaveBtn.addEventListener("click", (e) => {
let scale = 1 / onUseAsThumbnailClick.scale
let crop = onUseAsThumbnailClick.croppr.getValue()
@ -793,22 +876,18 @@ useAsThumbSaveBtn.addEventListener("click", (e) => {
.then((thumb) => fetch(thumb))
.then((response) => response.blob())
.then(async function(blob) {
const formData = new FormData()
formData.append("file", blob)
let options = useAsThumbSelect.selectedOptions
let promises = []
for (let embedding of options) {
promises.push(
fetch(`bucket/${profileName}/${embedding.dataset["type"]}/${embedding.value}.png`, {
method: "POST",
body: formData,
})
Bucket.upload(`${profileName}/${embedding.dataset["type"]}/${embedding.value}.png`, blob)
)
}
return Promise.all(promises)
})
.then(() => {
useAsThumbDialog.close()
document.dispatchEvent(new CustomEvent("saveThumb", { detail: useAsThumbSelect.selectedOptions }))
})
.catch((error) => {
console.error(error)
@ -852,6 +931,10 @@ function applyInlineFilter(filterName, path, filterParams, img, statusText, tool
}
filterReq.model_paths[filterName] = path
if (saveToDiskField.checked && diskPathField.value.trim() !== "") {
filterReq.save_to_disk_path = diskPathField.value.trim()
}
tools.spinnerStatus.innerText = statusText
tools.spinner.classList.remove("displayNone")
@ -910,7 +993,20 @@ function onRedoFilter(req, img, e, tools) {
function onUpscaleClick(req, img, e, tools) {
let path = upscaleModelField.value
let scale = parseInt(upscaleAmountField.value)
let filterName = path.toLowerCase().includes("realesrgan") ? "realesrgan" : "latent_upscaler"
let filterName = null
const FILTERS = ["realesrgan", "latent_upscaler", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"]
for (let idx in FILTERS) {
let f = FILTERS[idx]
if (path.toLowerCase().includes(f)) {
filterName = f
break
}
}
if (!filterName) {
return
}
let statusText = "Upscaling by " + scale + "x using " + filterName
applyInlineFilter(filterName, path, { scale: scale }, img, statusText, tools)
}
@ -967,6 +1063,9 @@ function makeImage() {
if (guidanceScaleField.value == "") {
guidanceScaleField.value = guidanceScaleSlider.value / 10
}
if (distilledGuidanceScaleField.value == "") {
distilledGuidanceScaleField.value = distilledGuidanceScaleSlider.value / 10
}
if (hypernetworkStrengthField.value == "") {
hypernetworkStrengthField.value = hypernetworkStrengthSlider.value / 100
}
@ -1237,7 +1336,6 @@ function getCurrentUserRequest() {
//render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
use_stable_diffusion_model: stableDiffusionModelField.value,
clip_skip: clipSkipField.checked,
tiling: tilingField.value,
use_vae_model: vaeModelField.value,
stream_progress_updates: true,
stream_image_progress: numOutputsTotal > 50 ? false : streamImageProgressField.checked,
@ -1302,6 +1400,11 @@ function getCurrentUserRequest() {
newTask.reqBody.use_lora_model = modelNames
newTask.reqBody.lora_alpha = modelStrengths
}
if (tilingField.value !== "none") {
newTask.reqBody.tiling = tilingField.value
}
newTask.reqBody.enable_vae_tiling = enableVAETilingField.checked
}
if (testDiffusers.checked && document.getElementById("toggle-tensorrt-install").innerHTML == "Uninstall") {
// TRT is installed
@ -1326,10 +1429,17 @@ function getCurrentUserRequest() {
if (controlnetModelField.value !== "" && IMAGE_REGEX.test(controlImagePreview.src)) {
newTask.reqBody.use_controlnet_model = controlnetModelField.value
newTask.reqBody.control_image = controlImagePreview.src
newTask.reqBody.control_alpha = parseFloat(controlAlphaField.value)
if (controlImageFilterField.value !== "") {
newTask.reqBody.control_filter_to_apply = controlImageFilterField.value
}
}
if (stableDiffusionModelField.value.toLowerCase().includes("flux")) {
newTask.reqBody.distilled_guidance_scale = parseFloat(distilledGuidanceScaleField.value)
}
if (schedulerSelectionContainer.style.display !== "none") {
newTask.reqBody.scheduler_name = schedulerField.value
}
return newTask
}
@ -1338,7 +1448,7 @@ function setEmbeddings(task) {
let prompt = task.reqBody.prompt
let negativePrompt = task.reqBody.negative_prompt
let overallPrompt = (prompt + " " + negativePrompt).toLowerCase()
overallPrompt = overallPrompt.replaceAll(/[^a-z0-9\.]/g, " ") // only allow alpha-numeric and dots
overallPrompt = overallPrompt.replaceAll(/[^a-z0-9\-_\.]/g, " ") // only allow alpha-numeric, dots and hyphens
overallPrompt = overallPrompt.split(" ")
let embeddingsTree = modelsOptions["embeddings"]
@ -1562,7 +1672,7 @@ function updateInitialText() {
const countBeforeBanner = localStorage.getItem("countBeforeBanner") || 1
if (countBeforeBanner <= 0) {
// supportBanner.classList.remove("displayNone")
supportBanner.classList.remove("displayNone")
}
}
}
@ -1769,36 +1879,93 @@ controlImagePreview.addEventListener("load", onControlnetModelChange)
controlImagePreview.addEventListener("unload", onControlnetModelChange)
onControlnetModelChange()
function onControlImageFilterChange() {
let filterId = controlImageFilterField.value
if (filterId.includes("openpose")) {
controlnetModelField.value = "control_v11p_sd15_openpose"
} else if (filterId === "canny") {
controlnetModelField.value = "control_v11p_sd15_canny"
} else if (filterId === "mlsd") {
controlnetModelField.value = "control_v11p_sd15_mlsd"
} else if (filterId === "mlsd") {
controlnetModelField.value = "control_v11p_sd15_mlsd"
} else if (filterId.includes("scribble")) {
controlnetModelField.value = "control_v11p_sd15_scribble"
} else if (filterId.includes("softedge")) {
controlnetModelField.value = "control_v11p_sd15_softedge"
} else if (filterId === "normal_bae") {
controlnetModelField.value = "control_v11p_sd15_normalbae"
} else if (filterId.includes("depth")) {
controlnetModelField.value = "control_v11f1p_sd15_depth"
} else if (filterId === "lineart_anime") {
controlnetModelField.value = "control_v11p_sd15s2_lineart_anime"
} else if (filterId.includes("lineart")) {
controlnetModelField.value = "control_v11p_sd15_lineart"
} else if (filterId === "shuffle") {
controlnetModelField.value = "control_v11e_sd15_shuffle"
} else if (filterId === "segment") {
controlnetModelField.value = "control_v11p_sd15_seg"
// tip for Flux
let sdModelField = document.querySelector("#stable_diffusion_model")
function checkGuidanceValue() {
let guidance = parseFloat(guidanceScaleField.value)
let guidanceWarning = document.querySelector("#guidanceWarning")
let guidanceWarningText = document.querySelector("#guidanceWarningText")
if (sdModelField.value.toLowerCase().includes("flux")) {
if (guidance > 1.5) {
guidanceWarningText.innerText = "Flux recommends a 'Guidance Scale' of 1"
guidanceWarning.classList.remove("displayNone")
} else {
guidanceWarning.classList.add("displayNone")
}
} else {
if (guidance < 2) {
guidanceWarningText.innerText = "A higher 'Guidance Scale' is recommended!"
guidanceWarning.classList.remove("displayNone")
} else {
guidanceWarning.classList.add("displayNone")
}
}
}
controlImageFilterField.addEventListener("change", onControlImageFilterChange)
onControlImageFilterChange()
sdModelField.addEventListener("change", checkGuidanceValue)
guidanceScaleField.addEventListener("change", checkGuidanceValue)
guidanceScaleSlider.addEventListener("change", checkGuidanceValue)
function checkGuidanceScaleVisibility() {
let guidanceScaleContainer = document.querySelector("#distilled_guidance_scale_container")
if (sdModelField.value.toLowerCase().includes("flux")) {
guidanceScaleContainer.classList.remove("displayNone")
} else {
guidanceScaleContainer.classList.add("displayNone")
}
}
sdModelField.addEventListener("change", checkGuidanceScaleVisibility)
function checkFluxSampler() {
let samplerWarning = document.querySelector("#fluxSamplerWarning")
if (sdModelField.value.toLowerCase().includes("flux")) {
if (samplerField.value == "euler_a") {
samplerWarning.classList.remove("displayNone")
} else {
samplerWarning.classList.add("displayNone")
}
} else {
samplerWarning.classList.add("displayNone")
}
}
sdModelField.addEventListener("change", checkFluxSampler)
samplerField.addEventListener("change", checkFluxSampler)
document.addEventListener("refreshModels", function() {
checkGuidanceValue()
checkGuidanceScaleVisibility()
checkFluxSampler()
})
// function onControlImageFilterChange() {
// let filterId = controlImageFilterField.value
// if (filterId.includes("openpose")) {
// controlnetModelField.value = "control_v11p_sd15_openpose"
// } else if (filterId === "canny") {
// controlnetModelField.value = "control_v11p_sd15_canny"
// } else if (filterId === "mlsd") {
// controlnetModelField.value = "control_v11p_sd15_mlsd"
// } else if (filterId === "mlsd") {
// controlnetModelField.value = "control_v11p_sd15_mlsd"
// } else if (filterId.includes("scribble")) {
// controlnetModelField.value = "control_v11p_sd15_scribble"
// } else if (filterId.includes("softedge")) {
// controlnetModelField.value = "control_v11p_sd15_softedge"
// } else if (filterId === "normal_bae") {
// controlnetModelField.value = "control_v11p_sd15_normalbae"
// } else if (filterId.includes("depth")) {
// controlnetModelField.value = "control_v11f1p_sd15_depth"
// } else if (filterId === "lineart_anime") {
// controlnetModelField.value = "control_v11p_sd15s2_lineart_anime"
// } else if (filterId.includes("lineart")) {
// controlnetModelField.value = "control_v11p_sd15_lineart"
// } else if (filterId === "shuffle") {
// controlnetModelField.value = "control_v11e_sd15_shuffle"
// } else if (filterId === "segment") {
// controlnetModelField.value = "control_v11p_sd15_seg"
// }
// }
// controlImageFilterField.addEventListener("change", onControlImageFilterChange)
// onControlImageFilterChange()
upscaleModelField.disabled = !useUpscalingField.checked
upscaleAmountField.disabled = !useUpscalingField.checked
@ -1897,6 +2064,27 @@ guidanceScaleSlider.addEventListener("input", updateGuidanceScale)
guidanceScaleField.addEventListener("input", updateGuidanceScaleSlider)
updateGuidanceScale()
/********************* Distilled Guidance **************************/
function updateDistilledGuidanceScale() {
distilledGuidanceScaleField.value = distilledGuidanceScaleSlider.value / 10
distilledGuidanceScaleField.dispatchEvent(new Event("change"))
}
function updateDistilledGuidanceScaleSlider() {
if (distilledGuidanceScaleField.value < 0) {
distilledGuidanceScaleField.value = 0
} else if (distilledGuidanceScaleField.value > 50) {
distilledGuidanceScaleField.value = 50
}
distilledGuidanceScaleSlider.value = distilledGuidanceScaleField.value * 10
distilledGuidanceScaleSlider.dispatchEvent(new Event("change"))
}
distilledGuidanceScaleSlider.addEventListener("input", updateDistilledGuidanceScale)
distilledGuidanceScaleField.addEventListener("input", updateDistilledGuidanceScaleSlider)
updateDistilledGuidanceScale()
/********************* Prompt Strength *******************/
function updatePromptStrength() {
promptStrengthField.value = promptStrengthSlider.value / 100
@ -1946,6 +2134,27 @@ function updateHypernetworkStrengthContainer() {
hypernetworkModelField.addEventListener("change", updateHypernetworkStrengthContainer)
updateHypernetworkStrengthContainer()
/********************* Controlnet Alpha **************************/
function updateControlAlpha() {
controlAlphaField.value = controlAlphaSlider.value / 10
controlAlphaField.dispatchEvent(new Event("change"))
}
function updateControlAlphaSlider() {
if (controlAlphaField.value < 0) {
controlAlphaField.value = 0
} else if (controlAlphaField.value > 10) {
controlAlphaField.value = 10
}
controlAlphaSlider.value = controlAlphaField.value * 10
controlAlphaSlider.dispatchEvent(new Event("change"))
}
controlAlphaSlider.addEventListener("input", updateControlAlpha)
controlAlphaField.addEventListener("input", updateControlAlphaSlider)
updateControlAlpha()
/********************* JPEG/WEBP Quality **********************/
function updateOutputQuality() {
outputQualityField.value = 0 | outputQualitySlider.value
@ -2355,20 +2564,10 @@ function loadThumbnailImageFromFile() {
}
function updateEmbeddingsList(filter = "") {
function html(model, iconlist = [], prefix = "", filter = "") {
function html(model, iconMap = {}, prefix = "", filter = "") {
filter = filter.toLowerCase()
let toplevel = document.createElement("div")
let folders = document.createElement("div")
let embIcon = Object.assign(
{},
...iconlist.map((x) => ({
[x
.toLowerCase()
.split(".")
.slice(0, -1)
.join(".")]: x,
}))
)
let profileName = profileNameField.value
model?.forEach((m) => {
@ -2376,13 +2575,9 @@ function updateEmbeddingsList(filter = "") {
let token = m.toLowerCase()
if (token.search(filter) != -1) {
let button
// if (iconlist.length==0) {
// button = document.createElement("button")
// button.innerText = m
// } else {
let img = "/media/images/noimg.png"
if (token in embIcon) {
img = `/bucket/${profileName}/embeddings/${embIcon[token]}`
if (token in iconMap) {
img = `/bucket/${profileName}/${iconMap[token]}`
}
button = createModifierCard(m, [img, img], true)
// }
@ -2391,7 +2586,7 @@ function updateEmbeddingsList(filter = "") {
toplevel.appendChild(button)
}
} else {
let subdir = html(m[1], iconlist, prefix + m[0] + "/", filter)
let subdir = html(m[1], iconMap, prefix + m[0] + "/", filter)
if (typeof subdir == "object") {
let div1 = document.createElement("div")
let div2 = document.createElement("div")
@ -2450,11 +2645,44 @@ function updateEmbeddingsList(filter = "") {
</div>
`
let loraTokens = []
let profileName = profileNameField.value
fetch(`/bucket/${profileName}/embeddings/`)
.then((response) => (response.status == 200 ? response.json() : []))
.then(async function(iconlist) {
embeddingsList.replaceChildren(html(modelsOptions.embeddings, iconlist, "", filter))
let iconMap = {}
Bucket.getList(`${profileName}/embeddings/`)
.then((icons) => {
iconMap = Object.assign(
{},
...icons.map((x) => ({
[x
.toLowerCase()
.split(".")
.slice(0, -1)
.join(".")]: `embeddings/${x}`,
}))
)
return Bucket.getList(`${profileName}/lora/`)
})
.then(async function (icons) {
for (let lora of loraModelField.value.modelNames) {
let keywords = await getLoraKeywords(lora)
loraTokens = loraTokens.concat(keywords)
let loraname = lora.split("/").pop()
if (icons.includes(`${loraname}.png`)) {
keywords.forEach((kw) => {
iconMap[kw.toLowerCase()] = `lora/${loraname}.png`
})
}
}
let tokenList = [...modelsOptions.embeddings]
if (loraTokens.length != 0) {
tokenList.unshift(['LORA Keywords', loraTokens])
}
embeddingsList.replaceChildren(html(tokenList, iconMap, "", filter))
createCollapsibles(embeddingsList)
if (filter != "") {
embeddingsExpandAll()

View File

@ -97,6 +97,17 @@ var PARAMETERS = [
},
],
},
{
id: "models_dir",
type: ParameterType.custom,
icon: "fa-folder-tree",
label: "Models Folder",
note: "Path to the 'models' folder. Please save and restart Easy Diffusion after changing this.",
saveInAppConfig: true,
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="30">`
},
},
{
id: "block_nsfw",
type: ParameterType.checkbox,
@ -150,6 +161,7 @@ var PARAMETERS = [
"<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
saveInAppConfig: true,
options: [
{ value: "balanced", label: "Balanced" },
{ value: "high", label: "High" },
@ -238,14 +250,19 @@ var PARAMETERS = [
default: false,
},
{
id: "test_diffusers",
type: ParameterType.checkbox,
label: "Use the new v3 engine (diffusers)",
id: "backend",
type: ParameterType.select,
label: "Engine to use",
note:
"Use our new v3 engine, with additional features like LoRA, ControlNet, SDXL, Embeddings, Tiling and lots more! Please press Save, then restart the program after changing this.",
icon: "fa-bolt",
default: true,
"Use our new v3.5 engine (Forge), with additional features like Flux, SD3, Lycoris and lots more! Please press Save, then restart the program after changing this.",
icon: "fa-robot",
saveInAppConfig: true,
default: "ed_diffusers",
options: [
{ value: "webui", label: "v3.5 (latest)" },
{ value: "ed_diffusers", label: "v3.0" },
{ value: "ed_classic", label: "v2.0" },
],
},
{
id: "cloudflare",
@ -420,8 +437,10 @@ let listenPortField = document.querySelector("#listen_port")
let useBetaChannelField = document.querySelector("#use_beta_channel")
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
let testDiffusers = document.querySelector("#test_diffusers")
let testDiffusers = document.querySelector("#use_v3_engine")
let backendEngine = document.querySelector("#backend")
let profileNameField = document.querySelector("#profileName")
let modelsDirField = document.querySelector("#models_dir")
let saveSettingsBtn = document.querySelector("#save-system-settings-btn")
@ -442,6 +461,23 @@ async function changeAppConfig(configDelta) {
}
}
function getDefaultDisplay(element) {
const tag = element.tagName.toLowerCase();
const defaultDisplays = {
div: 'block',
span: 'inline',
p: 'block',
tr: 'table-row',
table: 'table',
li: 'list-item',
ul: 'block',
ol: 'block',
button: 'inline',
// Add more if needed
};
return defaultDisplays[tag] || 'block'; // Default to 'block' if not listed
}
async function getAppConfig() {
try {
let res = await fetch("/get/app_config")
@ -463,15 +499,19 @@ async function getAppConfig() {
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
modelsDirField.value = config.models_dir
let testDiffusersEnabled = true
if (config.test_diffusers === false) {
if (config.backend === "ed_classic") {
testDiffusersEnabled = false
}
testDiffusers.checked = testDiffusersEnabled
backendEngine.value = config.backend
document.querySelector("#test_diffusers").checked = testDiffusers.checked // don't break plugins
document.querySelector("#use_v3_engine").checked = testDiffusers.checked // don't break plugins
if (config.config_on_startup) {
if (config.config_on_startup?.test_diffusers) {
if (config.config_on_startup?.backend !== "ed_classic") {
document.body.classList.add("diffusers-enabled-on-startup")
document.body.classList.remove("diffusers-disabled-on-startup")
} else {
@ -480,35 +520,29 @@ async function getAppConfig() {
}
}
if (!testDiffusersEnabled) {
document.querySelector("#lora_model_container").style.display = "none"
document.querySelector("#tiling_container").style.display = "none"
document.querySelector("#controlnet_model_container").style.display = "none"
document.querySelector("#hypernetwork_model_container").style.display = ""
document.querySelector("#hypernetwork_strength_container").style.display = ""
document.querySelector("#negative-embeddings-button").style.display = "none"
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
option.style.display = "none"
})
if (config.backend === "ed_classic") {
IMAGE_STEP_SIZE = 64
customWidthField.step = IMAGE_STEP_SIZE
customHeightField.step = IMAGE_STEP_SIZE
} else {
document.querySelector("#lora_model_container").style.display = ""
document.querySelector("#tiling_container").style.display = ""
document.querySelector("#controlnet_model_container").style.display = ""
document.querySelector("#hypernetwork_model_container").style.display = "none"
document.querySelector("#hypernetwork_strength_container").style.display = "none"
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
option.style.display = "none"
})
document.querySelector("#clip_skip_config").classList.remove("displayNone")
document.querySelector("#embeddings-button").classList.remove("displayNone")
IMAGE_STEP_SIZE = 8
customWidthField.step = IMAGE_STEP_SIZE
customHeightField.step = IMAGE_STEP_SIZE
}
customWidthField.step = IMAGE_STEP_SIZE
customHeightField.step = IMAGE_STEP_SIZE
const currentBackendKey = "backend_" + config.backend
document.querySelectorAll('.gated-feature').forEach((element) => {
const featureKeys = element.getAttribute('data-feature-keys').split(' ')
if (featureKeys.includes(currentBackendKey)) {
element.style.display = getDefaultDisplay(element)
} else {
element.style.display = 'none'
}
});
if (config.force_save_metadata) {
metadataOutputFormatField.value = config.force_save_metadata
}
console.log("get config status response", config)
@ -722,12 +756,20 @@ async function getSystemInfo() {
force = res["enforce_output_dir"]
if (force == true) {
saveToDiskField.checked = true
metadataOutputFormatField.disabled = false
metadataOutputFormatField.disabled = res["enforce_output_metadata"]
diskPathField.disabled = true
}
saveToDiskField.disabled = force
diskPathField.disabled = force
} else {
diskPathField.disabled = !saveToDiskField.checked
metadataOutputFormatField.disabled = !saveToDiskField.checked
}
setDiskPath(res["default_output_dir"], force)
// backend info
if (res["backend_url"]) {
document.querySelector("#backend-url").setAttribute("href", res["backend_url"])
}
} catch (e) {
console.log("error fetching devices", e)
}

View File

@ -1,454 +0,0 @@
;(function() {
"use strict"
///////////////////// Function section
// Hermite smoothstep: 3x^2 - 2x^3; maps [0,1] -> [0,1] with zero slope at both ends.
function smoothstep(x) {
    const squared = x * x
    return squared * (3 - 2 * x)
}
// Perlin's smootherstep: 6x^5 - 15x^4 + 10x^3; zero 1st and 2nd derivatives at 0 and 1.
function smootherstep(x) {
    const cubed = x * x * x
    return cubed * (x * (x * 6 - 15) + 10)
}
// 7th-order sigmoid step: -20x^7 + 70x^6 - 84x^5 + 35x^4
// (zero 1st, 2nd and 3rd derivatives at both endpoints).
function smootheststep(x) {
    return -20 * Math.pow(x, 7)
        + 70 * Math.pow(x, 6)
        - 84 * Math.pow(x, 5)
        + 35 * Math.pow(x, 4)
}
// Current wall-clock time formatted as zero-padded "HH:MM:SS".
function getCurrentTime() {
    const pad = (value) => String(value).padStart(2, "0")
    const now = new Date()
    return `${pad(now.getHours())}:${pad(now.getMinutes())}:${pad(now.getSeconds())}`
}
// Append a timestamped entry to the merge log panel. `message` may contain
// HTML markup (callers pass <i>/<b> tags), since it is inserted via innerHTML.
function addLogMessage(message) {
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`
// Scroll to the bottom of the log
logContainer.scrollTop = logContainer.scrollHeight
// Un-hide the log container (it starts with display:none via CSS)
document.querySelector("#merge-log-container").style.display = "block"
}
// Append a horizontal rule to the merge log and keep the view scrolled to the bottom.
function addLogSeparator() {
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += "<hr>"
logContainer.scrollTop = logContainer.scrollHeight
}
// Draw the merge-ratio preview chart on the #merge-canvas element.
// `fn` is the interpolation curve (x in [0,1] -> y in [0,1]) to plot; the
// start/step/count inputs are read from the DOM to place the ratio markers.
function drawDiagram(fn) {
const SIZE = 300
const canvas = document.getElementById("merge-canvas")
canvas.height = canvas.width = SIZE
const ctx = canvas.getContext("2d")
// Draw coordinate system
// Flip the y-axis so the mathematical origin (0,0) is the bottom-left corner.
ctx.scale(1, -1)
ctx.translate(0, -canvas.height)
ctx.lineWidth = 1
ctx.beginPath()
ctx.strokeStyle = "white"
// Outer border plus the main diagonal (the identity curve y = x).
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
// Dotted 10x10 background grid.
ctx.beginPath()
ctx.setLineDash([1, 2])
const n = SIZE / 10
for (let i = n; i < SIZE; i += n) {
ctx.moveTo(0, i)
ctx.lineTo(SIZE, i)
ctx.moveTo(i, 0)
ctx.lineTo(i, SIZE)
}
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([])
ctx.beginPath()
ctx.strokeStyle = "black"
ctx.lineWidth = 3
// Plot function
// Sample fn at 21 evenly spaced points and connect them with line segments.
const numSamples = 20
for (let i = 0; i <= numSamples; i++) {
const x = i / numSamples
const y = fn(x)
const canvasX = x * SIZE
const canvasY = y * SIZE
if (i === 0) {
ctx.moveTo(canvasX, canvasY)
} else {
ctx.lineTo(canvasX, canvasY)
}
}
ctx.stroke()
// Plot alpha values (yellow boxes)
let start = parseFloat(document.querySelector("#merge-start").value)
let step = parseFloat(document.querySelector("#merge-step").value)
// `>> 0` truncates the (string) input to an integer iteration count.
let iterations = document.querySelector("#merge-count").value >> 0
ctx.beginPath()
ctx.fillStyle = "yellow"
for (let i = 0; i < iterations; i++) {
const alpha = (start + i * step) / 100
const x = alpha * SIZE
const y = fn(alpha) * SIZE
if (x <= SIZE) {
ctx.rect(x - 3, y - 3, 6, 6)
ctx.fill()
} else {
// A marker fell past 100%: redraw the border in red and warn in the log.
ctx.strokeStyle = "red"
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
addLogMessage("<i>Warning: maximum ratio is &#8805; 100%</i>")
}
}
}
// Redraw the ratio-preview chart using the interpolation curve currently
// selected in the #merge-interpolation dropdown (identity for "Exact").
function updateChart() {
    const curves = {
        SmoothStep: smoothstep,
        SmootherStep: smootherstep,
        SmoothestStep: smootheststep,
    }
    const selected = document.querySelector("#merge-interpolation").value
    const fn = curves[selected] || ((x) => x)
    drawDiagram(fn)
}
createTab({
id: "merge",
icon: "fa-code-merge",
label: "Merge models",
css: `
#tab-content-merge .tab-content-inner {
max-width: 100%;
padding: 10pt;
}
.merge-container {
margin-left: 15%;
margin-right: 15%;
text-align: left;
display: inline-grid;
grid-template-columns: 1fr 1fr;
grid-template-rows: auto auto auto;
gap: 0px 0px;
grid-auto-flow: row;
grid-template-areas:
"merge-input merge-config"
"merge-buttons merge-buttons";
}
.merge-container p {
margin-top: 3pt;
margin-bottom: 3pt;
}
.merge-config .tab-content {
background: var(--background-color1);
border-radius: 3pt;
}
.merge-config .tab-content-inner {
text-align: left;
}
.merge-input {
grid-area: merge-input;
padding-left:1em;
}
.merge-config {
grid-area: merge-config;
padding:1em;
}
.merge-config input {
margin-bottom: 3px;
}
.merge-config select {
margin-bottom: 3px;
}
.merge-buttons {
grid-area: merge-buttons;
padding:1em;
text-align: center;
}
#merge-button {
padding: 8px;
width:20em;
}
div#merge-log {
height:150px;
overflow-x:hidden;
overflow-y:scroll;
background:var(--background-color1);
border-radius: 3pt;
}
div#merge-log i {
color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
font-family: monospace;
}
.disabled {
background: var(--background-color4);
color: var(--text-color);
}
#merge-type-tabs {
border-bottom: 1px solid black;
}
#merge-log-container {
display: none;
}
.merge-container #merge-warning {
color: rgb(153, 153, 153);
}`,
content: `
<div class="merge-container panel-box">
<div class="merge-input">
<p><label for="#mergeModelA">Select Model A:</label></p>
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<p><label for="#mergeModelB">Select Model B:</label></p>
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/><br/>
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
<br/>
<table>
<tr>
<td><label for="#merge-filename">Output file name:</label></td>
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
</tr>
<tr>
<td><label for="#merge-fp">Output precision:</label></td>
<td><select id="merge-fp">
<option value="fp16">fp16 (smaller file size)</option>
<option value="fp32">fp32 (larger file size)</option>
</select>
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
</td>
</tr>
<tr>
<td><label for="#merge-format">Output file format:</label></td>
<td><select id="merge-format">
<option value="safetensors">Safetensors (recommended)</option>
<option value="ckpt">CKPT/Pickle (legacy format)</option>
</select>
</td>
</tr>
</table>
<br/>
<div id="merge-log-container">
<p><label for="#merge-log">Log messages:</label></p>
<div id="merge-log"></div>
</div>
</div>
<div class="merge-config">
<div class="tab-container">
<span id="tab-merge-opts-single" class="tab active">
<span>Make a single file</small></span>
</span>
<span id="tab-merge-opts-batch" class="tab">
<span>Make multiple variations</small></span>
</span>
</div>
<div>
<div id="tab-content-merge-opts-single" class="tab-content active">
<div class="tab-content-inner">
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
<label for="#single-merge-ratio-slider">Merge ratio:</label>
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
<input id="single-merge-ratio" size=2 value="5">%
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
</div>
</div>
<div id="tab-content-merge-opts-batch" class="tab-content">
<div class="tab-content-inner">
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
<table>
<tr><td><label for="#merge-count">Number of variations:</label></td>
<td> <input id="merge-count" size=2 value="5"></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
<td> <input id="merge-start" size=2 value="5">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
<tr><td><label for="#merge-step">Increment each step:</label></td>
<td> <input id="merge-step" size=2 value="10">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
<td> <select id="merge-interpolation">
<option>Exact</option>
<option>SmoothStep</option>
<option>SmootherStep</option>
<option>SmoothestStep</option>
</select></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
</table>
<br/>
<small>Preview of variation ratios:</small><br/>
<canvas id="merge-canvas" width="400" height="400"></canvas>
</div>
</div>
</div>
</div>
<div class="merge-buttons">
<button id="merge-button" class="primaryButton">Merge models</button>
</div>
</div>`,
onOpen: ({ firstOpen }) => {
if (!firstOpen) {
return
}
const tabSettingsSingle = document.querySelector("#tab-merge-opts-single")
const tabSettingsBatch = document.querySelector("#tab-merge-opts-batch")
linkTabContents(tabSettingsSingle)
linkTabContents(tabSettingsBatch)
console.log("Activate")
let mergeModelAField = new ModelDropdown(document.querySelector("#mergeModelA"), "stable-diffusion")
let mergeModelBField = new ModelDropdown(document.querySelector("#mergeModelB"), "stable-diffusion")
updateChart()
// slider
const singleMergeRatioField = document.querySelector("#single-merge-ratio")
const singleMergeRatioSlider = document.querySelector("#single-merge-ratio-slider")
function updateSingleMergeRatio() {
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
singleMergeRatioField.dispatchEvent(new Event("change"))
}
function updateSingleMergeRatioSlider() {
if (singleMergeRatioField.value < 0) {
singleMergeRatioField.value = 0
} else if (singleMergeRatioField.value > 100) {
singleMergeRatioField.value = 100
}
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
singleMergeRatioSlider.dispatchEvent(new Event("change"))
}
singleMergeRatioSlider.addEventListener("input", updateSingleMergeRatio)
singleMergeRatioField.addEventListener("input", updateSingleMergeRatioSlider)
updateSingleMergeRatio()
document.querySelector(".merge-config").addEventListener("change", updateChart)
document.querySelector("#merge-button").addEventListener("click", async function(e) {
// Build request template
let model0 = mergeModelAField.value
let model1 = mergeModelBField.value
let request = { model0: model0, model1: model1 }
request["use_fp16"] = document.querySelector("#merge-fp").value == "fp16"
let iterations = document.querySelector("#merge-count").value >> 0
let start = parseFloat(document.querySelector("#merge-start").value)
let step = parseFloat(document.querySelector("#merge-step").value)
if (isTabActive(tabSettingsSingle)) {
start = parseFloat(singleMergeRatioField.value)
step = 0
iterations = 1
addLogMessage(`merge ratio = ${start}%`)
} else {
addLogMessage(`start = ${start}%`)
addLogMessage(`step = ${step}%`)
}
if (start + (iterations - 1) * step >= 100) {
addLogMessage("<i>Aborting: maximum ratio is &#8805; 100%</i>")
addLogMessage("Reduce the number of variations or the step size")
addLogSeparator()
document.querySelector("#merge-count").focus()
return
}
if (document.querySelector("#merge-filename").value == "") {
addLogMessage("<i>Aborting: No output file name specified</i>")
addLogSeparator()
document.querySelector("#merge-filename").focus()
return
}
// Disable merge button
e.target.disabled = true
e.target.classList.add("disabled")
let cursor = $("body").css("cursor")
let label = document.querySelector("#merge-button").innerHTML
$("body").css("cursor", "progress")
document.querySelector("#merge-button").innerHTML = "Merging models ..."
addLogMessage("Merging models")
addLogMessage("Model A: " + model0)
addLogMessage("Model B: " + model1)
// Batch main loop
for (let i = 0; i < iterations; i++) {
let alpha = (start + i * step) / 100
if (isTabActive(tabSettingsBatch)) {
switch (document.querySelector("#merge-interpolation").value) {
case "SmoothStep":
alpha = smoothstep(alpha)
break
case "SmootherStep":
alpha = smootherstep(alpha)
break
case "SmoothestStep":
alpha = smootheststep(alpha)
break
}
}
addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
request["out_path"] = document.querySelector("#merge-filename").value
request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
addLogMessage(`&nbsp;&nbsp;filename: ${request["out_path"]}`)
// sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
request["ratio"] = 1-alpha
let res = await fetch("/model/merge", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(request),
})
const data = await res.json()
addLogMessage(JSON.stringify(data))
}
addLogMessage(
"<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder."
)
addLogSeparator()
// Re-enable merge button
$("body").css("cursor", cursor)
document.querySelector("#merge-button").innerHTML = label
e.target.disabled = false
e.target.classList.remove("disabled")
// Update model list
stableDiffusionModelField.innerHTML = ""
vaeModelField.innerHTML = ""
hypernetworkModelField.innerHTML = ""
await getModels()
})
},
})
})()

View File

@ -0,0 +1,770 @@
;(function() {
"use strict"
let mergeCSS = `
/*********** Main tab ***********/
.tab-centered {
justify-content: center;
}
#model-tool-tab-content {
background-color: var(--background-color3);
}
#model-tool-tab-content .tab-content-inner {
text-align: initial;
}
#model-tool-tab-bar .tab {
margin-bottom: 0px;
border-top-left-radius: var(--input-border-radius);
background-color: var(--background-color3);
padding: 6px 6px 0.8em 6px;
}
#tab-content-merge .tab-content-inner {
max-width: 100%;
padding: 10pt;
}
/*********** Merge UI ***********/
.merge-model-container {
margin-left: 15%;
margin-right: 15%;
text-align: left;
display: inline-grid;
grid-template-columns: 1fr 1fr;
grid-template-rows: auto auto auto;
gap: 0px 0px;
grid-auto-flow: row;
grid-template-areas:
"merge-input merge-config"
"merge-buttons merge-buttons";
}
.merge-model-container p {
margin-top: 3pt;
margin-bottom: 3pt;
}
.merge-config .tab-content {
background: var(--background-color1);
border-radius: 3pt;
}
.merge-config .tab-content-inner {
text-align: left;
}
.merge-input {
grid-area: merge-input;
padding-left:1em;
}
.merge-config {
grid-area: merge-config;
padding:1em;
}
.merge-config input {
margin-bottom: 3px;
}
.merge-config select {
margin-bottom: 3px;
}
.merge-buttons {
grid-area: merge-buttons;
padding:1em;
text-align: center;
}
#merge-button {
padding: 8px;
width:20em;
}
div#merge-log {
height:150px;
overflow-x:hidden;
overflow-y:scroll;
background:var(--background-color1);
border-radius: 3pt;
}
div#merge-log i {
color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
font-family: monospace;
}
.disabled {
background: var(--background-color4);
color: var(--text-color);
}
#merge-type-tabs {
border-bottom: 1px solid black;
}
#merge-log-container {
display: none;
}
.merge-model-container #merge-warning {
color: var(--small-label-color);
}
/*********** LORA UI ***********/
.lora-manager-grid {
display: grid;
gap: 0px 8px;
grid-auto-flow: row;
}
@media screen and (min-width: 1501px) {
.lora-manager-grid textarea {
height:350px;
}
.lora-manager-grid {
grid-template-columns: auto 1fr 1fr;
grid-template-rows: auto 1fr;
grid-template-areas:
"selector selector selector"
"thumbnail keywords notes";
}
}
@media screen and (min-width: 1001px) and (max-width: 1500px) {
.lora-manager-grid textarea {
height:250px;
}
.lora-manager-grid {
grid-template-columns: auto auto;
grid-template-rows: auto auto auto;
grid-template-areas:
"selector selector"
"thumbnail keywords"
"thumbnail notes";
}
}
@media screen and (max-width: 1000px) {
.lora-manager-grid textarea {
height:200px;
}
.lora-manager-grid {
grid-template-columns: auto;
grid-template-rows: auto auto auto auto;
grid-template-areas:
"selector"
"keywords"
"thumbnail"
"notes";
}
}
.lora-manager-grid-selector {
grid-area: selector;
justify-self: start;
}
.lora-manager-grid-thumbnail {
grid-area: thumbnail;
justify-self: center;
}
.lora-manager-grid-keywords {
grid-area: keywords;
}
.lora-manager-grid-notes {
grid-area: notes;
}
.lora-manager-grid p {
margin-bottom: 2px;
}
`
let mergeUI = `
<div class="merge-model-container panel-box">
<div class="merge-input">
<p><label for="#mergeModelA">Select Model A:</label></p>
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<p><label for="#mergeModelB">Select Model B:</label></p>
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
<br/><br/>
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
<br/>
<table>
<tr>
<td><label for="#merge-filename">Output file name:</label></td>
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
</tr>
<tr>
<td><label for="#merge-fp">Output precision:</label></td>
<td><select id="merge-fp">
<option value="fp16">fp16 (smaller file size)</option>
<option value="fp32">fp32 (larger file size)</option>
</select>
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
</td>
</tr>
<tr>
<td><label for="#merge-format">Output file format:</label></td>
<td><select id="merge-format">
<option value="safetensors">Safetensors (recommended)</option>
<option value="ckpt">CKPT/Pickle (legacy format)</option>
</select>
</td>
</tr>
</table>
<br/>
<div id="merge-log-container">
<p><label for="#merge-log">Log messages:</label></p>
<div id="merge-log"></div>
</div>
</div>
<div class="merge-config">
<div class="tab-container">
<span id="tab-merge-opts-single" class="tab active">
<span>Make a single file</small></span>
</span>
<span id="tab-merge-opts-batch" class="tab">
<span>Make multiple variations</small></span>
</span>
</div>
<div>
<div id="tab-content-merge-opts-single" class="tab-content active">
<div class="tab-content-inner">
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
<label for="#single-merge-ratio-slider">Merge ratio:</label>
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
<input id="single-merge-ratio" size=2 value="5">%
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
</div>
</div>
<div id="tab-content-merge-opts-batch" class="tab-content">
<div class="tab-content-inner">
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
<table>
<tr><td><label for="#merge-count">Number of variations:</label></td>
<td> <input id="merge-count" size=2 value="5"></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
<td> <input id="merge-start" size=2 value="5">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
<tr><td><label for="#merge-step">Increment each step:</label></td>
<td> <input id="merge-step" size=2 value="10">%</td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
<td> <select id="merge-interpolation">
<option>Exact</option>
<option>SmoothStep</option>
<option>SmootherStep</option>
<option>SmoothestStep</option>
</select></td>
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
</table>
<br/>
<small>Preview of variation ratios:</small><br/>
<canvas id="merge-canvas" width="400" height="400"></canvas>
</div>
</div>
</div>
</div>
<div class="merge-buttons">
<button id="merge-button" class="primaryButton">Merge models</button>
</div>
</div>`
let loraUI=`
<div class="panel-box lora-manager-grid">
<div class="lora-manager-grid-selector">
<label for="#loraModel">Select Lora:</label>
<input id="loraModel" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
</div>
<div class="lora-manager-grid-thumbnail">
<p style="height:2em;">Thumbnail:</p>
<div style="position:relative; height:256px; width:256px;background-color:#222;border-radius:1em;margin-bottom:1em;">
<i id="lora-manager-image-placeholder" class="fa-regular fa-image" style="font-size:500%;color:#555;position:absolute; top: 50%; left: 50%; transform: translate(-50%,-50%);"></i>
<img id="lora-manager-image" class="displayNone" style="border-radius:6px;max-height:256px;max-width:256px;"/>
</div>
<div style="text-align:center;">
<button class="tertiaryButton" id="lora-manager-upload-button"><i class="fa-solid fa-upload"></i> Upload new thumbnail</button>
<input id="lora-manager-upload-input" name="lora-manager-upload-input" type="file" class="displayNone">
<!-- button class="tertiaryButton"><i class="fa-solid fa-trash-can"></i> Remove</button -->
</div>
</div>
<div class="lora-manager-grid-keywords">
<p style="height:2em;">Keywords:
<span style="float:right;margin-bottom:4px;"><button id="lora-keyword-from-civitai" class="tertiaryButton smallButton">Import from Civitai</button></span></p>
<textarea style="width:100%;resize:vertical;" id="lora-manager-keywords" placeholder="Put LORA specific keywords here..."></textarea>
<p style="color:var(--small-label-color);">
<b>LORA model keywords</b> can be used via the <code>+&nbsp;Embeddings</code> button. They get added to the embedding
keyword menu when the LORA has been selected in the image settings.
</p>
</div>
<div class="lora-manager-grid-notes">
<p style="height:2em;">Notes:</p>
<textarea style="width:100%;resize:vertical;" id="lora-manager-notes" placeholder="Place for things you want to remember..."></textarea>
<p id="civitai-section" class="displayNone">
<b>Civitai model page:</b>
<a id="civitai-model-page" target="_blank"></a>
</p>
</div>
</div>`
let tabHTML=`
<div id="model-tool-tab-bar" class="tab-container tab-centered">
<span id="tab-model-loraUI" class="tab active">
<span><i class="fa-solid fa-key"></i> Lora Keywords</small></span>
</span>
<span id="tab-model-mergeUI" class="tab">
<span><i class="fa-solid fa-code-merge"></i> Merge Models</small></span>
</span>
</div>
<div id="model-tool-tab-content" class="panel-box">
<div id="tab-content-model-loraUI" class="tab-content active">
<div class="tab-content-inner">
${loraUI}
</div>
</div>
<div id="tab-content-model-mergeUI" class="tab-content">
<div class="tab-content-inner">
${mergeUI}
</div>
</div>
</div>`
///////////////////// Function section
// Hermite smoothstep: 3x^2 - 2x^3; maps [0,1] -> [0,1] with zero slope at both ends.
function smoothstep(x) {
    const squared = x * x
    return squared * (3 - 2 * x)
}
// Perlin's smootherstep: 6x^5 - 15x^4 + 10x^3; zero 1st and 2nd derivatives at 0 and 1.
function smootherstep(x) {
    const cubed = x * x * x
    return cubed * (x * (x * 6 - 15) + 10)
}
// 7th-order sigmoid step: -20x^7 + 70x^6 - 84x^5 + 35x^4
// (zero 1st, 2nd and 3rd derivatives at both endpoints).
function smootheststep(x) {
    return -20 * Math.pow(x, 7)
        + 70 * Math.pow(x, 6)
        - 84 * Math.pow(x, 5)
        + 35 * Math.pow(x, 4)
}
// Current wall-clock time formatted as zero-padded "HH:MM:SS".
function getCurrentTime() {
    const pad = (value) => String(value).padStart(2, "0")
    const now = new Date()
    return `${pad(now.getHours())}:${pad(now.getMinutes())}:${pad(now.getSeconds())}`
}
// Append a timestamped entry to the merge log panel. `message` may contain
// HTML markup (callers pass <i>/<b> tags), since it is inserted via innerHTML.
function addLogMessage(message) {
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`
// Scroll to the bottom of the log
logContainer.scrollTop = logContainer.scrollHeight
// Un-hide the log container (it starts with display:none via CSS)
document.querySelector("#merge-log-container").style.display = "block"
}
// Append a horizontal rule to the merge log and keep the view scrolled to the bottom.
function addLogSeparator() {
const logContainer = document.getElementById("merge-log")
logContainer.innerHTML += "<hr>"
logContainer.scrollTop = logContainer.scrollHeight
}
// Draw the merge-ratio preview chart on the #merge-canvas element.
// `fn` is the interpolation curve (x in [0,1] -> y in [0,1]) to plot; the
// start/step/count inputs are read from the DOM to place the ratio markers.
function drawDiagram(fn) {
const SIZE = 300
const canvas = document.getElementById("merge-canvas")
canvas.height = canvas.width = SIZE
const ctx = canvas.getContext("2d")
// Draw coordinate system
// Flip the y-axis so the mathematical origin (0,0) is the bottom-left corner.
ctx.scale(1, -1)
ctx.translate(0, -canvas.height)
ctx.lineWidth = 1
ctx.beginPath()
ctx.strokeStyle = "white"
// Outer border plus the main diagonal (the identity curve y = x).
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
// Dotted 10x10 background grid.
ctx.beginPath()
ctx.setLineDash([1, 2])
const n = SIZE / 10
for (let i = n; i < SIZE; i += n) {
ctx.moveTo(0, i)
ctx.lineTo(SIZE, i)
ctx.moveTo(i, 0)
ctx.lineTo(i, SIZE)
}
ctx.stroke()
ctx.beginPath()
ctx.setLineDash([])
ctx.beginPath()
ctx.strokeStyle = "black"
ctx.lineWidth = 3
// Plot function
// Sample fn at 21 evenly spaced points and connect them with line segments.
const numSamples = 20
for (let i = 0; i <= numSamples; i++) {
const x = i / numSamples
const y = fn(x)
const canvasX = x * SIZE
const canvasY = y * SIZE
if (i === 0) {
ctx.moveTo(canvasX, canvasY)
} else {
ctx.lineTo(canvasX, canvasY)
}
}
ctx.stroke()
// Plot alpha values (yellow boxes)
let start = parseFloat(document.querySelector("#merge-start").value)
let step = parseFloat(document.querySelector("#merge-step").value)
// `>> 0` truncates the (string) input to an integer iteration count.
let iterations = document.querySelector("#merge-count").value >> 0
ctx.beginPath()
ctx.fillStyle = "yellow"
for (let i = 0; i < iterations; i++) {
const alpha = (start + i * step) / 100
const x = alpha * SIZE
const y = fn(alpha) * SIZE
if (x <= SIZE) {
ctx.rect(x - 3, y - 3, 6, 6)
ctx.fill()
} else {
// A marker fell past 100%: redraw the border in red and warn in the log.
ctx.strokeStyle = "red"
ctx.moveTo(0, 0)
ctx.lineTo(0, SIZE)
ctx.lineTo(SIZE, SIZE)
ctx.lineTo(SIZE, 0)
ctx.lineTo(0, 0)
ctx.lineTo(SIZE, SIZE)
ctx.stroke()
addLogMessage("<i>Warning: maximum ratio is &#8805; 100%</i>")
}
}
}
// Redraw the ratio-preview chart using the interpolation curve currently
// selected in the #merge-interpolation dropdown (identity for "Exact").
function updateChart() {
    const curves = {
        SmoothStep: smoothstep,
        SmootherStep: smootherstep,
        SmoothestStep: smootheststep,
    }
    const selected = document.querySelector("#merge-interpolation").value
    const fn = curves[selected] || ((x) => x)
    drawDiagram(fn)
}
// Sets up the model-merge tab: sub-tab links, the two model dropdowns,
// the single-merge ratio slider/field pair, the interpolation chart, and
// the "Merge" button's click handler (which drives the batch merge loop).
function initMergeUI() {
    const tabSettingsSingle = document.querySelector("#tab-merge-opts-single")
    const tabSettingsBatch = document.querySelector("#tab-merge-opts-batch")
    linkTabContents(tabSettingsSingle)
    linkTabContents(tabSettingsBatch)
    // Searchable dropdowns for the two source models to merge
    let mergeModelAField = new ModelDropdown(document.querySelector("#mergeModelA"), "stable-diffusion")
    let mergeModelBField = new ModelDropdown(document.querySelector("#mergeModelB"), "stable-diffusion")
    updateChart()
    // slider
    // Keep the numeric ratio field and the slider in sync. The slider works in
    // tenths of a percent (0..1000), the field in percent (0..100).
    const singleMergeRatioField = document.querySelector("#single-merge-ratio")
    const singleMergeRatioSlider = document.querySelector("#single-merge-ratio-slider")
    // slider -> field
    function updateSingleMergeRatio() {
        singleMergeRatioField.value = singleMergeRatioSlider.value / 10
        singleMergeRatioField.dispatchEvent(new Event("change"))
    }
    // field -> slider, clamping the field to [0, 100] first
    function updateSingleMergeRatioSlider() {
        if (singleMergeRatioField.value < 0) {
            singleMergeRatioField.value = 0
        } else if (singleMergeRatioField.value > 100) {
            singleMergeRatioField.value = 100
        }
        singleMergeRatioSlider.value = singleMergeRatioField.value * 10
        singleMergeRatioSlider.dispatchEvent(new Event("change"))
    }
    singleMergeRatioSlider.addEventListener("input", updateSingleMergeRatio)
    singleMergeRatioField.addEventListener("input", updateSingleMergeRatioSlider)
    updateSingleMergeRatio()
    // Redraw the interpolation chart whenever any merge setting changes
    document.querySelector(".merge-config").addEventListener("change", updateChart)
    document.querySelector("#merge-button").addEventListener("click", async function(e) {
        // Build request template
        let model0 = mergeModelAField.value
        let model1 = mergeModelBField.value
        let request = { model0: model0, model1: model1 }
        request["use_fp16"] = document.querySelector("#merge-fp").value == "fp16"
        // ">> 0" truncates the field's string value to an integer
        let iterations = document.querySelector("#merge-count").value >> 0
        let start = parseFloat(document.querySelector("#merge-start").value)
        let step = parseFloat(document.querySelector("#merge-step").value)
        if (isTabActive(tabSettingsSingle)) {
            // Single-merge mode: one iteration at the slider's ratio
            start = parseFloat(singleMergeRatioField.value)
            step = 0
            iterations = 1
            addLogMessage(`merge ratio = ${start}%`)
        } else {
            addLogMessage(`start = ${start}%`)
            addLogMessage(`step = ${step}%`)
        }
        // Validate: the last batch iteration's ratio must stay below 100%
        if (start + (iterations - 1) * step >= 100) {
            addLogMessage("<i>Aborting: maximum ratio is &#8805; 100%</i>")
            addLogMessage("Reduce the number of variations or the step size")
            addLogSeparator()
            document.querySelector("#merge-count").focus()
            return
        }
        // Validate: an output file name is required
        if (document.querySelector("#merge-filename").value == "") {
            addLogMessage("<i>Aborting: No output file name specified</i>")
            addLogSeparator()
            document.querySelector("#merge-filename").focus()
            return
        }
        // Disable merge button
        e.target.disabled = true
        e.target.classList.add("disabled")
        // Remember cursor and button label so they can be restored afterwards
        let cursor = $("body").css("cursor")
        let label = document.querySelector("#merge-button").innerHTML
        $("body").css("cursor", "progress")
        document.querySelector("#merge-button").innerHTML = "Merging models ..."
        addLogMessage("Merging models")
        addLogMessage("Model A: " + model0)
        addLogMessage("Model B: " + model1)
        // Batch main loop
        for (let i = 0; i < iterations; i++) {
            // alpha is the merge ratio for this iteration, as a 0..1 fraction
            let alpha = (start + i * step) / 100
            if (isTabActive(tabSettingsBatch)) {
                // In batch mode, remap alpha through the selected easing curve
                switch (document.querySelector("#merge-interpolation").value) {
                    case "SmoothStep":
                        alpha = smoothstep(alpha)
                        break
                    case "SmootherStep":
                        alpha = smootherstep(alpha)
                        break
                    case "SmoothestStep":
                        alpha = smootheststep(alpha)
                        break
                }
            }
            addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
            // Output file name encodes the alpha of this iteration
            request["out_path"] = document.querySelector("#merge-filename").value
            request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
            addLogMessage(`&nbsp;&nbsp;filename: ${request["out_path"]}`)
            // sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
            request["ratio"] = 1-alpha
            // Merges run sequentially: each request is awaited before the next starts
            let res = await fetch("/model/merge", {
                method: "POST",
                headers: { "Content-Type": "application/json" },
                body: JSON.stringify(request),
            })
            const data = await res.json()
            addLogMessage(JSON.stringify(data))
        }
        addLogMessage(
            "<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder."
        )
        addLogSeparator()
        // Re-enable merge button
        $("body").css("cursor", cursor)
        document.querySelector("#merge-button").innerHTML = label
        e.target.disabled = false
        e.target.classList.remove("disabled")
        // Update model list
        // Clearing these dropdowns forces getModels() to repopulate them,
        // so the freshly merged models show up immediately
        stableDiffusionModelField.innerHTML = ""
        vaeModelField.innerHTML = ""
        hypernetworkModelField.innerHTML = ""
        await getModels()
    })
}
// Manager UI for LoRA model metadata: activation keywords, notes, a thumbnail
// image, and an optional link back to the model's Civitai page. Metadata is
// persisted per-model in the Bucket store under `modelinfo/lora/<model>`.
const LoraUI = {
    modelField: undefined,
    keywordsField: undefined,
    notesField: undefined,
    civitaiImportBtn: undefined,
    civitaiSection: undefined, // fixed typo: was declared as "civitaiSecion", which nothing ever assigned or read
    civitaiAnchor: undefined,
    image: undefined,
    imagePlaceholder: undefined,
    uploadBtn: undefined,
    uploadInput: undefined,
    // Cache the DOM elements, wire up event handlers, and load the
    // currently-selected model's info.
    init() {
        LoraUI.modelField = new ModelDropdown(document.querySelector("#loraModel"), "lora", "None")
        LoraUI.keywordsField = document.querySelector("#lora-manager-keywords")
        LoraUI.notesField = document.querySelector("#lora-manager-notes")
        LoraUI.civitaiImportBtn = document.querySelector("#lora-keyword-from-civitai")
        LoraUI.civitaiSection = document.querySelector("#civitai-section")
        LoraUI.civitaiAnchor = document.querySelector("#civitai-model-page")
        LoraUI.image = document.querySelector("#lora-manager-image")
        LoraUI.imagePlaceholder = document.querySelector("#lora-manager-image-placeholder")
        LoraUI.uploadBtn = document.querySelector("#lora-manager-upload-button")
        LoraUI.uploadInput = document.querySelector("#lora-manager-upload-input")
        LoraUI.modelField.addEventListener("change", LoraUI.updateFields)
        // Save on focusout rather than on every keystroke
        LoraUI.keywordsField.addEventListener("focusout", LoraUI.saveInfos)
        LoraUI.notesField.addEventListener("focusout", LoraUI.saveInfos)
        LoraUI.civitaiImportBtn.addEventListener("click", LoraUI.importFromCivitai)
        // The visible button just forwards the click to the hidden file input
        LoraUI.uploadBtn.addEventListener("click", (e) => LoraUI.uploadInput.click())
        LoraUI.uploadInput.addEventListener("change", LoraUI.uploadLoraThumb)
        // Refresh when a thumbnail is saved elsewhere in the app
        document.addEventListener("saveThumb", LoraUI.updateFields)
        LoraUI.updateFields()
    },
    // Read the selected thumbnail file and hand it to the shared
    // "use as thumbnail" flow for the current LoRA model.
    uploadLoraThumb(e) {
        if (LoraUI.uploadInput.files.length === 0) {
            return
        }
        let reader = new FileReader()
        let file = LoraUI.uploadInput.files[0]
        reader.addEventListener("load", (event) => {
            let img = document.createElement("img")
            img.src = reader.result
            onUseAsThumbnailClick(
                {
                    use_lora_model: LoraUI.modelField.value,
                },
                img
            )
        })
        if (file) {
            reader.readAsDataURL(file)
        }
    },
    // Populate keywords/notes/Civitai link and the thumbnail for the
    // currently selected model, from the Bucket store.
    updateFields() {
        // Hide the Civitai link until the stored info confirms one exists
        LoraUI.civitaiSection.classList.add("displayNone")
        Bucket.retrieve(`modelinfo/lora/${LoraUI.modelField.value}`)
            .then((info) => {
                if (info == null) {
                    LoraUI.keywordsField.value = ""
                    LoraUI.notesField.value = ""
                    LoraUI.hideCivitaiLink()
                } else {
                    // Guard against stored entries without a keywords array
                    LoraUI.keywordsField.value = (info.keywords || []).join("\n")
                    LoraUI.notesField.value = info.notes
                    if ("civitai" in info && info["civitai"] != null) {
                        LoraUI.showCivitaiLink(info.civitai)
                    } else {
                        LoraUI.hideCivitaiLink()
                    }
                }
            })
        Bucket.getImageAsDataURL(`${profileNameField.value}/lora/${LoraUI.modelField.value}.png`)
            .then((data) => {
                LoraUI.image.src = data
                LoraUI.image.classList.remove("displayNone")
                LoraUI.imagePlaceholder.classList.add("displayNone")
            })
            .catch((error) => {
                // No stored thumbnail: show the placeholder instead
                LoraUI.image.classList.add("displayNone")
                LoraUI.imagePlaceholder.classList.remove("displayNone")
            })
    },
    // Persist the current keywords/notes/Civitai link for the selected model.
    saveInfos() {
        let info = {
            // One keyword per line; drop blank lines
            keywords: LoraUI.keywordsField.value
                .split("\n")
                .filter((x) => x !== ""),
            notes: LoraUI.notesField.value,
            // Only persist the link while its section is visible
            // NOTE(review): Element.checkVisibility() is a fairly new API — confirm minimum supported browsers
            civitai: LoraUI.civitaiSection.checkVisibility() ? LoraUI.civitaiAnchor.href : null,
        }
        Bucket.store(`modelinfo/lora/${LoraUI.modelField.value}`, info)
    },
    // Look the model up on Civitai by its SHA-256 digest, import its trained
    // keywords and model-page link, and persist them.
    importFromCivitai() {
        document.body.style["cursor"] = "progress"
        fetch("/sha256/lora/" + LoraUI.modelField.value)
            .then((result) => result.json())
            .then((json) => fetch("https://civitai.com/api/v1/model-versions/by-hash/" + json.digest))
            .then((result) => result.json())
            .then((json) => {
                document.body.style["cursor"] = "default"
                if (json == null) {
                    return
                }
                if ("trainedWords" in json) {
                    LoraUI.keywordsField.value = json["trainedWords"].join("\n")
                } else {
                    showToast("No keyword info found.")
                }
                if ("modelId" in json) {
                    LoraUI.showCivitaiLink("https://civitai.com/models/" + json.modelId)
                } else {
                    LoraUI.hideCivitaiLink()
                }
                LoraUI.saveInfos()
            })
            .catch((error) => {
                // Without this, a failed lookup left the cursor stuck on "progress"
                document.body.style["cursor"] = "default"
                showToast("Could not fetch LoRA details from Civitai.")
                console.error(error)
            })
    },
    // Show the Civitai section and point its anchor at the given URL.
    showCivitaiLink(href) {
        LoraUI.civitaiSection.classList.remove("displayNone")
        LoraUI.civitaiAnchor.href = href
        LoraUI.civitaiAnchor.innerHTML = LoraUI.civitaiAnchor.href
    },
    hideCivitaiLink() {
        LoraUI.civitaiSection.classList.add("displayNone")
    }
}
// Register the "Model tools" tab; its UI is initialized lazily on first open.
createTab({
    id: "merge",
    icon: "fa-toolbox",
    label: "Model tools",
    css: mergeCSS,
    content: tabHTML,
    onOpen: ({ firstOpen }) => {
        // Initialize the merge and LoRA sub-tabs only once
        if (firstOpen) {
            initMergeUI()
            LoraUI.init()
            const subTabSelectors = ["#tab-model-mergeUI", "#tab-model-loraUI"]
            subTabSelectors.forEach((selector) => linkTabContents(document.querySelector(selector)))
        }
    },
})
})()
/**
 * Fetch the stored activation keywords for a LoRA model.
 * @param {string} model - the LoRA model name (Bucket key suffix)
 * @returns {Promise<string[]>} the stored keywords, or [] when no info is stored
 */
async function getLoraKeywords(model) {
    const info = await Bucket.retrieve(`modelinfo/lora/${model}`)
    return info ? info.keywords : []
}