Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-08-15 02:39:13 +02:00)

Compare commits
167 commits
SHA1 | Author | Date | |
---|---|---|---|
9571b8addc | |||
9601f304a5 | |||
ff43dac2a7 | |||
0a43305455 | |||
54d8224de2 | |||
c9e34457cd | |||
47c8eb304f | |||
2dd39fa218 | |||
cb618efb98 | |||
e7ca8090fd | |||
7861c57317 | |||
f701b8dc29 | |||
bd10a850fa | |||
0f96688a54 | |||
8eeca90d55 | |||
367e7f7065 | |||
ee19eaae62 | |||
8eb3a3536b | |||
cfd50231e1 | |||
1c8ab9e1b4 | |||
6094cd8578 | |||
353c49a40b | |||
277140f218 | |||
ca9413ccf4 | |||
c9a0d090cb | |||
1cd783d3a3 | |||
1ead764a02 | |||
45f7b35954 | |||
6a41540749 | |||
5b47da67f6 | |||
292f68ff97 | |||
3b554d881a | |||
40ebf468d3 | |||
4bc6e51862 | |||
427861cf13 | |||
da3e7a2eb8 | |||
2979f04c82 | |||
1949d8a50c | |||
ee66c799e0 | |||
7c50b8bf94 | |||
141ff74ece | |||
321e5f1ed6 | |||
6d131d9d8e | |||
7e69b8eb31 | |||
4e0b33e6a4 | |||
54f7e6fcb8 | |||
529169c4da | |||
a2c8c99215 | |||
e8bf3fd009 | |||
465676e9ea | |||
af53b57047 | |||
54b5f75905 | |||
4348333497 | |||
cc31110bcf | |||
f7c04bf7a6 | |||
029509ebad | |||
65102bb64d | |||
b96b55c5ce | |||
1f5aba010e | |||
f0b3bea4e3 | |||
84fae2d9e0 | |||
0b96fa112d | |||
c64bcd23d3 | |||
efd9a22bb5 | |||
159c3edfe3 | |||
f74fa8657b | |||
648b142a4b | |||
426f92595e | |||
82a8d9b644 | |||
ff9430b8a2 | |||
2e69ffcb5e | |||
0ea38db7ef | |||
a69d4c279e | |||
2706149399 | |||
3d0cdc1cb6 | |||
ac605e9352 | |||
5432297691 | |||
e37be0f954 | |||
a99209b674 | |||
cb02b5ba18 | |||
69f14edd80 | |||
14714b950d | |||
13654cb8c0 | |||
00276228cf | |||
8583bb8d7b | |||
d48951fe00 | |||
99bdcfa0a5 | |||
e64e1a92e6 | |||
e278e639a3 | |||
c4bad5c454 | |||
da41a74efc | |||
0dc970562a | |||
2d8401473d | |||
9c91f57b19 | |||
f14afcd129 | |||
5c1a3d82d7 | |||
e02a917569 | |||
347fa0fda1 | |||
6510d4cb02 | |||
91e4ccf6f8 | |||
36249874bc | |||
d2b5d6cce9 | |||
b2922741c9 | |||
300f3e27db | |||
d7330b80a9 | |||
acdd7667b7 | |||
8114fa3f5d | |||
4bc5508f38 | |||
e503c6092e | |||
6a8985d8dd | |||
bee67fd883 | |||
a1d75d40aa | |||
29484867ca | |||
7fa983b971 | |||
617a8b2814 | |||
b924d323d4 | |||
a2efda41d3 | |||
642c114501 | |||
02dd3e457d | |||
ea7b28c9d5 | |||
472ab4a9ce | |||
fca84e3edf | |||
b70235ff92 | |||
6eff591df7 | |||
d0b2bf736e | |||
e5c11ea214 | |||
6b6443406d | |||
3452d7852a | |||
f1fa10badd | |||
1267621424 | |||
8a0ec95fe1 | |||
ba30a63407 | |||
c56a2adbcb | |||
2de96d4dc9 | |||
a486f20892 | |||
49535deb2e | |||
7cbf62cf12 | |||
3b0ace3410 | |||
5a9c8e1d87 | |||
daaa65dc0a | |||
ab4e371524 | |||
927fd304b0 | |||
5af84b8e90 | |||
d425dac499 | |||
d056459e76 | |||
3169485f33 | |||
d9b9f80a93 | |||
d429505b71 | |||
72ee708917 | |||
93bbfac29a | |||
040d7a6563 | |||
e8dd930a50 | |||
31c049ebfe | |||
d343a37fb2 | |||
7097175c6f | |||
8e57c49043 | |||
9f036ceefd | |||
ff3ca8b36b | |||
87a7b70a27 | |||
9c71c966ca | |||
6dc99e676e | |||
3bf5e11f94 | |||
eef9af2266 | |||
8316a002da | |||
c3bf767024 | |||
0a21a69a9f | |||
cbc48e31e1 |
3rd-PARTY-LICENSES (new file, 27 lines)
@@ -0,0 +1,27 @@
+jquery-confirm
+==============
+https://craftpip.github.io/jquery-confirm/
+
+jquery-confirm is licensed under the MIT license:
+
+The MIT License (MIT)
+
+Copyright (c) 2019 Boniface Pereira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
CHANGES.md (17 lines changed)
@@ -2,9 +2,10 @@
 
 ## v2.4
 ### Major Changes
-- **Automatic scanning for malicious model files** - using `picklescan`. Thanks @JeLuf
+- **Automatic scanning for malicious model files** - using `picklescan`, and support for `safetensor` model format. Thanks @JeLuf
 - **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder
 - **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang . More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs
+- **Image Editor** - for drawing simple images for guiding the AI. Thanks @mdiller
 - **Cleaner UI design** - Show settings and help in new tabs, instead of dropdown popups (which were buggy). Thanks @mdiller
 - **Progress bar.** Thanks @mdiller
 - **Custom Image Modifiers** - You can now save your custom image modifiers! Your saved modifiers can include special characters like `{}, (), [], |`
@@ -19,8 +20,22 @@
 - Configuration to prevent the browser from opening on startup
 - Lots of minor bug fixes
 - A `What's New?` tab in the UI
+- Ask for a confirmation before clearing the results pane or stopping a render task. The dialog can be skipped by holding down the shift key while clicking on the button.
+- Show the network addresses of the server in the systems setting dialog
+- Support loading models in the safetensor format, for improved safety
 
 ### Detailed changelog
+* 2.4.18 - 5 Dec 2022 - Make JPEG Output quality user controllable. Thanks @JeLuf
+* 2.4.18 - 5 Dec 2022 - Support loading models in the safetensor format, for improved safety. Thanks @JeLuf
+* 2.4.18 - 1 Dec 2022 - Image Editor, for drawing simple images for guiding the AI. Thanks @mdiller
+* 2.4.18 - 1 Dec 2022 - Disable an image modifier temporarily by right-clicking it. Thanks @patriceac
+* 2.4.17 - 30 Nov 2022 - Scroll to generated image. Thanks @patriceac
+* 2.4.17 - 30 Nov 2022 - Show the network addresses of the server in the systems setting dialog. Thanks @JeLuf
+* 2.4.17 - 30 Nov 2022 - Fix a bug where GFPGAN wouldn't work properly when multiple GPUs tried to run it at the same time. Thanks @madrang
+* 2.4.17 - 30 Nov 2022 - Confirm before stopping or clearing all the tasks. Thanks @JeLuf
+* 2.4.16 - 29 Nov 2022 - Bug fixes for SD 2.0 - remove the need for patching, default to SD 1.4 model if trying to load an SD2 model in SD1.4.
+* 2.4.15 - 25 Nov 2022 - Experimental support for SD 2.0. Uses lots of memory, not optimized, probably GPU-only.
+* 2.4.14 - 22 Nov 2022 - Change the backend to a custom fork of Stable Diffusion
 * 2.4.13 - 21 Nov 2022 - Change the modifier weight via mouse wheel, drag to reorder selected modifiers, and some more modifier-related fixes. Thanks @patriceac
 * 2.4.12 - 21 Nov 2022 - Another fix for improving how long images take to generate. Reduces the time taken for an enqueued task to start processing.
 * 2.4.11 - 21 Nov 2022 - Installer improvements: avoid crashing if the username contains a space or special characters, allow moving/renaming the folder after installation on Windows, whitespace fix on git apply
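Two of the safety-related items above, scanning legacy checkpoints with `picklescan` and preferring the `safetensors` format, can be illustrated with a short hedged sketch. The model path is a placeholder, the `picklescan -p` command-line invocation is an assumption about that tool rather than something taken from this repository, and `safetensors.torch.load_file` is the standard loader from the safetensors package:

```python
# Sketch only: prefer .safetensors checkpoints (no pickle code execution),
# and scan legacy .ckpt files with picklescan before trusting them.
import subprocess
from pathlib import Path

MODEL_PATH = Path("models/stable-diffusion/model.safetensors")  # placeholder path

if MODEL_PATH.suffix == ".safetensors":
    from safetensors.torch import load_file
    state_dict = load_file(str(MODEL_PATH))  # loads tensors without running pickled code
    print(f"loaded {len(state_dict)} tensors")
else:
    # Assumption: the picklescan CLI accepts "-p <path>" and returns non-zero
    # when it finds suspicious imports.
    scan = subprocess.run(["picklescan", "-p", str(MODEL_PATH)])
    if scan.returncode != 0:
        raise SystemExit("picklescan flagged this checkpoint; refusing to load it")
    import torch
    state_dict = torch.load(str(MODEL_PATH), map_location="cpu")
```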
@@ -3,6 +3,8 @@
 
 [](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](Troubleshooting.md)
 
+New! Experimental support for Stable Diffusion 2.0 is available in beta!
+
 ----
 
 ## Step 1: Download the installer
@@ -28,7 +30,9 @@ The installer will take care of whatever is needed. A friendly [Discord communit
 - **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
 - **Clutter-free UI**: a friendly and simple UI, while providing a lot of powerful features
 - Supports "*Text to Image*" and "*Image to Image*"
+- **Stable Diffusion 2.0 support (experimental)** - available in beta channel
 - **Custom Models**: Use your own `.ckpt` file, by placing it inside the `models/stable-diffusion` folder!
+- **Auto scan for malicious models** - uses picklescan to prevent malicious models
 - **Live Preview**: See the image as the AI is drawing it
 - **Task Queue**: Queue up all your ideas, without waiting for the current task to finish
 - **In-Painting**: Specify areas of your image to paint into
@@ -71,7 +75,7 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
 You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.
 
 # Installation
-1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-linux.zip).
+1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-linux.zip).
 
 2. **Extract**:
    - For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
@@ -29,6 +29,18 @@ call conda activate .\stable-diffusion\env
 call where python
 call python --version
 
+@rem set the PYTHONPATH
+cd stable-diffusion
+set SD_DIR=%cd%
+
+cd env\lib\site-packages
+set PYTHONPATH=%SD_DIR%;%cd%
+cd ..\..\..
+echo PYTHONPATH=%PYTHONPATH%
+
+cd ..
+
+@rem done
 echo.
 
 cmd /k
@@ -42,11 +42,11 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" (
     mkdir "%MAMBA_ROOT_PREFIX%"
     call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
 
-    @REM if "%ERRORLEVEL%" NEQ "0" (
-    @REM     echo "There was a problem downloading micromamba. Cannot continue."
-    @REM     pause
-    @REM     exit /b
-    @REM )
+    if "%ERRORLEVEL%" NEQ "0" (
+        echo "There was a problem downloading micromamba. Cannot continue."
+        pause
+        exit /b
+    )
 
     mkdir "%APPDATA%"
     mkdir "%USERPROFILE%"
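The change above re-enables the abort-on-failure check around the micromamba download. A rough Python sketch of the same download-then-bail idea, with a placeholder URL and output path rather than the installer's real values:

```python
# Sketch only: download a file and stop the install if the download fails,
# mirroring the re-enabled error check after the curl call above.
import sys
import urllib.request

MICROMAMBA_URL = "https://example.com/micromamba.exe"  # placeholder, not the real URL
OUT_PATH = "micromamba.exe"

try:
    urllib.request.urlretrieve(MICROMAMBA_URL, OUT_PATH)
except OSError as err:
    print(f"There was a problem downloading micromamba. Cannot continue. ({err})")
    sys.exit(1)
```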
@@ -35,6 +35,15 @@ if [ "$0" == "bash" ]; then
 which python
 python --version
 
+# set the PYTHONPATH
+cd stable-diffusion
+SD_PATH=`pwd`
+export PYTHONPATH="$SD_PATH:$SD_PATH/env/lib/python3.8/site-packages"
+echo "PYTHONPATH=$PYTHONPATH"
+cd ..
+
+# done
+
 echo ""
 else
 file_name=$(basename "${BASH_SOURCE[0]}")
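Both developer-console scripts above export a PYTHONPATH that points at the bundled `stable-diffusion` folder and its environment's site-packages. A minimal hedged check, reusing module names from the dependency tests that appear later in this diff, is:

```python
# Sketch: confirm that the exported PYTHONPATH makes the bundled packages importable.
# Run it from the same console that the scripts above set up.
import importlib.util
import os

print("PYTHONPATH =", os.environ.get("PYTHONPATH", "(not set)"))
for name in ("torch", "ldm", "transformers"):
    spec = importlib.util.find_spec(name)
    print(name, "->", spec.origin if spec else "not found")
```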
@@ -16,35 +16,42 @@ if exist "%cd%\profile" (
 
 @rem activate the installer env
 call conda activate
-@rem @if "%ERRORLEVEL%" NEQ "0" (
-@rem     @echo. & echo "Error activating conda for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-@rem     pause
-@rem     exit /b
-@rem )
+@if "%ERRORLEVEL%" NEQ "0" (
+    @echo. & echo "Error activating conda for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
+    pause
+    exit /b
+)
 
 @REM remove the old version of the dev console script, if it's still present
 if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
 
 @call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
 
+if NOT DEFINED test_sd2 set test_sd2=N
+
 @>nul findstr /m "sd_git_cloned" scripts\install_status.txt
 @if "%ERRORLEVEL%" EQU "0" (
     @echo "Stable Diffusion's git repository was already installed. Updating.."
 
     @cd stable-diffusion
 
+    @call git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git
+
     @call git reset --hard
     @call git pull
-    @call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
 
-    @call git apply --whitespace=nowarn ..\ui\sd_internal\ddim_callback.patch
-    @call git apply --whitespace=nowarn ..\ui\sd_internal\env_yaml.patch
+    if "%test_sd2%" == "N" (
+        @call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
+    )
+    if "%test_sd2%" == "Y" (
+        @call git -c advice.detachedHead=false checkout b1a80dfc75388914252ce363f923103185eaf48f
+    )
 
     @cd ..
 ) else (
     @echo. & echo "Downloading Stable Diffusion.." & echo.
 
-    @call git clone https://github.com/basujindal/stable-diffusion.git && (
+    @call git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion && (
         @echo sd_git_cloned >> scripts\install_status.txt
     ) || (
         @echo "Error downloading Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
@@ -53,10 +60,7 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
     )
 
     @cd stable-diffusion
-    @call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
-
-    @call git apply --whitespace=nowarn ..\ui\sd_internal\ddim_callback.patch
-    @call git apply --whitespace=nowarn ..\ui\sd_internal\env_yaml.patch
+    @call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
 
     @cd ..
 )
@@ -88,12 +92,6 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
 
 @call conda activate .\env
 
-@call conda install -c conda-forge -y --prefix env antlr4-python3-runtime=4.8 || (
-    @echo. & echo "Error installing antlr4-python3-runtime for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-    pause
-    exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"') do if "%%a" NEQ "42" (
     @echo. & echo "Dependency test failed! Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
     pause
@@ -117,18 +115,6 @@ set PATH=C:\Windows\System32;%PATH%
 
 set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
 
-@call pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN || (
-    @echo. & echo "Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-    pause
-    exit /b
-)
-
-@call pip install basicsr==1.4.2 || (
-    @echo. & echo "Error installing the basicsr package necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-    pause
-    exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "from gfpgan import GFPGANer; print(42)"') do if "%%a" NEQ "42" (
     @echo. & echo "Dependency test failed! Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
     pause
@@ -150,12 +136,6 @@ set PATH=C:\Windows\System32;%PATH%
 
 set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
 
-@call pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan || (
-    @echo. & echo "Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-    pause
-    exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"') do if "%%a" NEQ "42" (
     @echo. & echo "Dependency test failed! Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
     pause
@@ -202,6 +182,16 @@ call WHERE uvicorn > .tmp
     )
 )
 
+@>nul 2>nul call python -c "import safetensors"
+@if "%ERRORLEVEL%" NEQ "0" (
+    @echo. & echo SafeTensors not found. Installing
+    @call pip install safetensors || (
+        echo "Error installing the safetensors package necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
+        pause
+        exit /b
+    )
+)
+
 @>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
 @if "%ERRORLEVEL%" NEQ "0" (
     @echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
@@ -370,7 +360,9 @@ echo. > "..\models\vae\Put your VAE files here.txt"
     )
 )
 
+if "%test_sd2%" == "Y" (
+    @call pip install open_clip_torch==2.0.2
+)
 
 @>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
 @if "%ERRORLEVEL%" NEQ "0" (
@@ -21,33 +21,38 @@ python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d
 # Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
 # Note to self: Please rewrite this in Python. For the sake of your own sanity.
 
+if [ "$test_sd2" == "" ]; then
+    export test_sd2="N"
+fi
+
 if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/install_status.txt` -gt "0" ]; then
     echo "Stable Diffusion's git repository was already installed. Updating.."
 
     cd stable-diffusion
 
+    git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git
+
     git reset --hard
     git pull
-    git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
 
-    git apply --whitespace=nowarn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
-    git apply --whitespace=nowarn ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
+    if [ "$test_sd2" == "N" ]; then
+        git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
+    elif [ "$test_sd2" == "Y" ]; then
+        git -c advice.detachedHead=false checkout b1a80dfc75388914252ce363f923103185eaf48f
+    fi
 
     cd ..
 else
     printf "\n\nDownloading Stable Diffusion..\n\n"
 
-    if git clone https://github.com/basujindal/stable-diffusion.git ; then
+    if git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion ; then
         echo sd_git_cloned >> scripts/install_status.txt
     else
         fail "git clone of basujindal/stable-diffusion.git failed"
     fi
 
     cd stable-diffusion
-    git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
-
-    git apply --whitespace=nowarn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
-    git apply --whitespace=nowarn ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
+    git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
 
     cd ..
 fi
@@ -74,12 +79,6 @@ else
 
     conda activate ./env || fail "conda activate failed"
 
-    if conda install -c conda-forge --prefix ./env -y antlr4-python3-runtime=4.8 ; then
-        echo "Installed. Testing.."
-    else
-        fail "Error installing antlr4-python3-runtime"
-    fi
-
     out_test=`python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"`
     if [ "$out_test" != "42" ]; then
         fail "Dependency test failed"
@@ -96,12 +95,6 @@ else
     export PYTHONNOUSERSITE=1
     export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
 
-    if pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN ; then
-        echo "Installed. Testing.."
-    else
-        fail "Error installing the packages necessary for GFPGAN (Face Correction)."
-    fi
-
     out_test=`python -c "from gfpgan import GFPGANer; print(42)"`
     if [ "$out_test" != "42" ]; then
         echo "EE The dependency check has failed. This usually means that some system libraries are missing."
@@ -121,12 +114,6 @@ else
     export PYTHONNOUSERSITE=1
     export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
 
-    if pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan ; then
-        echo "Installed. Testing.."
-    else
-        fail "Error installing the packages necessary for ESRGAN"
-    fi
-
     out_test=`python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"`
     if [ "$out_test" != "42" ]; then
         fail "ESRGAN dependency test failed"
@@ -163,6 +150,13 @@ else
         pip install picklescan || fail "Picklescan installation failed."
     fi
 
+    if python -c "import safetensors" --help >/dev/null 2>&1; then
+        echo "SafeTensors is already installed."
+    else
+        echo "SafeTensors not found, installing."
+        pip install safetensors || fail "SafeTensors installation failed."
+    fi
+
 
     mkdir -p "../models/stable-diffusion"
@@ -309,6 +303,9 @@ if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
     fi
 fi
 
+if [ "$test_sd2" == "Y" ]; then
+    pip install open_clip_torch==2.0.2
+fi
 
 if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
     echo sd_weights_downloaded >> ../scripts/install_status.txt
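The safetensors steps added above follow the same pattern in both installers: test the import, and install the package only if the test fails. A hedged Python equivalent of that check-then-install step (assuming pip is usable through the running interpreter) is:

```python
# Sketch of the "install safetensors if missing" step above.
import importlib.util
import subprocess
import sys

if importlib.util.find_spec("safetensors") is None:
    print("SafeTensors not found, installing.")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "safetensors"])
else:
    print("SafeTensors is already installed.")
```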
Deleted file (6 lines):
@@ -1,6 +0,0 @@
-@call conda --version
-@call git --version
-
-cd %CONDA_PREFIX%\..\scripts
-
-on_env_start.bat
Deleted file (12 lines):
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-conda-unpack
-
-source $CONDA_PREFIX/etc/profile.d/conda.sh
-
-conda --version
-git --version
-
-cd $CONDA_PREFIX/../scripts
-
-./on_env_start.sh
ui/index.html (103 lines changed)
@@ -3,6 +3,7 @@
 <head>
     <title>Stable Diffusion UI</title>
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta name="theme-color" content="#673AB6">
     <link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
     <link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
     <link rel="stylesheet" href="/media/css/fonts.css">
@@ -11,16 +12,21 @@
     <link rel="stylesheet" href="/media/css/auto-save.css">
     <link rel="stylesheet" href="/media/css/modifier-thumbnails.css">
     <link rel="stylesheet" href="/media/css/fontawesome-all.min.css">
-    <link rel="stylesheet" href="/media/css/drawingboard.min.css">
+    <link rel="stylesheet" href="/media/css/image-editor.css">
+    <link rel="stylesheet" href="/media/css/jquery-confirm.min.css">
+    <link rel="manifest" href="/media/manifest.webmanifest">
     <script src="/media/js/jquery-3.6.1.min.js"></script>
-    <script src="/media/js/drawingboard.min.js"></script>
+    <script src="/media/js/jquery-confirm.min.js"></script>
     <script src="/media/js/marked.min.js"></script>
 </head>
 <body>
     <div id="container">
         <div id="top-nav">
             <div id="logo">
-                <h1>Stable Diffusion UI <small>v2.4.13 <span id="updateBranchLabel"></span></small></h1>
+                <h1>
+                    Stable Diffusion UI
+                    <small>v2.4.18 <span id="updateBranchLabel"></span></small>
+                </h1>
             </div>
             <div id="server-status">
                 <div id="server-status-color">●</div>
@@ -58,28 +64,38 @@
                 </div>
 
                 <div id="editor-inputs-init-image" class="row">
-                    <label for="init_image">Initial Image (img2img) <small>(optional)</small> </label> <input id="init_image" name="init_image" type="file" /><br/>
+                    <label for="init_image">Initial Image (img2img) <small>(optional)</small> </label>
+
                     <div id="init_image_preview_container" class="image_preview_container">
                         <div id="init_image_wrapper">
                             <img id="init_image_preview" src="" />
                             <span id="init_image_size_box"></span>
-                            <button class="init_image_clear image_clear_btn">X</button>
+                            <button class="init_image_clear image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
+                        </div>
+                        <div id="init_image_buttons">
+                            <div class="button">
+                                <i class="fa-regular fa-folder-open"></i>
+                                Browse
+                                <input id="init_image" name="init_image" type="file" />
+                            </div>
+                            <div id="init_image_button_draw" class="button">
+                                <i class="fa-solid fa-pencil"></i>
+                                Draw
+                            </div>
+                            <div id="inpaint_button_container">
+                                <div id="init_image_button_inpaint" class="button">
+                                    <i class="fa-solid fa-paintbrush"></i>
+                                    Inpaint
+                                </div>
+                                <input id="enable_mask" name="enable_mask" type="checkbox">
+                            </div>
                         </div>
 
-                    <br/>
-                    <input id="enable_mask" name="enable_mask" type="checkbox">
-                    <label for="enable_mask">
-                        In-Painting (beta)
-                        <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Inpainting" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about InPainting</span></i></a>
-                        <small>(select the area which the AI will paint into)</small>
-                    </label>
-                    <div id="inpaintingEditor"></div>
                     </div>
 
                 </div>
 
                 <div id="editor-inputs-tags-container" class="row">
-                    <label>Image Modifiers: <small>(click an Image Modifier to remove it)</small></label>
+                    <label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
                     <div id="editor-inputs-tags-list"></div>
                 </div>
@@ -101,7 +117,7 @@
             <div id="editor-settings-entries" class="collapsible-content">
                 <div><table>
                     <tr><b class="settings-subheader">Image Settings</b></tr>
-                    <tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="30000" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
+                    <tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
                     <tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
                     <tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td>
                         <select id="stable_diffusion_model" name="stable_diffusion_model">
@@ -183,11 +199,14 @@
                     <option value="png">png</option>
                 </select>
             </td></tr>
+            <tr class="pl-5" id="output_quality_row"><td><label for="output_quality">JPEG Quality:</label></td><td>
+                <input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
+            </td></tr>
         </table></div>
 
         <div><ul>
             <li><b class="settings-subheader">Render Settings</b></li>
-            <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, and slower image creation)</small></label></li>
+            <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
             <li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
             <li class="pl-5">
                 <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
@@ -247,8 +266,17 @@
                 <br/><br/>
                 <div>
                     <h3><i class="fa fa-microchip icon"></i> System Info</h3>
-                    <div id="system-info"></div>
+                    <div id="system-info">
+                        <table>
+                            <tr><td><label>Processor:</label></td><td id="system-info-cpu" class="value"></td></tr>
+                            <tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
+                            <tr><td></td><td> </td></tr>
+                            <tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
+                            <tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
+                        </table>
+                    </div>
                 </div>
 
             </div>
         </div>
         <div id="tab-content-about" class="tab-content">
@@ -314,6 +342,38 @@
             </div>
         </div>
 
+        <div id="image-editor" class="popup image-editor-popup">
+            <div>
+                <i class="close-button fa-solid fa-xmark"></i>
+                <h1>Image Editor</h1>
+                <div class="flex-container">
+                    <div class="editor-controls-left"></div>
+                    <div class="editor-controls-center">
+                        <div></div>
+                    </div>
+                    <div class="editor-controls-right">
+                        <div></div>
+                    </div>
+                </div>
+            </div>
+        </div>
+
+        <div id="image-inpainter" class="popup image-editor-popup">
+            <div>
+                <i class="close-button fa-solid fa-xmark"></i>
+                <h1>Inpainter</h1>
+                <div class="flex-container">
+                    <div class="editor-controls-left"></div>
+                    <div class="editor-controls-center">
+                        <div></div>
+                    </div>
+                    <div class="editor-controls-right">
+                        <div></div>
+                    </div>
+                </div>
+            </div>
+        </div>
+
         <div id="footer-spacer"></div>
         <div id="footer">
             <div class="line-separator"> </div>
@@ -327,25 +387,24 @@
     </div>
 </div>
 </body>
 
 <script src="media/js/utils.js"></script>
 <script src="media/js/parameters.js"></script>
 <script src="media/js/plugins.js"></script>
-<script src="media/js/inpainting-editor.js"></script>
 <script src="media/js/image-modifiers.js"></script>
 <script src="media/js/auto-save.js"></script>
 <script src="media/js/main.js"></script>
 <script src="media/js/themes.js"></script>
 <script src="media/js/dnd.js"></script>
+<script src="media/js/image-editor.js"></script>
 <script>
 async function init() {
     await initSettings()
     await getModels()
     await getDiskPath()
     await getAppConfig()
-    await loadModifiers()
     await loadUIPlugins()
-    await getDevices()
+    await loadModifiers()
+    await getSystemInfo()
 
     setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
     healthCheck()
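The new "JPEG Quality" row above exposes a 10-95 quality value (default 75) for saved images. As a hedged illustration of what such a value typically controls on the saving side, here is a Pillow sketch; it is not Easy Diffusion's actual save code:

```python
# Sketch: save an image as JPEG with a user-chosen quality value,
# illustrating what the 10-95 "JPEG Quality" slider adjusts.
from PIL import Image

quality = 75  # the slider's default shown above
img = Image.new("RGB", (512, 512), "black")  # stand-in for a generated image
img.save("output.jpg", format="JPEG", quality=quality)
```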
ui/media/css/drawingboard.min.css (vendored, 5 lines changed)
File diff suppressed because one or more lines are too long
211
ui/media/css/image-editor.css
Normal file
211
ui/media/css/image-editor.css
Normal file
@ -0,0 +1,211 @@
.editor-controls-left {
    padding-left: 32px;
    text-align: left;
    padding-bottom: 20px;
}

.editor-options-container {
    display: flex;
    row-gap: 10px;
    max-width: 210px;
}

.editor-options-container > * {
    flex: 1;
    display: flex;
    justify-content: center;
    align-items: center;
}

.editor-options-container > * > * {
    position: inherit;
    width: 32px;
    height: 32px;
    border-radius: 16px;
    background: var(--background-color3);
    cursor: pointer;
    transition: opacity 0.25s;
}
.editor-options-container > * > *:hover {
    opacity: 0.75;
}

.editor-options-container > * > *.active {
    border: 2px solid #3584e4;
}

.image_editor_opacity .editor-options-container > * > *:not(.active) {
    border: 1px solid var(--background-color3);
}

.image_editor_color .editor-options-container {
    flex-wrap: wrap;
}
.image_editor_color .editor-options-container > * {
    flex: 20%;
}
.image_editor_color .editor-options-container > * > * {
    position: relative;
}
.image_editor_color .editor-options-container > * > *.active::before {
    content: "\f00c";
    display: var(--fa-display,inline-block);
    font-style: normal;
    font-variant: normal;
    line-height: 1;
    text-rendering: auto;
    font-family: var(--fa-style-family, "Font Awesome 6 Free");
    font-weight: var(--fa-style, 900);
    position: absolute;
    left: 50%;
    top: 50%;
    transform: translate(-50%, -50%) scale(125%);
    color: black;
}
.image_editor_color .editor-options-container > *:first-child {
    flex: 100%;
}
.image_editor_color .editor-options-container > *:first-child > * {
    width: 100%;
}
.image_editor_color .editor-options-container > *:first-child > * > input {
    width: 100%;
    height: 100%;
    opacity: 0;
    cursor: pointer;
}
.image_editor_color .editor-options-container > *:first-child > * > span {
    position: absolute;
    left: 50%;
    top: 50%;
    transform: translate(-50%, -50%);
    opacity: 0.5;
}
.image_editor_color .editor-options-container > *:first-child > *.active > span {
    opacity: 0;
}

.image_editor_tool .editor-options-container {
    flex-wrap: wrap;
}

.image_editor_tool .editor-options-container > * {
    padding: 2px;
    flex: 50%;
}

.editor-controls-center {
    /* background: var(--background-color2); */
    flex: 1;
    display: flex;
    justify-content: center;
    align-items: center;
}

.editor-controls-center > div {
    position: relative;
    background: black;
}

.editor-controls-center canvas {
    position: absolute;
    left: 0;
    top: 0;
}

.editor-controls-right {
    padding: 32px;
    display: flex;
    flex-direction: column;
}

.editor-controls-right > div:last-child {
    flex: 1;
    display: flex;
    flex-direction: column;
    min-width: 200px;
    gap: 5px;
    justify-content: end;
}

.image-editor-button {
    width: 100%;
    height: 32px;
    border-radius: 16px;
    background: var(--background-color3);
}

#init_image_button_inpaint .input-toggle {
    position: absolute;
    left: 16px;
}

#init_image_button_inpaint .input-toggle input:not(:checked) ~ label {
    pointer-events: none;
}

.image-editor-popup {
    --popup-margin: 16px;
    --popup-padding: 24px;
}

.image-editor-popup > div {
    margin: var(--popup-margin);
    padding: var(--popup-padding);
    min-height: calc(100vh - (2 * var(--popup-margin)));
    max-width: none;
}

.image-editor-popup h1 {
    position: absolute;
    top: 32px;
    left: 50%;
    transform: translateX(-50%);
}

@media screen and (max-width: 700px) {
    .image-editor-popup > div {
        margin: 0px;
        padding: 0px;
    }

    .image-editor-popup h1 {
        position: relative;
        transform: none;
        left: auto;
    }
}

.image-editor-popup > div > div {
    min-height: calc(100vh - (2 * var(--popup-margin)) - (2 * var(--popup-padding)));
}

.inpainter .image_editor_color {
    display: none;
}

.inpainter .editor-canvas-background {
    opacity: 0.75;
}

#init_image_preview_container .button {
    display: flex;
    padding: 6px;
    height: 24px;
    box-shadow: 2px 2px 1px 1px #00000088;
}

#init_image_preview_container .button:hover {
    background: var(--background-color4)
}

.image-editor-popup .button {
    display: flex;
}
.image-editor-popup h4 {
    text-align: left;
}
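These `.active` rules are driven from script. A minimal sketch of the toggling pattern (simplified from the selectOption logic in image-editor.js later in this diff; highlightOption and optionElements are illustrative names, not part of the codebase):

// Sketch: mark one option swatch in a section as selected.
// optionElements stands in for this.optionElements[section.name] in image-editor.js.
function highlightOption(optionElements, selectedIndex) {
    optionElements.forEach(el => el.classList.remove("active")) // clear the previous selection
    optionElements[selectedIndex].classList.add("active")       // the .active rules above draw the border / checkmark
}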
9
ui/media/css/jquery-confirm.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
@ -44,9 +44,6 @@ code {
     margin-top: 5px;
     display: block;
 }
-.image_preview_container {
-    margin-top: 10pt;
-}
 .image_clear_btn {
     position: absolute;
     transform: translate(30%, -30%);
@ -64,6 +61,11 @@ code {
     top: 0px;
     right: 0px;
 }
+.image_clear_btn:active {
+    position: absolute;
+    top: 0px;
+    left: auto;
+}
 .settings-box ul {
     font-size: 9pt;
     margin-bottom: 5px;
@ -210,7 +212,7 @@ code {
 }
 .collapsible-content {
     display: block;
-    padding-left: 15px;
+    padding-left: 10px;
 }
 .collapsible-content h5 {
     padding: 5pt 0pt;
@ -270,32 +272,6 @@ img {
     transform: translateY(25%);
 }
 
-#inpaintingEditor {
-    width: 300pt;
-    height: 300pt;
-    margin-top: 5pt;
-}
-.drawing-board-canvas-wrapper {
-    background-size: 100% 100%;
-}
-.drawing-board-controls {
-    min-width: 273px;
-}
-.drawing-board-control > button {
-    background-color: #eee;
-    border-radius: 3pt;
-}
-.drawing-board-control-inner {
-    background-color: #eee;
-    border-radius: 3pt;
-}
-#inpaintingEditor canvas {
-    opacity: 0.6;
-}
-#enable_mask {
-    margin-top: 8pt;
-}
-
 #top-nav {
     position: relative;
     background: var(--background-color4);
@ -479,8 +455,58 @@
 #prompt_from_file {
     display: none;
 }
+
+#init_image_preview_container {
+    display: flex;
+    margin-top: 6px;
+    margin-bottom: 8px;
+}
+
+#init_image_preview_container:not(.has-image) #init_image_wrapper,
+#init_image_preview_container:not(.has-image) #inpaint_button_container {
+    display: none;
+}
+
+
+#init_image_buttons {
+    display: flex;
+    gap: 8px;
+}
+
+#init_image_preview_container.has-image #init_image_buttons {
+    flex-direction: column;
+    padding-left: 8px;
+}
+
+#init_image_buttons .button {
+    position: relative;
+    height: 32px;
+    width: 150px;
+}
+
+#init_image_buttons .button > input {
+    position: absolute;
+    left: 0;
+    top: 0;
+    right: 0;
+    bottom: 0;
+    opacity: 0;
+}
+
+#inpaint_button_container {
+    display: flex;
+    align-items: center;
+    gap: 8px;
+}
+
+#init_image_wrapper {
+    grid-row: span 3;
+    position: relative;
+    width: fit-content;
+    max-height: 150px;
+}
+
 #init_image_preview {
-    max-width: 150px;
     max-height: 150px;
     height: 100%;
     width: 100%;
@ -488,23 +514,18 @@
     border-radius: 6px;
     transition: all 1s ease-in-out;
 }
-
+/*
 #init_image_preview:hover {
     max-width: 500px;
     max-height: 1000px;
-
     transition: all 1s 0.5s ease-in-out;
-}
+} */
-
-#init_image_wrapper {
-    position: relative;
-    width: fit-content;
-}
 
 #init_image_size_box {
     position: absolute;
     right: 0px;
-    bottom: 3px;
+    bottom: 0px;
     padding: 3px;
     background: black;
     color: white;
@ -556,6 +577,10 @@ option {
     cursor: pointer;
 }
+
+input[type="file"] * {
+    cursor: pointer;
+}
 
 input,
 select,
 textarea {
@ -594,12 +619,26 @@ input[type="file"] {
 }
 
 button,
-input::file-selector-button {
+input::file-selector-button,
+.button {
     padding: 2px 4px;
-    border-radius: 4px;
+    border-radius: var(--input-border-radius);
     background: var(--button-color);
     color: var(--button-text-color);
     border: var(--button-border);
+    align-items: center;
+    justify-content: center;
+    cursor: pointer;
+}
+
+.button i {
+    margin-right: 8px;
+}
+
+button:hover,
+.button:hover {
+    transition-duration: 0.1s;
+    background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
 }
 
 input::file-selector-button {
@ -658,11 +697,15 @@
     opacity: 1;
 }
 
-/* MOBILE SUPPORT */
-@media screen and (max-width: 700px) {
+/* Small screens */
+@media screen and (max-width: 1265px) {
     #top-nav {
         flex-direction: column;
     }
+}
+
+/* MOBILE SUPPORT */
+@media screen and (max-width: 700px) {
     body {
         margin: 0px;
     }
@ -712,7 +755,7 @@
         padding-right: 0px;
     }
     #server-status {
-        display: none;
+        top: 75%;
     }
     .popup > div {
         padding-left: 5px !important;
@ -730,6 +773,15 @@
     }
 }
+
+@media screen and (max-width: 500px) {
+    #server-status #server-status-msg {
+        display: none;
+    }
+    #server-status:hover #server-status-msg {
+        display: inline;
+    }
+}
 
 @media (min-width: 700px) {
     /* #editor {
         max-width: 480px;
@ -750,6 +802,8 @@
 
 #promptsFromFileBtn {
     font-size: 9pt;
+    display: inline;
+    background-color: var(--accent-color);
 }
 
 .section-button {
@ -951,8 +1005,8 @@
     display: none;
 }
 
-#tab-content-wrapper {
-    border-top: 8px solid var(--background-color1);
+#tab-content-wrapper > * {
+    padding-top: 8px;
 }
 
 .tab-content-inner {
@ -989,16 +1043,21 @@ i.active {
     float: right;
     font-weight: bold;
 }
-button:hover {
-    transition-duration: 0.1s;
-    background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
-}
-
 button:active {
     transition-duration: 0.1s;
     background-color: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 24%));
+    position: relative;
+    top: 1px;
+    left: 1px;
 }
 
 button#save-system-settings-btn {
     padding: 4pt 8pt;
 }
+#ip-info a {
+    color:var(--text-color)
+}
+#ip-info div {
+    line-height: 200%;
+}
@ -19,7 +19,7 @@
|
|||||||
--input-border-color: var(--background-color4);
|
--input-border-color: var(--background-color4);
|
||||||
|
|
||||||
--button-text-color: var(--input-text-color);
|
--button-text-color: var(--input-text-color);
|
||||||
--button-color: var(--accent-color);
|
--button-color: var(--input-background-color);
|
||||||
--button-border: none;
|
--button-border: none;
|
||||||
|
|
||||||
/* other */
|
/* other */
|
||||||
@ -30,6 +30,9 @@
|
|||||||
--primary-button-border: none;
|
--primary-button-border: none;
|
||||||
--input-switch-padding: 1px;
|
--input-switch-padding: 1px;
|
||||||
--input-height: 18px;
|
--input-height: 18px;
|
||||||
|
|
||||||
|
/* Main theme color, hex color fallback. */
|
||||||
|
--theme-color-fallback: #673AB6;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-light {
|
.theme-light {
|
||||||
@ -39,11 +42,12 @@
|
|||||||
--background-color4: #cccccc;
|
--background-color4: #cccccc;
|
||||||
|
|
||||||
--text-color: black;
|
--text-color: black;
|
||||||
--button-text-color: white;
|
|
||||||
|
|
||||||
--input-text-color: black;
|
--input-text-color: black;
|
||||||
--input-background-color: #f8f9fa;
|
--input-background-color: #f8f9fa;
|
||||||
--input-border-color: grey;
|
--input-border-color: grey;
|
||||||
|
|
||||||
|
--theme-color-fallback: #aaaaaa;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-discord {
|
.theme-discord {
|
||||||
@ -58,6 +62,8 @@
|
|||||||
--input-border-size: 2px;
|
--input-border-size: 2px;
|
||||||
--input-background-color: #202225;
|
--input-background-color: #202225;
|
||||||
--input-border-color: var(--input-background-color);
|
--input-border-color: var(--input-background-color);
|
||||||
|
|
||||||
|
--theme-color-fallback: #202225;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-cool-blue {
|
.theme-cool-blue {
|
||||||
@ -71,8 +77,10 @@
|
|||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||||
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
|
|
||||||
--accent-hue: 212;
|
--accent-hue: 212;
|
||||||
|
|
||||||
|
--theme-color-fallback: #0056b8;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -87,6 +95,8 @@
|
|||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||||
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
|
|
||||||
|
--theme-color-fallback: #5300b8;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-super-dark {
|
.theme-super-dark {
|
||||||
@ -101,6 +111,8 @@
|
|||||||
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
--input-border-size: 0px;
|
--input-border-size: 0px;
|
||||||
|
|
||||||
|
--theme-color-fallback: #000000;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-wild {
|
.theme-wild {
|
||||||
@ -117,10 +129,11 @@
|
|||||||
|
|
||||||
--input-border-size: 1px;
|
--input-border-size: 1px;
|
||||||
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||||
--input-text-color: red;
|
--input-text-color: #FF0000;
|
||||||
--input-border-color: green;
|
--input-border-color: #005E05;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
.theme-gnomie {
|
.theme-gnomie {
|
||||||
--background-color1: #242424;
|
--background-color1: #242424;
|
||||||
--background-color2: #353535;
|
--background-color2: #353535;
|
||||||
@ -136,11 +149,12 @@
|
|||||||
--input-background-color: #2a2a2a;
|
--input-background-color: #2a2a2a;
|
||||||
--input-border-size: 0px;
|
--input-border-size: 0px;
|
||||||
--input-border-color: var(--input-background-color);
|
--input-border-color: var(--input-background-color);
|
||||||
|
|
||||||
|
--theme-color-fallback: #2168bf;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-gnomie .panel-box {
|
.theme-gnomie .panel-box {
|
||||||
border: none;
|
border: none;
|
||||||
box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.25);
|
box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.25);
|
||||||
border-radius: 10px;
|
border-radius: 10px;
|
||||||
}
|
}
|
||||||
|
|
BIN
ui/media/images/fa-eraser.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 11 KiB
BIN
ui/media/images/fa-eye-dropper.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 12 KiB
BIN
ui/media/images/fa-pencil.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 10 KiB
@ -21,6 +21,7 @@ const SETTINGS_IDS_LIST = [
     "guidance_scale",
     "prompt_strength",
     "output_format",
+    "output_quality",
     "negative_prompt",
     "stream_image_progress",
     "use_face_correction",
@ -35,6 +36,7 @@ const SETTINGS_IDS_LIST = [
     "sound_toggle",
     "turbo",
     "use_full_precision",
+    "confirm_dangerous_actions",
     "auto_save_settings"
 ]
 
@ -55,6 +57,9 @@ async function initSettings() {
         if (!element) {
             console.error(`Missing settings element ${id}`)
         }
+        if (id in SETTINGS) { // don't create it again
+            return
+        }
         SETTINGS[id] = {
             key: id,
             element: element,
@ -51,6 +51,13 @@ const TASK_MAPPING = {
         readUI: () => negativePromptField.value,
         parse: (val) => val
     },
+    active_tags: { name: "Image Modifiers",
+        setUI: (active_tags) => {
+            refreshModifiersState(active_tags)
+        },
+        readUI: () => activeTags.map(x => x.name),
+        parse: (val) => val
+    },
     width: { name: 'Width',
         setUI: (width) => {
             const oldVal = widthField.value
@ -78,13 +85,14 @@ const TASK_MAPPING = {
             if (!seed) {
                 randomSeedField.checked = true
                 seedField.disabled = true
+                seedField.value = 0
                 return
             }
             randomSeedField.checked = false
             seedField.disabled = false
             seedField.value = seed
         },
-        readUI: () => (randomSeedField.checked ? Math.floor(Math.random() * 10000000) : parseInt(seedField.value)),
+        readUI: () => parseInt(seedField.value), // just return the value the user is seeing in the UI
         parse: (val) => parseInt(val)
     },
     num_inference_steps: { name: 'Steps',
@ -120,10 +128,12 @@ const TASK_MAPPING = {
     },
     mask: { name: 'Mask',
         setUI: (mask) => {
-            inpaintingEditor.setImg(mask)
+            setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
+                imageInpainter.setImg(mask)
+            }, 250)
             maskSetting.checked = Boolean(mask)
         },
-        readUI: () => (maskSetting.checked ? inpaintingEditor.getImg() : undefined),
+        readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
         parse: (val) => val
     },
 
@ -185,9 +195,9 @@ const TASK_MAPPING = {
         parse: (val) => val
     },
 
-    numOutputsParallel: { name: 'Parallel Images',
-        setUI: (numOutputsParallel) => {
-            numOutputsParallelField.value = numOutputsParallel
+    num_outputs: { name: 'Parallel Images',
+        setUI: (num_outputs) => {
+            numOutputsParallelField.value = num_outputs
         },
         readUI: () => parseInt(numOutputsParallelField.value),
         parse: (val) => val
@ -267,11 +277,6 @@ function restoreTaskToUI(task, fieldsToSkip) {
     // restore the original tag
     promptField.value = task.reqBody.original_prompt || task.reqBody.prompt
 
-    // Restore modifiers
-    if (task.reqBody.active_tags) {
-        refreshModifiersState(task.reqBody.active_tags)
-    }
-
     // properly reset checkboxes
     if (!('use_face_correction' in task.reqBody)) {
         useFaceCorrectionField.checked = false
@ -287,18 +292,11 @@ function restoreTaskToUI(task, fieldsToSkip) {
     // Show the source picture if present
     initImagePreview.src = (task.reqBody.init_image == undefined ? '' : task.reqBody.init_image)
     if (IMAGE_REGEX.test(initImagePreview.src)) {
-        Boolean(task.reqBody.mask) ? inpaintingEditor.setImg(task.reqBody.mask) : inpaintingEditor.resetBackground()
-        initImagePreviewContainer.style.display = 'block'
-        inpaintingEditorContainer.style.display = 'none'
-        promptStrengthContainer.style.display = 'table-row'
-        //samplerSelectionContainer.style.display = 'none'
-        // maskSetting.checked = false
-        inpaintingEditorContainer.style.display = maskSetting.checked ? 'block' : 'none'
-    } else {
-        initImagePreviewContainer.style.display = 'none'
-        // inpaintingEditorContainer.style.display = 'none'
-        promptStrengthContainer.style.display = 'none'
-        // maskSetting.style.display = 'none'
+        if (Boolean(task.reqBody.mask)) {
+            setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
+                imageInpainter.setImg(task.reqBody.mask)
+            }, 250)
+        }
     }
 }
 function readUI() {
@ -326,6 +324,7 @@ function getModelPath(filename, extensions)
             filename = filename.slice(0, filename.length - ext.length)
         }
     })
+    return filename
 }
 
 const TASK_TEXT_MAPPING = {
@ -406,7 +405,7 @@ async function parseContent(text) {
 }
 
 async function readFile(file, i) {
-    console.log(`Event %o reading file[${i}]:${file.name}...`, e)
+    console.log(`Event %o reading file[${i}]:${file.name}...`)
     const fileContent = (await file.text()).trim()
     return await parseContent(fileContent)
 }
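Each TASK_MAPPING entry bundles a setUI/readUI/parse triple for one request field. The restore loop itself is outside these hunks, so the following is only an illustrative sketch of how one entry can be applied to a saved request body (applyField is a hypothetical helper name):

// Sketch: push one saved field back into the UI through its mapping.
function applyField(key, reqBody) {
    const mapping = TASK_MAPPING[key]
    if (mapping && key in reqBody) {
        mapping.setUI(mapping.parse(reqBody[key])) // e.g. num_outputs -> numOutputsParallelField
    }
}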
4
ui/media/js/drawingboard.min.js
vendored
File diff suppressed because one or more lines are too long
680
ui/media/js/image-editor.js
Normal file
@ -0,0 +1,680 @@
var editorControlsLeft = document.getElementById("image-editor-controls-left")

const IMAGE_EDITOR_MAX_SIZE = 800

const IMAGE_EDITOR_BUTTONS = [
    {
        name: "Cancel",
        icon: "fa-regular fa-circle-xmark",
        handler: editor => {
            editor.hide()
        }
    },
    {
        name: "Save",
        icon: "fa-solid fa-floppy-disk",
        handler: editor => {
            editor.saveImage()
        }
    }
]

const defaultToolBegin = (editor, ctx, x, y, is_overlay = false) => {
    ctx.beginPath()
    ctx.moveTo(x, y)
}
const defaultToolMove = (editor, ctx, x, y, is_overlay = false) => {
    ctx.lineTo(x, y)
    if (is_overlay) {
        ctx.clearRect(0, 0, editor.width, editor.height)
        ctx.stroke()
    }
}
const defaultToolEnd = (editor, ctx, x, y, is_overlay = false) => {
    ctx.stroke()
    if (is_overlay) {
        ctx.clearRect(0, 0, editor.width, editor.height)
    }
}

const IMAGE_EDITOR_TOOLS = [
    {
        id: "draw",
        name: "Draw",
        icon: "fa-solid fa-pencil",
        cursor: "url(/media/images/fa-pencil.png) 0 24, pointer",
        begin: defaultToolBegin,
        move: defaultToolMove,
        end: defaultToolEnd
    },
    {
        id: "erase",
        name: "Erase",
        icon: "fa-solid fa-eraser",
        cursor: "url(/media/images/fa-eraser.png) 0 18, pointer",
        begin: defaultToolBegin,
        move: (editor, ctx, x, y, is_overlay = false) => {
            ctx.lineTo(x, y)
            if (is_overlay) {
                ctx.clearRect(0, 0, editor.width, editor.height)
                ctx.globalCompositeOperation = "source-over"
                ctx.globalAlpha = 1
                ctx.filter = "none"
                ctx.drawImage(editor.canvas_current, 0, 0)
                editor.setBrush(editor.layers.overlay)
                ctx.stroke()
                editor.canvas_current.style.opacity = 0
            }
        },
        end: (editor, ctx, x, y, is_overlay = false) => {
            ctx.stroke()
            if (is_overlay) {
                ctx.clearRect(0, 0, editor.width, editor.height)
                editor.canvas_current.style.opacity = ""
            }
        },
        setBrush: (editor, layer) => {
            layer.ctx.globalCompositeOperation = "destination-out"
        }
    },
    {
        id: "colorpicker",
        name: "Color Picker",
        icon: "fa-solid fa-eye-dropper",
        cursor: "url(/media/images/fa-eye-dropper.png) 0 24, pointer",
        begin: (editor, ctx, x, y, is_overlay = false) => {
            var img_rgb = editor.layers.background.ctx.getImageData(x, y, 1, 1).data
            var drawn_rgb = editor.ctx_current.getImageData(x, y, 1, 1).data
            var drawn_opacity = drawn_rgb[3] / 255
            editor.custom_color_input.value = rgbToHex({
                r: (drawn_rgb[0] * drawn_opacity) + (img_rgb[0] * (1 - drawn_opacity)),
                g: (drawn_rgb[1] * drawn_opacity) + (img_rgb[1] * (1 - drawn_opacity)),
                b: (drawn_rgb[2] * drawn_opacity) + (img_rgb[2] * (1 - drawn_opacity)),
            })
            editor.custom_color_input.dispatchEvent(new Event("change"))
        },
        move: (editor, ctx, x, y, is_overlay = false) => {},
        end: (editor, ctx, x, y, is_overlay = false) => {}
    }
]

const IMAGE_EDITOR_ACTIONS = [
    {
        id: "clear",
        name: "Clear",
        icon: "fa-solid fa-xmark",
        handler: (editor) => {
            editor.ctx_current.clearRect(0, 0, editor.width, editor.height)
        }
    }
]

var IMAGE_EDITOR_SECTIONS = [
    {
        name: "tool",
        title: "Tool",
        default: "draw",
        options: Array.from(IMAGE_EDITOR_TOOLS.map(t => t.id)),
        initElement: (element, option) => {
            var tool_info = IMAGE_EDITOR_TOOLS.find(t => t.id == option)
            element.className = "image-editor-button button"
            var sub_element = document.createElement("div")
            var icon = document.createElement("i")
            tool_info.icon.split(" ").forEach(c => icon.classList.add(c))
            sub_element.appendChild(icon)
            sub_element.append(tool_info.name)
            element.appendChild(sub_element)
        }
    },
    {
        name: "color",
        title: "Color",
        default: "#f1c232",
        options: [
            "custom",
            "#ea9999", "#e06666", "#cc0000", "#990000", "#660000",
            "#f9cb9c", "#f6b26b", "#e69138", "#b45f06", "#783f04",
            "#ffe599", "#ffd966", "#f1c232", "#bf9000", "#7f6000",
            "#b6d7a8", "#93c47d", "#6aa84f", "#38761d", "#274e13",
            "#a4c2f4", "#6d9eeb", "#3c78d8", "#1155cc", "#1c4587",
            "#b4a7d6", "#8e7cc3", "#674ea7", "#351c75", "#20124d",
            "#d5a6bd", "#c27ba0", "#a64d79", "#741b47", "#4c1130",
            "#ffffff", "#c0c0c0", "#838383", "#525252", "#000000",
        ],
        initElement: (element, option) => {
            if (option == "custom") {
                var input = document.createElement("input")
                input.type = "color"
                element.appendChild(input)
                var span = document.createElement("span")
                span.textContent = "Custom"
                span.onclick = function(e) {
                    input.click()
                }
                element.appendChild(span)
            }
            else {
                element.style.background = option
            }
        },
        getCustom: editor => {
            var input = editor.popup.querySelector(".image_editor_color input")
            return input.value
        }
    },
    {
        name: "brush_size",
        title: "Brush Size",
        default: 48,
        options: [ 16, 24, 32, 48, 64 ],
        initElement: (element, option) => {
            element.parentElement.style.flex = option
            element.style.width = option + "px"
            element.style.height = option + "px"
            element.style["border-radius"] = (option / 2).toFixed() + "px"
        }
    },
    {
        name: "opacity",
        title: "Opacity",
        default: 0,
        options: [ 0, 0.2, 0.4, 0.6, 0.8 ],
        initElement: (element, option) => {
            element.style.background = `repeating-conic-gradient(rgba(0, 0, 0, ${option}) 0% 25%, rgba(255, 255, 255, ${option}) 0% 50%) 50% / 10px 10px`
        }
    },
    {
        name: "sharpness",
        title: "Sharpness",
        default: 0,
        options: [ 0, 0.05, 0.1, 0.2, 0.3 ],
        initElement: (element, option) => {
            var size = 32
            var blur_amount = parseInt(option * size)
            var sub_element = document.createElement("div")
            sub_element.style.background = `var(--background-color3)`
            sub_element.style.filter = `blur(${blur_amount}px)`
            sub_element.style.width = `${size - 4}px`
            sub_element.style.height = `${size - 4}px`
            sub_element.style['border-radius'] = `${size}px`
            element.style.background = "none"
            element.appendChild(sub_element)
        }
    }
]

class EditorHistory {
    constructor(editor) {
        this.editor = editor
        this.events = [] // stack of all events (actions/edits)
        this.current_edit = null
        this.rewind_index = 0 // how many events back into the history we've rewound to. (current state is just after event at index 'length - this.rewind_index - 1')
    }
    push(event) {
        // probably add something here eventually to save state every x events
        if (this.rewind_index != 0) {
            this.events = this.events.slice(0, 0 - this.rewind_index)
            this.rewind_index = 0
        }
        var snapshot_frequency = 20 // (every x edits, take a snapshot of the current drawing state, for faster rewinding)
        if (this.events.length > 0 && this.events.length % snapshot_frequency == 0) {
            event.snapshot = this.editor.layers.drawing.ctx.getImageData(0, 0, this.editor.width, this.editor.height)
        }
        this.events.push(event)
    }
    pushAction(action) {
        this.push({
            type: "action",
            id: action
        });
    }
    editBegin(x, y) {
        this.current_edit = {
            type: "edit",
            id: this.editor.getOptionValue("tool"),
            options: Object.assign({}, this.editor.options),
            points: [ { x: x, y: y } ]
        }
    }
    editMove(x, y) {
        if (this.current_edit) {
            this.current_edit.points.push({ x: x, y: y })
        }
    }
    editEnd(x, y) {
        if (this.current_edit) {
            this.push(this.current_edit)
            this.current_edit = null
        }
    }
    clear() {
        this.events = []
    }
    undo() {
        this.rewindTo(this.rewind_index + 1)
    }
    redo() {
        this.rewindTo(this.rewind_index - 1)
    }
    rewindTo(new_rewind_index) {
        if (new_rewind_index < 0 || new_rewind_index > this.events.length) {
            return; // do nothing if target index is out of bounds
        }

        var ctx = this.editor.layers.drawing.ctx
        ctx.clearRect(0, 0, this.editor.width, this.editor.height)

        var target_index = this.events.length - 1 - new_rewind_index
        var snapshot_index = target_index
        while (snapshot_index > -1) {
            if (this.events[snapshot_index].snapshot) {
                break
            }
            snapshot_index--
        }

        if (snapshot_index != -1) {
            ctx.putImageData(this.events[snapshot_index].snapshot, 0, 0);
        }

        for (var i = (snapshot_index + 1); i <= target_index; i++) {
            var event = this.events[i]
            if (event.type == "action") {
                var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == event.id)
                action.handler(this.editor)
            }
            else if (event.type == "edit") {
                var tool = IMAGE_EDITOR_TOOLS.find(t => t.id == event.id)
                this.editor.setBrush(this.editor.layers.drawing, event.options)

                var first_point = event.points[0]
                tool.begin(this.editor, ctx, first_point.x, first_point.y)
                for (var point_i = 1; point_i < event.points.length; point_i++) {
                    tool.move(this.editor, ctx, event.points[point_i].x, event.points[point_i].y)
                }
                var last_point = event.points[event.points.length - 1]
                tool.end(this.editor, ctx, last_point.x, last_point.y)
            }
        }

        // re-set brush to current settings
        this.editor.setBrush(this.editor.layers.drawing)

        this.rewind_index = new_rewind_index
    }
}

class ImageEditor {
    constructor(popup, inpainter = false) {
        this.inpainter = inpainter
        this.popup = popup
        this.history = new EditorHistory(this)
        if (inpainter) {
            this.popup.classList.add("inpainter")
        }
        this.drawing = false
        this.temp_previous_tool = null // used for the ctrl-colorpicker functionality
        this.container = popup.querySelector(".editor-controls-center > div")
        this.layers = {}
        var layer_names = [
            "background",
            "drawing",
            "overlay"
        ]
        layer_names.forEach(name => {
            let canvas = document.createElement("canvas")
            canvas.className = `editor-canvas-${name}`
            this.container.appendChild(canvas)
            this.layers[name] = {
                name: name,
                canvas: canvas,
                ctx: canvas.getContext("2d")
            }
        })

        // add mouse handlers
        this.container.addEventListener("mousedown", this.mouseHandler.bind(this))
        this.container.addEventListener("mouseup", this.mouseHandler.bind(this))
        this.container.addEventListener("mousemove", this.mouseHandler.bind(this))
        this.container.addEventListener("mouseout", this.mouseHandler.bind(this))
        this.container.addEventListener("mouseenter", this.mouseHandler.bind(this))

        this.container.addEventListener("touchstart", this.mouseHandler.bind(this))
        this.container.addEventListener("touchmove", this.mouseHandler.bind(this))
        this.container.addEventListener("touchcancel", this.mouseHandler.bind(this))
        this.container.addEventListener("touchend", this.mouseHandler.bind(this))

        // initialize editor controls
        this.options = {}
        this.optionElements = {}
        IMAGE_EDITOR_SECTIONS.forEach(section => {
            section.id = `image_editor_${section.name}`
            var sectionElement = document.createElement("div")
            sectionElement.className = section.id

            var title = document.createElement("h4")
            title.innerText = section.title
            sectionElement.appendChild(title)

            var optionsContainer = document.createElement("div")
            optionsContainer.classList.add("editor-options-container")

            this.optionElements[section.name] = []
            section.options.forEach((option, index) => {
                var optionHolder = document.createElement("div")
                var optionElement = document.createElement("div")
                optionHolder.appendChild(optionElement)
                section.initElement(optionElement, option)
                optionElement.addEventListener("click", target => this.selectOption(section.name, index))
                optionsContainer.appendChild(optionHolder)
                this.optionElements[section.name].push(optionElement)
            })
            this.selectOption(section.name, section.options.indexOf(section.default))

            sectionElement.appendChild(optionsContainer)

            this.popup.querySelector(".editor-controls-left").appendChild(sectionElement)
        })

        this.custom_color_input = this.popup.querySelector(`input[type="color"]`)
        this.custom_color_input.addEventListener("change", () => {
            this.custom_color_input.parentElement.style.background = this.custom_color_input.value
            this.selectOption("color", 0)
        })

        if (this.inpainter) {
            this.selectOption("color", IMAGE_EDITOR_SECTIONS.find(s => s.name == "color").options.indexOf("#ffffff"))
        }

        // initialize the right-side controls
        var buttonContainer = document.createElement("div")
        IMAGE_EDITOR_BUTTONS.forEach(button => {
            var element = document.createElement("div")
            var icon = document.createElement("i")
            element.className = "image-editor-button button"
            icon.className = button.icon
            element.appendChild(icon)
            element.append(button.name)
            buttonContainer.appendChild(element)
            element.addEventListener("click", event => button.handler(this))
        })
        var actionsContainer = document.createElement("div")
        var actionsTitle = document.createElement("h4")
        actionsTitle.textContent = "Actions"
        actionsContainer.appendChild(actionsTitle);
        IMAGE_EDITOR_ACTIONS.forEach(action => {
            var element = document.createElement("div")
            var icon = document.createElement("i")
            element.className = "image-editor-button button"
            icon.className = action.icon
            element.appendChild(icon)
            element.append(action.name)
            actionsContainer.appendChild(element)
            element.addEventListener("click", event => this.runAction(action.id))
        })
        this.popup.querySelector(".editor-controls-right").appendChild(actionsContainer)
        this.popup.querySelector(".editor-controls-right").appendChild(buttonContainer)

        this.keyHandlerBound = this.keyHandler.bind(this)

        this.setSize(512, 512)
    }
    show() {
        this.popup.classList.add("active")
        document.addEventListener("keydown", this.keyHandlerBound)
        document.addEventListener("keyup", this.keyHandlerBound)
    }
    hide() {
        this.popup.classList.remove("active")
        document.removeEventListener("keydown", this.keyHandlerBound)
        document.removeEventListener("keyup", this.keyHandlerBound)
    }
    setSize(width, height) {
        if (width == this.width && height == this.height) {
            return
        }

        var max_size = Math.min(parseInt(window.innerWidth * 0.9), width, 768)
        if (width > height) {
            var multiplier = max_size / width
            width = (multiplier * width).toFixed()
            height = (multiplier * height).toFixed()
        }
        else {
            var multiplier = max_size / height
            width = (multiplier * width).toFixed()
            height = (multiplier * height).toFixed()
        }
        this.width = width
        this.height = height

        this.container.style.width = width + "px"
        this.container.style.height = height + "px"

        Object.values(this.layers).forEach(layer => {
            layer.canvas.width = width
            layer.canvas.height = height
        })

        if (this.inpainter) {
            this.saveImage() // We've reset the size of the image so inpainting is different
        }
        this.setBrush()
        this.history.clear()
    }
    get tool() {
        var tool_id = this.getOptionValue("tool")
        return IMAGE_EDITOR_TOOLS.find(t => t.id == tool_id);
    }
    loadTool() {
        this.drawing = false
        this.container.style.cursor = this.tool.cursor;
    }
    setImage(url, width, height) {
        this.setSize(width, height)
        this.layers.drawing.ctx.clearRect(0, 0, this.width, this.height)
        this.layers.background.ctx.clearRect(0, 0, this.width, this.height)
        if (url) {
            var image = new Image()
            image.onload = () => {
                this.layers.background.ctx.drawImage(image, 0, 0, this.width, this.height)
            }
            image.src = url
        }
        else {
            this.layers.background.ctx.fillStyle = "#ffffff"
            this.layers.background.ctx.beginPath()
            this.layers.background.ctx.rect(0, 0, this.width, this.height)
            this.layers.background.ctx.fill()
        }
        this.history.clear()
    }
    saveImage() {
        if (!this.inpainter) {
            // This is not an inpainter, so save the image as the new img2img input
            this.layers.background.ctx.drawImage(this.layers.drawing.canvas, 0, 0, this.width, this.height)
            var base64 = this.layers.background.canvas.toDataURL()
            initImagePreview.src = base64 // this will trigger the rest of the app to use it
        }
        else {
            // This is an inpainter, so make sure the toggle is set accordingly
            var is_blank = !this.layers.drawing.ctx
                .getImageData(0, 0, this.width, this.height).data
                .some(channel => channel !== 0)
            maskSetting.checked = !is_blank
        }
        this.hide()
    }
    getImg() { // a drop-in replacement of the drawingboard version
        return this.layers.drawing.canvas.toDataURL()
    }
    setImg(dataUrl) { // a drop-in replacement of the drawingboard version
        var image = new Image()
        image.onload = () => {
            var ctx = this.layers.drawing.ctx;
            ctx.clearRect(0, 0, this.width, this.height)
            ctx.globalCompositeOperation = "source-over"
            ctx.globalAlpha = 1
            ctx.filter = "none"
            ctx.drawImage(image, 0, 0, this.width, this.height)
            this.setBrush(this.layers.drawing)
        }
        image.src = dataUrl
    }
    runAction(action_id) {
        var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == action_id)
        this.history.pushAction(action_id)
        action.handler(this)
    }
    setBrush(layer = null, options = null) {
        if (options == null) {
            options = this.options
        }
        if (layer) {
            layer.ctx.lineCap = "round"
            layer.ctx.lineJoin = "round"
            layer.ctx.lineWidth = options.brush_size
            layer.ctx.fillStyle = options.color
            layer.ctx.strokeStyle = options.color
            var sharpness = parseInt(options.sharpness * options.brush_size)
            layer.ctx.filter = sharpness == 0 ? `none` : `blur(${sharpness}px)`
            layer.ctx.globalAlpha = (1 - options.opacity)
            layer.ctx.globalCompositeOperation = "source-over"
            var tool = IMAGE_EDITOR_TOOLS.find(t => t.id == options.tool)
            if (tool && tool.setBrush) {
                tool.setBrush(editor, layer)
            }
        }
        else {
            Object.values([ "drawing", "overlay" ]).map(name => this.layers[name]).forEach(l => {
                this.setBrush(l)
            })
        }
    }
    get ctx_overlay() {
        return this.layers.overlay.ctx
    }
    get ctx_current() { // the idea is this will help support having custom layers and editing each one
        return this.layers.drawing.ctx
    }
    get canvas_current() {
        return this.layers.drawing.canvas
    }
    keyHandler(event) { // handles keybinds like ctrl+z, ctrl+y
        if (!this.popup.classList.contains("active")) {
            document.removeEventListener("keydown", this.keyHandlerBound)
            document.removeEventListener("keyup", this.keyHandlerBound)
            return // this catches if something else closes the window but doesnt properly unbind the key handler
        }

        // keybindings
        if (event.type == "keydown") {
            if ((event.key == "z" || event.key == "Z") && event.ctrlKey) {
                if (!event.shiftKey) {
                    this.history.undo()
                }
                else {
                    this.history.redo()
                }
            }
            if (event.key == "y" && event.ctrlKey) {
                this.history.redo()
            }
        }

        // dropper ctrl holding handler stuff
        var dropper_active = this.temp_previous_tool != null;
        if (dropper_active && !event.ctrlKey) {
            this.selectOption("tool", IMAGE_EDITOR_TOOLS.findIndex(t => t.id == this.temp_previous_tool))
            this.temp_previous_tool = null
        }
        else if (!dropper_active && event.ctrlKey) {
            this.temp_previous_tool = this.getOptionValue("tool")
            this.selectOption("tool", IMAGE_EDITOR_TOOLS.findIndex(t => t.id == "colorpicker"))
        }
    }
    mouseHandler(event) {
        var bbox = this.layers.overlay.canvas.getBoundingClientRect()
        var x = (event.clientX || 0) - bbox.left
        var y = (event.clientY || 0) - bbox.top
        var type = event.type;
        var touchmap = {
            touchstart: "mousedown",
            touchmove: "mousemove",
            touchend: "mouseup",
            touchcancel: "mouseup"
        }
        if (type in touchmap) {
            type = touchmap[type]
            if (event.touches && event.touches[0]) {
                var touch = event.touches[0]
                var x = (touch.clientX || 0) - bbox.left
                var y = (touch.clientY || 0) - bbox.top
            }
        }
        event.preventDefault()
        // do drawing-related stuff
        if (type == "mousedown" || (type == "mouseenter" && event.buttons == 1)) {
            this.drawing = true
            this.tool.begin(this, this.ctx_current, x, y)
            this.tool.begin(this, this.ctx_overlay, x, y, true)
            this.history.editBegin(x, y)
        }
        if (type == "mouseup" || type == "mousemove") {
            if (this.drawing) {
                if (x > 0 && y > 0) {
                    this.tool.move(this, this.ctx_current, x, y)
                    this.tool.move(this, this.ctx_overlay, x, y, true)
                    this.history.editMove(x, y)
                }
            }
        }
        if (type == "mouseup" || type == "mouseout") {
            if (this.drawing) {
                this.drawing = false
                this.tool.end(this, this.ctx_current, x, y)
                this.tool.end(this, this.ctx_overlay, x, y, true)
                this.history.editEnd(x, y)
            }
        }
    }
    getOptionValue(section_name) {
        var section = IMAGE_EDITOR_SECTIONS.find(s => s.name == section_name)
        return this.options && section_name in this.options ? this.options[section_name] : section.default
    }
    selectOption(section_name, option_index) {
        var section = IMAGE_EDITOR_SECTIONS.find(s => s.name == section_name)
        var value = section.options[option_index]
        this.options[section_name] = value == "custom" ? section.getCustom(this) : value

        this.optionElements[section_name].forEach(element => element.classList.remove("active"))
        this.optionElements[section_name][option_index].classList.add("active")

        // change the editor
        this.setBrush()
        if (section.name == "tool") {
            this.loadTool()
        }
    }
}

function rgbToHex(rgb) {
    function componentToHex(c) {
        var hex = parseInt(c).toString(16)
        return hex.length == 1 ? "0" + hex : hex
    }
    return "#" + componentToHex(rgb.r) + componentToHex(rgb.g) + componentToHex(rgb.b)
}

const imageEditor = new ImageEditor(document.getElementById("image-editor"))
const imageInpainter = new ImageEditor(document.getElementById("image-inpainter"), true)

imageEditor.setImage(null, 512, 512)
imageInpainter.setImage(null, 512, 512)

document.getElementById("init_image_button_draw").addEventListener("click", () => {
    imageEditor.show()
})
document.getElementById("init_image_button_inpaint").addEventListener("click", () => {
    imageInpainter.show()
})
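Two small worked examples for the helpers above (the values are illustrations, not part of the diff):

// rgbToHex truncates each channel with parseInt and zero-pads to two hex digits:
rgbToHex({ r: 255, g: 10, b: 0 })     // "#ff0a00"
rgbToHex({ r: 53.7, g: 132, b: 228 }) // "#3584e4", the .active border color used in image-editor.css

// Undo/redo walk EditorHistory.rewind_index; the key handler maps Ctrl+Z to undo()
// and Ctrl+Y or Ctrl+Shift+Z to redo().
imageEditor.history.undo() // replays events up to index (length - rewind_index - 1)
imageEditor.history.redo()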
|
@ -85,14 +85,13 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
|
|||||||
|
|
||||||
if(typeof modifierCard == 'object') {
|
if(typeof modifierCard == 'object') {
|
||||||
modifiersEl.appendChild(modifierCard)
|
modifiersEl.appendChild(modifierCard)
|
||||||
|
const trimmedName = trimModifiers(modifierName)
|
||||||
|
|
||||||
modifierCard.addEventListener('click', () => {
|
modifierCard.addEventListener('click', () => {
|
||||||
if (activeTags.map(x => x.name).includes(modifierName)) {
|
if (activeTags.map(x => trimModifiers(x.name)).includes(trimmedName)) {
|
||||||
// remove modifier from active array
|
// remove modifier from active array
|
||||||
activeTags = activeTags.filter(x => x.name != modifierName)
|
activeTags = activeTags.filter(x => trimModifiers(x.name) != trimmedName)
|
||||||
modifierCard.classList.remove(activeCardClass)
|
toggleCardState(trimmedName, false)
|
||||||
|
|
||||||
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
|
|
||||||
} else {
|
} else {
|
||||||
// add modifier to active array
|
// add modifier to active array
|
||||||
activeTags.push({
|
activeTags.push({
|
||||||
@ -101,10 +100,7 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
|
|||||||
'originElement': modifierCard,
|
'originElement': modifierCard,
|
||||||
'previews': modifierPreviews
|
'previews': modifierPreviews
|
||||||
})
|
})
|
||||||
|
toggleCardState(trimmedName, true)
|
||||||
modifierCard.classList.add(activeCardClass)
|
|
||||||
|
|
||||||
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '-'
|
|
||||||
}
|
}
|
||||||
|
|
||||||
refreshTagsList()
|
refreshTagsList()
|
||||||
@ -125,6 +121,10 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
|
|||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function trimModifiers(tag) {
|
||||||
|
return tag.replace(/^\(+|\)+$/g, '').replace(/^\[+|\]+$/g, '')
|
||||||
|
}
|
||||||
|
|
||||||
async function loadModifiers() {
|
async function loadModifiers() {
|
||||||
try {
|
try {
|
||||||
let res = await fetch('/get/modifiers')
|
let res = await fetch('/get/modifiers')
|
||||||
@ -219,11 +219,10 @@ function refreshTagsList() {
|
|||||||
editorModifierTagsList.appendChild(tag.element)
|
editorModifierTagsList.appendChild(tag.element)
|
||||||
|
|
||||||
tag.element.addEventListener('click', () => {
|
tag.element.addEventListener('click', () => {
|
||||||
let idx = activeTags.indexOf(tag)
|
let idx = activeTags.findIndex(o => { return o.name === tag.name })
|
||||||
|
|
||||||
if (idx !== -1 && activeTags[idx].originElement !== undefined) {
|
if (idx !== -1) {
|
||||||
activeTags[idx].originElement.classList.remove(activeCardClass)
|
toggleCardState(activeTags[idx].name, false)
|
||||||
activeTags[idx].originElement.querySelector('.modifier-card-image-overlay').innerText = '+'
|
|
||||||
|
|
||||||
activeTags.splice(idx, 1)
|
activeTags.splice(idx, 1)
|
||||||
refreshTagsList()
|
refreshTagsList()
|
||||||
@ -236,6 +235,23 @@ function refreshTagsList() {
|
|||||||
editorModifierTagsList.appendChild(brk)
|
editorModifierTagsList.appendChild(brk)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function toggleCardState(modifierName, makeActive) {
|
||||||
|
document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(card => {
|
||||||
|
const name = card.querySelector('.modifier-card-label').innerText
|
||||||
|
if ( trimModifiers(modifierName) == trimModifiers(name)
|
||||||
|
|| trimModifiers(modifierName) == 'by ' + trimModifiers(name)) {
|
||||||
|
if(makeActive) {
|
||||||
|
card.classList.add(activeCardClass)
|
||||||
|
card.querySelector('.modifier-card-image-overlay').innerText = '-'
|
||||||
|
}
|
||||||
|
else{
|
||||||
|
card.classList.remove(activeCardClass)
|
||||||
|
card.querySelector('.modifier-card-image-overlay').innerText = '+'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
function changePreviewImages(val) {
|
function changePreviewImages(val) {
|
||||||
const previewImages = document.querySelectorAll('.modifier-card-image-container img')
|
const previewImages = document.querySelectorAll('.modifier-card-image-container img')
|
||||||
|
|
||||||
@ -310,31 +326,7 @@ function saveCustomModifiers() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function loadCustomModifiers() {
|
function loadCustomModifiers() {
|
||||||
let customModifiers = localStorage.getItem(CUSTOM_MODIFIERS_KEY, '')
|
PLUGINS['MODIFIERS_LOAD'].forEach(fn=>fn.loader.call())
|
||||||
customModifiersTextBox.value = customModifiers
|
|
||||||
|
|
||||||
if (customModifiersGroupElement !== undefined) {
|
|
||||||
customModifiersGroupElement.remove()
|
|
||||||
}
|
|
||||||
|
|
||||||
if (customModifiers && customModifiers.trim() !== '') {
|
|
||||||
customModifiers = customModifiers.split('\n')
|
|
||||||
customModifiers = customModifiers.filter(m => m.trim() !== '')
|
|
||||||
customModifiers = customModifiers.map(function(m) {
|
|
||||||
return {
|
|
||||||
"modifier": m
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
let customGroup = {
|
|
||||||
'category': 'Custom Modifiers',
|
|
||||||
'modifiers': customModifiers
|
|
||||||
}
|
|
||||||
|
|
||||||
customModifiersGroupElement = createModifierGroup(customGroup, true)
|
|
||||||
|
|
||||||
createCollapsibles(customModifiersGroupElement)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
customModifiersTextBox.addEventListener('change', saveCustomModifiers)
|
customModifiersTextBox.addEventListener('change', saveCustomModifiers)
|
||||||
|
@@ -1,41 +0,0 @@
-const INPAINTING_EDITOR_SIZE = 450
-
-let inpaintingEditorContainer = document.querySelector('#inpaintingEditor')
-let inpaintingEditor = new DrawingBoard.Board('inpaintingEditor', {
-    color: "#ffffff",
-    background: false,
-    size: 30,
-    webStorage: false,
-    controls: [{'DrawingMode': {'filler': false}}, 'Size', 'Navigation']
-})
-let inpaintingEditorCanvasBackground = document.querySelector('.drawing-board-canvas-wrapper')
-
-function resizeInpaintingEditor(widthValue, heightValue) {
-    if (widthValue === heightValue) {
-        widthValue = INPAINTING_EDITOR_SIZE
-        heightValue = INPAINTING_EDITOR_SIZE
-    } else if (widthValue > heightValue) {
-        heightValue = (heightValue / widthValue) * INPAINTING_EDITOR_SIZE
-        widthValue = INPAINTING_EDITOR_SIZE
-    } else {
-        widthValue = (widthValue / heightValue) * INPAINTING_EDITOR_SIZE
-        heightValue = INPAINTING_EDITOR_SIZE
-    }
-    if (inpaintingEditor.opts.aspectRatio === (widthValue / heightValue).toFixed(3)) {
-        // Same ratio, don't reset the canvas.
-        return
-    }
-    inpaintingEditor.opts.aspectRatio = (widthValue / heightValue).toFixed(3)
-
-    inpaintingEditorContainer.style.width = widthValue + 'px'
-    inpaintingEditorContainer.style.height = heightValue + 'px'
-    inpaintingEditor.opts.enlargeYourContainer = true
-
-    inpaintingEditor.opts.size = inpaintingEditor.ctx.lineWidth
-    inpaintingEditor.resize()
-
-    inpaintingEditor.ctx.lineCap = "round"
-    inpaintingEditor.ctx.lineJoin = "round"
-    inpaintingEditor.ctx.lineWidth = inpaintingEditor.opts.size
-    inpaintingEditor.setColor(inpaintingEditor.opts.color)
-}
10  ui/media/js/jquery-confirm.min.js (vendored, new file)
    File diff suppressed because one or more lines are too long
@@ -16,6 +16,9 @@ let numOutputsParallelField = document.querySelector('#num_outputs_parallel')
 let numInferenceStepsField = document.querySelector('#num_inference_steps')
 let guidanceScaleSlider = document.querySelector('#guidance_scale_slider')
 let guidanceScaleField = document.querySelector('#guidance_scale')
+let outputQualitySlider = document.querySelector('#output_quality_slider')
+let outputQualityField = document.querySelector('#output_quality')
+let outputQualityRow = document.querySelector('#output_quality_row')
 let randomSeedField = document.querySelector("#random_seed")
 let seedField = document.querySelector('#seed')
 let widthField = document.querySelector('#width')

@@ -59,14 +62,6 @@ let serverStatusColor = document.querySelector('#server-status-color')
 let serverStatusMsg = document.querySelector('#server-status-msg')
 
 
-document.querySelector('.drawing-board-control-navigation-back').innerHTML = '<i class="fa-solid fa-rotate-left"></i>'
-document.querySelector('.drawing-board-control-navigation-forward').innerHTML = '<i class="fa-solid fa-rotate-right"></i>'
-
-let maskResetButton = document.querySelector('.drawing-board-control-navigation-reset')
-maskResetButton.innerHTML = 'Clear'
-maskResetButton.style.fontWeight = 'normal'
-maskResetButton.style.fontSize = '10pt'
-
 let serverState = {'status': 'Offline', 'time': Date.now()}
 let bellPending = false
 

@@ -138,6 +133,35 @@ function isServerAvailable() {
     }
 }
 
+// shiftOrConfirm(e, prompt, fn)
+//   e      : MouseEvent
+//   prompt : Text to be shown as prompt. Should be a question to which "yes" is a good answer.
+//   fn     : function to be called if the user confirms the dialog or has the shift key pressed
+//
+// If the user had the shift key pressed while clicking, the function fn will be executed.
+// If the setting "confirm_dangerous_actions" in the system settings is disabled, the function
+// fn will be executed.
+// Otherwise, a confirmation dialog is shown. If the user confirms, the function fn will also
+// be executed.
+function shiftOrConfirm(e, prompt, fn) {
+    e.stopPropagation()
+    if (e.shiftKey || !confirmDangerousActionsField.checked) {
+        fn(e)
+    } else {
+        $.confirm({
+            theme: 'modern',
+            title: prompt,
+            useBootstrap: false,
+            animateFromElement: false,
+            content: '<small>Tip: To skip this dialog, use shift-click or disable the "Confirm dangerous actions" setting in the Settings tab.</small>',
+            buttons: {
+                yes: () => { fn(e) },
+                cancel: () => {}
+            }
+        });
+    }
+}
+
 function logMsg(msg, level, outputMsg) {
     if (outputMsg.hasChildNodes()) {
         outputMsg.appendChild(document.createElement('br'))
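Note: the click handlers changed later in this diff wrap their destructive actions in shiftOrConfirm. A minimal sketch of the calling pattern (the button name here is hypothetical):

    // hypothetical button — same pattern used below for clearAllPreviewsBtn and stopImageBtn
    someDangerousBtn.addEventListener('click', (e) => {
        shiftOrConfirm(e, "Clear everything?", async function(e) {
            // runs immediately on shift-click or when 'Confirm dangerous actions' is disabled,
            // otherwise only after the user clicks 'yes' in the jquery-confirm dialog
            await stopAllTasks()
        })
    })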
@@ -169,34 +193,6 @@ function playSound() {
         })
     }
 }
-function setSystemInfo(devices) {
-    let cpu = devices.all.cpu.name
-    let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
-    let activeGPUs = Object.keys(devices.active)
-
-    function ID_TO_TEXT(d) {
-        let info = devices.all[d]
-        if ("mem_free" in info && "mem_total" in info) {
-            return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
-        } else {
-            return `${info.name} <small>(${d}) (no memory info)</small>`
-        }
-    }
-
-    allGPUs = allGPUs.map(ID_TO_TEXT)
-    activeGPUs = activeGPUs.map(ID_TO_TEXT)
-
-    let systemInfo = `
-        <table>
-            <tr><td><label>Processor:</label></td><td class="value">${cpu}</td></tr>
-            <tr><td><label>Compatible Graphics Cards (all):</label></td><td class="value">${allGPUs.join('</br>')}</td></tr>
-            <tr><td></td><td> </td></tr>
-            <tr><td><label>Used for rendering 🔥:</label></td><td class="value">${activeGPUs.join('</br>')}</td></tr>
-        </table>`
-
-    let systemInfoEl = document.querySelector('#system-info')
-    systemInfoEl.innerHTML = systemInfo
-}
 
 async function healthCheck() {
     try {

@@ -231,7 +227,7 @@ async function healthCheck() {
                 break
         }
         if (serverState.devices) {
-            setSystemInfo(serverState.devices)
+            setDeviceInfo(serverState.devices)
         }
         serverState.time = Date.now()
     } catch (e) {

@@ -334,11 +330,7 @@ function onUseAsInputClick(req, img) {
     initImageSelector.value = null
     initImagePreview.src = imgData
 
-    initImagePreviewContainer.style.display = 'block'
-    inpaintingEditorContainer.style.display = 'none'
-    promptStrengthContainer.style.display = 'table-row'
     maskSetting.checked = false
-    samplerSelectionContainer.style.display = 'none'
 }
 
 function onDownloadImageClick(req, img) {

@@ -386,11 +378,21 @@ function onMakeSimilarClick(req, img) {
 function enqueueImageVariationTask(req, img, reqDiff) {
     const imageSeed = img.getAttribute('data-seed')
 
-    const newTaskRequest = modifyCurrentRequest(req, reqDiff, {
+    const newRequestBody = {
         num_outputs: 1, // this can be user-configurable in the future
         seed: imageSeed
-    })
+    }
+
+    // If the user is editing pictures, stop modifyCurrentRequest from importing
+    // new values by setting the missing properties to undefined
+    if (!('init_image' in req) && !('init_image' in reqDiff)) {
+        newRequestBody.init_image = undefined
+        newRequestBody.mask = undefined
+    } else if (!('mask' in req) && !('mask' in reqDiff)) {
+        newRequestBody.mask = undefined
+    }
+
+    const newTaskRequest = modifyCurrentRequest(req, reqDiff, newRequestBody)
     newTaskRequest.numOutputsTotal = 1 // this can be user-configurable in the future
     newTaskRequest.batchCount = 1
 

@@ -695,6 +697,12 @@ async function checkTasks() {
 
     const genSeeds = Boolean(typeof task.reqBody.seed !== 'number' || (task.reqBody.seed === task.seed && task.numOutputsTotal > 1))
     const startSeed = task.reqBody.seed || task.seed
+
+    // Update the seed *before* starting the processing so it's retained if user stops the task
+    if (randomSeedField.checked) {
+        seedField.value = task.seed
+    }
+
     for (let i = 0; i < task.batchCount; i++) {
         let newTask = task
         if (task.batchCount > 1) {

@@ -741,10 +749,6 @@ async function checkTasks() {
         }
     }
 
-    if (randomSeedField.checked) {
-        seedField.value = task.seed
-    }
-
     currentTask = null
 
     if (typeof requestIdleCallback === 'function') {

@@ -790,6 +794,7 @@ function getCurrentUserRequest() {
             stream_image_progress: (numOutputsTotal > 50 ? false : streamImageProgressField.checked),
             show_only_filtered_image: showOnlyFilteredImageField.checked,
             output_format: outputFormatField.value,
+            output_quality: outputQualityField.value,
             original_prompt: promptField.value,
             active_tags: (activeTags.map(x => x.name))
         }

@@ -802,7 +807,7 @@ function getCurrentUserRequest() {
         //     newTask.reqBody.mask = maskImagePreview.src
         // }
         if (maskSetting.checked) {
-            newTask.reqBody.mask = inpaintingEditor.getImg()
+            newTask.reqBody.mask = imageInpainter.getImg()
         }
         newTask.reqBody.sampler = 'ddim'
     } else {

@@ -887,24 +892,26 @@ function createTask(task) {
     task['progressBar'] = taskEntry.querySelector('.progress-bar')
     task['stopTask'] = taskEntry.querySelector('.stopTask')
 
-    task['stopTask'].addEventListener('click', async function(e) {
-        e.stopPropagation()
-        if (task['isProcessing']) {
-            task.isProcessing = false
-            task.progressBar.classList.remove("active")
-            try {
-                let res = await fetch('/image/stop?session_id=' + sessionId)
-            } catch (e) {
-                console.log(e)
-            }
-        } else {
-            let idx = taskQueue.indexOf(task)
-            if (idx >= 0) {
-                taskQueue.splice(idx, 1)
-            }
-
-            taskEntry.remove()
-        }
+    task['stopTask'].addEventListener('click', (e) => {
+        let question = (task['isProcessing'] ? "Stop this task?" : "Remove this task?")
+        shiftOrConfirm(e, question, async function(e) {
+            if (task['isProcessing']) {
+                task.isProcessing = false
+                task.progressBar.classList.remove("active")
+                try {
+                    let res = await fetch('/image/stop?session_id=' + sessionId)
+                } catch (e) {
+                    console.log(e)
+                }
+            } else {
+                let idx = taskQueue.indexOf(task)
+                if (idx >= 0) {
+                    taskQueue.splice(idx, 1)
+                }
+
+                removeTask(taskEntry)
+            }
+        })
     })
 
     task['useSettings'] = taskEntry.querySelector('.useSettings')

@@ -933,11 +940,12 @@ function getPrompts() {
     prompts = prompts.map(prompt => prompt.trim())
     prompts = prompts.filter(prompt => prompt !== '')
 
-    if (activeTags.length > 0) {
-        const promptTags = activeTags.map(x => x.name).join(", ")
-        prompts = prompts.map((prompt) => `${prompt}, ${promptTags}`)
+    const newTags = activeTags.filter(tag => tag.inactive === undefined || tag.inactive === false)
+    if (newTags.length > 0) {
+        const promptTags = newTags.map(x => x.name).join(", ")
+        prompts = prompts.map((prompt) => `${prompt}, ${promptTags}`)
     }
 
     let promptsToMake = applySetOperator(prompts)
     promptsToMake = applyPermuteOperator(promptsToMake)
 
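Note: the inactive flag checked above is set per tag by the modifiers-toggle plugin added at the end of this diff; inactive tags stay visible in the tag list but are skipped when the prompt is assembled. A small sketch of the filter:

    // sketch of the filtering added in getPrompts()
    const tags = [{ name: 'Golden Hour' }, { name: 'HDR', inactive: true }]
    const newTags = tags.filter(tag => tag.inactive === undefined || tag.inactive === false)
    // newTags.map(x => x.name).join(", ")  ->  'Golden Hour'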
@@ -1047,21 +1055,25 @@ async function stopAllTasks() {
     }
 }
 
-clearAllPreviewsBtn.addEventListener('click', async function() {
+function removeTask(taskToRemove) {
+    taskToRemove.remove()
+
+    if (document.querySelector('.imageTaskContainer') === null) {
+        previewTools.style.display = 'none'
+        initialText.style.display = 'block'
+    }
+}
+
+clearAllPreviewsBtn.addEventListener('click', (e) => { shiftOrConfirm(e, "Clear all the results and tasks in this window?", async function() {
     await stopAllTasks()
 
     let taskEntries = document.querySelectorAll('.imageTaskContainer')
-    taskEntries.forEach(task => {
-        task.remove()
-    })
-
-    previewTools.style.display = 'none'
-    initialText.style.display = 'block'
-})
-
-stopImageBtn.addEventListener('click', async function() {
+    taskEntries.forEach(removeTask)
+})})
+
+stopImageBtn.addEventListener('click', (e) => { shiftOrConfirm(e, "Stop all the tasks?", async function(e) {
     await stopAllTasks()
-})
+})})
 
 widthField.addEventListener('change', onDimensionChange)
 heightField.addEventListener('change', onDimensionChange)

@@ -1082,13 +1094,14 @@ numOutputsTotalField.addEventListener('change', renameMakeImageButton)
 numOutputsParallelField.addEventListener('change', renameMakeImageButton)
 
 function onDimensionChange() {
-    if (!maskSetting.checked) {
-        return
-    }
     let widthValue = parseInt(widthField.value)
     let heightValue = parseInt(heightField.value)
-    resizeInpaintingEditor(widthValue, heightValue)
+    if (!initImagePreviewContainer.classList.contains("has-image")) {
+        imageEditor.setImage(null, widthValue, heightValue)
+    }
+    else {
+        imageInpainter.setImage(initImagePreview.src, widthValue, heightValue)
+    }
 }
 
 diskPathField.disabled = !saveToDiskField.checked

@@ -1107,6 +1120,7 @@ document.onkeydown = function(e) {
     }
 }
 
+/********************* Guidance **************************/
 function updateGuidanceScale() {
     guidanceScaleField.value = guidanceScaleSlider.value / 10
     guidanceScaleField.dispatchEvent(new Event("change"))

@@ -1127,6 +1141,7 @@ guidanceScaleSlider.addEventListener('input', updateGuidanceScale)
 guidanceScaleField.addEventListener('input', updateGuidanceScaleSlider)
 updateGuidanceScale()
 
+/********************* Prompt Strength *******************/
 function updatePromptStrength() {
     promptStrengthField.value = promptStrengthSlider.value / 100
     promptStrengthField.dispatchEvent(new Event("change"))

@@ -1147,6 +1162,36 @@ promptStrengthSlider.addEventListener('input', updatePromptStrength)
 promptStrengthField.addEventListener('input', updatePromptStrengthSlider)
 updatePromptStrength()
 
+/********************* JPEG Quality **********************/
+function updateOutputQuality() {
+    outputQualityField.value = 0 | outputQualitySlider.value
+    outputQualityField.dispatchEvent(new Event("change"))
+}
+
+function updateOutputQualitySlider() {
+    if (outputQualityField.value < 10) {
+        outputQualityField.value = 10
+    } else if (outputQualityField.value > 95) {
+        outputQualityField.value = 95
+    }
+
+    outputQualitySlider.value = 0 | outputQualityField.value
+    outputQualitySlider.dispatchEvent(new Event("change"))
+}
+
+outputQualitySlider.addEventListener('input', updateOutputQuality)
+outputQualityField.addEventListener('input', debounce(updateOutputQualitySlider))
+updateOutputQuality()
+
+outputFormatField.addEventListener('change', e => {
+    if (outputFormatField.value == 'jpeg') {
+        outputQualityRow.style.display='table-row'
+    } else {
+        outputQualityRow.style.display='none'
+    }
+})
+
+
 async function getModels() {
     try {
         var sd_model_setting_key = "stable_diffusion_model"
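Note: the `0 | value` expressions above are just a bitwise-OR truncation that coerces the field/slider strings to integers before the two controls are synced:

    // sketch of the coercion used by updateOutputQuality() / updateOutputQualitySlider()
    0 | '72.9'   // -> 72
    0 | 95       // -> 95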
@@ -1201,7 +1246,7 @@ async function getModels() {
 function checkRandomSeed() {
     if (randomSeedField.checked) {
         seedField.disabled = true
-        seedField.value = "0"
+        //seedField.value = "0" // This causes the seed to be lost if the user changes their mind after toggling the checkbox
     } else {
         seedField.disabled = false
     }

@@ -1209,12 +1254,8 @@ function checkRandomSeed() {
 randomSeedField.addEventListener('input', checkRandomSeed)
 checkRandomSeed()
 
-function showInitImagePreview() {
+function loadImg2ImgFromFile() {
     if (initImageSelector.files.length === 0) {
-        initImagePreviewContainer.style.display = 'none'
-        // inpaintingEditorContainer.style.display = 'none'
-        promptStrengthContainer.style.display = 'none'
-        // maskSetting.style.display = 'none'
         return
     }
 

@@ -1222,51 +1263,41 @@ function showInitImagePreview() {
     let file = initImageSelector.files[0]
 
     reader.addEventListener('load', function(event) {
-        // console.log(file.name, reader.result)
         initImagePreview.src = reader.result
-        initImagePreviewContainer.style.display = 'block'
-        inpaintingEditorContainer.style.display = 'none'
-        promptStrengthContainer.style.display = 'table-row'
-        samplerSelectionContainer.style.display = 'none'
-        // maskSetting.checked = false
     })
 
     if (file) {
         reader.readAsDataURL(file)
     }
 }
-initImageSelector.addEventListener('change', showInitImagePreview)
-showInitImagePreview()
+initImageSelector.addEventListener('change', loadImg2ImgFromFile)
+loadImg2ImgFromFile()
 
-initImagePreview.addEventListener('load', function() {
-    inpaintingEditorCanvasBackground.style.backgroundImage = "url('" + this.src + "')"
-    // maskSetting.style.display = 'block'
-    // inpaintingEditorContainer.style.display = 'block'
-    initImageSizeBox.textContent = initImagePreview.naturalWidth + " x " + initImagePreview.naturalHeight
-    initImageSizeBox.style.display = 'block'
-})
+function img2imgLoad() {
+    promptStrengthContainer.style.display = 'table-row'
+    samplerSelectionContainer.style.display = "none"
+    initImagePreviewContainer.classList.add("has-image")
+
+    initImageSizeBox.textContent = initImagePreview.naturalWidth + " x " + initImagePreview.naturalHeight
+    imageEditor.setImage(this.src, initImagePreview.naturalWidth, initImagePreview.naturalHeight)
+    imageInpainter.setImage(this.src, parseInt(widthField.value), parseInt(heightField.value))
+}
 
-initImageClearBtn.addEventListener('click', function() {
+function img2imgUnload() {
     initImageSelector.value = null
-    // maskImageSelector.value = null
 
     initImagePreview.src = ''
-    // maskImagePreview.src = ''
     maskSetting.checked = false
 
-    initImagePreviewContainer.style.display = 'none'
-    // inpaintingEditorContainer.style.display = 'none'
-    // maskImagePreviewContainer.style.display = 'none'
-
-    // maskSetting.style.display = 'none'
-
-    promptStrengthContainer.style.display = 'none'
-    samplerSelectionContainer.style.display = 'table-row'
-    initImageSizeBox.style.display = 'none'
-})
+    promptStrengthContainer.style.display = "none"
+    samplerSelectionContainer.style.display = ""
+    initImagePreviewContainer.classList.remove("has-image")
+    imageEditor.setImage(null, parseInt(widthField.value), parseInt(heightField.value))
+}
+initImagePreview.addEventListener('load', img2imgLoad)
+initImageClearBtn.addEventListener('click', img2imgUnload)
 
 maskSetting.addEventListener('click', function() {
-    inpaintingEditorContainer.style.display = (this.checked ? 'block' : 'none')
     onDimensionChange()
 })

@@ -1306,9 +1337,22 @@ document.querySelectorAll('.popup').forEach(popup => {
     }
 })
 
-var tabElements = [];
+var tabElements = []
+function selectTab(tab_id) {
+    let tabInfo = tabElements.find(t => t.tab.id == tab_id)
+    if (!tabInfo.tab.classList.contains("active")) {
+        tabElements.forEach(info => {
+            if (info.tab.classList.contains("active")) {
+                info.tab.classList.toggle("active")
+                info.content.classList.toggle("active")
+            }
+        })
+        tabInfo.tab.classList.toggle("active")
+        tabInfo.content.classList.toggle("active")
+    }
+}
 function linkTabContents(tab) {
-    var name = tab.id.replace("tab-", "");
+    var name = tab.id.replace("tab-", "")
     var content = document.getElementById(`tab-content-${name}`)
     tabElements.push({
         name: name,

@@ -1316,18 +1360,7 @@ function linkTabContents(tab) {
         content: content
     })
 
-    tab.addEventListener("click", event => {
-        if (!tab.classList.contains("active")) {
-            tabElements.forEach(tabInfo => {
-                if (tabInfo.tab.classList.contains("active")) {
-                    tabInfo.tab.classList.toggle("active")
-                    tabInfo.content.classList.toggle("active")
-                }
-            })
-            tab.classList.toggle("active")
-            content.classList.toggle("active")
-        }
-    })
+    tab.addEventListener("click", event => selectTab(tab.id))
 }
 
 document.querySelectorAll(".tab").forEach(linkTabContents)
@@ -5,9 +5,9 @@
  */
 var ParameterType = {
     checkbox: "checkbox",
     select: "select",
     select_multiple: "select_multiple",
     custom: "custom",
 };
 
 /**

@@ -23,166 +23,182 @@
 
 /** @type {Array.<Parameter>} */
 var PARAMETERS = [
     {
         id: "theme",
         type: ParameterType.select,
         label: "Theme",
         default: "theme-default",
         note: "customize the look and feel of the ui",
         options: [ // Note: options expanded dynamically
             {
                 value: "theme-default",
                 label: "Default"
             }
         ],
         icon: "fa-palette"
     },
     {
         id: "save_to_disk",
         type: ParameterType.checkbox,
         label: "Auto-Save Images",
         note: "automatically saves images to the specified location",
         icon: "fa-download",
         default: false,
     },
     {
         id: "diskPath",
         type: ParameterType.custom,
         label: "Save Location",
         render: (parameter) => {
             return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
         }
     },
     {
         id: "sound_toggle",
         type: ParameterType.checkbox,
         label: "Enable Sound",
         note: "plays a sound on task completion",
         icon: "fa-volume-low",
         default: true,
     },
     {
         id: "ui_open_browser_on_start",
         type: ParameterType.checkbox,
         label: "Open browser on startup",
         note: "starts the default browser on startup",
         icon: "fa-window-restore",
         default: true,
     },
     {
         id: "turbo",
         type: ParameterType.checkbox,
         label: "Turbo Mode",
         note: "generates images faster, but uses an additional 1 GB of GPU memory",
         icon: "fa-forward",
         default: true,
     },
     {
         id: "use_cpu",
         type: ParameterType.checkbox,
         label: "Use CPU (not GPU)",
         note: "warning: this will be *very* slow",
         icon: "fa-microchip",
         default: false,
     },
     {
         id: "auto_pick_gpus",
         type: ParameterType.checkbox,
         label: "Automatically pick the GPUs (experimental)",
         default: false,
     },
     {
         id: "use_gpus",
         type: ParameterType.select_multiple,
         label: "GPUs to use (experimental)",
         note: "to process in parallel",
         default: false,
     },
     {
         id: "use_full_precision",
         type: ParameterType.checkbox,
         label: "Use Full Precision",
         note: "for GPU-only. warning: this will consume more VRAM",
         icon: "fa-crosshairs",
         default: false,
     },
     {
         id: "auto_save_settings",
         type: ParameterType.checkbox,
         label: "Auto-Save Settings",
         note: "restores settings on browser load",
         icon: "fa-gear",
         default: true,
     },
     {
+        id: "confirm_dangerous_actions",
+        type: ParameterType.checkbox,
+        label: "Confirm dangerous actions",
+        note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
+        icon: "fa-check-double",
+        default: true,
+    },
+    {
         id: "listen_to_network",
         type: ParameterType.checkbox,
         label: "Make Stable Diffusion available on your network",
         note: "Other devices on your network can access this web page",
         icon: "fa-network-wired",
         default: true,
     },
     {
         id: "listen_port",
         type: ParameterType.custom,
         label: "Network port",
         note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
         icon: "fa-anchor",
         render: (parameter) => {
             return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
         }
     },
     {
+        id: "test_sd2",
+        type: ParameterType.checkbox,
+        label: "Test SD 2.0",
+        note: "Experimental! High memory usage! GPU-only! Not the final version! Please restart the program after changing this.",
+        icon: "fa-fire",
+        default: false,
+    },
+    {
         id: "use_beta_channel",
         type: ParameterType.checkbox,
         label: "Beta channel",
         note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
         icon: "fa-fire",
         default: false,
     },
 ];
 
 function getParameterSettingsEntry(id) {
     let parameter = PARAMETERS.filter(p => p.id === id)
     if (parameter.length === 0) {
         return
     }
     return parameter[0].settingsEntry
 }
 
 function getParameterElement(parameter) {
     switch (parameter.type) {
         case ParameterType.checkbox:
             var is_checked = parameter.default ? " checked" : "";
             return `<input id="${parameter.id}" name="${parameter.id}"${is_checked} type="checkbox">`
         case ParameterType.select:
         case ParameterType.select_multiple:
             var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
             var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
             return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
         case ParameterType.custom:
             return parameter.render(parameter)
         default:
             console.error(`Invalid type for parameter ${parameter.id}`);
             return "ERROR: Invalid Type"
     }
 }
 
 let parametersTable = document.querySelector("#system-settings .parameters-table")
 /* fill in the system settings popup table */
 function initParameters() {
     PARAMETERS.forEach(parameter => {
         var element = getParameterElement(parameter)
         var note = parameter.note ? `<small>${parameter.note}</small>` : "";
         var icon = parameter.icon ? `<i class="fa ${parameter.icon}"></i>` : "";
         var newrow = document.createElement('div')
         newrow.innerHTML = `
             <div>${icon}</div>
             <div><label for="${parameter.id}">${parameter.label}</label>${note}</div>
             <div>${element}</div>`
         parametersTable.appendChild(newrow)
         parameter.settingsEntry = newrow
     })
 }
 
 initParameters()
@@ -196,11 +212,14 @@ let saveToDiskField = document.querySelector('#save_to_disk')
 let diskPathField = document.querySelector('#diskPath')
 let listenToNetworkField = document.querySelector("#listen_to_network")
 let listenPortField = document.querySelector("#listen_port")
+let testSD2Field = document.querySelector("#test_sd2")
 let useBetaChannelField = document.querySelector("#use_beta_channel")
 let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
+let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
 
 let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
 
+
 async function changeAppConfig(configDelta) {
     try {
         let res = await fetch('/app_config', {

@@ -230,12 +249,18 @@ async function getAppConfig() {
         if (config.ui && config.ui.open_browser_on_start === false) {
             uiOpenBrowserOnStartField.checked = false
         }
-        if (config.net && config.net.listen_to_network === false) {
-            listenToNetworkField.checked = false
+        if ('test_sd2' in config) {
+            testSD2Field.checked = config['test_sd2']
         }
+
+        let testSD2SettingEntry = getParameterSettingsEntry('test_sd2')
+        testSD2SettingEntry.style.display = (config.update_branch === 'beta' ? '' : 'none')
+        if (config.net && config.net.listen_to_network === false) {
+            listenToNetworkField.checked = false
+        }
         if (config.net && config.net.listen_port !== undefined) {
             listenPortField.value = config.net.listen_port
         }
 
         console.log('get config status response', config)
     } catch (e) {

@@ -263,7 +288,6 @@ function getCurrentRenderDeviceSelection() {
 useCPUField.addEventListener('click', function() {
     let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
     let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
-    console.log("hello", this.checked);
     if (this.checked) {
         gpuSettingEntry.style.display = 'none'
         autoPickGPUSettingEntry.style.display = 'none'

@@ -313,14 +337,45 @@ async function getDiskPath() {
     }
 }
 
-async function getDevices() {
+function setDeviceInfo(devices) {
+    let cpu = devices.all.cpu.name
+    let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
+    let activeGPUs = Object.keys(devices.active)
+
+    function ID_TO_TEXT(d) {
+        let info = devices.all[d]
+        if ("mem_free" in info && "mem_total" in info) {
+            return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
+        } else {
+            return `${info.name} <small>(${d}) (no memory info)</small>`
+        }
+    }
+
+    allGPUs = allGPUs.map(ID_TO_TEXT)
+    activeGPUs = activeGPUs.map(ID_TO_TEXT)
+
+    let systemInfoEl = document.querySelector('#system-info')
+    systemInfoEl.querySelector('#system-info-cpu').innerText = cpu
+    systemInfoEl.querySelector('#system-info-gpus-all').innerHTML = allGPUs.join('</br>')
+    systemInfoEl.querySelector('#system-info-rendering-devices').innerHTML = activeGPUs.join('</br>')
+}
+
+function setHostInfo(hosts) {
+    let port = listenPortField.value
+    hosts = hosts.map(addr => `http://${addr}:${port}/`).map(url => `<div><a href="${url}">${url}</a></div>`)
+    document.querySelector('#system-info-server-hosts').innerHTML = hosts.join('')
+}
+
+async function getSystemInfo() {
     try {
-        let res = await fetch('/get/devices')
+        let res = await fetch('/get/system_info')
         if (res.status === 200) {
             res = await res.json()
+            let devices = res['devices']
+            let hosts = res['hosts']
+
-            let allDeviceIds = Object.keys(res['all']).filter(d => d !== 'cpu')
-            let activeDeviceIds = Object.keys(res['active']).filter(d => d !== 'cpu')
+            let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
+            let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')
 
             if (activeDeviceIds.length === 0) {
                 useCPUField.checked = true

@@ -338,11 +393,11 @@ async function getDevices() {
                 useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
             }
 
-            autoPickGPUsField.checked = (res['config'] === 'auto')
+            autoPickGPUsField.checked = (devices['config'] === 'auto')
 
             useGPUsField.innerHTML = ''
             allDeviceIds.forEach(device => {
-                let deviceName = res['all'][device]['name']
+                let deviceName = devices['all'][device]['name']
                 let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
                 useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
             })

@@ -353,6 +408,9 @@ async function getDevices() {
             } else {
                 $('#use_gpus').val(activeDeviceIds)
             }
+
+            setDeviceInfo(devices)
+            setHostInfo(hosts)
         }
     } catch (e) {
         console.log('error fetching devices', e)

@@ -360,22 +418,23 @@ async function getDevices() {
 }
 
 saveSettingsBtn.addEventListener('click', function() {
     let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
 
     if (listenPortField.value == '') {
         alert('The network port field must not be empty.')
     } else if (listenPortField.value<1 || listenPortField.value>65535) {
         alert('The network port must be a number from 1 to 65535')
     } else {
         changeAppConfig({
             'render_devices': getCurrentRenderDeviceSelection(),
             'update_branch': updateBranch,
             'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
             'listen_to_network': listenToNetworkField.checked,
-            'listen_port': listenPortField.value
-        })
+            'listen_port': listenPortField.value,
+            'test_sd2': testSD2Field.checked
+        })
     }
 
     saveSettingsBtn.classList.add('active')
     asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
 })
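Note: getSystemInfo() above expects the renamed /get/system_info endpoint to return the old /get/devices payload nested under 'devices', plus a 'hosts' list of addresses. An approximate shape, inferred only from the fields read in this file (values illustrative, not from the server code):

    // res = {
    //     devices: { all:    { cpu: { name: '...' }, 'cuda:0': { name: '...', mem_free: 7.5, mem_total: 8.0 } },
    //                active: { 'cuda:0': { name: '...' } },
    //                config: 'auto' },
    //     hosts: ['localhost', '192.168.0.10']
    // }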
@@ -24,7 +24,8 @@ const PLUGINS = {
  * }
  * })
  */
-    IMAGE_INFO_BUTTONS: []
+    IMAGE_INFO_BUTTONS: [],
+    MODIFIERS_LOAD: []
 }
 
 async function loadUIPlugins() {
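Note: MODIFIERS_LOAD is the hook that the reworked loadCustomModifiers() (earlier in this diff) iterates over, and that the custom-modifiers plugin below registers into. The registration pattern is simply:

    // sketch — this is the same pattern used by ui/plugins/ui/custom-modifiers.plugin.js below
    PLUGINS['MODIFIERS_LOAD'].push({
        loader: function() {
            // rebuild the 'Custom Modifiers' card group here
        }
    })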
@@ -60,6 +60,7 @@ function themeFieldChanged() {
 
     body.style = "";
     var theme = THEMES.find(t => t.key == theme_key);
+    let borderColor = undefined
     if (theme) {
         // refresh variables incase they are back referencing
         Array.from(DEFAULT_THEME.rule.style)

@@ -67,7 +68,14 @@ function themeFieldChanged() {
             .forEach(cssVariable => {
                 body.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
             });
+        borderColor = theme.rule.style.getPropertyValue('--input-border-color').trim()
+        if (!borderColor.startsWith('#')) {
+            borderColor = theme.rule.style.getPropertyValue('--theme-color-fallback')
+        }
+    } else {
+        borderColor = DEFAULT_THEME.rule.style.getPropertyValue('--theme-color-fallback')
     }
+    document.querySelector('meta[name="theme-color"]').setAttribute("content", borderColor)
 }
 
 themeField.addEventListener('change', themeFieldChanged);
|
@ -1,17 +1,17 @@
|
|||||||
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
||||||
function getNextSibling(elem, selector) {
|
function getNextSibling(elem, selector) {
|
||||||
// Get the next sibling element
|
// Get the next sibling element
|
||||||
var sibling = elem.nextElementSibling
|
var sibling = elem.nextElementSibling
|
||||||
|
|
||||||
// If there's no selector, return the first sibling
|
// If there's no selector, return the first sibling
|
||||||
if (!selector) return sibling
|
if (!selector) return sibling
|
||||||
|
|
||||||
// If the sibling matches our selector, use it
|
// If the sibling matches our selector, use it
|
||||||
// If not, jump to the next sibling and continue the loop
|
// If not, jump to the next sibling and continue the loop
|
||||||
while (sibling) {
|
while (sibling) {
|
||||||
if (sibling.matches(selector)) return sibling
|
if (sibling.matches(selector)) return sibling
|
||||||
sibling = sibling.nextElementSibling
|
sibling = sibling.nextElementSibling
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -347,6 +347,16 @@ function asyncDelay(timeout) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Simple debounce function, placeholder for the one in engine.js for simple use cases */
|
||||||
|
function debounce(func, timeout = 300){
|
||||||
|
let timer;
|
||||||
|
return (...args) => {
|
||||||
|
clearTimeout(timer);
|
||||||
|
timer = setTimeout(() => { func.apply(this, args); }, timeout);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
function preventNonNumericalInput(e) {
|
function preventNonNumericalInput(e) {
|
||||||
e = e || window.event;
|
e = e || window.event;
|
||||||
let charCode = (typeof e.which == "undefined") ? e.keyCode : e.which;
|
let charCode = (typeof e.which == "undefined") ? e.keyCode : e.which;
|
||||||
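Note: the debounce() helper added above is what keeps updateOutputQualitySlider from firing on every keystroke in the JPEG quality field. A general usage sketch (the field name here is hypothetical):

    // the wrapped function runs once, 300 ms after the last call
    const onInput = debounce(() => console.log('settled'), 300)
    someField.addEventListener('input', onInput)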
8  ui/media/manifest.webmanifest (new file)

@@ -0,0 +1,8 @@
+{
+    "name": "Stable Diffusion UI",
+    "display": "standalone",
+    "display_override": [
+        "window-controls-overlay"
+    ],
+    "theme_color": "#000000"
+}
51  ui/plugins/ui/Autoscroll.plugin.js (new file)

@@ -0,0 +1,51 @@
+(function () {
+    "use strict"
+
+    var styleSheet = document.createElement("style");
+    styleSheet.textContent = `
+        .auto-scroll {
+            float: right;
+        }
+    `;
+    document.head.appendChild(styleSheet);
+
+    const autoScrollControl = document.createElement('div');
+    autoScrollControl.innerHTML = `<input id="auto_scroll" name="auto_scroll" type="checkbox">
+            <label for="auto_scroll">Scroll to generated image</label>`
+    autoScrollControl.className = "auto-scroll"
+    clearAllPreviewsBtn.parentNode.insertBefore(autoScrollControl, clearAllPreviewsBtn.nextSibling)
+    prettifyInputs(document);
+    let autoScroll = document.querySelector("#auto_scroll")
+
+    /**
+     * the use of initSettings() in the autoscroll plugin seems to be breaking the models dropdown and the save-to-disk folder field
+     * in the settings tab. They're both blank, because they're being re-initialized. Their earlier values came from the API call,
+     * but those values aren't stored in localStorage, since they aren't user-specified.
+     * So when initSettings() is called a second time, it overwrites the values with an empty string.
+     *
+     * We could either rework how new components can register themselves to be auto-saved, without having to call initSettings() again.
+     * Or we could move the autoscroll code into the main code, and include it in the list of fields in auto-save.js
+     */
+    // SETTINGS_IDS_LIST.push("auto_scroll")
+    // initSettings()
+
+    // observe for changes in the preview pane
+    var observer = new MutationObserver(function (mutations) {
+        mutations.forEach(function (mutation) {
+            if (mutation.target.className == 'img-batch') {
+                Autoscroll(mutation.target)
+            }
+        })
+    })
+
+    observer.observe(document.getElementById('preview'), {
+        childList: true,
+        subtree: true
+    })
+
+    function Autoscroll(target) {
+        if (autoScroll.checked && target !== null) {
+            target.parentElement.parentElement.parentElement.scrollIntoView();
+        }
+    }
+})()
@@ -18,40 +18,42 @@
     let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
     overlays.forEach (i => {
         i.onwheel = (e) => {
+            if (e.ctrlKey == true) {
                 e.preventDefault()
 
                 const delta = Math.sign(event.deltaY)
                 let s = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
                 if (delta < 0) {
                     // wheel scrolling up
                     if (s.substring(0, 1) == '[' && s.substring(s.length-1) == ']') {
                         s = s.substring(1, s.length - 1)
                     }
                     else
                     {
                         if (s.substring(0, 10) !== '('.repeat(10) && s.substring(s.length-10) !== ')'.repeat(10)) {
                             s = '(' + s + ')'
                         }
                     }
                 }
                 else{
                     // wheel scrolling down
                     if (s.substring(0, 1) == '(' && s.substring(s.length-1) == ')') {
                         s = s.substring(1, s.length - 1)
                     }
                     else
                     {
                         if (s.substring(0, 10) !== '['.repeat(10) && s.substring(s.length-10) !== ']'.repeat(10)) {
                             s = '[' + s + ']'
                         }
                     }
                 }
                 i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText = s
                 // update activeTags
                 for (let it = 0; it < overlays.length; it++) {
                     if (i == overlays[it]) {
                         activeTags[it].name = s
                         break
                     }
                 }
+            }
         }
     })
|
ui/plugins/ui/custom-modifiers.plugin.js (new file)
@@ -0,0 +1,31 @@
(function() {
    PLUGINS['MODIFIERS_LOAD'].push({
        loader: function() {
            let customModifiers = localStorage.getItem(CUSTOM_MODIFIERS_KEY, '')
            customModifiersTextBox.value = customModifiers

            if (customModifiersGroupElement !== undefined) {
                customModifiersGroupElement.remove()
            }

            if (customModifiers && customModifiers.trim() !== '') {
                customModifiers = customModifiers.split('\n')
                customModifiers = customModifiers.filter(m => m.trim() !== '')
                customModifiers = customModifiers.map(function(m) {
                    return {
                        "modifier": m
                    }
                })

                let customGroup = {
                    'category': 'Custom Modifiers',
                    'modifiers': customModifiers
                }

                customModifiersGroupElement = createModifierGroup(customGroup, true)

                createCollapsibles(customModifiersGroupElement)
            }
        }
    })
})()

ui/plugins/ui/modifiers-toggle.plugin.js (new file)
@@ -0,0 +1,53 @@
(function () {
    "use strict"

    var styleSheet = document.createElement("style");
    styleSheet.textContent = `
        .modifier-card-tiny.modifier-toggle-inactive {
            background: transparent;
            border: 2px dashed red;
            opacity:0.2;
        }
    `;
    document.head.appendChild(styleSheet);

    // observe for changes in tag list
    var observer = new MutationObserver(function (mutations) {
        // mutations.forEach(function (mutation) {
        if (editorModifierTagsList.childNodes.length > 0) {
            ModifierToggle()
        }
        // })
    })

    observer.observe(editorModifierTagsList, {
        childList: true
    })

    function ModifierToggle() {
        let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
        overlays.forEach (i => {
            i.oncontextmenu = (e) => {
                e.preventDefault()

                if (i.parentElement.classList.contains('modifier-toggle-inactive')) {
                    i.parentElement.classList.remove('modifier-toggle-inactive')
                }
                else
                {
                    i.parentElement.classList.add('modifier-toggle-inactive')
                }
                // refresh activeTags
                let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
                activeTags = activeTags.map(obj => {
                    if (obj.name === modifierName) {
                        return {...obj, inactive: (obj.element.classList.contains('modifier-toggle-inactive'))};
                    }

                    return obj;
                });
                console.log(activeTags)
            }
        })
    }
})()

@@ -25,6 +25,7 @@ class Request:
     use_vae_model: str = None
     show_only_filtered_image: bool = False
     output_format: str = "jpeg" # or "png"
+    output_quality: int = 75
 
     stream_progress_updates: bool = False
     stream_image_progress: bool = False
@@ -47,6 +48,7 @@ class Request:
             "use_stable_diffusion_model": self.use_stable_diffusion_model,
             "use_vae_model": self.use_vae_model,
             "output_format": self.output_format,
+            "output_quality": self.output_quality,
         }
 
     def __str__(self):
@@ -70,6 +72,7 @@ class Request:
    use_vae_model: {self.use_vae_model}
    show_only_filtered_image: {self.show_only_filtered_image}
    output_format: {self.output_format}
+    output_quality: {self.output_quality}
 
    stream_progress_updates: {self.stream_progress_updates}
    stream_image_progress: {self.stream_image_progress}'''

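The new output_quality field only matters for lossy output (the "jpeg" path); PNG ignores it. A minimal sketch of how such a setting typically reaches Pillow's encoder — the helper name and defaults below are illustrative, not the project's exact API:

from io import BytesIO
from PIL import Image

def encode_image(img: Image.Image, output_format: str = "jpeg", output_quality: int = 75) -> bytes:
    """Serialize a PIL image, applying the quality setting only where it applies (JPEG)."""
    buf = BytesIO()
    if output_format.lower() == "jpeg":
        img.save(buf, format="JPEG", quality=output_quality)  # 1-95 is the useful Pillow range
    else:
        img.save(buf, format="PNG")  # PNG is lossless; the quality value is ignored
    return buf.getvalue()
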
ui/sd_internal/ddim_callback.patch (updated)

diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
index 79058bc..a473411 100644
--- a/optimizedSD/ddpm.py
+++ b/optimizedSD/ddpm.py
@@ -564,12 +564,12 @@ class UNet(DDPM):
                                  unconditional_guidance_scale=unconditional_guidance_scale,
                                  callback=callback, img_callback=img_callback)
 
+        yield from samples
+
         if(self.turbo):
             self.model1.to("cpu")
             self.model2.to("cpu")
 
     @torch.no_grad()
     def plms_sampling(self, cond,b, img,
                       ddim_use_original_steps=False,
@@ -608,10 +608,10 @@ class UNet(DDPM):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
 
     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -740,13 +740,13 @@ class UNet(DDPM):
                                  unconditional_guidance_scale=unconditional_guidance_scale,
                                  unconditional_conditioning=unconditional_conditioning)
 
-            if callback: callback(i)
-            if img_callback: img_callback(x_dec, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x_dec, i)
 
         if mask is not None:
-            return x0 * mask + (1. - mask) * x_dec
+            x_dec = x0 * mask + (1. - mask) * x_dec
 
     @torch.no_grad()
@@ -820,12 +820,12 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             dt = sigmas[i + 1] - sigma_hat
             # Euler method
             x = x + d * dt
-        return x
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
     def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, img_callback=None):
@@ -852,14 +852,14 @@ class UNet(DDPM):
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigmas[i], denoised)
             # Euler method
             dt = sigma_down - sigmas[i]
             x = x + d * dt
             x = x + torch.randn_like(x) * sigma_up
-        return x
+        yield from img_callback(x, len(sigmas)-1)
 
@@ -892,8 +892,8 @@ class UNet(DDPM):
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
             d = to_d(x, sigma_hat, denoised)
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             dt = sigmas[i + 1] - sigma_hat
             if sigmas[i + 1] == 0:
                 # Euler method
@@ -913,7 +913,7 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
             d_prime = (d + d_2) / 2
             x = x + d_prime * dt
-        return x
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
@@ -944,8 +944,8 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigma_hat, denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
@@ -966,7 +966,7 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
-        return x
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
@@ -994,8 +994,8 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigmas[i], denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
             sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
@@ -1016,7 +1016,7 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
             x = x + torch.randn_like(x) * sigma_up
-        return x
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
@@ -1042,8 +1042,8 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-            if callback: callback(i)
-            if img_callback: img_callback(x, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigmas[i], denoised)
             ds.append(d)
@@ -1054,4 +1054,4 @@ class UNet(DDPM):
             cur_order = min(i + 1, order)
             coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
             x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
-        return x
+        yield from img_callback(x, len(sigmas)-1)

The earlier version of this patch also rewrote the sampler signatures (adding img_callback parameters and per-sampler "Running ... Sampling" prints), made the samplers import relative, and patched optimizedSD/openaimodelSplit.py (`from splitAttention import SpatialTransformer` → `from .splitAttention import SpatialTransformer`); all of that is removed from the patch in this change.

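These patches all make the same mechanical change: instead of calling callback(i) / img_callback(x, i) for their side effects and returning the final tensor, the sampling loops now `yield from` the callbacks, which turns the sampler into a generator that can stream intermediate images. A minimal, self-contained sketch of that pattern (the names here are illustrative, not the project's API):

from typing import Generator

def img_callback(x: list, step: int) -> Generator[str, None, None]:
    # The callback is itself a generator, so it can emit progress events upstream.
    yield f"step {step}: partial result {x}"

def sample(steps: int) -> Generator[str, None, None]:
    x = [0.0]
    for i in range(steps):
        x = [v + 1.0 for v in x]           # stand-in for one denoising step
        yield from img_callback(x, i)      # stream progress instead of just calling it
    yield from img_callback(x, steps - 1)  # the final image replaces the old `return x`

# The consumer drains the generator and reacts to each event as it arrives.
for event in sample(3):
    print(event)
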
ui/sd_internal/ddim_callback_sd2.patch (new file)
@@ -0,0 +1,84 @@
diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
index 27ead0e..6215939 100644
--- a/ldm/models/diffusion/ddim.py
+++ b/ldm/models/diffusion/ddim.py
@@ -100,7 +100,7 @@ class DDIMSampler(object):
         size = (batch_size, C, H, W)
         print(f'Data shape for DDIM sampling is {size}, eta {eta}')
 
-        samples, intermediates = self.ddim_sampling(conditioning, size,
+        samples = self.ddim_sampling(conditioning, size,
                                                     callback=callback,
                                                     img_callback=img_callback,
                                                     quantize_denoised=quantize_x0,
@@ -117,7 +117,8 @@ class DDIMSampler(object):
                                                     dynamic_threshold=dynamic_threshold,
                                                     ucg_schedule=ucg_schedule
                                                     )
-        return samples, intermediates
+        # return samples, intermediates
+        yield from samples
 
     @torch.no_grad()
     def ddim_sampling(self, cond, shape,
@@ -168,14 +169,15 @@ class DDIMSampler(object):
                                       unconditional_conditioning=unconditional_conditioning,
                                       dynamic_threshold=dynamic_threshold)
             img, pred_x0 = outs
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)
 
             if index % log_every_t == 0 or index == total_steps - 1:
                 intermediates['x_inter'].append(img)
                 intermediates['pred_x0'].append(pred_x0)
 
-        return img, intermediates
+        # return img, intermediates
+        yield from img_callback(pred_x0, len(iterator)-1)
 
     @torch.no_grad()
     def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
index 7002a36..0951f39 100644
--- a/ldm/models/diffusion/plms.py
+++ b/ldm/models/diffusion/plms.py
@@ -96,7 +96,7 @@ class PLMSSampler(object):
         size = (batch_size, C, H, W)
         print(f'Data shape for PLMS sampling is {size}')
 
-        samples, intermediates = self.plms_sampling(conditioning, size,
+        samples = self.plms_sampling(conditioning, size,
                                                     callback=callback,
                                                     img_callback=img_callback,
                                                     quantize_denoised=quantize_x0,
@@ -112,7 +112,8 @@ class PLMSSampler(object):
                                                     unconditional_conditioning=unconditional_conditioning,
                                                     dynamic_threshold=dynamic_threshold,
                                                     )
-        return samples, intermediates
+        #return samples, intermediates
+        yield from samples
 
     @torch.no_grad()
     def plms_sampling(self, cond, shape,
@@ -165,14 +166,15 @@ class PLMSSampler(object):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)
 
             if index % log_every_t == 0 or index == total_steps - 1:
                 intermediates['x_inter'].append(img)
                 intermediates['pred_x0'].append(pred_x0)
 
-        return img, intermediates
+        # return img, intermediates
+        yield from img_callback(pred_x0, len(iterator)-1)
 
     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,

@@ -101,7 +101,7 @@ def device_init(thread_data, device):
 
     # Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
     device_name = thread_data.device_name.lower()
-    thread_data.force_full_precision = ('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)
+    thread_data.force_full_precision = (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)) or ('Quadro T2000' in device_name)
 
     if thread_data.force_full_precision:
         print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', thread_data.device_name)
     # Apply force_full_precision now before models are loaded.
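This change adds the Quadro T2000 to the set of GPUs that are forced to full precision. A small sketch of the same kind of device-name blocklist check; note that the comparison happens on an already-lowercased name, so the marker used here is lowercase (the helper and list below are illustrative, not the project's exact code):

# Decide whether to force full precision from the reported GPU name.
FULL_PRECISION_BLOCKLIST = (' 1660', ' 1650', 'quadro t2000')

def needs_full_precision(device_name: str) -> bool:
    name = device_name.lower()
    is_nvidia = 'nvidia' in name or 'geforce' in name or 'quadro' in name
    return is_nvidia and any(marker in name for marker in FULL_PRECISION_BLOCKLIST)

print(needs_full_precision('NVIDIA GeForce GTX 1660'))  # True
print(needs_full_precision('Quadro T2000'))             # True
print(needs_full_precision('NVIDIA GeForce RTX 3060'))  # False
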
@@ -1,13 +0,0 @@
-diff --git a/environment.yaml b/environment.yaml
-index 7f25da8..306750f 100644
---- a/environment.yaml
-+++ b/environment.yaml
-@@ -23,6 +23,8 @@ dependencies:
-    - torch-fidelity==0.3.0
-    - transformers==4.19.2
-    - torchmetrics==0.6.0
-+    - pywavelets==1.3.0
-+    - pandas==1.4.4
-    - kornia==0.6
-    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
-    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
@@ -7,6 +7,7 @@ Notes:
 import json
 import os, re
 import traceback
+import queue
 import torch
 import numpy as np
 from gc import collect as gc_collect
@@ -21,13 +22,15 @@ from torch import autocast
 from contextlib import nullcontext
 from einops import rearrange, repeat
 from ldm.util import instantiate_from_config
-from optimizedSD.optimUtils import split_weighted_subprompts
 from transformers import logging
 
 from gfpgan import GFPGANer
 from basicsr.archs.rrdbnet_arch import RRDBNet
 from realesrgan import RealESRGANer
 
+from threading import Lock
+from safetensors.torch import load_file
+
 import uuid
 
 logging.set_verbosity_error()
@@ -35,7 +38,7 @@ logging.set_verbosity_error()
 # consts
 config_yaml = "optimizedSD/v1-inference.yaml"
 filename_regex = re.compile('[^a-zA-Z0-9]')
-force_gfpgan_to_cuda0 = True # workaround: gfpgan currently works only on cuda:0
+gfpgan_temp_device_lock = Lock() # workaround: gfpgan currently can only start on one device at a time.
 
 # api stuff
 from sd_internal import device_manager
@@ -69,18 +72,37 @@ def thread_init(device):
     thread_data.device_name = None
     thread_data.unet_bs = 1
     thread_data.precision = 'autocast'
-    thread_data.sampler_plms = None
-    thread_data.sampler_ddim = None
 
     thread_data.turbo = False
     thread_data.force_full_precision = False
     thread_data.reduced_memory = True
 
+    thread_data.test_sd2 = isSD2()
+
     device_manager.device_init(thread_data, device)
 
+# temp hack, will remove soon
+def isSD2():
+    try:
+        SD_UI_DIR = os.getenv('SD_UI_PATH', None)
+        CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
+        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
+        if not os.path.exists(config_json_path):
+            return False
+        with open(config_json_path, 'r', encoding='utf-8') as f:
+            config = json.load(f)
+            return config.get('test_sd2', False)
+    except Exception as e:
+        return False
+
 def load_model_ckpt():
     if not thread_data.ckpt_file: raise ValueError(f'Thread ckpt_file is undefined.')
-    if not os.path.exists(thread_data.ckpt_file + '.ckpt'): raise FileNotFoundError(f'Cannot find {thread_data.ckpt_file}.ckpt')
+
+    if os.path.exists(thread_data.ckpt_file + '.ckpt'):
+        thread_data.ckpt_file += '.ckpt'
+    elif os.path.exists(thread_data.ckpt_file + '.safetensors'):
+        thread_data.ckpt_file += '.safetensors'
+    elif not os.path.exists(thread_data.ckpt_file):
+        raise FileNotFoundError(f'Cannot find {thread_data.ckpt_file}.ckpt or .safetensors')
 
     if not thread_data.precision:
         thread_data.precision = 'full' if thread_data.force_full_precision else 'autocast'
@@ -91,8 +113,15 @@ def load_model_ckpt():
     if thread_data.device == 'cpu':
         thread_data.precision = 'full'
 
-    print('loading', thread_data.ckpt_file + '.ckpt', 'to device', thread_data.device, 'using precision', thread_data.precision)
-    sd = load_model_from_config(thread_data.ckpt_file + '.ckpt')
+    print('loading', thread_data.ckpt_file, 'to device', thread_data.device, 'using precision', thread_data.precision)
+
+    if thread_data.test_sd2:
+        load_model_ckpt_sd2()
+    else:
+        load_model_ckpt_sd1()
+
+def load_model_ckpt_sd1():
+    sd = load_model_from_config(thread_data.ckpt_file)
     li, lo = [], []
     for key, value in sd.items():
         sp = key.split(".")
@@ -179,12 +208,46 @@ def load_model_ckpt():
     thread_data.model_fs_is_half = False
 
     print(f'''loaded model
- model file: {thread_data.ckpt_file}.ckpt
+ model file: {thread_data.ckpt_file}
  model.device: {model.device}
  modelCS.device: {modelCS.cond_stage_model.device}
  modelFS.device: {thread_data.modelFS.device}
  using precision: {thread_data.precision}''')
 
+def load_model_ckpt_sd2():
+    config_file = 'configs/stable-diffusion/v2-inference-v.yaml' if 'sd2_' in thread_data.ckpt_file else "configs/stable-diffusion/v1-inference.yaml"
+    config = OmegaConf.load(config_file)
+    verbose = False
+
+    sd = load_model_from_config(thread_data.ckpt_file)
+
+    thread_data.model = instantiate_from_config(config.model)
+    m, u = thread_data.model.load_state_dict(sd, strict=False)
+    if len(m) > 0 and verbose:
+        print("missing keys:")
+        print(m)
+    if len(u) > 0 and verbose:
+        print("unexpected keys:")
+        print(u)
+
+    thread_data.model.to(thread_data.device)
+    thread_data.model.eval()
+    del sd
+
+    thread_data.model.cond_stage_model.device = torch.device(thread_data.device)
+
+    if thread_data.device != "cpu" and thread_data.precision == "autocast":
+        thread_data.model.half()
+        thread_data.model_is_half = True
+        thread_data.model_fs_is_half = True
+    else:
+        thread_data.model_is_half = False
+        thread_data.model_fs_is_half = False
+
+    print(f'''loaded model
+ model file: {thread_data.ckpt_file}
+ using precision: {thread_data.precision}''')
+
 def unload_filters():
     if thread_data.model_gfpgan is not None:
         if thread_data.device != 'cpu': thread_data.model_gfpgan.gfpgan.to('cpu')
@@ -204,10 +267,11 @@ def unload_models():
     if thread_data.model is not None:
         print('Unloading models...')
         if thread_data.device != 'cpu':
-            thread_data.modelFS.to('cpu')
-            thread_data.modelCS.to('cpu')
-            thread_data.model.model1.to("cpu")
-            thread_data.model.model2.to("cpu")
+            if not thread_data.test_sd2:
+                thread_data.modelFS.to('cpu')
+                thread_data.modelCS.to('cpu')
+                thread_data.model.model1.to("cpu")
+                thread_data.model.model2.to("cpu")
 
         del thread_data.model
         del thread_data.modelCS
@@ -253,12 +317,6 @@ def move_to_cpu(model):
 
 def load_model_gfpgan():
     if thread_data.gfpgan_file is None: raise ValueError(f'Thread gfpgan_file is undefined.')
-
-    # hack for a bug in facexlib: https://github.com/xinntao/facexlib/pull/19/files
-    from facexlib.detection import retinaface
-    retinaface.device = torch.device(thread_data.device)
-    print('forced retinaface.device to', thread_data.device)
-
     model_path = thread_data.gfpgan_file + ".pth"
     thread_data.model_gfpgan = GFPGANer(device=torch.device(thread_data.device), model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
     print('loaded', thread_data.gfpgan_file, 'to', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
@@ -314,15 +372,23 @@ def apply_filters(filter_name, image_data, model_path=None):
     image_data.to(thread_data.device)
 
     if filter_name == 'gfpgan':
-        if model_path is not None and model_path != thread_data.gfpgan_file:
-            thread_data.gfpgan_file = model_path
-            load_model_gfpgan()
-        elif not thread_data.model_gfpgan:
-            load_model_gfpgan()
-        if thread_data.model_gfpgan is None: raise Exception('Model "gfpgan" not loaded.')
-        print('enhance with', thread_data.gfpgan_file, 'on', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
-        _, _, output = thread_data.model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
-        image_data = output[:,:,::-1]
+        # This lock is only ever used here. No need to use timeout for the request. Should never deadlock.
+        with gfpgan_temp_device_lock: # Wait for any other devices to complete before starting.
+            # hack for a bug in facexlib: https://github.com/xinntao/facexlib/pull/19/files
+            from facexlib.detection import retinaface
+            retinaface.device = torch.device(thread_data.device)
+            print('forced retinaface.device to', thread_data.device)
+
+            if model_path is not None and model_path != thread_data.gfpgan_file:
+                thread_data.gfpgan_file = model_path
+                load_model_gfpgan()
+            elif not thread_data.model_gfpgan:
+                load_model_gfpgan()
+            if thread_data.model_gfpgan is None: raise Exception('Model "gfpgan" not loaded.')
+
+            print('enhance with', thread_data.gfpgan_file, 'on', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
+            _, _, output = thread_data.model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
+            image_data = output[:,:,::-1]
 
     if filter_name == 'real_esrgan':
         if model_path is not None and model_path != thread_data.real_esrgan_file:
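The gfpgan filter can only start on one device at a time, so the new code serializes access with a module-level Lock instead of pinning it to cuda:0. A small self-contained sketch of that pattern (generic worker code, not the project's API):

import threading, time

# One shared lock guards the single-device resource, as gfpgan_temp_device_lock does above.
gpu_lock = threading.Lock()

def enhance(worker_id: int) -> None:
    with gpu_lock:  # wait for any other worker to finish before starting
        print(f"worker {worker_id}: running face enhancement")
        time.sleep(0.1)  # stand-in for the actual GPU work
        print(f"worker {worker_id}: done")

threads = [threading.Thread(target=enhance, args=(i,)) for i in range(3)]
for t in threads: t.start()
for t in threads: t.join()
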
@@ -337,45 +403,78 @@ def apply_filters(filter_name, image_data, model_path=None):
 
     return image_data
 
-def mk_img(req: Request):
+def is_model_reload_necessary(req: Request):
+    # custom model support:
+    #  the req.use_stable_diffusion_model needs to be a valid path
+    #  to the ckpt file (without the extension).
+    if os.path.exists(req.use_stable_diffusion_model + '.ckpt'):
+        req.use_stable_diffusion_model += '.ckpt'
+    elif os.path.exists(req.use_stable_diffusion_model + '.safetensors'):
+        req.use_stable_diffusion_model += '.safetensors'
+    elif not os.path.exists(req.use_stable_diffusion_model):
+        raise FileNotFoundError(f'Cannot find {req.use_stable_diffusion_model}.ckpt or .safetensors')
+
+    needs_model_reload = False
+    if not thread_data.model or thread_data.ckpt_file != req.use_stable_diffusion_model or thread_data.vae_file != req.use_vae_model:
+        thread_data.ckpt_file = req.use_stable_diffusion_model
+        thread_data.vae_file = req.use_vae_model
+        needs_model_reload = True
+
+    if thread_data.device != 'cpu':
+        if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
+            (thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
+            thread_data.precision = 'full' if req.use_full_precision else 'autocast'
+            needs_model_reload = True
+
+    return needs_model_reload
+
+def reload_model():
+    unload_models()
+    unload_filters()
+    load_model_ckpt()
+
+def mk_img(req: Request, data_queue: queue.Queue, task_temp_images: list, step_callback):
     try:
-        yield from do_mk_img(req)
+        return do_mk_img(req, data_queue, task_temp_images, step_callback)
     except Exception as e:
         print(traceback.format_exc())
 
-        if thread_data.device != 'cpu':
+        if thread_data.device != 'cpu' and not thread_data.test_sd2:
             thread_data.modelFS.to('cpu')
             thread_data.modelCS.to('cpu')
             thread_data.model.model1.to("cpu")
             thread_data.model.model2.to("cpu")
 
         gc() # Release from memory.
-        yield json.dumps({
+        data_queue.put(json.dumps({
             "status": 'failed',
             "detail": str(e)
-        })
+        }))
+        raise e
 
-def update_temp_img(req, x_samples):
+def update_temp_img(req, x_samples, task_temp_images: list):
     partial_images = []
     for i in range(req.num_outputs):
-        x_sample_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
+        if thread_data.test_sd2:
+            x_sample_ddim = thread_data.model.decode_first_stage(x_samples[i].unsqueeze(0))
+        else:
+            x_sample_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
         x_sample = torch.clamp((x_sample_ddim + 1.0) / 2.0, min=0.0, max=1.0)
         x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
         x_sample = x_sample.astype(np.uint8)
         img = Image.fromarray(x_sample)
-        buf = BytesIO()
-        img.save(buf, format='JPEG')
-        buf.seek(0)
+        buf = img_to_buffer(img, output_format='JPEG')
 
         del img, x_sample, x_sample_ddim
         # don't delete x_samples, it is used in the code that called this callback
 
         thread_data.temp_images[str(req.session_id) + '/' + str(i)] = buf
+        task_temp_images[i] = buf
         partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})
     return partial_images
 
 # Build and return the apropriate generator for do_mk_img
-def get_image_progress_generator(req, extra_props=None):
+def get_image_progress_generator(req, data_queue: queue.Queue, task_temp_images: list, step_callback, extra_props=None):
     if not req.stream_progress_updates:
         def empty_callback(x_samples, i): return x_samples
         return empty_callback
@@ -394,15 +493,17 @@ def get_image_progress_generator(req, extra_props=None):
         progress.update(extra_props)
 
         if req.stream_image_progress and i % 5 == 0:
-            progress['output'] = update_temp_img(req, x_samples)
+            progress['output'] = update_temp_img(req, x_samples, task_temp_images)
 
-        yield json.dumps(progress)
+        data_queue.put(json.dumps(progress))
+
+        step_callback()
 
         if thread_data.stop_processing:
             raise UserInitiatedStop("User requested that we stop processing")
     return img_callback
 
-def do_mk_img(req: Request):
+def do_mk_img(req: Request, data_queue: queue.Queue, task_temp_images: list, step_callback):
     thread_data.stop_processing = False
 
     res = Response()
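With these changes the worker no longer yields JSON blobs directly; it pushes them into a queue.Queue (data_queue) and notifies the task through step_callback, so a separate thread can forward the updates to the HTTP response. A rough sketch of that producer/consumer shape (simplified, not the project's exact task plumbing):

import json, queue, threading

def worker(data_queue: queue.Queue, steps: int) -> None:
    for i in range(steps):
        data_queue.put(json.dumps({"step": i, "total_steps": steps}))  # progress update
    data_queue.put(json.dumps({"status": "succeeded"}))                # final message
    data_queue.put(None)                                               # sentinel: no more data

def stream_responses(data_queue: queue.Queue):
    # In the server this generator would feed a streaming HTTP response.
    while True:
        item = data_queue.get()
        if item is None:
            break
        yield item + "\n"

q = queue.Queue()
threading.Thread(target=worker, args=(q, 3)).start()
for chunk in stream_responses(q):
    print(chunk, end="")
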
||||||
@ -411,29 +512,7 @@ def do_mk_img(req: Request):
|
|||||||
|
|
||||||
thread_data.temp_images.clear()
|
thread_data.temp_images.clear()
|
||||||
|
|
||||||
# custom model support:
|
if thread_data.turbo != req.turbo and not thread_data.test_sd2:
|
||||||
# the req.use_stable_diffusion_model needs to be a valid path
|
|
||||||
# to the ckpt file (without the extension).
|
|
||||||
if not os.path.exists(req.use_stable_diffusion_model + '.ckpt'): raise FileNotFoundError(f'Cannot find {req.use_stable_diffusion_model}.ckpt')
|
|
||||||
|
|
||||||
needs_model_reload = False
|
|
||||||
if not thread_data.model or thread_data.ckpt_file != req.use_stable_diffusion_model or thread_data.vae_file != req.use_vae_model:
|
|
||||||
thread_data.ckpt_file = req.use_stable_diffusion_model
|
|
||||||
thread_data.vae_file = req.use_vae_model
|
|
||||||
needs_model_reload = True
|
|
||||||
|
|
||||||
if thread_data.device != 'cpu':
|
|
||||||
if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
|
|
||||||
(thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
|
|
||||||
thread_data.precision = 'full' if req.use_full_precision else 'autocast'
|
|
||||||
needs_model_reload = True
|
|
||||||
|
|
||||||
if needs_model_reload:
|
|
||||||
unload_models()
|
|
||||||
unload_filters()
|
|
||||||
load_model_ckpt()
|
|
||||||
|
|
||||||
if thread_data.turbo != req.turbo:
|
|
||||||
thread_data.turbo = req.turbo
|
thread_data.turbo = req.turbo
|
||||||
thread_data.model.turbo = req.turbo
|
thread_data.model.turbo = req.turbo
|
||||||
|
|
||||||
@ -478,10 +557,14 @@ def do_mk_img(req: Request):
|
|||||||
if thread_data.device != "cpu" and thread_data.precision == "autocast":
|
if thread_data.device != "cpu" and thread_data.precision == "autocast":
|
||||||
init_image = init_image.half()
|
init_image = init_image.half()
|
||||||
|
|
||||||
thread_data.modelFS.to(thread_data.device)
|
if not thread_data.test_sd2:
|
||||||
|
thread_data.modelFS.to(thread_data.device)
|
||||||
|
|
||||||
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
|
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
|
||||||
init_latent = thread_data.modelFS.get_first_stage_encoding(thread_data.modelFS.encode_first_stage(init_image)) # move to latent space
|
if thread_data.test_sd2:
|
||||||
|
init_latent = thread_data.model.get_first_stage_encoding(thread_data.model.encode_first_stage(init_image)) # move to latent space
|
||||||
|
else:
|
||||||
|
init_latent = thread_data.modelFS.get_first_stage_encoding(thread_data.modelFS.encode_first_stage(init_image)) # move to latent space
|
||||||
|
|
||||||
if req.mask is not None:
|
if req.mask is not None:
|
||||||
mask = load_mask(req.mask, req.width, req.height, init_latent.shape[2], init_latent.shape[3], True).to(thread_data.device)
|
mask = load_mask(req.mask, req.width, req.height, init_latent.shape[2], init_latent.shape[3], True).to(thread_data.device)
|
||||||
@ -493,7 +576,8 @@ def do_mk_img(req: Request):
|
|||||||
|
|
||||||
# Send to CPU and wait until complete.
|
# Send to CPU and wait until complete.
|
||||||
# wait_model_move_to(thread_data.modelFS, 'cpu')
|
# wait_model_move_to(thread_data.modelFS, 'cpu')
|
||||||
move_to_cpu(thread_data.modelFS)
|
if not thread_data.test_sd2:
|
||||||
|
move_to_cpu(thread_data.modelFS)
|
||||||
|
|
||||||
assert 0. <= req.prompt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
|
assert 0. <= req.prompt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
|
||||||
t_enc = int(req.prompt_strength * req.num_inference_steps)
|
t_enc = int(req.prompt_strength * req.num_inference_steps)
|
||||||
@ -509,11 +593,14 @@ def do_mk_img(req: Request):
|
|||||||
for prompts in tqdm(data, desc="data"):
|
for prompts in tqdm(data, desc="data"):
|
||||||
|
|
||||||
with precision_scope("cuda"):
|
with precision_scope("cuda"):
|
||||||
if thread_data.reduced_memory:
|
if thread_data.reduced_memory and not thread_data.test_sd2:
|
||||||
thread_data.modelCS.to(thread_data.device)
|
thread_data.modelCS.to(thread_data.device)
|
||||||
uc = None
|
uc = None
|
||||||
if req.guidance_scale != 1.0:
|
if req.guidance_scale != 1.0:
|
||||||
uc = thread_data.modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
|
if thread_data.test_sd2:
|
||||||
|
uc = thread_data.model.get_learned_conditioning(batch_size * [req.negative_prompt])
|
||||||
|
else:
|
||||||
|
uc = thread_data.modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
|
||||||
if isinstance(prompts, tuple):
|
if isinstance(prompts, tuple):
|
||||||
prompts = list(prompts)
|
prompts = list(prompts)
|
||||||
|
|
||||||
@@ -526,15 +613,21 @@ def do_mk_img(req: Request):
                         weight = weights[i]
                         # if not skip_normalize:
                         weight = weight / totalWeight
-                        c = torch.add(c, thread_data.modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
+                        if thread_data.test_sd2:
+                            c = torch.add(c, thread_data.model.get_learned_conditioning(subprompts[i]), alpha=weight)
+                        else:
+                            c = torch.add(c, thread_data.modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
                 else:
-                    c = thread_data.modelCS.get_learned_conditioning(prompts)
+                    if thread_data.test_sd2:
+                        c = thread_data.model.get_learned_conditioning(prompts)
+                    else:
+                        c = thread_data.modelCS.get_learned_conditioning(prompts)
 
-                if thread_data.reduced_memory:
+                if thread_data.reduced_memory and not thread_data.test_sd2:
                     thread_data.modelFS.to(thread_data.device)
 
                 n_steps = req.num_inference_steps if req.init_image is None else t_enc
-                img_callback = get_image_progress_generator(req, {"total_steps": n_steps})
+                img_callback = get_image_progress_generator(req, data_queue, task_temp_images, step_callback, {"total_steps": n_steps})
 
                 # run the handler
                 try:
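The weighted-subprompt branch above blends each sub-prompt's conditioning into a single tensor with torch.add's alpha scaling, after dividing each weight by the total so the contributions sum to 1. A small self-contained illustration of that accumulation, with toy tensors standing in for the learned conditioning (not the project's code):

    # Illustration of the alpha-weighted accumulation used above, with toy tensors
    # standing in for the learned conditioning of each sub-prompt.
    import torch

    conditionings = [torch.ones(2, 3), torch.full((2, 3), 2.0)]
    weights = [2.0, 1.0]
    total_weight = sum(weights)

    c = torch.zeros_like(conditionings[0])
    for cond, weight in zip(conditionings, weights):
        c = torch.add(c, cond, alpha=weight / total_weight)   # c += (weight/total) * cond

    print(c)   # every element is (2/3)*1 + (1/3)*2 = 4/3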
@@ -542,14 +635,7 @@ def do_mk_img(req: Request):
                     if handler == _txt2img:
                         x_samples = _txt2img(req.width, req.height, req.num_outputs, req.num_inference_steps, req.guidance_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, req.sampler)
                     else:
-                        x_samples = _img2img(init_latent, t_enc, batch_size, req.guidance_scale, c, uc, req.num_inference_steps, opt_ddim_eta, opt_seed, img_callback, mask)
-
-                    if req.stream_progress_updates:
-                        yield from x_samples
-                    if hasattr(thread_data, 'partial_x_samples'):
-                        if thread_data.partial_x_samples is not None:
-                            x_samples = thread_data.partial_x_samples
-                        del thread_data.partial_x_samples
+                        x_samples = _img2img(init_latent, t_enc, batch_size, req.guidance_scale, c, uc, req.num_inference_steps, opt_ddim_eta, opt_seed, img_callback, mask, opt_C, req.height, req.width, opt_f)
                 except UserInitiatedStop:
                     if not hasattr(thread_data, 'partial_x_samples'):
                         continue
@@ -562,7 +648,10 @@ def do_mk_img(req: Request):
                 print("decoding images")
                 img_data = [None] * batch_size
                 for i in range(batch_size):
-                    x_samples_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
+                    if thread_data.test_sd2:
+                        x_samples_ddim = thread_data.model.decode_first_stage(x_samples[i].unsqueeze(0))
+                    else:
+                        x_samples_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
                     x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                     x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
                     x_sample = x_sample.astype(np.uint8)
@@ -586,14 +675,16 @@ def do_mk_img(req: Request):
                     if req.save_to_disk_path is not None:
                         if return_orig_img:
                             img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format)
-                            save_image(img, img_out_path)
+                            save_image(img, img_out_path, req.output_format, req.output_quality)
                         meta_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, 'txt')
                         save_metadata(meta_out_path, req, prompts[0], opt_seed)
 
                     if return_orig_img:
-                        img_str = img_to_base64_str(img, req.output_format)
+                        img_buffer = img_to_buffer(img, req.output_format, req.output_quality)
+                        img_str = buffer_to_base64_str(img_buffer, req.output_format)
                         res_image_orig = ResponseImage(data=img_str, seed=opt_seed)
                         res.images.append(res_image_orig)
+                        task_temp_images[i] = img_buffer
 
                         if req.save_to_disk_path is not None:
                             res_image_orig.path_abs = img_out_path
@@ -609,12 +700,14 @@ def do_mk_img(req: Request):
                             filters_applied.append(req.use_upscale)
                         if (len(filters_applied) > 0):
                             filtered_image = Image.fromarray(img_data[i])
-                            filtered_img_data = img_to_base64_str(filtered_image, req.output_format)
+                            filtered_buffer = img_to_buffer(filtered_image, req.output_format, req.output_quality)
+                            filtered_img_data = buffer_to_base64_str(filtered_buffer, req.output_format)
                             response_image = ResponseImage(data=filtered_img_data, seed=opt_seed)
                             res.images.append(response_image)
+                            task_temp_images[i] = filtered_buffer
                             if req.save_to_disk_path is not None:
                                 filtered_img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format, "_".join(filters_applied))
-                                save_image(filtered_image, filtered_img_out_path)
+                                save_image(filtered_image, filtered_img_out_path, req.output_format, req.output_quality)
                                 response_image.path_abs = filtered_img_out_path
                             del filtered_image
                             # Filter Applied, move to next seed
@@ -622,18 +715,25 @@ def do_mk_img(req: Request):
 
                 # if thread_data.reduced_memory:
                 #     unload_filters()
-                move_to_cpu(thread_data.modelFS)
+                if not thread_data.test_sd2:
+                    move_to_cpu(thread_data.modelFS)
                 del img_data
                 gc()
                 if thread_data.device != 'cpu':
                     print(f'memory_final = {round(torch.cuda.memory_allocated(thread_data.device) / 1e6, 2)}Mb')
 
     print('Task completed')
-    yield json.dumps(res.json())
+    res = res.json()
+    data_queue.put(json.dumps(res))
 
-def save_image(img, img_out_path):
+    return res
+
+def save_image(img, img_out_path, output_format="", output_quality=75):
     try:
-        img.save(img_out_path)
+        if output_format.upper() == "JPEG":
+            img.save(img_out_path, quality=output_quality)
+        else:
+            img.save(img_out_path)
     except:
         print('could not save the file', traceback.format_exc())
 
@@ -664,51 +764,109 @@ def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code,
     # Send to CPU and wait until complete.
     # wait_model_move_to(thread_data.modelCS, 'cpu')
 
-    move_to_cpu(thread_data.modelCS)
+    if not thread_data.test_sd2:
+        move_to_cpu(thread_data.modelCS)
 
-    if sampler_name == 'ddim':
-        thread_data.model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
+    if thread_data.test_sd2 and sampler_name not in ('plms', 'ddim', 'dpm2'):
+        raise Exception('Only plms and ddim samplers are supported right now, in SD 2.0')
 
-    samples_ddim = thread_data.model.sample(
-        S=opt_ddim_steps,
-        conditioning=c,
-        seed=opt_seed,
-        shape=shape,
-        verbose=False,
-        unconditional_guidance_scale=opt_scale,
-        unconditional_conditioning=uc,
-        eta=opt_ddim_eta,
-        x_T=start_code,
-        img_callback=img_callback,
-        mask=mask,
-        sampler = sampler_name,
-    )
-    yield from samples_ddim
 
-def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
+    # samples, _ = sampler.sample(S=opt.steps,
+    #                             conditioning=c,
+    #                             batch_size=opt.n_samples,
+    #                             shape=shape,
+    #                             verbose=False,
+    #                             unconditional_guidance_scale=opt.scale,
+    #                             unconditional_conditioning=uc,
+    #                             eta=opt.ddim_eta,
+    #                             x_T=start_code)
+
+    if thread_data.test_sd2:
+        if sampler_name == 'plms':
+            from ldm.models.diffusion.plms import PLMSSampler
+            sampler = PLMSSampler(thread_data.model)
+        elif sampler_name == 'ddim':
+            from ldm.models.diffusion.ddim import DDIMSampler
+            sampler = DDIMSampler(thread_data.model)
+            sampler.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
+        elif sampler_name == 'dpm2':
+            from ldm.models.diffusion.dpm_solver import DPMSolverSampler
+            sampler = DPMSolverSampler(thread_data.model)
+
+        shape = [opt_C, opt_H // opt_f, opt_W // opt_f]
+
+        samples_ddim, intermediates = sampler.sample(
+            S=opt_ddim_steps,
+            conditioning=c,
+            batch_size=opt_n_samples,
+            seed=opt_seed,
+            shape=shape,
+            verbose=False,
+            unconditional_guidance_scale=opt_scale,
+            unconditional_conditioning=uc,
+            eta=opt_ddim_eta,
+            x_T=start_code,
+            img_callback=img_callback,
+            mask=mask,
+            sampler = sampler_name,
+        )
+    else:
+        if sampler_name == 'ddim':
+            thread_data.model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
+
+        samples_ddim = thread_data.model.sample(
+            S=opt_ddim_steps,
+            conditioning=c,
+            seed=opt_seed,
+            shape=shape,
+            verbose=False,
+            unconditional_guidance_scale=opt_scale,
+            unconditional_conditioning=uc,
+            eta=opt_ddim_eta,
+            x_T=start_code,
+            img_callback=img_callback,
+            mask=mask,
+            sampler = sampler_name,
+        )
+    return samples_ddim
+
+def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask, opt_C=1, opt_H=1, opt_W=1, opt_f=1):
     # encode (scaled latent)
-    z_enc = thread_data.model.stochastic_encode(
-        init_latent,
-        torch.tensor([t_enc] * batch_size).to(thread_data.device),
-        opt_seed,
-        opt_ddim_eta,
-        opt_ddim_steps,
-    )
     x_T = None if mask is None else init_latent
 
-    # decode it
-    samples_ddim = thread_data.model.sample(
-        t_enc,
-        c,
-        z_enc,
-        unconditional_guidance_scale=opt_scale,
-        unconditional_conditioning=uc,
-        img_callback=img_callback,
-        mask=mask,
-        x_T=x_T,
-        sampler = 'ddim'
-    )
-    yield from samples_ddim
+    if thread_data.test_sd2:
+        from ldm.models.diffusion.ddim import DDIMSampler
+        sampler = DDIMSampler(thread_data.model)
+        sampler.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
+
+        z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * batch_size).to(thread_data.device))
+
+        samples_ddim = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt_scale,unconditional_conditioning=uc, img_callback=img_callback)
+    else:
+        z_enc = thread_data.model.stochastic_encode(
+            init_latent,
+            torch.tensor([t_enc] * batch_size).to(thread_data.device),
+            opt_seed,
+            opt_ddim_eta,
+            opt_ddim_steps,
+        )
+
+        # decode it
+        samples_ddim = thread_data.model.sample(
+            t_enc,
+            c,
+            z_enc,
+            unconditional_guidance_scale=opt_scale,
+            unconditional_conditioning=uc,
+            img_callback=img_callback,
+            mask=mask,
+            x_T=x_T,
+            sampler = 'ddim'
+        )
+    return samples_ddim
 
 def gc():
     gc_collect()
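The SD 2.x branch above instantiates a PLMS, DDIM, or DPM-Solver sampler object and computes the latent shape itself from the channel count and downsampling factor. A minimal sketch of that shape arithmetic, assuming the usual Stable Diffusion defaults of opt_C=4 latent channels and opt_f=8 downsampling (illustrative values, not taken from this diff):

    # Illustrative only: the latent shape handed to sampler.sample() in the SD 2.x branch,
    # assuming the common defaults opt_C=4 (latent channels) and opt_f=8 (downsampling factor).
    opt_C, opt_f = 4, 8
    opt_W, opt_H = 512, 512

    shape = [opt_C, opt_H // opt_f, opt_W // opt_f]
    print(shape)  # [4, 64, 64] -> a 512x512 image is denoised in a 4x64x64 latent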
@@ -725,13 +883,21 @@ def chunk(it, size):
 
 def load_model_from_config(ckpt, verbose=False):
     print(f"Loading model from {ckpt}")
-    pl_sd = torch.load(ckpt, map_location="cpu")
+
+    if ckpt.endswith(".safetensors"):
+        print("Loading from safetensors")
+        pl_sd = load_file(ckpt, device="cpu")
+    else:
+        pl_sd = torch.load(ckpt, map_location="cpu")
+
     if "global_step" in pl_sd:
         print(f"Global Step: {pl_sd['global_step']}")
-    sd = pl_sd["state_dict"]
-    return sd
 
-# utils
+    if "state_dict" in pl_sd:
+        return pl_sd["state_dict"]
+    else:
+        return pl_sd
+
 class UserInitiatedStop(Exception):
     pass
 
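The `load_file` used in the new `load_model_from_config` comes from the safetensors package (presumably imported elsewhere in this change as `from safetensors.torch import load_file`). Unlike a pickled `.ckpt`, a `.safetensors` file is a flat tensor dictionary, which is why the function now also handles checkpoints without a `state_dict` wrapper. A hedged sketch of the same branching, with a hypothetical file path:

    # Sketch of the loading logic above; "model.safetensors" is a hypothetical path.
    import torch
    from safetensors.torch import load_file

    def load_state_dict(ckpt: str) -> dict:
        if ckpt.endswith(".safetensors"):
            sd = load_file(ckpt, device="cpu")         # flat {name: tensor} dict
        else:
            sd = torch.load(ckpt, map_location="cpu")  # pickled checkpoint
        return sd["state_dict"] if "state_dict" in sd else sd

    # state_dict = load_state_dict("model.safetensors")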
@@ -775,9 +941,20 @@ def load_mask(mask_str, h0, w0, newH, newW, invert=False):
     return image
 
 # https://stackoverflow.com/a/61114178
-def img_to_base64_str(img, output_format="PNG"):
+def img_to_base64_str(img, output_format="PNG", output_quality=75):
+    buffered = img_to_buffer(img, output_format, quality=output_quality)
+    return buffer_to_base64_str(buffered, output_format)
+
+def img_to_buffer(img, output_format="PNG", output_quality=75):
     buffered = BytesIO()
-    img.save(buffered, format=output_format)
+    if ( output_format.upper() == "JPEG" ):
+        img.save(buffered, format=output_format, quality=output_quality)
+    else:
+        img.save(buffered, format=output_format)
+    buffered.seek(0)
+    return buffered
+
+def buffer_to_base64_str(buffered, output_format="PNG"):
     buffered.seek(0)
     img_byte = buffered.getvalue()
     mime_type = "image/png" if output_format.lower() == "png" else "image/jpeg"
|
|||||||
buffered = base64_str_to_buffer(img_str)
|
buffered = base64_str_to_buffer(img_str)
|
||||||
img = Image.open(buffered)
|
img = Image.open(buffered)
|
||||||
return img
|
return img
|
||||||
|
|
||||||
|
def split_weighted_subprompts(text):
|
||||||
|
"""
|
||||||
|
grabs all text up to the first occurrence of ':'
|
||||||
|
uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
|
||||||
|
if ':' has no value defined, defaults to 1.0
|
||||||
|
repeats until no text remaining
|
||||||
|
"""
|
||||||
|
remaining = len(text)
|
||||||
|
prompts = []
|
||||||
|
weights = []
|
||||||
|
while remaining > 0:
|
||||||
|
if ":" in text:
|
||||||
|
idx = text.index(":") # first occurrence from start
|
||||||
|
# grab up to index as sub-prompt
|
||||||
|
prompt = text[:idx]
|
||||||
|
remaining -= idx
|
||||||
|
# remove from main text
|
||||||
|
text = text[idx+1:]
|
||||||
|
# find value for weight
|
||||||
|
if " " in text:
|
||||||
|
idx = text.index(" ") # first occurence
|
||||||
|
else: # no space, read to end
|
||||||
|
idx = len(text)
|
||||||
|
if idx != 0:
|
||||||
|
try:
|
||||||
|
weight = float(text[:idx])
|
||||||
|
except: # couldn't treat as float
|
||||||
|
print(f"Warning: '{text[:idx]}' is not a value, are you missing a space?")
|
||||||
|
weight = 1.0
|
||||||
|
else: # no value found
|
||||||
|
weight = 1.0
|
||||||
|
# remove from main text
|
||||||
|
remaining -= idx
|
||||||
|
text = text[idx+1:]
|
||||||
|
# append the sub-prompt and its weight
|
||||||
|
prompts.append(prompt)
|
||||||
|
weights.append(weight)
|
||||||
|
else: # no : found
|
||||||
|
if len(text) > 0: # there is still text though
|
||||||
|
# take remainder as weight 1
|
||||||
|
prompts.append(text)
|
||||||
|
weights.append(1.0)
|
||||||
|
remaining = 0
|
||||||
|
return prompts, weights
|
||||||
|
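The prompt-weighting syntax parsed above is `sub-prompt:weight`, with a missing weight defaulting to 1.0 (the weights are later normalized by their sum in do_mk_img). A usage example, derived by tracing the function and assuming it is in scope:

    # Tracing split_weighted_subprompts (defined above) on a weighted prompt:
    prompts, weights = split_weighted_subprompts("castle:2 forest")
    print(prompts)   # ['castle', 'forest']
    print(weights)   # [2.0, 1.0]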
@@ -79,6 +79,7 @@ class ImageRequest(BaseModel):
     use_vae_model: str = None
     show_only_filtered_image: bool = False
     output_format: str = "jpeg" # or "png"
+    output_quality: int = 75
 
     stream_progress_updates: bool = False
     stream_image_progress: bool = False
@@ -95,6 +96,7 @@ class FilterRequest(BaseModel):
     render_device: str = None
     use_full_precision: bool = False
     output_format: str = "jpeg" # or "png"
+    output_quality: int = 75
 
 # Temporary cache to allow to query tasks results for a short time after they are completed.
 class TaskCache():
@@ -283,45 +285,26 @@ def thread_render(device):
         print(f'Session {task.request.session_id} starting task {id(task)} on {runtime.thread_data.device_name}')
         if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
         try:
-            if runtime.thread_data.device == 'cpu' and is_alive() > 1:
-                # CPU is not the only device. Keep track of active time to unload resources later.
-                runtime.thread_data.lastActive = time.time()
-            # Open data generator.
-            res = runtime.mk_img(task.request)
-            if current_model_path == task.request.use_stable_diffusion_model:
-                current_state = ServerStates.Rendering
-            else:
+            if runtime.is_model_reload_necessary(task.request):
                 current_state = ServerStates.LoadingModel
-            # Start reading from generator.
-            dataQueue = None
-            if task.request.stream_progress_updates:
-                dataQueue = task.buffer_queue
-            for result in res:
-                if current_state == ServerStates.LoadingModel:
-                    current_state = ServerStates.Rendering
-                    current_model_path = task.request.use_stable_diffusion_model
-                    current_vae_path = task.request.use_vae_model
+                runtime.reload_model()
+                current_model_path = task.request.use_stable_diffusion_model
+                current_vae_path = task.request.use_vae_model
+
+            def step_callback():
+                global current_state_error
+
                 if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
                     runtime.thread_data.stop_processing = True
                     if isinstance(current_state_error, StopAsyncIteration):
                         task.error = current_state_error
                         current_state_error = None
                         print(f'Session {task.request.session_id} sent cancel signal for task {id(task)}')
-                if dataQueue:
-                    dataQueue.put(result)
-                if isinstance(result, str):
-                    result = json.loads(result)
-                task.response = result
-                if 'output' in result:
-                    for out_obj in result['output']:
-                        if 'path' in out_obj:
-                            img_id = out_obj['path'][out_obj['path'].rindex('/') + 1:]
-                            task.temp_images[int(img_id)] = runtime.thread_data.temp_images[out_obj['path'][11:]]
-                        elif 'data' in out_obj:
-                            buf = runtime.base64_str_to_buffer(out_obj['data'])
-                            task.temp_images[result['output'].index(out_obj)] = buf
-                # Before looping back to the generator, mark cache as still alive.
-                task_cache.keep(task.request.session_id, TASK_TTL)
+
+                task_cache.keep(task.request.session_id, TASK_TTL)
+
+            current_state = ServerStates.Rendering
+            task.response = runtime.mk_img(task.request, task.buffer_queue, task.temp_images, step_callback)
         except Exception as e:
             task.error = e
             print(traceback.format_exc())
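With this change, thread_render no longer iterates over a generator returned by runtime.mk_img; instead mk_img pushes its progress JSON onto task.buffer_queue itself and invokes step_callback between steps, which is where cancellation is checked and the task cache is kept alive. A minimal sketch of that producer-plus-callback pattern in isolation (the names here are illustrative, not the project's API):

    # Minimal sketch of the queue + step-callback pattern used above (illustrative names).
    import json, queue

    def mk_img(buffer_queue: queue.Queue, step_callback, steps: int = 3) -> dict:
        for step in range(steps):
            buffer_queue.put(json.dumps({"step": step, "total_steps": steps}))
            step_callback()                      # caller checks for cancellation here
        return {"status": "succeeded"}

    buffer_queue = queue.Queue()
    response = mk_img(buffer_queue, step_callback=lambda: None)
    while not buffer_queue.empty():
        print(buffer_queue.get())                # streamed progress updates
    print(response)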
@@ -523,6 +506,7 @@ def render(req : ImageRequest):
     r.use_vae_model = req.use_vae_model
     r.show_only_filtered_image = req.show_only_filtered_image
     r.output_format = req.output_format
+    r.output_quality = req.output_quality
 
     r.stream_progress_updates = True # the underlying implementation only supports streaming
     r.stream_image_progress = req.stream_image_progress
ui/server.py (51 changed lines)
@@ -7,6 +7,7 @@ import traceback
 
 import sys
 import os
+import socket
 import picklescan.scanner
 import rich
 
@@ -23,7 +24,7 @@ USER_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'plugins', 'ui'
 CORE_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, 'plugins', 'ui'))
 UI_PLUGINS_SOURCES = ((CORE_UI_PLUGINS_DIR, 'core'), (USER_UI_PLUGINS_DIR, 'user'))
 
-STABLE_DIFFUSION_MODEL_EXTENSIONS = ['.ckpt']
+STABLE_DIFFUSION_MODEL_EXTENSIONS = ['.ckpt', '.safetensors']
 VAE_MODEL_EXTENSIONS = ['.vae.pt', '.ckpt']
 
 OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
@@ -116,6 +117,8 @@ def setConfig(config):
         bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
         config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
 
+        config_bat.append(f"@set test_sd2={'Y' if config.get('test_sd2', False) else 'N'}")
+
         if len(config_bat) > 0:
             with open(config_bat_path, 'w', encoding='utf-8') as f:
                 f.write('\r\n'.join(config_bat))
@@ -133,6 +136,8 @@ def setConfig(config):
         bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
         config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
 
+        config_sh.append(f"export test_sd2=\"{'Y' if config.get('test_sd2', False) else 'N'}\"")
+
         if len(config_sh) > 1:
             with open(config_sh_path, 'w', encoding='utf-8') as f:
                 f.write('\n'.join(config_sh))
@@ -140,12 +145,19 @@ def setConfig(config):
         print(traceback.format_exc())
 
 def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
+    config = getConfig()
+
     model_dirs = [os.path.join(MODELS_DIR, model_dir), SD_DIR]
     if not model_name: # When None try user configured model.
-        config = getConfig()
+        # config = getConfig()
         if 'model' in config and model_type in config['model']:
             model_name = config['model'][model_type]
     if model_name:
+        is_sd2 = config.get('test_sd2', False)
+        if model_name.startswith('sd2_') and not is_sd2: # temp hack, until SD2 is unified with 1.4
+            print('ERROR: Cannot use SD 2.0 models with SD 1.0 code. Using the sd-v1-4 model instead!')
+            model_name = 'sd-v1-4'
+
         # Check models directory
         models_dir_path = os.path.join(MODELS_DIR, model_dir, model_name)
         for model_extension in model_extensions:
|
|||||||
ui_open_browser_on_start: bool = None
|
ui_open_browser_on_start: bool = None
|
||||||
listen_to_network: bool = None
|
listen_to_network: bool = None
|
||||||
listen_port: int = None
|
listen_port: int = None
|
||||||
|
test_sd2: bool = None
|
||||||
|
|
||||||
@app.post('/app_config')
|
@app.post('/app_config')
|
||||||
async def setAppConfig(req : SetAppConfigRequest):
|
async def setAppConfig(req : SetAppConfigRequest):
|
||||||
@@ -208,6 +221,8 @@ async def setAppConfig(req : SetAppConfigRequest):
         if 'net' not in config:
             config['net'] = {}
         config['net']['listen_port'] = int(req.listen_port)
+    if req.test_sd2 is not None:
+        config['test_sd2'] = req.test_sd2
     try:
         setConfig(config)
 
@@ -230,9 +245,9 @@ def is_malicious_model(file_path):
             return False
     except Exception as e:
         print('error while scanning', file_path, 'error:', e)
 
     return False
-
+known_models = {}
 def getModels():
     models = {
         'active': {
@@ -255,9 +270,14 @@ def getModels():
                 if not file.endswith(model_extension):
                     continue
 
-                if is_malicious_model(os.path.join(models_dir, file)):
-                    models['scan-error'] = file
-                    return
+                model_path = os.path.join(models_dir, file)
+                mtime = os.path.getmtime(model_path)
+                mod_time = known_models[model_path] if model_path in known_models else -1
+                if mod_time != mtime:
+                    if is_malicious_model(model_path):
+                        models['scan-error'] = file
+                        return
+                known_models[model_path] = mtime
 
                 model_name = file[:-len(model_extension)]
                 models['options'][model_type].append(model_name)
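getModels now remembers each model file's modification time in known_models and only re-runs the (comparatively slow) picklescan check when the mtime changes. The same idea in isolation, with the scanner left as a caller-supplied function (a sketch, not the project's code):

    # Isolated sketch of the mtime-based scan cache above; the scanner is passed in by the caller.
    import os

    known_models = {}  # model_path -> mtime recorded at the last clean scan

    def scan_if_changed(model_path: str, is_malicious_model) -> bool:
        """Return False if the file is flagged; rescan only when the file changed."""
        mtime = os.path.getmtime(model_path)
        if known_models.get(model_path, -1) != mtime:
            if is_malicious_model(model_path):
                return False              # flagged; cache is not updated
            known_models[model_path] = mtime
        return True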
@@ -286,6 +306,11 @@ def getUIPlugins():
 
     return plugins
 
+def getIPConfig():
+    ips = socket.gethostbyname_ex(socket.gethostname())
+    ips[2].append(ips[0])
+    return ips[2]
+
 @app.get('/get/{key:path}')
 def read_web_data(key:str=None):
     if not key: # /get without parameters, stable-diffusion easter egg.
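getIPConfig builds the host list returned in the new system_info response: socket.gethostbyname_ex returns a (hostname, aliaslist, ipaddrlist) triple, and the code appends the hostname to the IP list so the UI can show every address the server is reachable on. For example (output varies per machine):

    # socket.gethostbyname_ex returns (hostname, aliaslist, ipaddrlist).
    import socket

    hostname, aliases, ips = socket.gethostbyname_ex(socket.gethostname())
    ips.append(hostname)
    print(ips)   # e.g. ['127.0.1.1', '192.168.1.20', 'my-machine']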
@@ -295,11 +320,14 @@ def read_web_data(key:str=None):
         if config is None:
             config = APP_CONFIG_DEFAULTS
         return JSONResponse(config, headers=NOCACHE_HEADERS)
-    elif key == 'devices':
+    elif key == 'system_info':
         config = getConfig()
-        devices = task_manager.get_devices()
-        devices['config'] = config.get('render_devices', "auto")
-        return JSONResponse(devices, headers=NOCACHE_HEADERS)
+        system_info = {
+            'devices': task_manager.get_devices(),
+            'hosts': getIPConfig(),
+        }
+        system_info['devices']['config'] = config.get('render_devices', "auto")
+        return JSONResponse(system_info, headers=NOCACHE_HEADERS)
     elif key == 'models':
         return JSONResponse(getModels(), headers=NOCACHE_HEADERS)
     elif key == 'modifiers': return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)
@@ -435,6 +463,9 @@ class LogSuppressFilter(logging.Filter):
         return True
 logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())
 
+# Check models and prepare cache for UI open
+getModels()
+
 # Start the task_manager
 task_manager.default_model_to_load = resolve_ckpt_to_use()
 task_manager.default_vae_to_load = resolve_vae_to_use()