Compare commits
419 Commits
v2.05
...
installer_
Author | SHA1 | Date | |
---|---|---|---|
7219c55dcd | |||
9aa46f92dc | |||
199fa4a0f5 | |||
c91348dae7 | |||
b47ff071da | |||
0d921eacb6 | |||
e1718c45e1 | |||
1c5352203d | |||
e521b350ca | |||
e9ddef6992 | |||
1d4a835e4a | |||
3cf7a984fd | |||
a6913dfe29 | |||
1f7c7909c2 | |||
0e15c48d04 | |||
a3e5931fd6 | |||
0e3766838f | |||
f17a00092a | |||
724e101edc | |||
0b1968c017 | |||
be3a52d703 | |||
7468aa5a4f | |||
889fd98577 | |||
9de91d3021 | |||
f20014660d | |||
add533d0da | |||
a5f5113e9a | |||
c72e1f0943 | |||
e282b2864f | |||
abcab9bce5 | |||
2174788514 | |||
ecda0d5b05 | |||
55bd8a34d7 | |||
65c667cc37 | |||
582b594789 | |||
19a868b2df | |||
9e07228a90 | |||
85f8141968 | |||
b9646a8a94 | |||
3a7e4390eb | |||
d07279c266 | |||
c10411c506 | |||
4a85296e23 | |||
deac32d843 | |||
f05d114f05 | |||
1cdb456216 | |||
fcd3c39656 | |||
b4c68a8ae5 | |||
34023f66f0 | |||
2696da7337 | |||
4edbd8719c | |||
ed6c59b58a | |||
553525a0fb | |||
38ebb95e63 | |||
17d92d8b79 | |||
4c08c692ea | |||
32bafd8b01 | |||
edb4b9a82e | |||
008b4228d2 | |||
faf455a37c | |||
828a7aabd7 | |||
acbf2a8ab0 | |||
05a4701d98 | |||
6a19b333b7 | |||
864fc84899 | |||
cddd62c284 | |||
30c46c0858 | |||
396e54bad0 | |||
a2b9ee5988 | |||
fac8e8aa8b | |||
a821b309f7 | |||
bd2b627113 | |||
3c3711b933 | |||
926ffefac8 | |||
a10c4f7a34 | |||
939dd0b207 | |||
defabf4324 | |||
b652d0fc65 | |||
e67843638f | |||
ca704e1d51 | |||
04eb356c89 | |||
300159c03b | |||
f083b816a9 | |||
2a46f6b225 | |||
b3e9b266fa | |||
d9fa2c4a62 | |||
547e640b57 | |||
e107037011 | |||
489aae7a46 | |||
1bcb6738bb | |||
d14a13fcaf | |||
5facba4419 | |||
b4282a03ca | |||
688659b815 | |||
37cf9eb587 | |||
65b2da4db5 | |||
2cecb11847 | |||
a39e0a19cd | |||
50d805abbc | |||
b7467b466f | |||
3048a26e6f | |||
0175d7658e | |||
5eee528d6b | |||
4a431ddc7c | |||
ebda485bcb | |||
f9bb55bc5c | |||
5d207f36e6 | |||
6fc9098035 | |||
215c3d82e2 | |||
86d0feed32 | |||
53674b03fc | |||
102e454902 | |||
88d59eb7fa | |||
53ebd583cf | |||
a1914f5079 | |||
5231eb62e1 | |||
661cf440f3 | |||
3822058daf | |||
011f283067 | |||
4f58a485a6 | |||
858a1c7ae0 | |||
0c96510128 | |||
ecf7860847 | |||
27e2699fa1 | |||
906d90c304 | |||
d243bf069e | |||
dde3d5c35b | |||
657129e4a7 | |||
5bbef09f85 | |||
342f5e5e41 | |||
02c0bac71f | |||
9bb091d31e | |||
a0e201a9ef | |||
119d5ba7ff | |||
a35454b1b3 | |||
8cb340be9d | |||
8d21ee23f4 | |||
5e7c376950 | |||
7617d56276 | |||
80e4b33047 | |||
4f6287c163 | |||
84ee1a2d25 | |||
67252e0c6b | |||
4264c2e266 | |||
0efa4ffb23 | |||
ae108bb603 | |||
ca4229c732 | |||
5c827703a1 | |||
a3de0820b3 | |||
83cb473a45 | |||
e7f9db5e56 | |||
af3de448bd | |||
fcb2f1b555 | |||
c1bcf9fa8a | |||
5ddfe7a184 | |||
c675caf3f9 | |||
956b3d89db | |||
b0c15bc430 | |||
b934a6b6e9 | |||
df73be495e | |||
efca13c8c0 | |||
e49b0b9381 | |||
a69cd85ed7 | |||
7b520942dc | |||
7ee00230fd | |||
bc8c7285da | |||
12e6baa925 | |||
f98225cdb6 | |||
310abcd8b9 | |||
f42eaaea86 | |||
bfdb74979f | |||
28e002e248 | |||
7d12dbd4b2 | |||
6f60e71ea4 | |||
16c842366a | |||
97ba151e09 | |||
18f452d968 | |||
bb6db783d8 | |||
49ce302bd4 | |||
95f01007a3 | |||
108e516b80 | |||
1c5097b81b | |||
ef1bbda49c | |||
5fed14cb78 | |||
7e7c110851 | |||
5337153761 | |||
444834a891 | |||
2587727087 | |||
a6456b068d | |||
4ccf26c23f | |||
3927dfa71d | |||
31c324bcc3 | |||
476d6fe85d | |||
ee4d468bce | |||
47fca55b0c | |||
219f310a25 | |||
27071cfa29 | |||
1d88a5b42e | |||
74a9c46f08 | |||
7a540f2a88 | |||
9f48d5e5ff | |||
64cc2567bd | |||
3b47eb3b07 | |||
d13e08e53b | |||
4685461282 | |||
885759abc5 | |||
0c0c8e503e | |||
5605cfe213 | |||
3e3fc54da4 | |||
e59c66ae26 | |||
d74eef8088 | |||
88a240b0f6 | |||
ee21f41b25 | |||
9ec2010ac2 | |||
e928fee26f | |||
79f6723678 | |||
db1fbad0db | |||
812a0a14fc | |||
852875b440 | |||
d1dd1b8a9b | |||
30974482c5 | |||
982696fb3b | |||
cf43dc7b5c | |||
4444525c01 | |||
760cc89449 | |||
ba26f22f53 | |||
e1f37a2f3c | |||
e59287d736 | |||
5cda0c7684 | |||
717a1d8f57 | |||
a955730086 | |||
d5ccee7bbb | |||
97e2a17ce1 | |||
a32a58bd0f | |||
093201ef65 | |||
ef46603f4e | |||
3483f63b72 | |||
8bee8060f9 | |||
db58c9aca9 | |||
75f5ec8575 | |||
246ceebe0e | |||
a294b128c7 | |||
7879bf19eb | |||
80082d9c26 | |||
9fe1709bf7 | |||
fec21f1208 | |||
1d4b34c0dd | |||
d88e0f16ac | |||
8d31c474df | |||
704d545159 | |||
a1e5a2cb67 | |||
3668c87e0d | |||
28c8dc4bcc | |||
a02915dadb | |||
c916f46ac3 | |||
5230fbaf6e | |||
813e65e586 | |||
387605f443 | |||
ff335ecadd | |||
74ccee2aa4 | |||
4976f35979 | |||
9c09a4d393 | |||
905bcd8d1b | |||
6883618825 | |||
8beaf2107e | |||
cd1db214b0 | |||
a6b4d59d94 | |||
ff590d3090 | |||
c16e425980 | |||
622322c878 | |||
7c580e276a | |||
927013cd57 | |||
666bf0ebb4 | |||
c283d3181f | |||
28dfe2140c | |||
75ac3450e6 | |||
f727dd26ed | |||
d21c0bfc18 | |||
65b2c056c6 | |||
53533e71e9 | |||
90a28732af | |||
98b1e50c86 | |||
512ffa9030 | |||
dbd37a0961 | |||
4eaba01de0 | |||
09cdbe6b90 | |||
0d33964a03 | |||
b14523ecfa | |||
38fa083503 | |||
fff050ef14 | |||
1a10c60e4f | |||
eb6d19a4dc | |||
9d92174b1d | |||
0dd38870e0 | |||
964d752e11 | |||
a4305540f0 | |||
788dcbf471 | |||
a715022049 | |||
c9fe2e8a66 | |||
5170f508f7 | |||
72900eaf93 | |||
85b6540c9f | |||
d92fb1ec95 | |||
9051bf6e68 | |||
598de3697d | |||
ff515f9bb0 | |||
10ed23e144 | |||
253e75c747 | |||
6efbe62dca | |||
0eae17075f | |||
c7c47635f7 | |||
8e1445d27a | |||
0fc92942cf | |||
2628a061f7 | |||
78971ac504 | |||
107a0e1b7d | |||
fc6954a541 | |||
2f60afb039 | |||
1a2da16e12 | |||
678e0912ae | |||
8d596c07df | |||
dc9f6013e8 | |||
e5a21fda32 | |||
2a0f920bcd | |||
dbd2f2003d | |||
4922877816 | |||
e914378dd9 | |||
9ec35d2bfe | |||
d96cc79814 | |||
7b92703624 | |||
22089c528a | |||
20ebdc46e9 | |||
3224cd73ed | |||
024c7f6a15 | |||
96d4b52da3 | |||
d1e29b8a9d | |||
0e02714114 | |||
04ad3c0386 | |||
14985bcdcd | |||
a1908de302 | |||
8820f10e01 | |||
e1116938ec | |||
c30678af98 | |||
1d4e06b884 | |||
39d6fcac73 | |||
f44c1d4536 | |||
c4349951da | |||
5a42704ac4 | |||
52e94fe650 | |||
466b9da56c | |||
b280288e83 | |||
7388c13c63 | |||
b4f4ccec99 | |||
e5e3f02440 | |||
874bfa0c54 | |||
92a4f7adb8 | |||
0772417f33 | |||
14f1b6df4b | |||
0d9c8a804d | |||
0dfbfafb82 | |||
051ef564e7 | |||
bafd3612e6 | |||
05f9f7ce9d | |||
88acf49305 | |||
74d9901ec9 | |||
64d1b56497 | |||
0cc540b12a | |||
a1712a654d | |||
fe21889ab0 | |||
e50c84ff28 | |||
44824acf34 | |||
ec3253620e | |||
f902466882 | |||
b5d2a23c64 | |||
0fd4804f95 | |||
729d0ea417 | |||
8f2f644230 | |||
183edf9eaf | |||
46c37403a6 | |||
c57e15bdf1 | |||
bc0e53a59b | |||
2c38b51996 | |||
67e36788b0 | |||
9b8ed32c74 | |||
833063c916 | |||
3b2c8e0a97 | |||
ee90d1f258 | |||
f90f42c25c | |||
d6555cb344 | |||
286f057a14 | |||
00b89c2bc7 | |||
b4a3de4cff | |||
ec49c96219 | |||
1eb420bcda | |||
9ba810ccb6 | |||
7a99241c76 | |||
617066c7e1 | |||
835dd4da9d | |||
15da928655 | |||
7460e6f73b | |||
4104b4d0a3 | |||
a29259a8b6 | |||
8925c6caf9 | |||
7b2a85a118 | |||
f8980aecf0 | |||
a4f44f02ed | |||
3fe76a6bd3 | |||
8c060b468b | |||
6136039682 | |||
f2954eeb3c | |||
f186c41f4d | |||
7d69f4d3ed | |||
4baec1b185 | |||
90c4361363 | |||
10aaa48068 | |||
458b0150ef | |||
44a3f63f99 | |||
96a6b11ab4 | |||
9ef9ce76f8 |
3
.github/FUNDING.yml
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# These are supported funding model platforms
|
||||||
|
|
||||||
|
ko_fi: cmdr2_stablediffusion_ui
|
38
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Create a report to help us improve
|
||||||
|
title: ''
|
||||||
|
labels: bug
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Describe the bug**
|
||||||
|
A clear and concise description of what the bug is.
|
||||||
|
|
||||||
|
**To Reproduce**
|
||||||
|
Steps to reproduce the behavior:
|
||||||
|
1. Go to '...'
|
||||||
|
2. Click on '....'
|
||||||
|
3. Scroll down to '....'
|
||||||
|
4. See error
|
||||||
|
|
||||||
|
**Expected behavior**
|
||||||
|
A clear and concise description of what you expected to happen.
|
||||||
|
|
||||||
|
**Screenshots**
|
||||||
|
If applicable, add screenshots to help explain your problem.
|
||||||
|
|
||||||
|
**Desktop (please complete the following information):**
|
||||||
|
- OS:
|
||||||
|
- Browser:
|
||||||
|
- Version:
|
||||||
|
|
||||||
|
**Smartphone (please complete the following information):**
|
||||||
|
- Device:
|
||||||
|
- OS:
|
||||||
|
- Browser
|
||||||
|
- Version
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context about the problem here.
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: Feature request
|
||||||
|
about: Suggest an idea for this project
|
||||||
|
title: ''
|
||||||
|
labels: enhancement
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Is your feature request related to a problem? Please describe.**
|
||||||
|
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||||
|
|
||||||
|
**Describe the solution you'd like**
|
||||||
|
A clear and concise description of what you want to happen.
|
||||||
|
|
||||||
|
**Describe alternatives you've considered**
|
||||||
|
A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context or screenshots about the feature request here.
|
5
.gitignore
vendored
@ -1,4 +1,3 @@
|
|||||||
__pycache__
|
__pycache__
|
||||||
installer
|
dist
|
||||||
installer.tar
|
.idea/*
|
||||||
dist
|
|
||||||
|
55
CONTRIBUTING.md
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
Hi there, these instructions are meant for the developers of this project.
|
||||||
|
|
||||||
|
If you only want to use the Stable Diffusion UI, you've downloaded the wrong file. In that case, please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation
|
||||||
|
|
||||||
|
Thanks
|
||||||
|
|
||||||
|
# For developers:
|
||||||
|
|
||||||
|
If you would like to contribute to this project, there is a discord for dicussion:
|
||||||
|
[](https://discord.com/invite/u9yhsFmEkB)
|
||||||
|
|
||||||
|
## Development environment for UI (frontend and server) changes
|
||||||
|
This is in-flux, but one way to get a development environment running for editing the UI of this project is:
|
||||||
|
(swap `.sh` or `.bat` in instructions depending on your environment, and be sure to adjust any paths to match where you're working)
|
||||||
|
|
||||||
|
1) `git clone` the repository, e.g. to `/projects/stable-diffusion-ui-repo`
|
||||||
|
2) Download the pre-built end user archive from the link on github, and extract it, e.g. to `/projects/stable-diffusion-ui-archive`
|
||||||
|
3) `cd /projects/stable-diffusion-ui-archive` and run the script to set up and start the project, e.g. `start.sh`
|
||||||
|
4) Check you can view and generate images on `localhost:9000`
|
||||||
|
5) Close the server, and edit `/projects/stable-diffusion-ui-archive/scripts/on_env_start.sh`
|
||||||
|
6) Comment out the lines near the bottom that copies the `files/ui` folder, e.g:
|
||||||
|
|
||||||
|
for `.sh`
|
||||||
|
```
|
||||||
|
# rm -rf ui
|
||||||
|
# cp -Rf sd-ui-files/ui .
|
||||||
|
# cp sd-ui-files/scripts/on_sd_start.sh scripts/
|
||||||
|
# cp sd-ui-files/scripts/start.sh .
|
||||||
|
```
|
||||||
|
for `.bat`
|
||||||
|
```
|
||||||
|
REM @xcopy sd-ui-files\ui ui /s /i /Y
|
||||||
|
REM @copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
|
||||||
|
REM @copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
|
||||||
|
```
|
||||||
|
7) Comment out the line at the top of `/projects/stable-diffusion-ui-archive/scripts/on_sd_start.sh` that copies `on_env_start`. For e.g. `@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y`
|
||||||
|
8) Delete the current `ui` folder at `/projects/stable-diffusion-ui-archive/ui`
|
||||||
|
9) Now make a symlink between the repository clone (where you will be making changes) and this archive (where you will be running stable diffusion):
|
||||||
|
`ln -s /projects/stable-diffusion-ui-repo/ui /projects/stable-diffusion-ui-archive/ui`
|
||||||
|
or for Windows
|
||||||
|
`mklink /D \projects\stable-diffusion-ui-archive\ui \projects\stable-diffusion-ui-repo\ui` (link name first, source repo dir second)
|
||||||
|
9) Run the archive again `start.sh` and ensure you can still use the UI.
|
||||||
|
10) Congrats, now any changes you make in your repo `ui` folder are linked to this running archive of the app and can be previewed in the browser.
|
||||||
|
|
||||||
|
Check the `ui/frontend/build/README.md` for instructions on running and building the React code.
|
||||||
|
|
||||||
|
## Development environment for Installer changes
|
||||||
|
Build the Windows installer using Windows, and the Linux installer using Linux. Don't mix the two, and don't use WSL. An Ubuntu VM is fine for building the Linux installer on a Windows host.
|
||||||
|
|
||||||
|
1. Install Miniconda 3 or Anaconda.
|
||||||
|
2. Install `conda install -c conda-forge -y conda-pack`
|
||||||
|
3. Open the Anaconda Prompt. Do not use WSL if you're building for Windows.
|
||||||
|
4. Run `build.bat` or `./build.sh` depending on whether you're in Windows or Linux.
|
||||||
|
5. Compress the `stable-diffusion-ui` folder created inside the `dist` folder. Make a `zip` for Windows, and `tar.xz` for Linux (smaller files, and Linux users already have tar).
|
||||||
|
6. Make a new GitHub release and upload the Windows and Linux installer builds.
|
15
Developer Console.cmd
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
@echo off
|
||||||
|
|
||||||
|
echo "Opening Stable Diffusion UI - Developer Console.." & echo.
|
||||||
|
|
||||||
|
set SD_BASE_DIR=%cd%
|
||||||
|
set MAMBA_ROOT_PREFIX=%SD_BASE_DIR%\env\mamba
|
||||||
|
set INSTALL_ENV_DIR=%SD_BASE_DIR%\env\installer_env
|
||||||
|
set PROJECT_ENV_DIR=%SD_BASE_DIR%\env\project_env
|
||||||
|
|
||||||
|
call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"
|
||||||
|
|
||||||
|
call micromamba activate "%INSTALL_ENV_DIR%"
|
||||||
|
call micromamba activate "%PROJECT_ENV_DIR%"
|
||||||
|
|
||||||
|
cmd /k
|
@ -1,24 +0,0 @@
|
|||||||
Congrats on downloading Stable Diffusion UI, version 2!
|
|
||||||
|
|
||||||
If you haven't downloaded Stable Diffusion UI yet, please download from https://github.com/cmdr2/stable-diffusion-ui
|
|
||||||
|
|
||||||
After downloading, to install please follow these instructions:
|
|
||||||
|
|
||||||
For Windows:
|
|
||||||
- Please double-click the "Start Stable Diffusion UI.cmd" file inside the "stable-diffusion-ui" folder.
|
|
||||||
|
|
||||||
For Linux:
|
|
||||||
- Please open a terminal, and go to the "stable-diffusion-ui" directory. Then run ./start.sh
|
|
||||||
|
|
||||||
That file will automatically install everything. After that it will start the Stable Diffusion interface in a web browser.
|
|
||||||
|
|
||||||
To start the UI in the future, please run the same command mentioned above.
|
|
||||||
|
|
||||||
|
|
||||||
If you have any problems, please:
|
|
||||||
1. Try the troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
|
|
||||||
2. Or, seek help from the community at https://discord.com/invite/u9yhsFmEkB
|
|
||||||
3. Or, file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
|
|
||||||
|
|
||||||
Thanks
|
|
||||||
cmdr2 (and contributors to the project)
|
|
71
README.md
@ -1,41 +1,65 @@
|
|||||||
# Stable Diffusion UI - v2 (beta)
|
# Stable Diffusion UI v2
|
||||||
### A simple way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer (Win 10/11, Linux). No dependencies or technical knowledge required.
|
### A simple 1-click way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. No dependencies or technical knowledge required.
|
||||||
|
|
||||||
[](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion)
|
<p float="left">
|
||||||
|
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-win.png" width="200" /></a>
|
||||||
|
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-linux.png" width="200" /></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
[](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](Troubleshooting.md)
|
||||||
|
|
||||||
|
️🔥🎉 **New!** Use Custom Weights, Task Queue, Negative Prompt, Live Preview, More Samplers, In-Painting, Face Correction (GFPGAN) and Upscaling (RealESRGAN) have been added!
|
||||||
|
|
||||||
|
This distribution currently uses Stable Diffusion 1.4. Once the model for 1.5 becomes publicly available, the model in this distribution will be updated.
|
||||||
|
|
||||||
# Features in the new v2 Version:
|
# Features in the new v2 Version:
|
||||||
- **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
|
- **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
|
||||||
|
- **Face Correction (GFPGAN) and Upscaling (RealESRGAN)**
|
||||||
|
- **In-Painting**
|
||||||
|
- **Live Preview**: See the image as the AI is drawing it
|
||||||
|
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish
|
||||||
|
- **Custom Weights**: Use your own `.ckpt` file, by placing it inside the `stable-diffusion` folder (rename it to `custom-model.ckpt`)
|
||||||
|
- **Negative Prompt**: Specify aspects of the image to *remove*.
|
||||||
|
- **Lots of Samplers:** ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms
|
||||||
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
||||||
- **New UI**: with cleaner design
|
- **New UI**: with cleaner design
|
||||||
|
- **Waifu Model Support**: Just replace the `stable-diffusion\sd-v1-4.ckpt` file after installation with the Waifu model
|
||||||
- Supports "*Text to Image*" and "*Image to Image*"
|
- Supports "*Text to Image*" and "*Image to Image*"
|
||||||
- **NSFW Setting**: A setting in the UI to control *NSFW content*
|
- **NSFW Setting**: A setting in the UI to control *NSFW content*
|
||||||
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
||||||
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
||||||
|
- **Low Memory Usage**: Creates 512x512 images with less than 4GB of VRAM!
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Live Preview
|
||||||
|

|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
# System Requirements
|
# System Requirements
|
||||||
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
|
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
|
||||||
2. An NVIDIA graphics card, preferably with 6GB or more of VRAM. But if you don't have a compatible graphics card, you can still use it with a "Use CPU" setting. It'll be very slow, but it should still work.
|
2. An NVIDIA graphics card, preferably with 4GB or more of VRAM. But if you don't have a compatible graphics card, you can still use it with a "Use CPU" setting. It'll be very slow, but it should still work.
|
||||||
|
|
||||||
You do not need anything else. You do not need WSL, Docker or Conda. The installer will take care of it.
|
You do not need anything else. You do not need WSL, Docker or Conda. The installer will take care of it.
|
||||||
|
|
||||||
# Installation
|
# Installation
|
||||||
1. Download [for Windows](https://drive.google.com/file/d/1MY5gzsQHV_KREbYs3gw33QL4gGIlQRqj/view?usp=sharing) or [for Linux](https://drive.google.com/file/d/1Gwz1LVQUCart8HhCjrmXkS6TWKbTsLsR/view?usp=sharing) (this will be hosted on GitHub in the future).
|
1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/stable-diffusion-ui-win64.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/stable-diffusion-ui-linux.tar.xz).
|
||||||
|
|
||||||
2. Extract:
|
2. **Extract**:
|
||||||
- For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D: at the top root level). For e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (of file path length limits).
|
- For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
|
||||||
- For Linux: After extracting the .tar.xz file, please open a terminal, and go to the `stable-diffusion-ui` directory.
|
- For Linux: After extracting the .tar.xz file, please open a terminal, and go to the `stable-diffusion-ui` directory.
|
||||||
|
|
||||||
3. Run:
|
3. **Run**:
|
||||||
- For Windows: `Start Stable Diffusion UI.cmd` by double-clicking it.
|
- For Windows: `Start Stable Diffusion UI.cmd` by double-clicking it.
|
||||||
- For Linux: In the terminal, run `./start.sh` (or `bash start.sh`)
|
- For Linux: In the terminal, run `./start.sh` (or `bash start.sh`)
|
||||||
|
|
||||||
This will automatically install Stable Diffusion, set it up, and start the interface. No additional steps are needed.
|
This will automatically install Stable Diffusion, set it up, and start the interface. No additional steps are needed.
|
||||||
|
|
||||||
|
**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.
|
||||||
|
|
||||||
|
|
||||||
# Usage
|
# Usage
|
||||||
Open http://localhost:9000 in your browser (after running step 3 previously).
|
Open http://localhost:9000 in your browser (after running step 3 previously). It may take a few moments for the back-end to be ready.
|
||||||
|
|
||||||
## With a text description
|
## With a text description
|
||||||
1. Enter a text prompt, like `a photograph of an astronaut riding a horse` in the textbox.
|
1. Enter a text prompt, like `a photograph of an astronaut riding a horse` in the textbox.
|
||||||
@ -47,29 +71,30 @@ Open http://localhost:9000 in your browser (after running step 3 previously).
|
|||||||
2. An optional text prompt can help you further describe the kind of image you want to generate.
|
2. An optional text prompt can help you further describe the kind of image you want to generate.
|
||||||
3. Press `Make Image`. See the image generated using your prompt.
|
3. Press `Make Image`. See the image generated using your prompt.
|
||||||
|
|
||||||
|
You can use Face Correction or Upscaling to improve the image further.
|
||||||
|
|
||||||
**Pro tip:** You can also click `Use as Input` on a generated image, to use it as the input image for your next generation. This can be useful for sequentially refining the generated image with a single click.
|
**Pro tip:** You can also click `Use as Input` on a generated image, to use it as the input image for your next generation. This can be useful for sequentially refining the generated image with a single click.
|
||||||
|
|
||||||
**Another tip:** Images with the same aspect ratio of your generated image work best. E.g. 1:1 if you're generating images sized 512x512.
|
**Another tip:** Images with the same aspect ratio of your generated image work best. E.g. 1:1 if you're generating images sized 512x512.
|
||||||
|
|
||||||
## Problems?
|
## Problems? Troubleshooting
|
||||||
Please ask on the new [discord server](https://discord.com/invite/u9yhsFmEkB), or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues) if this did not work for you (after trying the common [troubleshooting](#troubleshooting) steps)!
|
Please try the common [troubleshooting](Troubleshooting.md) steps. If that doesn't fix it, please ask on the [discord server](https://discord.com/invite/u9yhsFmEkB), or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
||||||
|
|
||||||
# Advanced Settings
|
# Image Settings
|
||||||
You can also set the configuration like `seed`, `width`, `height`, `num_outputs`, `num_inference_steps` and `guidance_scale` using the 'show' button next to 'Advanced settings'.
|
You can also set the configuration like `seed`, `width`, `height`, `num_outputs`, `num_inference_steps` and `guidance_scale` using the 'show' button next to 'Image settings'.
|
||||||
|
|
||||||
Use the same `seed` number to get the same image for a certain prompt. This is useful for refining a prompt without losing the basic image design. Enable the `random images` checkbox to get random images.
|
Use the same `seed` number to get the same image for a certain prompt. This is useful for refining a prompt without losing the basic image design. Enable the `random images` checkbox to get random images.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
# Troubleshooting
|
# System Settings
|
||||||
The [Troubleshooting wiki page](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) contains some common errors and their solutions. Please check that, and if it doesn't work, feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
The system settings are reachable via the cogwheel symbol on the top right. It can be used to configure whether all generated images should
|
||||||
|
saved be automically, or to tune the Stable Diffusion image generation.
|
||||||
|
|
||||||
# What is this? Why no Docker?
|

|
||||||
This version is a 1-click installer. You don't need WSL or Docker or anything beyond a working NVIDIA GPU with an updated driver. You don't need to use the command-line at all. Even if you don't have a compatible GPU, you can run it on your CPU (albeit very slowly).
|
|
||||||
|
|
||||||
It'll download the necessary files from the original [Stable Diffusion](https://github.com/CompVis/stable-diffusion) git repository, and set it up. It'll then start the browser-based interface like before.
|
# Image Modifiers
|
||||||
|

|
||||||
The NSFW option is currently off (temporarily), so it'll allow NSFW images, for those people who are unable to run their prompts without hitting the NSFW filter incorrectly.
|
|
||||||
|
|
||||||
# Bugs reports and code contributions welcome
|
# Bugs reports and code contributions welcome
|
||||||
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
||||||
@ -79,4 +104,4 @@ Also, please feel free to submit a pull request, if you have any code contributi
|
|||||||
# Disclaimer
|
# Disclaimer
|
||||||
The authors of this project are not responsible for any content generated using this interface.
|
The authors of this project are not responsible for any content generated using this interface.
|
||||||
|
|
||||||
This license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please read [the license](LICENSE).
|
The license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation, or target vulnerable groups. For the full list of restrictions please read [the license](LICENSE). You agree to these terms by using this software.
|
||||||
|
25
Start Stable Diffusion UI.cmd
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
@echo off
|
||||||
|
|
||||||
|
echo. & echo "Stable Diffusion UI - v2.5" & echo.
|
||||||
|
|
||||||
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
|
set SD_BASE_DIR=%cd%
|
||||||
|
|
||||||
|
@rem Confirm or change the installation dir
|
||||||
|
call installer\bootstrap\check-install-dir.bat
|
||||||
|
|
||||||
|
@rem set the vars again, if the installer dir has changed
|
||||||
|
set SD_BASE_DIR=%cd%
|
||||||
|
|
||||||
|
echo Working in %SD_BASE_DIR%
|
||||||
|
|
||||||
|
@rem Setup the packages required for the installer
|
||||||
|
call installer\bootstrap\bootstrap.bat
|
||||||
|
|
||||||
|
@rem Test the bootstrap
|
||||||
|
call git --version
|
||||||
|
call python --version
|
||||||
|
|
||||||
|
@rem Download the rest of the installer and UI
|
||||||
|
call installer\installer\start.bat
|
75
Troubleshooting.md
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
Common issues and their solutions. If these solutions don't work, please feel free to ask at the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
||||||
|
|
||||||
|
## RuntimeError: CUDA out of memory
|
||||||
|
This can happen if your PC has less than 6GB of VRAM.
|
||||||
|
|
||||||
|
Try disabling the "Turbo mode" setting under "Advanced Settings", since that takes an additional 1 GB of VRAM (to increase the speed).
|
||||||
|
|
||||||
|
Additionally, a common reason for this error is that you're using an initial image larger than 768x768 pixels. Try using a smaller initial image.
|
||||||
|
|
||||||
|
Also try generating smaller sized images.
|
||||||
|
|
||||||
|
## basicsr module not found
|
||||||
|
For Windows: Please download and extract basicsr from [here](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/basicsr-win64.zip), and place the `basicsr` folder inside the `stable-diffusion-ui\stable-diffusion\env\lib\site-packages` folder. Then run the `Start Stable Diffusion UI.cmd` file again.
|
||||||
|
|
||||||
|
For Linux: Please contact on the [discord server](https://discord.com/invite/u9yhsFmEkB).
|
||||||
|
|
||||||
|
## No ldm found, or antlr4 or any other missing module, or ClobberError: This transaction has incompatible packages due to a shared path
|
||||||
|
On Windows, please ensure that you had placed the `stable-diffusion-ui` folder after unzipping to the root of C: or D: (or any drive). For e.g. `C:\stable-diffusion-ui`. **Note:** This has to be done **before** you start the installation process. If you have already installed (and are facing this error), please delete the installed folder, and start fresh by unzipping and placing the folder at the top of your drive.
|
||||||
|
|
||||||
|
This error can also be caused if you already have conda/miniconda/anaconda installed, due to package conflicts. Please open your Anaconda Prompt, and run `conda clean --all` to clean up unused packages.
|
||||||
|
|
||||||
|
If nothing works, this could be due to a corrupted installation. Please try reinstalling this, by deleting the installed folder, and unzipping from the downloaded zip file.
|
||||||
|
|
||||||
|
## Killed uvicorn server:app --app-dir ... --port 9000 --host 0.0.0.0
|
||||||
|
This happens if your PC ran out of RAM. Stable Diffusion requires a lot of RAM — at least 10 GB to work well. You can also try closing all other applications before running Stable Diffusion UI.
|
||||||
|
|
||||||
|
## Green image generated
|
||||||
|
This usually happens if you're running NVIDIA 1650 or 1660 Super. To solve this, please close and run the Stable Diffusion command on your computer. If you're using the older Docker-based solution (v1), please upgrade to v2: https://github.com/cmdr2/stable-diffusion-ui/tree/v2#installation
|
||||||
|
|
||||||
|
If you're still seeing this error, please try enabling "Full Precision" under "Advanced Settings" in the Stable Diffusion UI.
|
||||||
|
|
||||||
|
## './docker-compose.yml' is invalid:
|
||||||
|
> ERROR: The Compose file './docker-compose.yml' is invalid because:
|
||||||
|
> services.stability-ai.deploy.resources.reservations value Additional properties are not allowed ('devices' was unexpected)
|
||||||
|
|
||||||
|
Please ensure you have `docker-compose` version 1.29 or higher. Check `docker-compose --version`, and if required [update it to 1.29](https://docs.docker.com/compose/install/). (Thanks [HVRyan](https://github.com/HVRyan))
|
||||||
|
|
||||||
|
## RuntimeError: Found no NVIDIA driver on your system:
|
||||||
|
If you have an NVIDIA GPU and the latest [NVIDIA driver](http://www.nvidia.com/Download/index.aspx), please ensure that you've installed [nvidia-container-toolkit](https://stackoverflow.com/a/58432877). (Thanks [u/exintrovert420](https://www.reddit.com/user/exintrovert420/))
|
||||||
|
|
||||||
|
## Some other process is already running at port 9000 / port 9000 could not be bound
|
||||||
|
You can override the port used. Please change `docker-compose.yml` inside the project directory, and update the line `9000:9000` to `1337:9000` (where 1337 is whichever port number you want).
|
||||||
|
|
||||||
|
After doing this, please restart your server, by running `./server restart`.
|
||||||
|
|
||||||
|
After this, you can access the server at `http://localhost:1337` (where 1337 is the new port you specified earlier).
|
||||||
|
|
||||||
|
## RuntimeError: CUDA error: unknown error
|
||||||
|
Please ensure that you have an NVIDIA GPU and the latest [NVIDIA driver](http://www.nvidia.com/Download/index.aspx), and that you've installed [nvidia-container-toolkit](https://stackoverflow.com/a/58432877).
|
||||||
|
|
||||||
|
Also, if you are using WSL (Windows), please ensure you have the latest WSL kernel by running `wsl --shutdown` and then `wsl --update`. (Thanks [AndrWeisR](https://github.com/AndrWeisR))
|
||||||
|
|
||||||
|
# For support queries
|
||||||
|
## Entering a conda environment in an existing installation
|
||||||
|
This will give you an activated conda environment in the terminal, so you can run commands and force-install any packages, if required.
|
||||||
|
|
||||||
|
Users don't need to have the Anaconda Prompt installed to do this anymore, since the installer bundles a portable version of conda inside it. Just follow these steps.
|
||||||
|
|
||||||
|
**Windows:**
|
||||||
|
1. Open the terminal: Press Win+R, type "cmd", and press "Run"
|
||||||
|
2. Type `cd C:\stable-diffusion-ui` and press enter (or wherever you've installed it)
|
||||||
|
3. Type `installer\Scripts\activate.bat` and press enter
|
||||||
|
4. Type `cd stable-diffusion` and press enter
|
||||||
|
5. Type `conda activate .\env` and press enter
|
||||||
|
6. Type `python --version` and press enter. You should see 3.8.5.
|
||||||
|
|
||||||
|
**Linux:**
|
||||||
|
1. Open the terminal
|
||||||
|
2. Type `cd /path/to/stable-diffusion-ui` and press enter
|
||||||
|
3. Type `installer/bin/activate` and press enter
|
||||||
|
4. Type `cd stable-diffusion` and press enter
|
||||||
|
5. Type `conda activate ./env` and press enter
|
||||||
|
6. Type `python --version` and press enter. You should see 3.8.5.
|
||||||
|
|
||||||
|
This will give you an activated conda environment. To confirm, type `python --version` and press enter. You should see 3.8.5.
|
39
build.bat
@ -1,39 +0,0 @@
|
|||||||
@rem Builds the distributable Windows package into dist\stable-diffusion-ui.
@mkdir dist\stable-diffusion-ui

@echo "Downloading components for the installer.."

@rem create and activate the conda env that will be shipped as 'installer'
@call conda env create --prefix installer -f environment.yaml
@call conda activate .\installer

@echo "Setting up startup scripts.."

@rem scripts in etc\conda\activate.d run automatically on env activation
@mkdir installer\etc\conda\activate.d
@copy scripts\post_activate.bat installer\etc\conda\activate.d\

@echo "Creating a distributable package.."

@rem conda-pack archives the env so it can run without conda installed
@call conda install -c conda-forge -y conda-pack
@call conda pack --n-threads -1 --prefix installer --format tar

@cd dist\stable-diffusion-ui
@mkdir installer

@call tar -xf ..\..\installer.tar -C installer

@mkdir scripts

@rem user-facing scripts and license files shipped at the top of the zip
@copy ..\..\scripts\on_env_start.bat scripts\
@copy "..\..\scripts\Start Stable Diffusion UI.cmd" .
@copy ..\..\LICENSE .
@copy "..\..\CreativeML Open RAIL-M License" .
@copy "..\..\How to install and run.txt" .

@echo "Build ready. Zip the 'dist\stable-diffusion-ui' folder."

@echo "Cleaning up.."

@cd ..\..

@rmdir /s /q installer

@del installer.tar
|
|
39
build.sh
@ -1,39 +0,0 @@
|
|||||||
#!/bin/bash

# Builds the distributable Linux package into dist/stable-diffusion-ui.

mkdir -p dist/stable-diffusion-ui

echo "Downloading components for the installer.."

source ~/miniconda3/etc/profile.d/conda.sh

# conda-pack archives an env so it can run without conda installed
conda install -c conda-forge -y conda-pack

# create and activate the env that will be shipped as 'installer'
conda env create --prefix installer -f environment.yaml
conda activate ./installer

echo "Creating a distributable package.."

conda pack --n-threads -1 --prefix installer --format tar

cd dist/stable-diffusion-ui
mkdir installer

tar -xf ../../installer.tar -C installer

mkdir scripts

# user-facing scripts and license files shipped at the top of the archive
cp ../../scripts/on_env_start.sh scripts/
cp "../../scripts/start.sh" .
cp ../../LICENSE .
cp "../../CreativeML Open RAIL-M License" .
cp "../../How to install and run.txt" .

echo "Build ready. Zip the 'dist/stable-diffusion-ui' folder."

echo "Cleaning up.."

cd ../..

rm -rf installer

rm installer.tar
|
|
18
developer_console.sh
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash

# Opens an interactive bash shell with the project's micromamba
# environments activated, so packages can be inspected or force-installed.
#
# Trick: when run normally, $0 is the script path, so the else-branch
# re-launches bash with this file as its init file. Inside that shell
# $0 is "bash", so the activation branch runs.

if [ "$0" == "bash" ]; then
    echo "Opening Stable Diffusion UI - Developer Console.."
    echo ""

    # Base paths for the bundled micromamba and its two environments.
    # Quoted $(pwd) so paths containing spaces keep working.
    export SD_BASE_DIR="$(pwd)"
    export MAMBA_ROOT_PREFIX="$SD_BASE_DIR/env/mamba"
    export INSTALL_ENV_DIR="$SD_BASE_DIR/env/installer_env"
    export PROJECT_ENV_DIR="$SD_BASE_DIR/env/project_env"

    # Register micromamba's shell integration, then stack both envs
    # (project env activated last so it takes precedence).
    eval "$("$MAMBA_ROOT_PREFIX/micromamba" shell hook -s posix)"

    micromamba activate "$INSTALL_ENV_DIR"
    micromamba activate "$PROJECT_ENV_DIR"
else
    bash --init-file developer_console.sh
fi
|
101
engine/__init__.py
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
import json
|
||||||
|
|
||||||
|
class Request:
    """A single image-generation request, with server-side defaults.

    Class attributes double as default values; the web layer assigns
    instance attributes to override them per request.
    """
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    precision: str = "autocast" # or "full"
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    show_only_filtered_image: bool = False

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

    def json(self):
        """Client-facing view of the request.

        Intentionally omits the bulky base64 fields (init_image, mask)
        and server-side knobs (precision, turbo, ...) so the echoed
        request stays small.
        """
        return {
            "session_id": self.session_id,
            "prompt": self.prompt,
            "negative_prompt": self.negative_prompt,
            "num_outputs": self.num_outputs,
            "num_inference_steps": self.num_inference_steps,
            "guidance_scale": self.guidance_scale,
            "width": self.width,
            "height": self.height,
            "seed": self.seed,
            "prompt_strength": self.prompt_strength,
            "sampler": self.sampler,
            "use_face_correction": self.use_face_correction,
            "use_upscale": self.use_upscale,
        }

    def to_string(self):
        """Multi-line, human-readable dump of the request for server logs."""
        return f'''
    session_id: {self.session_id}
    prompt: {self.prompt}
    negative_prompt: {self.negative_prompt}
    seed: {self.seed}
    num_inference_steps: {self.num_inference_steps}
    sampler: {self.sampler}
    guidance_scale: {self.guidance_scale}
    w: {self.width}
    h: {self.height}
    precision: {self.precision}
    save_to_disk_path: {self.save_to_disk_path}
    turbo: {self.turbo}
    use_cpu: {self.use_cpu}
    use_full_precision: {self.use_full_precision}
    use_face_correction: {self.use_face_correction}
    use_upscale: {self.use_upscale}
    show_only_filtered_image: {self.show_only_filtered_image}

    stream_progress_updates: {self.stream_progress_updates}
    stream_image_progress: {self.stream_image_progress}'''
|
||||||
|
|
||||||
|
class Image:
    """One generated image inside a Response.

    Attributes:
        data: base64-encoded image bytes.
        seed: the seed that produced this image.
        is_nsfw: declared but never assigned in this file — presumably
            reserved for a future NSFW filter; confirm before relying on it.
        path_abs: absolute path on disk, set only when saving to disk
            was requested.
    """
    data: str # base64
    seed: int
    is_nsfw: bool
    path_abs: str = None

    def __init__(self, data, seed):
        self.data = data
        self.seed = seed

    def json(self):
        """Dict form used in the "output" list of Response.json()."""
        return {
            "data": self.data,
            "seed": self.seed,
            "path_abs": self.path_abs,
        }
|
||||||
|
|
||||||
|
class Response:
    """Final result of a request: the request echo plus generated images.

    The string annotation avoids evaluating the Request name at class
    creation time, so this class is importable on its own.
    """
    request: "Request"
    images: list

    def json(self):
        """Serialize to the client payload: status, request echo, outputs."""
        res = {
            "status": 'succeeded',
            "request": self.request.json(),
            "output": [],
        }

        for image in self.images:
            res["output"].append(image.json())

        return res
|
658
engine/runtime.py
Normal file
@ -0,0 +1,658 @@
|
|||||||
|
import json
|
||||||
|
import os, re
|
||||||
|
import traceback
|
||||||
|
import torch
|
||||||
|
import numpy as np
|
||||||
|
from omegaconf import OmegaConf
|
||||||
|
from PIL import Image, ImageOps
|
||||||
|
from tqdm import tqdm, trange
|
||||||
|
from itertools import islice
|
||||||
|
from einops import rearrange
|
||||||
|
import time
|
||||||
|
from pytorch_lightning import seed_everything
|
||||||
|
from torch import autocast
|
||||||
|
from contextlib import nullcontext
|
||||||
|
from einops import rearrange, repeat
|
||||||
|
from ldm.util import instantiate_from_config
|
||||||
|
from optimizedSD.optimUtils import split_weighted_subprompts
|
||||||
|
from transformers import logging
|
||||||
|
|
||||||
|
from gfpgan import GFPGANer
|
||||||
|
from basicsr.archs.rrdbnet_arch import RRDBNet
|
||||||
|
from realesrgan import RealESRGANer
|
||||||
|
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
logging.set_verbosity_error()
|
||||||
|
|
||||||
|
# consts
|
||||||
|
config_yaml = "optimizedSD/v1-inference.yaml"
|
||||||
|
filename_regex = re.compile('[^a-zA-Z0-9]')
|
||||||
|
|
||||||
|
# api stuff
|
||||||
|
from . import Request, Response, Image as ResponseImage
|
||||||
|
import base64
|
||||||
|
from io import BytesIO
|
||||||
|
#from colorama import Fore
|
||||||
|
|
||||||
|
# local
|
||||||
|
stop_processing = False
|
||||||
|
temp_images = {}
|
||||||
|
|
||||||
|
ckpt_file = None
|
||||||
|
gfpgan_file = None
|
||||||
|
real_esrgan_file = None
|
||||||
|
|
||||||
|
model = None
|
||||||
|
modelCS = None
|
||||||
|
modelFS = None
|
||||||
|
model_gfpgan = None
|
||||||
|
model_real_esrgan = None
|
||||||
|
|
||||||
|
model_is_half = False
|
||||||
|
model_fs_is_half = False
|
||||||
|
device = None
|
||||||
|
unet_bs = 1
|
||||||
|
precision = 'autocast'
|
||||||
|
sampler_plms = None
|
||||||
|
sampler_ddim = None
|
||||||
|
|
||||||
|
has_valid_gpu = False
|
||||||
|
force_full_precision = False
|
||||||
|
try:
|
||||||
|
gpu = torch.cuda.current_device()
|
||||||
|
gpu_name = torch.cuda.get_device_name(gpu)
|
||||||
|
print('GPU detected: ', gpu_name)
|
||||||
|
|
||||||
|
force_full_precision = ('nvidia' in gpu_name.lower() or 'geforce' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images
|
||||||
|
if force_full_precision:
|
||||||
|
print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', gpu_name)
|
||||||
|
|
||||||
|
mem_free, mem_total = torch.cuda.mem_get_info(gpu)
|
||||||
|
mem_total /= float(10**9)
|
||||||
|
if mem_total < 3.0:
|
||||||
|
print("GPUs with less than 3 GB of VRAM are not compatible with Stable Diffusion")
|
||||||
|
raise Exception()
|
||||||
|
|
||||||
|
has_valid_gpu = True
|
||||||
|
except:
|
||||||
|
print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!')
|
||||||
|
pass
|
||||||
|
|
||||||
|
def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast', half_model_fs=False):
    '''
    Loads the Stable Diffusion checkpoint and splits it into the three
    optimizedSD sub-models: UNet (model), conditioning stage (modelCS)
    and first/latent stage (modelFS). Updates the module-level globals.

    ckpt_to_use: checkpoint path WITHOUT the '.ckpt' extension.
    half_model_fs: also convert modelFS to fp16 (used for img2img).
    '''
    global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half

    ckpt_file = ckpt_to_use
    # fall back to CPU when no usable GPU was detected at import time
    device = device_to_use if has_valid_gpu else 'cpu'
    # 16xx-series NVIDIA cards are forced to full precision (green-image workaround)
    precision = precision_to_use if not force_full_precision else 'full'
    unet_bs = unet_bs_to_use

    if device == 'cpu':
        precision = 'full'

    sd = load_model_from_config(f"{ckpt_file}.ckpt")
    # Re-key the state dict for optimizedSD's split UNet: input_blocks,
    # middle_block and time_embed weights go under 'model1.*', the rest
    # of the 'model.*' keys under 'model2.*'.
    li, lo = [], []
    for key, value in sd.items():
        sp = key.split(".")
        if (sp[0]) == "model":
            if "input_blocks" in sp:
                li.append(key)
            elif "middle_block" in sp:
                li.append(key)
            elif "time_embed" in sp:
                li.append(key)
            else:
                lo.append(key)
    for key in li:
        sd["model1." + key[6:]] = sd.pop(key)
    for key in lo:
        sd["model2." + key[6:]] = sd.pop(key)

    config = OmegaConf.load(f"{config_yaml}")

    # UNet
    model = instantiate_from_config(config.modelUNet)
    _, _ = model.load_state_dict(sd, strict=False)
    model.eval()
    model.cdevice = device
    model.unet_bs = unet_bs
    model.turbo = turbo

    # text-conditioning stage
    modelCS = instantiate_from_config(config.modelCondStage)
    _, _ = modelCS.load_state_dict(sd, strict=False)
    modelCS.eval()
    modelCS.cond_stage_model.device = device

    # first (latent) stage
    modelFS = instantiate_from_config(config.modelFirstStage)
    _, _ = modelFS.load_state_dict(sd, strict=False)
    modelFS.eval()
    del sd

    if device != "cpu" and precision == "autocast":
        model.half()
        modelCS.half()
        model_is_half = True
    else:
        model_is_half = False

    if half_model_fs:
        modelFS.half()
        model_fs_is_half = True
    else:
        model_fs_is_half = False

    print('loaded ', ckpt_file, 'to', device, 'precision', precision)
|
||||||
|
|
||||||
|
def load_model_gfpgan(gfpgan_to_use):
    '''
    Loads the requested GFPGAN face-correction model into the
    module-level model_gfpgan global. Passing None is a no-op
    (the currently loaded model, if any, is kept).

    gfpgan_to_use: weights path WITHOUT the '.pth' extension.
    '''
    global gfpgan_file, model_gfpgan

    if gfpgan_to_use is None:
        return

    gfpgan_file = gfpgan_to_use
    model_path = gfpgan_to_use + ".pth"

    # Previously two identical constructor calls differed only in the
    # device argument; collapsed into one.
    gfpgan_device = torch.device('cpu' if device == 'cpu' else 'cuda')
    model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=gfpgan_device)

    print('loaded ', gfpgan_to_use, 'to', device, 'precision', precision)
|
||||||
|
|
||||||
|
def load_model_real_esrgan(real_esrgan_to_use):
    '''
    Loads the requested Real-ESRGAN upscaler into the module-level
    model_real_esrgan global. Passing None is a no-op (keeps the
    currently loaded model, if any).

    real_esrgan_to_use: weights path WITHOUT the '.pth' extension;
    must be one of the keys of RealESRGAN_models below.
    '''
    global real_esrgan_file, model_real_esrgan

    if real_esrgan_to_use is None:
        return

    real_esrgan_file = real_esrgan_to_use
    model_path = real_esrgan_to_use + ".pth"

    # network architecture must match the downloaded weights
    RealESRGAN_models = {
        'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4),
        'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
    }

    model_to_use = RealESRGAN_models[real_esrgan_to_use]

    if device == 'cpu':
        model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half
        model_real_esrgan.device = torch.device('cpu')
        model_real_esrgan.model.to('cpu')
    else:
        # use fp16 only when the main model is also fp16
        model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=model_is_half)

    model_real_esrgan.model.name = real_esrgan_to_use

    print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision)
|
||||||
|
|
||||||
|
def mk_img(req: Request):
    '''
    Generator wrapper around do_mk_img. On any error it frees GPU
    memory, moves the models back to the CPU, and yields a 'failed'
    JSON status instead of propagating the exception to the server.
    '''
    try:
        yield from do_mk_img(req)
    except Exception as e:
        print(traceback.format_exc())

        gc()

        if device != "cpu":
            # release VRAM so the next request can start from a clean state
            modelFS.to("cpu")
            modelCS.to("cpu")

            model.model1.to("cpu")
            model.model2.to("cpu")

        gc()

        yield json.dumps({
            "status": 'failed',
            "detail": str(e)
        })
|
||||||
|
|
||||||
|
def do_mk_img(req: Request):
    '''
    Core generation pipeline, written as a generator.

    Yields JSON strings: optional progress updates (when
    req.stream_progress_updates is set) followed by one final Response
    payload. Reloads the checkpoint/filter models when the requested
    device or precision differs from what is currently loaded.

    NOTE(review): statement order follows the source; nesting was
    inferred where the listing's indentation was ambiguous — verify
    against the original file.
    '''
    global model, modelCS, modelFS, device
    global model_gfpgan, model_real_esrgan
    global stop_processing

    stop_processing = False

    res = Response()
    res.request = req
    res.images = []

    temp_images.clear()

    model.turbo = req.turbo
    if req.use_cpu:
        if device != 'cpu':
            device = 'cpu'

            if model_is_half:
                # fp16 weights can't run on CPU; reload in full precision
                del model, modelCS, modelFS
                load_model_ckpt(ckpt_file, device)

            load_model_gfpgan(gfpgan_file)
            load_model_real_esrgan(real_esrgan_file)
    else:
        if has_valid_gpu:
            prev_device = device
            device = 'cuda'

            # reload when the loaded precision (or modelFS's fp16-ness,
            # which depends on whether this is an img2img request)
            # doesn't match what this request needs
            if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
                (precision == 'full' and not req.use_full_precision and not force_full_precision) or \
                (req.init_image is None and model_fs_is_half) or \
                (req.init_image is not None and not model_fs_is_half and not force_full_precision):

                del model, modelCS, modelFS
                load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))

            if prev_device != device:
                load_model_gfpgan(gfpgan_file)
                load_model_real_esrgan(real_esrgan_file)

    # swap filter models if the request asks for different ones
    if req.use_face_correction != gfpgan_file:
        load_model_gfpgan(req.use_face_correction)

    if req.use_upscale != real_esrgan_file:
        load_model_real_esrgan(req.use_upscale)

    model.cdevice = device
    modelCS.cond_stage_model.device = device

    opt_prompt = req.prompt
    opt_seed = req.seed
    opt_n_samples = req.num_outputs
    opt_n_iter = 1
    opt_scale = req.guidance_scale
    opt_C = 4
    opt_H = req.height
    opt_W = req.width
    opt_f = 8
    opt_ddim_steps = req.num_inference_steps
    opt_ddim_eta = 0.0
    opt_strength = req.prompt_strength
    opt_save_to_disk_path = req.save_to_disk_path
    opt_init_img = req.init_image
    opt_use_face_correction = req.use_face_correction
    opt_use_upscale = req.use_upscale
    opt_show_only_filtered = req.show_only_filtered_image
    opt_format = 'png'
    opt_sampler_name = req.sampler

    print(req.to_string(), '\n device', device)

    print('\n\n Using precision:', precision)

    seed_everything(opt_seed)

    batch_size = opt_n_samples
    prompt = opt_prompt
    assert prompt is not None
    data = [batch_size * [prompt]]

    if precision == "autocast" and device != "cpu":
        precision_scope = autocast
    else:
        precision_scope = nullcontext

    mask = None

    if req.init_image is None:
        handler = _txt2img

        init_latent = None
        t_enc = None
    else:
        handler = _img2img

        init_image = load_img(req.init_image, opt_W, opt_H)
        init_image = init_image.to(device)

        if device != "cpu" and precision == "autocast":
            init_image = init_image.half()

        modelFS.to(device)

        init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
        init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image))  # move to latent space

        if req.mask is not None:
            mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device)
            mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
            mask = repeat(mask, '1 ... -> b ...', b=batch_size)

            if device != "cpu" and precision == "autocast":
                mask = mask.half()

        move_fs_to_cpu()

        assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_enc = int(opt_strength * opt_ddim_steps)
        print(f"target t_enc is {t_enc} steps")

    if opt_save_to_disk_path is not None:
        session_out_path = os.path.join(opt_save_to_disk_path, req.session_id)
        os.makedirs(session_out_path, exist_ok=True)
    else:
        session_out_path = None

    seeds = ""
    with torch.no_grad():
        for n in trange(opt_n_iter, desc="Sampling"):
            for prompts in tqdm(data, desc="data"):

                with precision_scope("cuda"):
                    modelCS.to(device)
                    uc = None
                    if opt_scale != 1.0:
                        uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
                    if isinstance(prompts, tuple):
                        prompts = list(prompts)

                    subprompts, weights = split_weighted_subprompts(prompts[0])
                    if len(subprompts) > 1:
                        c = torch.zeros_like(uc)
                        totalWeight = sum(weights)
                        # normalize each "sub prompt" and add it
                        for i in range(len(subprompts)):
                            weight = weights[i]
                            # if not skip_normalize:
                            weight = weight / totalWeight
                            c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
                    else:
                        c = modelCS.get_learned_conditioning(prompts)

                    modelFS.to(device)

                    partial_x_samples = None
                    # NOTE(review): img_callback contains a yield, so it is a
                    # generator function — the sampler is expected to iterate
                    # it ('yield from') for progress to be emitted; confirm
                    # against optimizedSD's sample() implementation.
                    def img_callback(x_samples, i):
                        nonlocal partial_x_samples

                        partial_x_samples = x_samples

                        if req.stream_progress_updates:
                            n_steps = opt_ddim_steps if req.init_image is None else t_enc
                            progress = {"step": i, "total_steps": n_steps}

                            if req.stream_image_progress and i % 5 == 0:
                                partial_images = []

                                for i in range(batch_size):
                                    x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
                                    x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                                    x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
                                    x_sample = x_sample.astype(np.uint8)
                                    img = Image.fromarray(x_sample)
                                    buf = BytesIO()
                                    img.save(buf, format='JPEG')
                                    buf.seek(0)

                                    del img, x_sample, x_samples_ddim
                                    # don't delete x_samples, it is used in the code that called this callback

                                    temp_images[str(req.session_id) + '/' + str(i)] = buf
                                    partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})

                                progress['output'] = partial_images

                            yield json.dumps(progress)

                        if stop_processing:
                            raise UserInitiatedStop("User requested that we stop processing")

                    # run the handler
                    try:
                        if handler == _txt2img:
                            x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name)
                        else:
                            x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask)

                        # forward the handler's progress updates to our caller
                        yield from x_samples

                        x_samples = partial_x_samples
                    except UserInitiatedStop:
                        if partial_x_samples is None:
                            continue

                        # user stopped mid-way: save whatever we have so far
                        x_samples = partial_x_samples

                    print("saving images")
                    for i in range(batch_size):

                        x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
                        x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
                        x_sample = x_sample.astype(np.uint8)
                        img = Image.fromarray(x_sample)

                        has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
                            (opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN'))

                        return_orig_img = not has_filters or not opt_show_only_filtered

                        if stop_processing:
                            return_orig_img = True

                        if opt_save_to_disk_path is not None:
                            # build a filesystem-safe filename from the prompt
                            prompt_flattened = filename_regex.sub('_', prompts[0])
                            prompt_flattened = prompt_flattened[:50]

                            img_id = str(uuid.uuid4())[-8:]

                            file_path = f"{prompt_flattened}_{img_id}"
                            img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}")
                            meta_out_path = os.path.join(session_out_path, f"{file_path}.txt")

                            if return_orig_img:
                                save_image(img, img_out_path)

                            save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt)

                        if return_orig_img:
                            img_data = img_to_base64_str(img)
                            res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
                            res.images.append(res_image_orig)

                            if opt_save_to_disk_path is not None:
                                res_image_orig.path_abs = img_out_path

                        del img

                        if has_filters and not stop_processing:
                            print('Applying filters..')

                            gc()
                            filters_applied = []

                            # both filters operate on BGR arrays, hence the
                            # channel reversal before and after each call
                            if opt_use_face_correction:
                                _, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
                                x_sample = output[:,:,::-1]
                                filters_applied.append(opt_use_face_correction)

                            if opt_use_upscale:
                                output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1])
                                x_sample = output[:,:,::-1]
                                filters_applied.append(opt_use_upscale)

                            filtered_image = Image.fromarray(x_sample)

                            filtered_img_data = img_to_base64_str(filtered_image)
                            res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed)
                            res.images.append(res_image_filtered)

                            filters_applied = "_".join(filters_applied)

                            if opt_save_to_disk_path is not None:
                                filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}")
                                save_image(filtered_image, filtered_img_out_path)
                                res_image_filtered.path_abs = filtered_img_out_path

                            del filtered_image

                        seeds += str(opt_seed) + ","
                        opt_seed += 1

                    move_fs_to_cpu()
                    gc()
                    del x_samples, x_samples_ddim, x_sample
                    print("memory_final = ", torch.cuda.memory_allocated() / 1e6)

    print('Task completed')

    yield json.dumps(res.json())
|
||||||
|
|
||||||
|
def save_image(img, img_out_path):
    """Save a PIL image to `img_out_path`, logging (but not raising) on failure.

    Best-effort: a failed save must not abort the generation loop that calls this.
    """
    try:
        img.save(img_out_path)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        print('could not save the file', traceback.format_exc())
|
||||||
|
|
||||||
|
def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt):
    """Write a plain-text metadata sidecar file describing one generated image.

    Only prompts[0] is recorded. Best-effort: failures are logged, not raised.
    """
    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}"

    try:
        with open(meta_out_path, 'w') as f:
            f.write(metadata)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        print('could not save the file', traceback.format_exc())
|
||||||
|
|
||||||
|
def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name):
    """Sample latents from text conditioning only; yields whatever model.sample yields."""
    latent_shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]

    # Move the conditioning model off the GPU, then poll until VRAM usage drops
    # below its prior level (the .to() call is asynchronous w.r.t. the allocator).
    if device != "cpu":
        vram_before = torch.cuda.memory_allocated() / 1e6
        modelCS.to("cpu")
        while torch.cuda.memory_allocated() / 1e6 >= vram_before:
            time.sleep(1)

    # DDIM needs its schedule prepared before sampling
    if sampler_name == 'ddim':
        model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)

    yield from model.sample(
        S=opt_ddim_steps,
        conditioning=c,
        seed=opt_seed,
        shape=latent_shape,
        verbose=False,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        eta=opt_ddim_eta,
        x_T=start_code,
        img_callback=img_callback,
        mask=mask,
        sampler=sampler_name,
    )
|
||||||
|
|
||||||
|
def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
    """Noise init_latent up to timestep t_enc, then sample (decode) it back down.

    Generator: yields whatever model.sample yields. Always uses the 'ddim' sampler.
    """
    # encode (scaled latent)
    timesteps = torch.tensor([t_enc] * batch_size).to(device)
    z_enc = model.stochastic_encode(init_latent, timesteps, opt_seed, opt_ddim_eta, opt_ddim_steps)

    # inpainting keeps the original latent around as x_T; plain img2img doesn't
    yield from model.sample(
        t_enc,
        c,
        z_enc,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        img_callback=img_callback,
        mask=mask,
        x_T=init_latent if mask is not None else None,
        sampler='ddim',
    )
|
||||||
|
|
||||||
|
def move_fs_to_cpu():
    """Move modelFS off the GPU, blocking until allocated VRAM drops below its prior level."""
    if device == "cpu":
        return
    vram_before = torch.cuda.memory_allocated() / 1e6
    modelFS.to("cpu")
    # .to("cpu") frees GPU memory asynchronously; poll until it actually drops
    while torch.cuda.memory_allocated() / 1e6 >= vram_before:
        time.sleep(1)
|
||||||
|
|
||||||
|
def gc():
    """Release cached CUDA memory; no-op on CPU.

    NOTE(review): this name shadows the stdlib `gc` module within this file.
    """
    if device != 'cpu':
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
|
||||||
|
|
||||||
|
# internal
|
||||||
|
|
||||||
|
def chunk(it, size):
    """Yield successive `size`-sized tuples from iterable `it`; the last may be shorter."""
    src = iter(it)
    while True:
        piece = tuple(islice(src, size))
        if not piece:
            break
        yield piece
|
||||||
|
|
||||||
|
|
||||||
|
def load_model_from_config(ckpt, verbose=False):
    """Load the checkpoint at `ckpt` (on CPU) and return its "state_dict" entry.

    Note: `verbose` is accepted for API compatibility but is currently unused.
    """
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    return pl_sd["state_dict"]
|
||||||
|
|
||||||
|
# utils
|
||||||
|
class UserInitiatedStop(Exception):
    """Signals that the user asked to stop the current generation task."""
    pass
|
||||||
|
|
||||||
|
def load_img(img_str, w0, h0):
    """Decode a base64 image string into a float tensor scaled to [-1, 1].

    The image is resized to (w0, h0) when both are given, then snapped down to
    multiples of 64, and returned as a (1, 3, h, w) torch tensor.
    """
    image = base64_str_to_img(img_str).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from base64")
    # an explicit target size (if given) overrides the image's own size
    if h0 is not None and w0 is not None:
        h, w = h0, w0

    w, h = w - w % 64, h - h % 64  # resize to integer multiple of 64
    image = image.resize((w, h), resample=Image.Resampling.LANCZOS)

    arr = np.array(image).astype(np.float32) / 255.0
    arr = arr[None].transpose(0, 3, 1, 2)
    return 2. * torch.from_numpy(arr) - 1.
|
||||||
|
|
||||||
|
def load_mask(mask_str, h0, w0, newH, newW, invert=False):
    """Decode a base64 mask into a (1, 3, newH, newW) float tensor with values in [0, 1]."""
    image = base64_str_to_img(mask_str).convert("RGB")
    w, h = image.size
    print(f"loaded input mask of size ({w}, {h})")

    if invert:
        print("inverted")
        image = ImageOps.invert(image)
        # where_0, where_1 = np.where(image == 0), np.where(image == 255)
        # image[where_0], image[where_1] = 255, 0

    if h0 is not None and w0 is not None:
        h, w = h0, w0

    w, h = w - w % 64, h - h % 64  # resize to integer multiple of 64

    # NOTE(review): w/h computed above only feed this log line — the resize below
    # uses newW/newH. Presumably intentional; verify against callers.
    print(f"New mask size ({w}, {h})")
    resized = image.resize((newW, newH), resample=Image.Resampling.LANCZOS)

    arr = np.array(resized).astype(np.float32) / 255.0
    arr = arr[None].transpose(0, 3, 1, 2)
    return torch.from_numpy(arr)
|
||||||
|
|
||||||
|
# https://stackoverflow.com/a/61114178
|
||||||
|
# https://stackoverflow.com/a/61114178
def img_to_base64_str(img):
    """Encode a PIL image as a "data:image/png;base64,..." data-URI string."""
    buf = BytesIO()
    img.save(buf, format="PNG")
    buf.seek(0)
    encoded = base64.b64encode(buf.getvalue()).decode()
    return "data:image/png;base64," + encoded
|
||||||
|
|
||||||
|
def base64_str_to_img(img_str):
    """Decode a "data:image/png;base64,..." string into a PIL image.

    Note: strips a fixed-length prefix; the prefix content is not validated.
    """
    prefix = "data:image/png;base64,"
    raw = base64.b64decode(img_str[len(prefix):])
    return Image.open(BytesIO(raw))
|
237
engine/server.py
Normal file
@ -0,0 +1,237 @@
|
|||||||
|
import json
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
SCRIPT_DIR = os.getcwd()
|
||||||
|
print('started in ', SCRIPT_DIR)
|
||||||
|
|
||||||
|
SD_UI_DIR = os.getenv('SD_UI_PATH', None)
|
||||||
|
sys.path.append(os.path.dirname(SD_UI_DIR))
|
||||||
|
|
||||||
|
CONFIG_DIR = os.path.join(SD_UI_DIR, '..', 'scripts')
|
||||||
|
|
||||||
|
OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
|
||||||
|
|
||||||
|
from fastapi import FastAPI, HTTPException
|
||||||
|
from fastapi.staticfiles import StaticFiles
|
||||||
|
from starlette.responses import FileResponse, StreamingResponse
|
||||||
|
from pydantic import BaseModel
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from sd_internal import Request, Response
|
||||||
|
|
||||||
|
app = FastAPI()
|
||||||
|
|
||||||
|
model_loaded = False
|
||||||
|
model_is_loading = False
|
||||||
|
|
||||||
|
modifiers_cache = None
|
||||||
|
outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)
|
||||||
|
|
||||||
|
# don't show access log entries for URLs that start with the given prefix
|
||||||
|
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/modifier-thumbnails']
|
||||||
|
|
||||||
|
app.mount('/media', StaticFiles(directory=os.path.join(SD_UI_DIR, 'media/')), name="media")
|
||||||
|
|
||||||
|
# defaults from https://huggingface.co/blog/stable_diffusion
|
||||||
|
class ImageRequest(BaseModel):
    """Body of POST /image. Field defaults follow https://huggingface.co/blog/stable_diffusion.

    Copied field-by-field onto sd_internal.Request in the /image handler.
    """
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    show_only_filtered_image: bool = False

    stream_progress_updates: bool = False
    stream_image_progress: bool = False
|
||||||
|
|
||||||
|
class SetAppConfigRequest(BaseModel):
    """Body of POST /app_config."""
    update_branch: str = "main"  # git branch the auto-updater should track
|
||||||
|
|
||||||
|
@app.get('/')
def read_root():
    """Serve the UI's index.html with client-side caching disabled."""
    no_cache = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Pragma": "no-cache",
        "Expires": "0",
    }
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'), headers=no_cache)
|
||||||
|
|
||||||
|
@app.get('/ping')
async def ping():
    """Health check; lazily loads the model on the first call.

    Returns {'OK'} once loaded, {'ERROR'} while a load is already in progress.
    Raises HTTP 500 if loading fails.
    """
    global model_loaded, model_is_loading

    if model_loaded:
        return {'OK'}
    if model_is_loading:
        return {'ERROR'}

    model_is_loading = True
    try:
        from sd_internal import runtime

        # prefer a user-provided checkpoint, if one was dropped next to the scripts
        custom_weight_path = os.path.join(SCRIPT_DIR, 'custom-model.ckpt')
        ckpt_to_use = "custom-model" if os.path.exists(custom_weight_path) else "sd-v1-4"
        runtime.load_model_ckpt(ckpt_to_use=ckpt_to_use)

        model_loaded = True
        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        # raise (don't return) so FastAPI actually sends a 500 status
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # bug fix: the original left model_is_loading=True forever after a failed
        # load, so every later ping reported 'ERROR' and never retried
        model_is_loading = False
|
||||||
|
|
||||||
|
@app.post('/image')
def image(req : ImageRequest):
    """Generate image(s): streams JSON progress updates, or (compatibility mode)
    buffers the stream and returns only the final result. Raises HTTP 500 on failure.
    """
    from sd_internal import runtime

    # copy the API fields onto the internal request object
    r = Request()
    r.session_id = req.session_id
    r.prompt = req.prompt
    r.negative_prompt = req.negative_prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale = req.use_upscale  # was `r.use_upscale: str = ...` — a stray annotated assignment
    r.use_face_correction = req.use_face_correction
    r.show_only_filtered_image = req.show_only_filtered_image

    r.stream_progress_updates = True # the underlying implementation only supports streaming
    r.stream_image_progress = req.stream_image_progress

    try:
        if not req.stream_progress_updates:
            r.stream_image_progress = False

        res = runtime.mk_img(r)

        if req.stream_progress_updates:
            return StreamingResponse(res, media_type='application/json')
        else: # compatibility mode: buffer the streaming responses, and return the last one
            last_result = None
            for result in res:
                last_result = result
            return json.loads(last_result)
    except Exception as e:
        print(traceback.format_exc())
        # raise (don't return) so FastAPI actually sends a 500 status
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
@app.get('/image/stop')
def stop():
    """Ask the runtime to stop the in-progress generation task.

    Returns {'ERROR'} while the model is still loading; HTTP 500 on failure.
    """
    try:
        if model_is_loading:
            return {'ERROR'}

        from sd_internal import runtime
        runtime.stop_processing = True

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        # raise (don't return) so FastAPI actually sends a 500 status
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
@app.get('/image/tmp/{session_id}/{img_id}')
def get_image(session_id, img_id):
    """Stream an in-progress preview image held in runtime.temp_images."""
    from sd_internal import runtime
    buf = runtime.temp_images[f'{session_id}/{img_id}']
    buf.seek(0)
    return StreamingResponse(buf, media_type='image/jpeg')
|
||||||
|
|
||||||
|
@app.post('/app_config')
async def setAppConfig(req : SetAppConfigRequest):
    """Persist the chosen update branch to config.json/.bat/.sh in the scripts folder.

    Three formats are written because the JSON is read by Python, and the .bat/.sh
    files are read by the platform launcher scripts. Raises HTTP 500 on failure.
    """
    try:
        config = {
            'update_branch': req.update_branch
        }

        config_json_str = json.dumps(config)
        config_bat_str = f'@set update_branch={req.update_branch}'
        config_sh_str = f'export update_branch={req.update_branch}'

        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
        config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
        config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')

        with open(config_json_path, 'w') as f:
            f.write(config_json_str)

        with open(config_bat_path, 'w') as f:
            f.write(config_bat_str)

        with open(config_sh_path, 'w') as f:
            f.write(config_sh_str)

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        # raise (don't return) so FastAPI actually sends a 500 status
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
@app.get('/app_config')
def getAppConfig():
    """Return the parsed contents of config.json; HTTP 500 if missing or unreadable."""
    config_json_path = os.path.join(CONFIG_DIR, 'config.json')

    # checked outside the try-block so the raised HTTPException isn't re-caught below
    if not os.path.exists(config_json_path):
        raise HTTPException(status_code=500, detail="No config file")

    try:
        with open(config_json_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        print(traceback.format_exc())
        # raise (don't return) so FastAPI actually sends a 500 status
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
@app.get('/modifiers.json')
def read_modifiers():
    """Serve the prompt-modifiers catalogue with client-side caching disabled."""
    no_cache = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Pragma": "no-cache",
        "Expires": "0",
    }
    return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=no_cache)
|
||||||
|
|
||||||
|
@app.get('/output_dir')
def read_home_dir():
    # NOTE(review): returns a one-element *set*, which FastAPI serializes as a
    # JSON array — presumably the client expects ["<path>"]; confirm before changing.
    return {outpath}
|
||||||
|
|
||||||
|
# don't log certain requests
|
||||||
|
class LogSuppressFilter(logging.Filter):
    """Drops access-log records whose message mentions any suppressed path prefix."""

    def filter(self, record: logging.LogRecord) -> bool:
        # return False (suppress) when the message contains a suppressed prefix
        message = record.getMessage()
        return not any(prefix in message for prefix in ACCESS_LOG_SUPPRESS_PATH_PREFIXES)
|
||||||
|
|
||||||
|
logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())
|
||||||
|
|
||||||
|
# start the browser ui
|
||||||
|
import webbrowser; webbrowser.open('http://localhost:9000')
|
BIN
installer/bin/micromamba_linux_arm64
Executable file
BIN
installer/bin/micromamba_linux_x64
Executable file
BIN
installer/bin/micromamba_mac_arm64
Executable file
BIN
installer/bin/micromamba_mac_x64
Executable file
BIN
installer/bin/micromamba_win_x64.exe
Normal file
34
installer/bootstrap/bootstrap.bat
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
@echo off

@rem This file initializes micromamba and activates the env.
@rem A similar bootstrap file needs to exist for each platform (win, linux, macOS)
@rem Ready to hand-over to the platform-independent installer after this (written in python).
@rem Expects SD_BASE_DIR to be set by the calling script.

set MAMBA_ROOT_PREFIX=%SD_BASE_DIR%\env\mamba
set INSTALL_ENV_DIR=%SD_BASE_DIR%\env\installer_env
set INSTALLER_YAML_FILE=%SD_BASE_DIR%\installer\yaml\installer-environment.yaml
set MICROMAMBA_BINARY_FILE=%SD_BASE_DIR%\installer\bin\micromamba_win_x64.exe

@rem initialize the mamba dir
if not exist "%MAMBA_ROOT_PREFIX%" mkdir "%MAMBA_ROOT_PREFIX%"

copy "%MICROMAMBA_BINARY_FILE%" "%MAMBA_ROOT_PREFIX%\micromamba.exe"

@rem test the mamba binary
echo Micromamba version:
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version

@rem run the shell hook (only needed before the Scripts folder exists, i.e. first run)
if not exist "%MAMBA_ROOT_PREFIX%\Scripts" (
    call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook --log-level 4 -s cmd.exe
)

call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"

@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
    call micromamba create -y --prefix "%INSTALL_ENV_DIR%" -f "%INSTALLER_YAML_FILE%"
)

@rem activate
call micromamba activate "%INSTALL_ENV_DIR%"
|
44
installer/bootstrap/bootstrap.sh
Executable file
@ -0,0 +1,44 @@
|
|||||||
|
#!/bin/bash

# This file initializes micromamba and activates the env.
# A similar bootstrap file needs to exist for each platform (win, linux, macOS)
# Ready to hand-over to the platform-independent installer after this (written in python).
# Expects SD_BASE_DIR to be set by the calling script.

# detect the platform, to pick the matching bundled micromamba binary
OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*) OS_NAME="linux";;
    Darwin*) OS_NAME="mac";;
    *) echo "Unknown OS: $OS_NAME! This only runs on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*) OS_ARCH="x64";;
    arm64*) OS_ARCH="arm64";;
    aarch64*) OS_ARCH="arm64";; # bug fix: Linux reports 64-bit ARM as "aarch64", not "arm64"
    *) echo "Unknown system architecture: $OS_ARCH! This only runs on x86_64 or arm64" && exit
esac

export MAMBA_ROOT_PREFIX=$SD_BASE_DIR/env/mamba
INSTALL_ENV_DIR=$SD_BASE_DIR/env/installer_env
INSTALLER_YAML_FILE=$SD_BASE_DIR/installer/yaml/installer-environment.yaml
MICROMAMBA_BINARY_FILE=$SD_BASE_DIR/installer/bin/micromamba_${OS_NAME}_${OS_ARCH}

# initialize the mamba dir
mkdir -p "$MAMBA_ROOT_PREFIX"

cp "$MICROMAMBA_BINARY_FILE" "$MAMBA_ROOT_PREFIX/micromamba"

# test the mamba binary
echo "Micromamba version:"
"$MAMBA_ROOT_PREFIX/micromamba" --version

# run the shell hook
eval "$($MAMBA_ROOT_PREFIX/micromamba shell hook -s posix)"

# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
    micromamba create -y --prefix "$INSTALL_ENV_DIR" -f "$INSTALLER_YAML_FILE"
fi

# activate
micromamba activate "$INSTALL_ENV_DIR"
|
21
installer/bootstrap/check-install-dir.bat
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
@echo off

@rem Offers to relocate the installation to the drive root, to dodge Windows
@rem path-length limits. Skips silently if env\ already exists (already installed here).

if exist "%SD_BASE_DIR%\env" exit /b

set suggested_dir=%~d0\stable-diffusion-ui

echo "Please install Stable Diffusion UI at the root of your drive. This avoids problems with path length limits in Windows." & echo.
set /p answer="Press Enter to install at %suggested_dir%, or type 'c' (without quotes) to install at the current location (press enter or type 'c'): "

@rem anything other than a leading 'c' means: move to the suggested location
if /i "%answer:~,1%" NEQ "c" (
    if exist "%suggested_dir%" (
        echo. & echo "Sorry, %suggested_dir% already exists! Cannot overwrite that folder!" & echo.
        pause
        exit
    )

    @rem copy everything over, leave a marker note behind, and continue from the new location
    xcopy "%SD_BASE_DIR%" "%suggested_dir%" /s /i /Y /Q
    echo Please run the %START_CMD_FILENAME% file inside %suggested_dir% . Do not use this folder anymore > "%SD_BASE_DIR%/READ_ME - DO_NOT_USE_THIS_FOLDER.txt"

    cd %suggested_dir%
)
|
78
installer/developer/enable_dev_mode.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
import argparse
import subprocess
import sys
import json
import os
import platform
import shutil

# Enables developer mode: sets a config flag and optionally replaces the
# installer/ui/engine folders with symlinks into a live git checkout.

config_path = os.path.join('config.json')

if not os.path.exists('LICENSE'):
    print('Error: This script needs to be run from the root of the stable-diffusion-ui folder! Please cd to the correct folder, and run this again.')
    exit(1)

parser = argparse.ArgumentParser()

parser.add_argument(
    "--symlink_dir", type=str, default=None, help="the absolute path to the project git repository (to link to)"
)
opt = parser.parse_args()

def run(cmd):
    """Run a shell command, echoing its output live. Returns True on exit code 0."""
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    for c in iter(lambda: p.stdout.read(1), b""):
        sys.stdout.buffer.write(c)
        sys.stdout.flush()

    p.wait()

    return p.returncode == 0

def get_config():
    """Read config.json from the current folder; returns {} if it doesn't exist."""
    if not os.path.exists(config_path):
        return {}

    with open(config_path, "r") as f:
        return json.load(f)

def save_config(config):
    """Write the config dict back to config.json."""
    with open(config_path, "w") as f:
        json.dump(config, f)

# set the `is_developer_mode` flag to `true` in the config
config = get_config()
config['is_developer_mode'] = True
save_config(config)

print('set is_developer_mode=true in config.json')

# make the symlink, if requested
if opt.symlink_dir is not None:
    if not os.path.exists(opt.symlink_dir):
        print(f'Symlink directory "{opt.symlink_dir}" was not found! Are you sure it has been escaped correctly?')
        exit(1)

    installer_target_path = os.path.join(opt.symlink_dir, 'installer')
    ui_target_path = os.path.join(opt.symlink_dir, 'ui')
    engine_target_path = os.path.join(opt.symlink_dir, 'engine')

    # bug fix: validate the target BEFORE deleting the local folders — the original
    # deleted first and checked afterwards, so a wrong --symlink_dir destroyed the
    # installation's installer/ui/engine folders with nothing to link to
    if not os.path.exists(ui_target_path) or not os.path.exists(installer_target_path) or not os.path.exists(engine_target_path):
        print('The target symlink directory does not contain the required {ui, installer, engine} folders. Are you sure it is the correct git repo for the project?')
        exit(1)

    shutil.rmtree('installer', ignore_errors=True)
    shutil.rmtree('ui', ignore_errors=True)
    shutil.rmtree('engine', ignore_errors=True)

    if platform.system() == 'Windows':
        run(f'mklink /J "installer" "{installer_target_path}"')
        run(f'mklink /J "ui" "{ui_target_path}"')
        run(f'mklink /J "engine" "{engine_target_path}"')
    elif platform.system() in ('Linux', 'Darwin'):
        run(f'ln -s "{installer_target_path}" "installer"')
        run(f'ln -s "{ui_target_path}" "ui"')
        run(f'ln -s "{engine_target_path}" "engine"')

    print(f'Created symlinks! Your installation will now automatically use the files present in the repository at {opt.symlink_dir}')
|
0
installer/installer/__init__.py
Normal file
70
installer/installer/app.py
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
import os
|
||||||
|
import json
|
||||||
|
import platform
|
||||||
|
|
||||||
|
# config
|
||||||
|
PROJECT_REPO_URL = 'https://github.com/cmdr2/stable-diffusion-ui.git'
|
||||||
|
DEFAULT_PROJECT_BRANCH = 'installer_new'
|
||||||
|
PROJECT_REPO_DIR_NAME = 'project_repo'
|
||||||
|
|
||||||
|
STABLE_DIFFUSION_REPO_URL = 'https://github.com/basujindal/stable-diffusion.git'
|
||||||
|
DEFAULT_STABLE_DIFFUSION_COMMIT = 'f6cfebffa752ee11a7b07497b8529d5971de916c'
|
||||||
|
STABLE_DIFFUSION_REPO_DIR_NAME = 'stable-diffusion'
|
||||||
|
|
||||||
|
PROJECT_ENV_DIR_NAME = 'project_env'
|
||||||
|
|
||||||
|
START_CMD_FILE_NAME = "Start Stable Diffusion UI.cmd" if platform.system() == "Windows" else "start.sh"
|
||||||
|
DEV_CONSOLE_CMD_FILE_NAME = "Developer Console.cmd" if platform.system() == "Windows" else "developer_console.sh"
|
||||||
|
CONFIG_FILE_NAME = 'config.json'
|
||||||
|
|
||||||
|
# top-level folders
|
||||||
|
ENV_DIR_NAME = 'env'
|
||||||
|
MODELS_DIR_NAME = 'models'
|
||||||
|
|
||||||
|
INSTALLER_DIR_NAME = 'installer'
|
||||||
|
UI_DIR_NAME = 'ui'
|
||||||
|
ENGINE_DIR_NAME = 'engine'
|
||||||
|
|
||||||
|
|
||||||
|
# env
|
||||||
|
SD_BASE_DIR = os.environ['SD_BASE_DIR']
|
||||||
|
|
||||||
|
|
||||||
|
# model folders
|
||||||
|
STABLE_DIFFUSION_MODELS_DIR_NAME = "stable-diffusion"
|
||||||
|
GFPGAN_MODELS_DIR_NAME = "gfpgan"
|
||||||
|
RealESRGAN_MODELS_DIR_NAME = "realesrgan"
|
||||||
|
|
||||||
|
# create references to dirs
|
||||||
|
env_dir_path = os.path.join(SD_BASE_DIR, ENV_DIR_NAME)
|
||||||
|
|
||||||
|
installer_dir_path = os.path.join(SD_BASE_DIR, INSTALLER_DIR_NAME)
|
||||||
|
ui_dir_path = os.path.join(SD_BASE_DIR, UI_DIR_NAME)
|
||||||
|
engine_dir_path = os.path.join(SD_BASE_DIR, ENGINE_DIR_NAME)
|
||||||
|
|
||||||
|
project_repo_dir_path = os.path.join(env_dir_path, PROJECT_REPO_DIR_NAME)
|
||||||
|
stable_diffusion_repo_dir_path = os.path.join(env_dir_path, STABLE_DIFFUSION_REPO_DIR_NAME)
|
||||||
|
|
||||||
|
project_env_dir_path = os.path.join(env_dir_path, PROJECT_ENV_DIR_NAME)
|
||||||
|
|
||||||
|
patches_dir_path = os.path.join(installer_dir_path, 'patches')
|
||||||
|
|
||||||
|
models_dir_path = os.path.join(SD_BASE_DIR, MODELS_DIR_NAME)
|
||||||
|
stable_diffusion_models_dir_path = os.path.join(models_dir_path, STABLE_DIFFUSION_MODELS_DIR_NAME)
|
||||||
|
gfpgan_models_dir_path = os.path.join(models_dir_path, GFPGAN_MODELS_DIR_NAME)
|
||||||
|
realesrgan_models_dir_path = os.path.join(models_dir_path, RealESRGAN_MODELS_DIR_NAME)
|
||||||
|
|
||||||
|
|
||||||
|
# useful functions
|
||||||
|
def get_config():
    """Load <SD_BASE_DIR>/config.json as a dict; returns {} when the file is absent."""
    config_path = os.path.join(SD_BASE_DIR, CONFIG_FILE_NAME)
    if not os.path.exists(config_path):
        return {}
    with open(config_path, "r") as f:
        return json.load(f)
|
||||||
|
|
||||||
|
|
||||||
|
# app context
|
||||||
|
config = get_config()
|
||||||
|
activated_env_dir_path = None
|
18
installer/installer/check_modules.py
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
'''
This script is run by the `installer.helpers.modules_exist_in_env()` function
'''

import sys
import importlib.util

# module names to check are passed on the command line
modules = sys.argv[1:]

# find_spec() returns None for modules that cannot be imported
# (replaces pkgutil.find_loader, deprecated since Python 3.12 / removed in 3.14)
missing_modules = [m for m in modules if importlib.util.find_spec(m) is None]

if len(missing_modules) == 0:
    print('42')  # success marker; the caller only checks for the absence of 'Missing'
    exit()

print('Missing modules', missing_modules)
|
80
installer/installer/helpers.py
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
import os
|
||||||
|
from os import path
|
||||||
|
import subprocess
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from installer import app
|
||||||
|
|
||||||
|
def run(cmd, run_in_folder=None, env=None, get_output=False, log_the_cmd=False):
    """Run a shell command, optionally inside the activated micromamba env / a folder.

    Returns (stdout, stderr) decoded as UTF-8 when get_output is True, else None.
    stderr is merged into stdout when capturing, so the second element is None.
    """
    # prefix with an env activation, unless the command already activates one
    if app.activated_env_dir_path is not None and 'micromamba activate' not in cmd:
        cmd = f'micromamba activate "{app.activated_env_dir_path}" && {cmd}'

    if run_in_folder is not None:
        cmd = f'cd "{run_in_folder}" && {cmd}'

    if log_the_cmd:
        log('running: ' + cmd)

    if get_output:
        p = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    else:
        p = subprocess.Popen(cmd, shell=True, env=env)

    out, err = p.communicate()

    out = out.decode('utf-8') if isinstance(out, bytes) else out
    # bug fix: the original tested `isinstance(out, bytes)` here as well
    err = err.decode('utf-8') if isinstance(err, bytes) else err

    if get_output:
        return out, err
|
||||||
|
|
||||||
|
def log(msg):
    """Write an installer log message to stdout."""
    print(msg)
|
||||||
|
|
||||||
|
def modules_exist_in_env(modules, env_dir_path=app.project_env_dir_path):
    """Return True if every named module is importable inside the given env.

    Activates the env (if it isn't already the active one) and runs
    check_modules.py in it, then looks for 'Missing' in the output.
    """
    if not path.exists(env_dir_path):
        return False

    check_modules_script_path = path.join(app.installer_dir_path, 'installer', 'check_modules.py')
    module_args = ' '.join(modules)
    check_modules_cmd = f'python "{check_modules_script_path}" {module_args}'

    env = os.environ.copy()
    # bug fix: use the platform's path separator — the hard-coded ';' only worked on Windows
    env['PYTHONPATH'] = app.stable_diffusion_repo_dir_path + os.pathsep + os.path.join(app.project_env_dir_path, 'lib', 'site-packages')

    if app.activated_env_dir_path != env_dir_path:
        activate_cmd = f'micromamba activate "{env_dir_path}"'
        check_modules_cmd = f'{activate_cmd} && {check_modules_cmd}'

    # activate and run the modules checker
    output, _ = run(check_modules_cmd, get_output=True, env=env)
    if 'Missing' in output:
        log(output)
        return False

    return True
|
||||||
|
|
||||||
|
def fail_with_install_error(error_msg):
    """Log a stack trace plus user-facing troubleshooting steps, then exit(1).

    The logging itself is wrapped in try/except so a broken logger can never
    prevent the installer from terminating with a failure code.
    """
    try:
        log(traceback.format_stack())
        log(f'''

Error: {error_msg}. Sorry about that, please try to:
1. Run this installer again.
2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/blob/main/Troubleshooting.md
3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
Thanks!''')
    except:
        pass

    exit(1)
|
||||||
|
|
||||||
|
def apply_git_patches(repo_dir_path, patch_file_names):
    """Apply the named patch files (from the installer's patches dir) to a git repo.

    Skipped entirely in developer mode, where the repo is a live checkout.
    """
    if app.config.get('is_developer_mode', False):
        return

    for patch_name in patch_file_names:
        patch_path = path.join(app.patches_dir_path, patch_name)
        run(f"git apply {patch_path}", run_in_folder=repo_dir_path)
|
34
installer/installer/main.py
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
import os
import sys
from datetime import datetime

# make the parent folder importable, so `from installer import ...` resolves
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from installer import helpers
from installer.tasks import (
    fetch_project_repo,
    apply_project_update,
    fetch_stable_diffusion_repo,
    install_stable_diffusion_packages,
    install_ui_packages,
    download_weights,
    start_ui_server,
)

# the install/startup pipeline — each entry is a module with a run() function,
# executed strictly in this order
tasks = [
    fetch_project_repo,
    apply_project_update,
    fetch_stable_diffusion_repo,
    install_stable_diffusion_packages,
    install_ui_packages,
    download_weights,
    start_ui_server,
]

helpers.log(f'Starting Stable Diffusion UI at {datetime.now().strftime("%d/%m/%Y %H:%M:%S")}')

def run_tasks():
    # run each task module's run() in sequence
    for task in tasks:
        task.run()

run_tasks()
|
8
installer/installer/start.bat
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
@echo off
|
||||||
|
rem Never edit this file. If you really, really have to, beware that a script doesn't like
|
||||||
|
rem being overwritten while it is running (the auto-updater will do that).
|
||||||
|
rem The trick is to update this file while another script is running, and vice versa.
|
||||||
|
|
||||||
|
call python %SD_BASE_DIR%\installer\installer\main.py
|
||||||
|
|
||||||
|
pause
|
9
installer/installer/start.sh
Executable file
@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Never edit this file. If you really, really have to, beware that a script doesn't like
|
||||||
|
# being overwritten while it is running (the auto-updater will do that).
|
||||||
|
# The trick is to update this file while another script is running, and vice versa.
|
||||||
|
|
||||||
|
python $SD_BASE_DIR/installer/installer/main.py
|
||||||
|
|
||||||
|
read -p "Press enter to continue"
|
0
installer/installer/tasks/__init__.py
Normal file
30
installer/installer/tasks/apply_project_update.py
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
from os import path
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
from installer import app
|
||||||
|
|
||||||
|
def run():
|
||||||
|
is_developer_mode = app.config.get('is_developer_mode', False)
|
||||||
|
if is_developer_mode:
|
||||||
|
return
|
||||||
|
|
||||||
|
installer_src_path = path.join(app.project_repo_dir_path, 'installer')
|
||||||
|
ui_src_path = path.join(app.project_repo_dir_path, 'ui')
|
||||||
|
engine_src_path = path.join(app.project_repo_dir_path, 'engine')
|
||||||
|
|
||||||
|
start_cmd_src_path = path.join(app.project_repo_dir_path, app.START_CMD_FILE_NAME)
|
||||||
|
start_cmd_dst_path = path.join(app.SD_BASE_DIR, app.START_CMD_FILE_NAME)
|
||||||
|
|
||||||
|
dev_console_cmd_src_path = path.join(app.project_repo_dir_path, app.DEV_CONSOLE_CMD_FILE_NAME)
|
||||||
|
dev_console_cmd_dst_path = path.join(app.SD_BASE_DIR, app.DEV_CONSOLE_CMD_FILE_NAME)
|
||||||
|
|
||||||
|
shutil.rmtree(app.installer_dir_path, ignore_errors=True)
|
||||||
|
shutil.rmtree(app.ui_dir_path, ignore_errors=True)
|
||||||
|
shutil.rmtree(app.engine_dir_path, ignore_errors=True)
|
||||||
|
|
||||||
|
shutil.copytree(installer_src_path, app.installer_dir_path, dirs_exist_ok=True)
|
||||||
|
shutil.copytree(ui_src_path, app.ui_dir_path, dirs_exist_ok=True)
|
||||||
|
shutil.copytree(engine_src_path, app.engine_dir_path, dirs_exist_ok=True)
|
||||||
|
|
||||||
|
shutil.copy(start_cmd_src_path, start_cmd_dst_path)
|
||||||
|
shutil.copy(dev_console_cmd_src_path, dev_console_cmd_dst_path)
|
46
installer/installer/tasks/download_weights.py
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
def run():
|
||||||
|
fetch_model('Stable Diffusion', 'sd-v1-4.ckpt', model_dir_path=app.stable_diffusion_models_dir_path, download_url='https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt', expected_file_sizes=[4265380512, 7703807346, 7703810927])
|
||||||
|
fetch_model('Face Correction (GFPGAN)', 'GFPGANv1.4.pth', model_dir_path=app.gfpgan_models_dir_path, download_url='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth', expected_file_sizes=[348632874])
|
||||||
|
fetch_model('Resolution Upscale (RealESRGAN x4)', 'RealESRGAN_x4plus.pth', model_dir_path=app.realesrgan_models_dir_path, download_url='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth', expected_file_sizes=[67040989])
|
||||||
|
fetch_model('Resolution Upscale (RealESRGAN x4_anime)', 'RealESRGAN_x4plus_anime_6B.pth', model_dir_path=app.realesrgan_models_dir_path, download_url='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth', expected_file_sizes=[17938799])
|
||||||
|
|
||||||
|
def fetch_model(model_type, file_name, model_dir_path, download_url, expected_file_sizes):
|
||||||
|
os.makedirs(model_dir_path, exist_ok=True)
|
||||||
|
|
||||||
|
file_path = os.path.join(model_dir_path, file_name)
|
||||||
|
|
||||||
|
if model_exists(file_name, file_path, expected_file_sizes):
|
||||||
|
helpers.log(f'Data files (weights) necessary for {model_type} were already downloaded')
|
||||||
|
return
|
||||||
|
|
||||||
|
helpers.log(f'Downloading data files (weights) for {model_type}..')
|
||||||
|
|
||||||
|
helpers.run(f'curl -L -k "{download_url}" > "{file_path}"', log_the_cmd=True)
|
||||||
|
|
||||||
|
def model_exists(file_name, file_path, expected_file_sizes):
|
||||||
|
legacy_file_path = os.path.join(app.stable_diffusion_repo_dir_path, file_name)
|
||||||
|
|
||||||
|
file_exists = os.path.exists(file_path)
|
||||||
|
legacy_file_exists = os.path.exists(legacy_file_path)
|
||||||
|
|
||||||
|
if legacy_file_exists:
|
||||||
|
file_size = os.path.getsize(legacy_file_path)
|
||||||
|
if file_size in expected_file_sizes:
|
||||||
|
return True
|
||||||
|
|
||||||
|
helpers.log(f'{file_name} is invalid. Was only {file_size} bytes in size. Downloading again..')
|
||||||
|
os.remove(legacy_file_path)
|
||||||
|
|
||||||
|
if file_exists:
|
||||||
|
file_size = os.path.getsize(file_path)
|
||||||
|
if file_size in expected_file_sizes:
|
||||||
|
return True
|
||||||
|
|
||||||
|
helpers.log(f'{file_name} is invalid. Was only {file_size} bytes in size. Downloading again..')
|
||||||
|
os.remove(file_path)
|
||||||
|
|
||||||
|
return False
|
27
installer/installer/tasks/fetch_project_repo.py
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
from os import path
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
project_repo_git_path = path.join(app.project_repo_dir_path, '.git')
|
||||||
|
|
||||||
|
def run():
|
||||||
|
branch_name = app.config.get('update_branch', app.DEFAULT_PROJECT_BRANCH)
|
||||||
|
|
||||||
|
if path.exists(project_repo_git_path):
|
||||||
|
helpers.log(f"Stable Diffusion UI's git repository was already installed. Updating from {branch_name}..")
|
||||||
|
|
||||||
|
helpers.run("git reset --hard", run_in_folder=app.project_repo_dir_path)
|
||||||
|
helpers.run(f'git -c advice.detachedHead=false checkout "{branch_name}"', run_in_folder=app.project_repo_dir_path)
|
||||||
|
helpers.run("git pull", run_in_folder=app.project_repo_dir_path)
|
||||||
|
else:
|
||||||
|
helpers.log("\nDownloading Stable Diffusion UI..\n")
|
||||||
|
helpers.log(f"Using the {branch_name} channel\n")
|
||||||
|
|
||||||
|
helpers.run(f'git clone {app.PROJECT_REPO_URL} "{app.project_repo_dir_path}"')
|
||||||
|
|
||||||
|
if path.exists(project_repo_git_path):
|
||||||
|
helpers.log("Downloaded Stable Diffusion UI")
|
||||||
|
else:
|
||||||
|
helpers.fail_with_install_error(error_msg="Could not download Stable Diffusion UI")
|
||||||
|
|
||||||
|
helpers.run(f'git -c advice.detachedHead=false checkout "{branch_name}"', run_in_folder=app.project_repo_dir_path)
|
37
installer/installer/tasks/fetch_stable_diffusion_repo.py
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
from os import path
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
stable_diffusion_repo_git_path = path.join(app.stable_diffusion_repo_dir_path, '.git')
|
||||||
|
|
||||||
|
is_developer_mode = app.config.get('is_developer_mode', False)
|
||||||
|
|
||||||
|
def run():
|
||||||
|
fetch_repo()
|
||||||
|
|
||||||
|
helpers.apply_git_patches(app.stable_diffusion_repo_dir_path, patch_file_names=(
|
||||||
|
"sd_custom.patch",
|
||||||
|
))
|
||||||
|
|
||||||
|
def fetch_repo():
|
||||||
|
commit_id = app.config.get('stable_diffusion_commit', app.DEFAULT_STABLE_DIFFUSION_COMMIT)
|
||||||
|
|
||||||
|
if path.exists(stable_diffusion_repo_git_path):
|
||||||
|
helpers.log(f"Stable Diffusion's git repository was already installed. Using commit: {commit_id}..")
|
||||||
|
|
||||||
|
if not is_developer_mode:
|
||||||
|
helpers.run("git reset --hard", run_in_folder=app.stable_diffusion_repo_dir_path)
|
||||||
|
helpers.run("git fetch origin", run_in_folder=app.stable_diffusion_repo_dir_path)
|
||||||
|
helpers.run(f'git -c advice.detachedHead=false checkout "{commit_id}"', run_in_folder=app.stable_diffusion_repo_dir_path)
|
||||||
|
else:
|
||||||
|
helpers.log("\nDownloading Stable Diffusion..\n")
|
||||||
|
helpers.log(f"Using commit: {commit_id}\n")
|
||||||
|
|
||||||
|
helpers.run(f'git clone {app.STABLE_DIFFUSION_REPO_URL} "{app.stable_diffusion_repo_dir_path}"')
|
||||||
|
|
||||||
|
if path.exists(stable_diffusion_repo_git_path):
|
||||||
|
helpers.log("Downloaded Stable Diffusion")
|
||||||
|
else:
|
||||||
|
helpers.fail_with_install_error(error_msg="Could not download Stable Diffusion")
|
||||||
|
|
||||||
|
helpers.run(f'git -c advice.detachedHead=false checkout "{commit_id}"', run_in_folder=app.stable_diffusion_repo_dir_path)
|
@ -0,0 +1,59 @@
|
|||||||
|
import os
|
||||||
|
import platform
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
def run():
|
||||||
|
environment_file_path = get_environment_file_path()
|
||||||
|
local_env_file_path = os.path.join(app.stable_diffusion_repo_dir_path, 'environment.yaml')
|
||||||
|
|
||||||
|
shutil.copy(environment_file_path, local_env_file_path)
|
||||||
|
|
||||||
|
if is_valid_env():
|
||||||
|
helpers.log("Packages necessary for Stable Diffusion were already installed")
|
||||||
|
return
|
||||||
|
|
||||||
|
log_installing_header()
|
||||||
|
|
||||||
|
env = os.environ.copy()
|
||||||
|
env['PYTHONNOUSERSITE'] = '1'
|
||||||
|
|
||||||
|
if not os.path.exists(app.project_env_dir_path):
|
||||||
|
helpers.run(f'micromamba create --prefix {app.project_env_dir_path}', log_the_cmd=True)
|
||||||
|
|
||||||
|
helpers.run(f'micromamba install -y --prefix {app.project_env_dir_path} -f {local_env_file_path}', env=env, log_the_cmd=True, run_in_folder=app.stable_diffusion_repo_dir_path)
|
||||||
|
|
||||||
|
if is_valid_env():
|
||||||
|
helpers.log("Installed the packages necessary for Stable Diffusion")
|
||||||
|
|
||||||
|
app.activated_env_dir_path = app.project_env_dir_path # so that future `run()` invocations will run in the activated env
|
||||||
|
else:
|
||||||
|
helpers.fail_with_install_error(error_msg="Could not install the packages necessary for Stable Diffusion")
|
||||||
|
|
||||||
|
apply_patches()
|
||||||
|
|
||||||
|
def apply_patches():
|
||||||
|
gfpgan_repo_dir_path = os.path.join(app.stable_diffusion_repo_dir_path, 'src', 'gfpgan')
|
||||||
|
helpers.apply_git_patches(gfpgan_repo_dir_path, patch_file_names=(
|
||||||
|
"gfpgan_custom.patch",
|
||||||
|
))
|
||||||
|
|
||||||
|
def get_environment_file_path():
|
||||||
|
environment_file_name = 'sd-environment-win-linux-nvidia.yaml'
|
||||||
|
if platform.system() == 'Darwin':
|
||||||
|
environment_file_name = 'sd-environment-mac-nvidia.yaml'
|
||||||
|
|
||||||
|
return os.path.join(app.installer_dir_path, 'yaml', environment_file_name)
|
||||||
|
|
||||||
|
def log_installing_header():
|
||||||
|
helpers.log('''
|
||||||
|
|
||||||
|
Downloading packages necessary for Stable Diffusion..
|
||||||
|
|
||||||
|
***** !! This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient *****
|
||||||
|
|
||||||
|
''')
|
||||||
|
|
||||||
|
def is_valid_env():
|
||||||
|
return helpers.modules_exist_in_env(('torch', 'antlr4', 'transformers', 'numpy', 'gfpgan', 'realesrgan', 'basicsr'))
|
39
installer/installer/tasks/install_ui_packages.py
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import platform
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
def run():
|
||||||
|
if is_valid_env():
|
||||||
|
helpers.log("Packages necessary for Stable Diffusion UI were already installed")
|
||||||
|
return
|
||||||
|
|
||||||
|
log_installing_header()
|
||||||
|
|
||||||
|
env = os.environ.copy()
|
||||||
|
env['PYTHONNOUSERSITE'] = '1'
|
||||||
|
|
||||||
|
helpers.run(f'micromamba install -y --prefix {app.project_env_dir_path} -c conda-forge uvicorn fastapi', env=env, log_the_cmd=True)
|
||||||
|
|
||||||
|
if is_valid_env():
|
||||||
|
helpers.log("Installed the packages necessary for Stable Diffusion UI")
|
||||||
|
else:
|
||||||
|
helpers.fail_with_install_error(error_msg="Could not install the packages necessary for Stable Diffusion UI")
|
||||||
|
|
||||||
|
def log_installing_header():
|
||||||
|
helpers.log('''
|
||||||
|
|
||||||
|
Downloading packages necessary for Stable Diffusion UI..
|
||||||
|
|
||||||
|
''')
|
||||||
|
|
||||||
|
def is_valid_env():
|
||||||
|
path = os.environ['PATH']
|
||||||
|
path += ';' + os.path.join(app.project_env_dir_path, 'Scripts' if platform.system() == 'Windows' else 'bin')
|
||||||
|
|
||||||
|
if shutil.which("uvicorn", path=path) is None:
|
||||||
|
helpers.log("uvicorn not found!")
|
||||||
|
return False
|
||||||
|
|
||||||
|
return helpers.modules_exist_in_env(('uvicorn', 'fastapi'))
|
23
installer/installer/tasks/start_ui_server.py
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
import os
|
||||||
|
import platform
|
||||||
|
|
||||||
|
from installer import app, helpers
|
||||||
|
|
||||||
|
def run():
|
||||||
|
helpers.log("\nStable Diffusion is ready!\n")
|
||||||
|
|
||||||
|
env = os.environ.copy()
|
||||||
|
env['SD_DIR'] = app.stable_diffusion_repo_dir_path
|
||||||
|
env['PYTHONPATH'] = app.stable_diffusion_repo_dir_path + ';' + os.path.join(app.project_env_dir_path, 'lib', 'site-packages')
|
||||||
|
env['SD_UI_PATH'] = app.ui_dir_path
|
||||||
|
env['PATH'] += ';' + os.path.join(app.project_env_dir_path, 'Scripts' if platform.system() == 'Windows' else 'bin')
|
||||||
|
|
||||||
|
helpers.log(f'PYTHONPATH={env["PYTHONPATH"]}')
|
||||||
|
helpers.run('python --version', log_the_cmd=True)
|
||||||
|
|
||||||
|
host = app.config.get('host', 'localhost')
|
||||||
|
port = app.config.get('port', '9000')
|
||||||
|
|
||||||
|
ui_server_cmd = f'uvicorn server:app --app-dir "{app.ui_dir_path}" --port {port} --host {host}'
|
||||||
|
|
||||||
|
helpers.run(ui_server_cmd, run_in_folder=app.stable_diffusion_repo_dir_path, log_the_cmd=True, env=env)
|
22
installer/patches/gfpgan_custom.patch
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
diff --git a/gfpgan/utils.py b/gfpgan/utils.py
|
||||||
|
index 74ee5a8..1357f48 100644
|
||||||
|
--- a/gfpgan/utils.py
|
||||||
|
+++ b/gfpgan/utils.py
|
||||||
|
@@ -117,14 +117,14 @@ class GFPGANer():
|
||||||
|
# face restoration
|
||||||
|
for cropped_face in self.face_helper.cropped_faces:
|
||||||
|
# prepare data
|
||||||
|
- cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
|
||||||
|
+ cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=False, float32=True)
|
||||||
|
normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
|
||||||
|
cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)
|
||||||
|
|
||||||
|
try:
|
||||||
|
- output = self.gfpgan(cropped_face_t, return_rgb=False, weight=weight)[0]
|
||||||
|
+ output = self.gfpgan(cropped_face_t, return_rgb=True, weight=weight)[0]
|
||||||
|
# convert to image
|
||||||
|
- restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))
|
||||||
|
+ restored_face = tensor2img(output.squeeze(0), rgb2bgr=False, min_max=(-1, 1))
|
||||||
|
except RuntimeError as error:
|
||||||
|
print(f'\tFailed inference for GFPGAN: {error}.')
|
||||||
|
restored_face = cropped_face
|
@ -0,0 +1,171 @@
|
|||||||
|
{
|
||||||
|
"_name_or_path": "clip-vit-large-patch14/",
|
||||||
|
"architectures": [
|
||||||
|
"CLIPModel"
|
||||||
|
],
|
||||||
|
"initializer_factor": 1.0,
|
||||||
|
"logit_scale_init_value": 2.6592,
|
||||||
|
"model_type": "clip",
|
||||||
|
"projection_dim": 768,
|
||||||
|
"text_config": {
|
||||||
|
"_name_or_path": "",
|
||||||
|
"add_cross_attention": false,
|
||||||
|
"architectures": null,
|
||||||
|
"attention_dropout": 0.0,
|
||||||
|
"bad_words_ids": null,
|
||||||
|
"bos_token_id": 0,
|
||||||
|
"chunk_size_feed_forward": 0,
|
||||||
|
"cross_attention_hidden_size": null,
|
||||||
|
"decoder_start_token_id": null,
|
||||||
|
"diversity_penalty": 0.0,
|
||||||
|
"do_sample": false,
|
||||||
|
"dropout": 0.0,
|
||||||
|
"early_stopping": false,
|
||||||
|
"encoder_no_repeat_ngram_size": 0,
|
||||||
|
"eos_token_id": 2,
|
||||||
|
"finetuning_task": null,
|
||||||
|
"forced_bos_token_id": null,
|
||||||
|
"forced_eos_token_id": null,
|
||||||
|
"hidden_act": "quick_gelu",
|
||||||
|
"hidden_size": 768,
|
||||||
|
"id2label": {
|
||||||
|
"0": "LABEL_0",
|
||||||
|
"1": "LABEL_1"
|
||||||
|
},
|
||||||
|
"initializer_factor": 1.0,
|
||||||
|
"initializer_range": 0.02,
|
||||||
|
"intermediate_size": 3072,
|
||||||
|
"is_decoder": false,
|
||||||
|
"is_encoder_decoder": false,
|
||||||
|
"label2id": {
|
||||||
|
"LABEL_0": 0,
|
||||||
|
"LABEL_1": 1
|
||||||
|
},
|
||||||
|
"layer_norm_eps": 1e-05,
|
||||||
|
"length_penalty": 1.0,
|
||||||
|
"max_length": 20,
|
||||||
|
"max_position_embeddings": 77,
|
||||||
|
"min_length": 0,
|
||||||
|
"model_type": "clip_text_model",
|
||||||
|
"no_repeat_ngram_size": 0,
|
||||||
|
"num_attention_heads": 12,
|
||||||
|
"num_beam_groups": 1,
|
||||||
|
"num_beams": 1,
|
||||||
|
"num_hidden_layers": 12,
|
||||||
|
"num_return_sequences": 1,
|
||||||
|
"output_attentions": false,
|
||||||
|
"output_hidden_states": false,
|
||||||
|
"output_scores": false,
|
||||||
|
"pad_token_id": 1,
|
||||||
|
"prefix": null,
|
||||||
|
"problem_type": null,
|
||||||
|
"projection_dim" : 768,
|
||||||
|
"pruned_heads": {},
|
||||||
|
"remove_invalid_values": false,
|
||||||
|
"repetition_penalty": 1.0,
|
||||||
|
"return_dict": true,
|
||||||
|
"return_dict_in_generate": false,
|
||||||
|
"sep_token_id": null,
|
||||||
|
"task_specific_params": null,
|
||||||
|
"temperature": 1.0,
|
||||||
|
"tie_encoder_decoder": false,
|
||||||
|
"tie_word_embeddings": true,
|
||||||
|
"tokenizer_class": null,
|
||||||
|
"top_k": 50,
|
||||||
|
"top_p": 1.0,
|
||||||
|
"torch_dtype": null,
|
||||||
|
"torchscript": false,
|
||||||
|
"transformers_version": "4.16.0.dev0",
|
||||||
|
"use_bfloat16": false,
|
||||||
|
"vocab_size": 49408
|
||||||
|
},
|
||||||
|
"text_config_dict": {
|
||||||
|
"hidden_size": 768,
|
||||||
|
"intermediate_size": 3072,
|
||||||
|
"num_attention_heads": 12,
|
||||||
|
"num_hidden_layers": 12,
|
||||||
|
"projection_dim": 768
|
||||||
|
},
|
||||||
|
"torch_dtype": "float32",
|
||||||
|
"transformers_version": null,
|
||||||
|
"vision_config": {
|
||||||
|
"_name_or_path": "",
|
||||||
|
"add_cross_attention": false,
|
||||||
|
"architectures": null,
|
||||||
|
"attention_dropout": 0.0,
|
||||||
|
"bad_words_ids": null,
|
||||||
|
"bos_token_id": null,
|
||||||
|
"chunk_size_feed_forward": 0,
|
||||||
|
"cross_attention_hidden_size": null,
|
||||||
|
"decoder_start_token_id": null,
|
||||||
|
"diversity_penalty": 0.0,
|
||||||
|
"do_sample": false,
|
||||||
|
"dropout": 0.0,
|
||||||
|
"early_stopping": false,
|
||||||
|
"encoder_no_repeat_ngram_size": 0,
|
||||||
|
"eos_token_id": null,
|
||||||
|
"finetuning_task": null,
|
||||||
|
"forced_bos_token_id": null,
|
||||||
|
"forced_eos_token_id": null,
|
||||||
|
"hidden_act": "quick_gelu",
|
||||||
|
"hidden_size": 1024,
|
||||||
|
"id2label": {
|
||||||
|
"0": "LABEL_0",
|
||||||
|
"1": "LABEL_1"
|
||||||
|
},
|
||||||
|
"image_size": 224,
|
||||||
|
"initializer_factor": 1.0,
|
||||||
|
"initializer_range": 0.02,
|
||||||
|
"intermediate_size": 4096,
|
||||||
|
"is_decoder": false,
|
||||||
|
"is_encoder_decoder": false,
|
||||||
|
"label2id": {
|
||||||
|
"LABEL_0": 0,
|
||||||
|
"LABEL_1": 1
|
||||||
|
},
|
||||||
|
"layer_norm_eps": 1e-05,
|
||||||
|
"length_penalty": 1.0,
|
||||||
|
"max_length": 20,
|
||||||
|
"min_length": 0,
|
||||||
|
"model_type": "clip_vision_model",
|
||||||
|
"no_repeat_ngram_size": 0,
|
||||||
|
"num_attention_heads": 16,
|
||||||
|
"num_beam_groups": 1,
|
||||||
|
"num_beams": 1,
|
||||||
|
"num_hidden_layers": 24,
|
||||||
|
"num_return_sequences": 1,
|
||||||
|
"output_attentions": false,
|
||||||
|
"output_hidden_states": false,
|
||||||
|
"output_scores": false,
|
||||||
|
"pad_token_id": null,
|
||||||
|
"patch_size": 14,
|
||||||
|
"prefix": null,
|
||||||
|
"problem_type": null,
|
||||||
|
"projection_dim" : 768,
|
||||||
|
"pruned_heads": {},
|
||||||
|
"remove_invalid_values": false,
|
||||||
|
"repetition_penalty": 1.0,
|
||||||
|
"return_dict": true,
|
||||||
|
"return_dict_in_generate": false,
|
||||||
|
"sep_token_id": null,
|
||||||
|
"task_specific_params": null,
|
||||||
|
"temperature": 1.0,
|
||||||
|
"tie_encoder_decoder": false,
|
||||||
|
"tie_word_embeddings": true,
|
||||||
|
"tokenizer_class": null,
|
||||||
|
"top_k": 50,
|
||||||
|
"top_p": 1.0,
|
||||||
|
"torch_dtype": null,
|
||||||
|
"torchscript": false,
|
||||||
|
"transformers_version": "4.16.0.dev0",
|
||||||
|
"use_bfloat16": false
|
||||||
|
},
|
||||||
|
"vision_config_dict": {
|
||||||
|
"hidden_size": 1024,
|
||||||
|
"intermediate_size": 4096,
|
||||||
|
"num_attention_heads": 16,
|
||||||
|
"num_hidden_layers": 24,
|
||||||
|
"patch_size": 14,
|
||||||
|
"projection_dim": 768
|
||||||
|
}
|
||||||
|
}
|
332
installer/patches/sd_custom.patch
Normal file
@ -0,0 +1,332 @@
|
|||||||
|
diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
|
||||||
|
index b967b55..35ef520 100644
|
||||||
|
--- a/optimizedSD/ddpm.py
|
||||||
|
+++ b/optimizedSD/ddpm.py
|
||||||
|
@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
|
||||||
|
from ldm.modules.diffusionmodules.util import make_beta_schedule
|
||||||
|
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
|
||||||
|
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
|
||||||
|
-from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
|
||||||
|
+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
|
||||||
|
|
||||||
|
def disabled_train(self):
|
||||||
|
"""Overwrite model.train with this function to make sure train/eval mode
|
||||||
|
@@ -506,6 +506,8 @@ class UNet(DDPM):
|
||||||
|
|
||||||
|
x_latent = noise if x0 is None else x0
|
||||||
|
# sampling
|
||||||
|
+ if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
|
||||||
|
+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
|
||||||
|
|
||||||
|
if sampler == "plms":
|
||||||
|
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
|
||||||
|
@@ -528,39 +530,46 @@ class UNet(DDPM):
|
||||||
|
elif sampler == "ddim":
|
||||||
|
samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- mask = mask,init_latent=x_T,use_original_steps=False)
|
||||||
|
+ mask = mask,init_latent=x_T,use_original_steps=False,
|
||||||
|
+ callback=callback, img_callback=img_callback)
|
||||||
|
|
||||||
|
elif sampler == "euler":
|
||||||
|
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
|
||||||
|
samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
elif sampler == "euler_a":
|
||||||
|
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
|
||||||
|
samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
|
||||||
|
elif sampler == "dpm2":
|
||||||
|
samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
elif sampler == "heun":
|
||||||
|
samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
|
||||||
|
elif sampler == "dpm2_a":
|
||||||
|
samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
|
||||||
|
|
||||||
|
elif sampler == "lms":
|
||||||
|
samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
|
||||||
|
- unconditional_guidance_scale=unconditional_guidance_scale)
|
||||||
|
+ unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
+ img_callback=img_callback)
|
||||||
|
+
|
||||||
|
+ yield from samples
|
||||||
|
|
||||||
|
if(self.turbo):
|
||||||
|
self.model1.to("cpu")
|
||||||
|
self.model2.to("cpu")
|
||||||
|
|
||||||
|
- return samples
|
||||||
|
-
|
||||||
|
@torch.no_grad()
|
||||||
|
def plms_sampling(self, cond,b, img,
|
||||||
|
ddim_use_original_steps=False,
|
||||||
|
@@ -599,10 +608,10 @@ class UNet(DDPM):
|
||||||
|
old_eps.append(e_t)
|
||||||
|
if len(old_eps) >= 4:
|
||||||
|
old_eps.pop(0)
|
||||||
|
- if callback: callback(i)
|
||||||
|
- if img_callback: img_callback(pred_x0, i)
|
||||||
|
+ if callback: yield from callback(i)
|
||||||
|
+ if img_callback: yield from img_callback(pred_x0, i)
|
||||||
|
|
||||||
|
- return img
|
||||||
|
+ yield from img_callback(img, len(iterator)-1)
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
|
||||||
|
@@ -706,7 +715,8 @@ class UNet(DDPM):
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
|
||||||
|
- mask = None,init_latent=None,use_original_steps=False):
|
||||||
|
+ mask = None,init_latent=None,use_original_steps=False,
|
||||||
|
+ callback=None, img_callback=None):
|
||||||
|
|
||||||
|
timesteps = self.ddim_timesteps
|
||||||
|
timesteps = timesteps[:t_start]
|
||||||
|
@@ -730,10 +740,13 @@ class UNet(DDPM):
|
||||||
|
unconditional_guidance_scale=unconditional_guidance_scale,
|
||||||
|
unconditional_conditioning=unconditional_conditioning)
|
||||||
|
|
||||||
|
+ if callback: yield from callback(i)
|
||||||
|
+ if img_callback: yield from img_callback(x_dec, i)
|
||||||
|
+
|
||||||
|
if mask is not None:
|
||||||
|
- return x0 * mask + (1. - mask) * x_dec
|
||||||
|
+ x_dec = x0 * mask + (1. - mask) * x_dec
|
||||||
|
|
||||||
|
- return x_dec
|
||||||
|
+ yield from img_callback(x_dec, len(iterator)-1)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
@@ -779,13 +792,16 @@ class UNet(DDPM):
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
|
||||||
|
+ def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
|
||||||
|
+ img_callback=None):
|
||||||
|
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
cvd = CompVisDenoiser(ac)
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
s_in = x.new_ones([x.shape[0]]).half()
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
|
||||||
|
@@ -807,13 +823,18 @@ class UNet(DDPM):
|
||||||
|
d = to_d(x, sigma_hat, denoised)
|
||||||
|
if callback is not None:
|
||||||
|
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
|
||||||
|
+
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
+
|
||||||
|
dt = sigmas[i + 1] - sigma_hat
|
||||||
|
# Euler method
|
||||||
|
x = x + d * dt
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
|
||||||
|
+ def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
|
||||||
|
+ img_callback=None):
|
||||||
|
"""Ancestral sampling with Euler method steps."""
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
|
||||||
|
@@ -822,6 +843,8 @@ class UNet(DDPM):
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
s_in = x.new_ones([x.shape[0]]).half()
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
|
||||||
|
@@ -837,17 +860,22 @@ class UNet(DDPM):
|
||||||
|
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
|
||||||
|
if callback is not None:
|
||||||
|
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
|
||||||
|
+
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
+
|
||||||
|
d = to_d(x, sigmas[i], denoised)
|
||||||
|
# Euler method
|
||||||
|
dt = sigma_down - sigmas[i]
|
||||||
|
x = x + d * dt
|
||||||
|
x = x + torch.randn_like(x) * sigma_up
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
|
||||||
|
+ def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
|
||||||
|
+ img_callback=None):
|
||||||
|
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
|
||||||
|
@@ -855,6 +883,8 @@ class UNet(DDPM):
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
|
||||||
|
s_in = x.new_ones([x.shape[0]]).half()
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
@@ -876,6 +906,9 @@ class UNet(DDPM):
|
||||||
|
d = to_d(x, sigma_hat, denoised)
|
||||||
|
if callback is not None:
|
||||||
|
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
|
||||||
|
+
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
+
|
||||||
|
dt = sigmas[i + 1] - sigma_hat
|
||||||
|
if sigmas[i + 1] == 0:
|
||||||
|
# Euler method
|
||||||
|
@@ -895,11 +928,13 @@ class UNet(DDPM):
|
||||||
|
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
|
||||||
|
d_prime = (d + d_2) / 2
|
||||||
|
x = x + d_prime * dt
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
|
||||||
|
+ def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
|
||||||
|
+ img_callback=None):
|
||||||
|
"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
|
||||||
|
@@ -907,6 +942,8 @@ class UNet(DDPM):
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
s_in = x.new_ones([x.shape[0]]).half()
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
|
||||||
|
@@ -924,7 +961,7 @@ class UNet(DDPM):
|
||||||
|
e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
|
||||||
|
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
||||||
|
|
||||||
|
-
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
|
||||||
|
d = to_d(x, sigma_hat, denoised)
|
||||||
|
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
|
||||||
|
@@ -945,11 +982,13 @@ class UNet(DDPM):
|
||||||
|
|
||||||
|
d_2 = to_d(x_2, sigma_mid, denoised_2)
|
||||||
|
x = x + d_2 * dt_2
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
|
||||||
|
+ def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
|
||||||
|
+ img_callback=None):
|
||||||
|
"""Ancestral sampling with DPM-Solver inspired second-order steps."""
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
|
||||||
|
@@ -957,6 +996,8 @@ class UNet(DDPM):
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
s_in = x.new_ones([x.shape[0]]).half()
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
|
||||||
|
@@ -973,6 +1014,9 @@ class UNet(DDPM):
|
||||||
|
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
|
||||||
|
if callback is not None:
|
||||||
|
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
|
||||||
|
+
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
+
|
||||||
|
d = to_d(x, sigmas[i], denoised)
|
||||||
|
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
|
||||||
|
sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
|
||||||
|
@@ -993,11 +1037,13 @@ class UNet(DDPM):
|
||||||
|
d_2 = to_d(x_2, sigma_mid, denoised_2)
|
||||||
|
x = x + d_2 * dt_2
|
||||||
|
x = x + torch.randn_like(x) * sigma_up
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
- def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
|
||||||
|
+ def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
|
||||||
|
+ img_callback=None):
|
||||||
|
extra_args = {} if extra_args is None else extra_args
|
||||||
|
s_in = x.new_ones([x.shape[0]])
|
||||||
|
|
||||||
|
@@ -1005,6 +1051,8 @@ class UNet(DDPM):
|
||||||
|
sigmas = cvd.get_sigmas(S)
|
||||||
|
x = x*sigmas[0]
|
||||||
|
|
||||||
|
+ print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
|
||||||
|
+
|
||||||
|
ds = []
|
||||||
|
for i in trange(len(sigmas) - 1, disable=disable):
|
||||||
|
|
||||||
|
@@ -1017,6 +1065,7 @@ class UNet(DDPM):
|
||||||
|
e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
|
||||||
|
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
||||||
|
|
||||||
|
+ if img_callback: yield from img_callback(x, i)
|
||||||
|
|
||||||
|
d = to_d(x, sigmas[i], denoised)
|
||||||
|
ds.append(d)
|
||||||
|
@@ -1027,4 +1076,5 @@ class UNet(DDPM):
|
||||||
|
cur_order = min(i + 1, order)
|
||||||
|
coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
|
||||||
|
x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
||||||
|
- return x
|
||||||
|
+
|
||||||
|
+ yield from img_callback(x, len(sigmas)-1)
|
||||||
|
diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
|
||||||
|
index abc3098..7a32ffe 100644
|
||||||
|
--- a/optimizedSD/openaimodelSplit.py
|
||||||
|
+++ b/optimizedSD/openaimodelSplit.py
|
||||||
|
@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
|
||||||
|
normalization,
|
||||||
|
timestep_embedding,
|
||||||
|
)
|
||||||
|
-from splitAttention import SpatialTransformer
|
||||||
|
+from .splitAttention import SpatialTransformer
|
||||||
|
|
||||||
|
|
||||||
|
class AttentionPool2d(nn.Module):
|
13
installer/patches/sd_env_yaml.patch
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
diff --git a/environment.yaml b/environment.yaml
|
||||||
|
index 7f25da8..306750f 100644
|
||||||
|
--- a/environment.yaml
|
||||||
|
+++ b/environment.yaml
|
||||||
|
@@ -23,6 +23,8 @@ dependencies:
|
||||||
|
- torch-fidelity==0.3.0
|
||||||
|
- transformers==4.19.2
|
||||||
|
- torchmetrics==0.6.0
|
||||||
|
+ - pywavelets==1.3.0
|
||||||
|
+ - pandas==1.4.4
|
||||||
|
- kornia==0.6
|
||||||
|
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||||
|
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
@ -3,5 +3,5 @@ channels:
|
|||||||
- defaults
|
- defaults
|
||||||
- conda-forge
|
- conda-forge
|
||||||
dependencies:
|
dependencies:
|
||||||
- conda
|
|
||||||
- git
|
- git
|
||||||
|
- python=3.10.5
|
47
installer/yaml/sd-environment-mac-nvidia.yaml
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
name: ldm
|
||||||
|
channels:
|
||||||
|
- pytorch
|
||||||
|
- conda-forge
|
||||||
|
dependencies:
|
||||||
|
- python==3.10.5
|
||||||
|
- pip==22.2.2
|
||||||
|
|
||||||
|
- pytorch
|
||||||
|
- torchvision
|
||||||
|
|
||||||
|
- albumentations==1.2.1
|
||||||
|
- coloredlogs==15.0.1
|
||||||
|
- einops==0.4.1
|
||||||
|
- grpcio==1.46.4
|
||||||
|
- humanfriendly==10.0
|
||||||
|
- imageio==2.21.2
|
||||||
|
- imageio-ffmpeg==0.4.7
|
||||||
|
- imgaug==0.4.0
|
||||||
|
- kornia==0.6.7
|
||||||
|
- mpmath==1.2.1
|
||||||
|
- nomkl
|
||||||
|
- numpy==1.23.2
|
||||||
|
- omegaconf==2.1.1
|
||||||
|
- onnx==1.12.0
|
||||||
|
- onnxruntime==1.12.1
|
||||||
|
- pudb==2022.1
|
||||||
|
- pytorch-lightning==1.6.5
|
||||||
|
- scipy==1.9.1
|
||||||
|
- streamlit==1.12.2
|
||||||
|
- sympy==1.10.1
|
||||||
|
- tensorboard==2.9.0
|
||||||
|
- torchmetrics==0.9.3
|
||||||
|
- antlr4-python3-runtime=4.8
|
||||||
|
- pip:
|
||||||
|
- opencv-python==4.6.0.66
|
||||||
|
- realesrgan==0.2.5.0
|
||||||
|
- test-tube==0.7.5
|
||||||
|
- transformers==4.21.2
|
||||||
|
- torch-fidelity==0.3.0
|
||||||
|
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||||
|
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
|
- -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN
|
||||||
|
- -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan
|
||||||
|
- -e .
|
||||||
|
variables:
|
||||||
|
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
33
installer/yaml/sd-environment-win-linux-nvidia.yaml
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
name: ldm
|
||||||
|
channels:
|
||||||
|
- pytorch
|
||||||
|
- defaults
|
||||||
|
- conda-forge
|
||||||
|
dependencies:
|
||||||
|
- python=3.10.5
|
||||||
|
- pip=20.3
|
||||||
|
- cudatoolkit=11.3
|
||||||
|
- pytorch=1.11.0
|
||||||
|
- torchvision=0.12.0
|
||||||
|
- numpy=1.23.2
|
||||||
|
- antlr4-python3-runtime=4.8
|
||||||
|
- pip:
|
||||||
|
- albumentations==0.4.3
|
||||||
|
- opencv-python==4.6.0.66
|
||||||
|
- pudb==2019.2
|
||||||
|
- imageio==2.9.0
|
||||||
|
- imageio-ffmpeg==0.4.2
|
||||||
|
- pytorch-lightning==1.4.2
|
||||||
|
- omegaconf==2.1.1
|
||||||
|
- test-tube>=0.7.5
|
||||||
|
- streamlit>=0.73.1
|
||||||
|
- einops==0.3.0
|
||||||
|
- torch-fidelity==0.3.0
|
||||||
|
- transformers==4.19.2
|
||||||
|
- torchmetrics==0.6.0
|
||||||
|
- kornia==0.6
|
||||||
|
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||||
|
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
|
- -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN
|
||||||
|
- -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan
|
||||||
|
- -e .
|
Before Width: | Height: | Size: 22 KiB |
Before Width: | Height: | Size: 29 KiB |
Before Width: | Height: | Size: 55 KiB |
BIN
media/config-v6.jpg
Normal file
After Width: | Height: | Size: 48 KiB |
BIN
media/ding.mp3
BIN
media/download buttons.xcf
Normal file
BIN
media/download-linux.png
Normal file
After Width: | Height: | Size: 14 KiB |
BIN
media/download-win.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
media/modifiers-v1.jpg
Normal file
After Width: | Height: | Size: 83 KiB |
Before Width: | Height: | Size: 122 KiB |
Before Width: | Height: | Size: 67 KiB |
Before Width: | Height: | Size: 244 KiB |
BIN
media/shot-v9.jpg
Normal file
After Width: | Height: | Size: 199 KiB |
BIN
media/system-settings-v2.jpg
Normal file
After Width: | Height: | Size: 40 KiB |
@ -1 +0,0 @@
|
|||||||
installer\Scripts\activate.bat
|
|
@ -1,30 +0,0 @@
|
|||||||
@echo. & echo "Stable Diffusion UI" & echo.
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
|
|
||||||
@>nul grep -c "sd_ui_git_cloned" scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Stable Diffusion UI's git repository was already installed. Updating.."
|
|
||||||
|
|
||||||
@cd sd-ui-files
|
|
||||||
|
|
||||||
@call git reset --hard
|
|
||||||
@call git pull
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading Stable Diffusion UI.." & echo.
|
|
||||||
|
|
||||||
@call git clone https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files && (
|
|
||||||
@echo sd_ui_git_cloned >> scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@echo "Error downloading Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
|
|
||||||
pause
|
|
||||||
@exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@xcopy sd-ui-files\ui ui /s /i /Y
|
|
||||||
@xcopy sd-ui-files\scripts scripts /s /i /Y
|
|
||||||
|
|
||||||
@call scripts\on_sd_start.bat
|
|
@ -1,28 +0,0 @@
|
|||||||
printf "\n\nStable Diffusion UI\n\n"
|
|
||||||
|
|
||||||
if [ -f "scripts/install_status.txt" ] && [ `grep -c sd_ui_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
|
||||||
echo "Stable Diffusion UI's git repository was already installed. Updating.."
|
|
||||||
|
|
||||||
cd sd-ui-files
|
|
||||||
|
|
||||||
git reset --hard
|
|
||||||
git pull
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading Stable Diffusion UI..\n\n"
|
|
||||||
|
|
||||||
if git clone https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files ; then
|
|
||||||
echo sd_ui_git_cloned >> scripts/install_status.txt
|
|
||||||
else
|
|
||||||
printf "\n\nError downloading Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\n\n"
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
cp -Rf sd-ui-files/ui ui
|
|
||||||
cp -Rf sd-ui-files/scripts/* scripts/
|
|
||||||
cp "scripts/start.sh" .
|
|
||||||
|
|
||||||
./scripts/on_sd_start.sh
|
|
@ -1,104 +0,0 @@
|
|||||||
@set cmd_had_error=F
|
|
||||||
|
|
||||||
@>nul grep -c "sd_git_cloned" scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Stable Diffusion's git repository was already installed. Updating.."
|
|
||||||
|
|
||||||
@cd stable-diffusion
|
|
||||||
|
|
||||||
@call git reset --hard
|
|
||||||
@call git pull
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading Stable Diffusion.." & echo.
|
|
||||||
|
|
||||||
@call git clone https://github.com/basujindal/stable-diffusion.git && (
|
|
||||||
@echo sd_git_cloned >> scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@set cmd_had_error=T
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%ERRORLEVEL%" NEQ "0" (
|
|
||||||
@set cmd_had_error=T
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%cmd_had_error%"=="T" (
|
|
||||||
@echo "Error downloading Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
|
|
||||||
pause
|
|
||||||
@exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@cd stable-diffusion
|
|
||||||
|
|
||||||
@>nul grep -c "conda_sd_env_created" ..\scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Packages necessary for Stable Diffusion were already installed"
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion.." & echo. & echo "***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** .." & echo.
|
|
||||||
|
|
||||||
@rmdir /s /q .\env
|
|
||||||
|
|
||||||
@call conda env create --prefix env -f environment.yaml && (
|
|
||||||
@echo conda_sd_env_created >> ..\scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@set cmd_had_error=T
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%cmd_had_error%"=="T" (
|
|
||||||
echo "Error installing the packages necessary for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@call conda activate .\env
|
|
||||||
|
|
||||||
@>nul grep -c "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion UI.." & echo.
|
|
||||||
|
|
||||||
@call conda install -c conda-forge -y --prefix env uvicorn fastapi && (
|
|
||||||
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@set cmd_had_error=T
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%ERRORLEVEL%" NEQ "0" (
|
|
||||||
@set cmd_had_error=T
|
|
||||||
)
|
|
||||||
|
|
||||||
if "%cmd_had_error%"=="T" (
|
|
||||||
echo "Error installing the packages necessary for Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@if exist "sd-v1-4.ckpt" (
|
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
|
|
||||||
|
|
||||||
@call curl -L https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
|
|
||||||
|
|
||||||
@if not exist "sd-v1-4.ckpt" (
|
|
||||||
echo "Error downloading the data files (weights) for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
|
||||||
@echo sd_install_complete >> ..\scripts\install_status.txt
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo. & echo "Stable Diffusion is ready!" & echo.
|
|
||||||
|
|
||||||
@set SD_UI_PATH=%cd%\..\ui
|
|
||||||
|
|
||||||
@uvicorn server:app --app-dir "%SD_UI_PATH%" --port 9000 --host 0.0.0.0
|
|
||||||
|
|
||||||
@pause
|
|
@ -1,80 +0,0 @@
|
|||||||
source installer/etc/profile.d/conda.sh
|
|
||||||
|
|
||||||
if [ `grep -c sd_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
|
||||||
echo "Stable Diffusion's git repository was already installed. Updating.."
|
|
||||||
|
|
||||||
cd stable-diffusion
|
|
||||||
|
|
||||||
git reset --hard
|
|
||||||
git pull
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading Stable Diffusion..\n\n"
|
|
||||||
|
|
||||||
if git clone https://github.com/basujindal/stable-diffusion.git ; then
|
|
||||||
echo sd_git_cloned >> scripts/install_status.txt
|
|
||||||
else
|
|
||||||
printf "\n\nError downloading Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\n\n"
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd stable-diffusion
|
|
||||||
|
|
||||||
if [ `grep -c conda_sd_env_created ../scripts/install_status.txt` -gt "0" ]; then
|
|
||||||
echo "Packages necessary for Stable Diffusion were already installed"
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion..\n"
|
|
||||||
printf "\n\n***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** ..\n\n"
|
|
||||||
|
|
||||||
if conda env create --prefix env --force -f environment.yaml ; then
|
|
||||||
echo conda_sd_env_created >> ../scripts/install_status.txt
|
|
||||||
else
|
|
||||||
printf "\n\nError installing the packages necessary for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\n\n"
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
conda activate ./env
|
|
||||||
|
|
||||||
if [ `grep -c conda_sd_ui_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion UI..\n\n"
|
|
||||||
|
|
||||||
if conda install -c conda-forge --prefix ./env -y uvicorn fastapi ; then
|
|
||||||
echo conda_sd_ui_deps_installed >> ../scripts/install_status.txt
|
|
||||||
else
|
|
||||||
printf "\n\nError installing the packages necessary for Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\n\n"
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -f "sd-v1-4.ckpt" ]; then
|
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
|
|
||||||
else
|
|
||||||
echo "Downloading data files (weights) for Stable Diffusion.."
|
|
||||||
|
|
||||||
curl -L https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
|
|
||||||
|
|
||||||
if [ ! -f "sd-v1-4.ckpt" ]; then
|
|
||||||
printf "\n\nError downloading the data files (weights) for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\n\n"
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
|
||||||
echo sd_install_complete >> ../scripts/install_status.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
printf "\n\nStable Diffusion is ready!\n\n"
|
|
||||||
|
|
||||||
export SD_UI_PATH=`pwd`/../ui
|
|
||||||
|
|
||||||
uvicorn server:app --app-dir "$SD_UI_PATH" --port 9000 --host 0.0.0.0
|
|
||||||
|
|
||||||
read -p "Press any key to continue"
|
|
@ -1,6 +0,0 @@
|
|||||||
@call conda --version
|
|
||||||
@call git --version
|
|
||||||
|
|
||||||
cd %CONDA_PREFIX%\..\scripts
|
|
||||||
|
|
||||||
on_env_start.bat
|
|
@ -1,10 +0,0 @@
|
|||||||
conda-unpack
|
|
||||||
|
|
||||||
source $CONDA_PREFIX/etc/profile.d/conda.sh
|
|
||||||
|
|
||||||
conda --version
|
|
||||||
git --version
|
|
||||||
|
|
||||||
cd $CONDA_PREFIX/../scripts
|
|
||||||
|
|
||||||
./on_env_start.sh
|
|
@ -1,5 +0,0 @@
|
|||||||
source installer/bin/activate
|
|
||||||
|
|
||||||
conda-unpack
|
|
||||||
|
|
||||||
scripts/on_env_start.sh
|
|
@ -1,2 +0,0 @@
|
|||||||
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' -Name LongPathsEnabled -Type DWord -Value 1
|
|
||||||
pause
|
|
18
start.sh
Executable file
@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
echo "Stable Diffusion UI - v2.5"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
export SD_BASE_DIR=$(pwd)
|
||||||
|
|
||||||
|
echo "Working in $SD_BASE_DIR"
|
||||||
|
|
||||||
|
# Setup the packages required for the installer
|
||||||
|
installer/bootstrap/bootstrap.sh
|
||||||
|
|
||||||
|
# Test the bootstrap
|
||||||
|
git --version
|
||||||
|
python --version
|
||||||
|
|
||||||
|
# Download the rest of the installer and UI
|
||||||
|
installer/installer/start.sh
|
1043
ui/index.html
5
ui/media/drawingboard.min.css
vendored
Normal file
4
ui/media/drawingboard.min.js
vendored
Normal file
BIN
ui/media/favicon-16x16.png
Normal file
After Width: | Height: | Size: 466 B |
BIN
ui/media/favicon-32x32.png
Normal file
After Width: | Height: | Size: 973 B |
2
ui/media/jquery-3.6.1.min.js
vendored
Normal file
BIN
ui/media/kofi.png
Normal file
After Width: | Height: | Size: 11 KiB |
412
ui/media/main.css
Normal file
@ -0,0 +1,412 @@
|
|||||||
|
body {
|
||||||
|
font-family: Arial, Helvetica, sans-serif;
|
||||||
|
font-size: 11pt;
|
||||||
|
background-color: rgb(32, 33, 36);
|
||||||
|
color: #eee;
|
||||||
|
}
|
||||||
|
a {
|
||||||
|
color: rgb(0, 102, 204);
|
||||||
|
}
|
||||||
|
a:visited {
|
||||||
|
color: rgb(0, 102, 204);
|
||||||
|
}
|
||||||
|
label {
|
||||||
|
font-size: 10pt;
|
||||||
|
}
|
||||||
|
#prompt {
|
||||||
|
width: 100%;
|
||||||
|
height: 65pt;
|
||||||
|
box-sizing: border-box;
|
||||||
|
}
|
||||||
|
@media screen and (max-width: 600px) {
|
||||||
|
#prompt {
|
||||||
|
width: 95%;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.image_preview_container {
|
||||||
|
/* display: none; */
|
||||||
|
margin-top: 10pt;
|
||||||
|
}
|
||||||
|
.image_clear_btn {
|
||||||
|
position: absolute;
|
||||||
|
transform: translateX(-50%) translateY(-35%);
|
||||||
|
background: black;
|
||||||
|
color: white;
|
||||||
|
border: 2pt solid #ccc;
|
||||||
|
padding: 0;
|
||||||
|
cursor: pointer;
|
||||||
|
outline: inherit;
|
||||||
|
border-radius: 8pt;
|
||||||
|
width: 16pt;
|
||||||
|
height: 16pt;
|
||||||
|
font-family: Verdana;
|
||||||
|
font-size: 8pt;
|
||||||
|
}
|
||||||
|
.settings-box ul {
|
||||||
|
font-size: 9pt;
|
||||||
|
margin-bottom: 5px;
|
||||||
|
padding-left: 10px;
|
||||||
|
list-style-type: none;
|
||||||
|
}
|
||||||
|
.settings-box li {
|
||||||
|
padding-bottom: 4pt;
|
||||||
|
}
|
||||||
|
.editor-slider {
|
||||||
|
vertical-align: middle;
|
||||||
|
}
|
||||||
|
.outputMsg {
|
||||||
|
font-size: small;
|
||||||
|
padding-bottom: 3pt;
|
||||||
|
}
|
||||||
|
#progressBar {
|
||||||
|
font-size: small;
|
||||||
|
}
|
||||||
|
#footer {
|
||||||
|
font-size: small;
|
||||||
|
padding-left: 10pt;
|
||||||
|
background: none;
|
||||||
|
}
|
||||||
|
#footer-legal {
|
||||||
|
font-size: 8pt;
|
||||||
|
}
|
||||||
|
.imgSeedLabel {
|
||||||
|
position: absolute;
|
||||||
|
transform: translateX(-100%);
|
||||||
|
margin-top: 5pt;
|
||||||
|
margin-left: -5pt;
|
||||||
|
font-size: 10pt;
|
||||||
|
|
||||||
|
background-color: #333;
|
||||||
|
opacity: 0.8;
|
||||||
|
color: #ddd;
|
||||||
|
border-radius: 3pt;
|
||||||
|
padding: 1pt 3pt;
|
||||||
|
}
|
||||||
|
.imgUseBtn {
|
||||||
|
position: absolute;
|
||||||
|
transform: translateX(-100%);
|
||||||
|
margin-top: 30pt;
|
||||||
|
margin-left: -5pt;
|
||||||
|
}
|
||||||
|
.imgSaveBtn {
|
||||||
|
position: absolute;
|
||||||
|
transform: translateX(-100%);
|
||||||
|
margin-top: 55pt;
|
||||||
|
margin-left: -5pt;
|
||||||
|
}
|
||||||
|
.imgItem {
|
||||||
|
display: inline;
|
||||||
|
padding-right: 10px;
|
||||||
|
}
|
||||||
|
.imgItemInfo {
|
||||||
|
opacity: 0.5;
|
||||||
|
}
|
||||||
|
|
||||||
|
#container {
|
||||||
|
width: 90%;
|
||||||
|
margin-left: auto;
|
||||||
|
margin-right: auto;
|
||||||
|
}
|
||||||
|
@media screen and (max-width: 1800px) {
|
||||||
|
#container {
|
||||||
|
width: 100%;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#logo small {
|
||||||
|
font-size: 11pt;
|
||||||
|
}
|
||||||
|
#editor {
|
||||||
|
padding: 5px;
|
||||||
|
}
|
||||||
|
#editor label {
|
||||||
|
font-weight: normal;
|
||||||
|
}
|
||||||
|
.settings-box label small {
|
||||||
|
color: rgb(153, 153, 153);
|
||||||
|
}
|
||||||
|
#preview {
|
||||||
|
padding: 5px;
|
||||||
|
}
|
||||||
|
#editor-inputs {
|
||||||
|
margin-bottom: 20px;
|
||||||
|
}
|
||||||
|
#editor-inputs-prompt {
|
||||||
|
flex: 1;
|
||||||
|
}
|
||||||
|
#editor-inputs .row {
|
||||||
|
padding-bottom: 10px;
|
||||||
|
}
|
||||||
|
#makeImage {
|
||||||
|
border-radius: 6px;
|
||||||
|
}
|
||||||
|
#editor-modifiers h5 {
|
||||||
|
padding: 5pt 0;
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
#makeImage {
|
||||||
|
flex: 0 0 70px;
|
||||||
|
background: rgb(80, 0, 185);
|
||||||
|
border: 2px solid rgb(40, 0, 78);
|
||||||
|
color: rgb(255, 221, 255);
|
||||||
|
width: 100%;
|
||||||
|
height: 30pt;
|
||||||
|
}
|
||||||
|
#makeImage:hover {
|
||||||
|
background: rgb(93, 0, 214);
|
||||||
|
}
|
||||||
|
#stopImage {
|
||||||
|
flex: 0 0 70px;
|
||||||
|
background: rgb(132, 8, 0);
|
||||||
|
border: 2px solid rgb(122, 29, 0);
|
||||||
|
color: rgb(255, 221, 255);
|
||||||
|
width: 100%;
|
||||||
|
height: 30pt;
|
||||||
|
border-radius: 6px;
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
#stopImage:hover {
|
||||||
|
background: rgb(177, 27, 0);
|
||||||
|
}
|
||||||
|
.flex-container {
|
||||||
|
display: flex;
|
||||||
|
}
|
||||||
|
.col-50 {
|
||||||
|
flex: 50%;
|
||||||
|
}
|
||||||
|
.col-fixed-10 {
|
||||||
|
flex: 0 0 380pt;
|
||||||
|
}
|
||||||
|
.col-free {
|
||||||
|
flex: 1;
|
||||||
|
}
|
||||||
|
.collapsible {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
.collapsible-content {
|
||||||
|
display: none;
|
||||||
|
padding-left: 15px;
|
||||||
|
}
|
||||||
|
.collapsible-content h5 {
|
||||||
|
padding: 5pt 0pt;
|
||||||
|
margin: 0;
|
||||||
|
font-size: 10pt;
|
||||||
|
}
|
||||||
|
.collapsible-handle {
|
||||||
|
color: white;
|
||||||
|
padding-right: 5px;
|
||||||
|
}
|
||||||
|
.panel-box {
|
||||||
|
background: rgb(44, 45, 48);
|
||||||
|
border: 1px solid rgb(47, 49, 53);
|
||||||
|
border-radius: 7px;
|
||||||
|
padding: 5px;
|
||||||
|
margin-bottom: 15px;
|
||||||
|
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
|
}
|
||||||
|
.panel-box h4 {
|
||||||
|
margin: 0;
|
||||||
|
padding: 2px 0;
|
||||||
|
}
|
||||||
|
#editor-modifiers .editor-modifiers-leaf {
|
||||||
|
padding-top: 10pt;
|
||||||
|
padding-bottom: 10pt;
|
||||||
|
}
|
||||||
|
#preview {
|
||||||
|
margin-left: 10pt;
|
||||||
|
}
|
||||||
|
img {
|
||||||
|
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
|
}
|
||||||
|
.line-separator {
|
||||||
|
background: rgb(56, 56, 56);
|
||||||
|
height: 1pt;
|
||||||
|
margin: 15pt 0;
|
||||||
|
}
|
||||||
|
#editor-inputs-tags-container {
|
||||||
|
margin-top: 5pt;
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
#server-status {
|
||||||
|
display: inline;
|
||||||
|
float: right;
|
||||||
|
transform: translateY(-5pt);
|
||||||
|
}
|
||||||
|
#server-status-color {
|
||||||
|
/* width: 8pt;
|
||||||
|
height: 8pt;
|
||||||
|
border-radius: 4pt; */
|
||||||
|
font-size: 14pt;
|
||||||
|
color: rgb(128, 87, 0);
|
||||||
|
/* background-color: rgb(197, 1, 1); */
|
||||||
|
/* transform: translateY(15%); */
|
||||||
|
display: inline;
|
||||||
|
}
|
||||||
|
#server-status-msg {
|
||||||
|
color: rgb(128, 87, 0);
|
||||||
|
padding-left: 2pt;
|
||||||
|
font-size: 10pt;
|
||||||
|
}
|
||||||
|
.preview-prompt {
|
||||||
|
font-size: 16pt;
|
||||||
|
margin-bottom: 10pt;
|
||||||
|
}
|
||||||
|
#coffeeButton {
|
||||||
|
height: 23px;
|
||||||
|
transform: translateY(25%);
|
||||||
|
}
|
||||||
|
|
||||||
|
#inpaintingEditor {
|
||||||
|
width: 300pt;
|
||||||
|
height: 300pt;
|
||||||
|
margin-top: 5pt;
|
||||||
|
}
|
||||||
|
.drawing-board-canvas-wrapper {
|
||||||
|
background-size: 100% 100%;
|
||||||
|
}
|
||||||
|
.drawing-board-control > button {
|
||||||
|
background-color: #eee;
|
||||||
|
border-radius: 3pt;
|
||||||
|
}
|
||||||
|
.drawing-board-control-inner {
|
||||||
|
background-color: #eee;
|
||||||
|
border-radius: 3pt;
|
||||||
|
}
|
||||||
|
#inpaintingEditor canvas {
|
||||||
|
opacity: 0.6;
|
||||||
|
}
|
||||||
|
#enable_mask {
|
||||||
|
margin-top: 8pt;
|
||||||
|
}
|
||||||
|
|
||||||
|
#top-nav {
|
||||||
|
padding-top: 3pt;
|
||||||
|
padding-bottom: 15pt;
|
||||||
|
}
|
||||||
|
#top-nav .icon {
|
||||||
|
padding-right: 4pt;
|
||||||
|
font-size: 14pt;
|
||||||
|
transform: translateY(1pt);
|
||||||
|
}
|
||||||
|
#logo {
|
||||||
|
display: inline;
|
||||||
|
}
|
||||||
|
#logo h1 {
|
||||||
|
display: inline;
|
||||||
|
}
|
||||||
|
#top-nav-items {
|
||||||
|
list-style-type: none;
|
||||||
|
display: inline;
|
||||||
|
float: right;
|
||||||
|
}
|
||||||
|
#top-nav-items > li {
|
||||||
|
float: left;
|
||||||
|
display: inline;
|
||||||
|
padding-left: 20pt;
|
||||||
|
cursor: default;
|
||||||
|
}
|
||||||
|
#initial-text {
|
||||||
|
padding-top: 15pt;
|
||||||
|
padding-left: 4pt;
|
||||||
|
}
|
||||||
|
.settings-subheader {
|
||||||
|
font-size: 10pt;
|
||||||
|
font-weight: bold;
|
||||||
|
}
|
||||||
|
.pl-5 {
|
||||||
|
padding-left: 5pt;
|
||||||
|
}
|
||||||
|
#system-settings {
|
||||||
|
width: 360pt;
|
||||||
|
transform: translateX(-100%) translateX(70pt);
|
||||||
|
|
||||||
|
padding-top: 10pt;
|
||||||
|
padding-bottom: 10pt;
|
||||||
|
}
|
||||||
|
#system-settings ul {
|
||||||
|
margin: 0;
|
||||||
|
padding: 0;
|
||||||
|
}
|
||||||
|
#system-settings li {
|
||||||
|
padding-left: 5pt;
|
||||||
|
}
|
||||||
|
#community-links {
|
||||||
|
list-style-type: none;
|
||||||
|
margin: 0;
|
||||||
|
padding: 12pt;
|
||||||
|
padding-bottom: 0pt;
|
||||||
|
transform: translateX(-15%);
|
||||||
|
}
|
||||||
|
#community-links li {
|
||||||
|
padding-bottom: 12pt;
|
||||||
|
display: block;
|
||||||
|
font-size: 10pt;
|
||||||
|
}
|
||||||
|
#community-links li .fa-fw {
|
||||||
|
padding-right: 2pt;
|
||||||
|
}
|
||||||
|
#community-links li a {
|
||||||
|
color: white;
|
||||||
|
text-decoration: none;
|
||||||
|
}
|
||||||
|
.dropdown {
|
||||||
|
overflow: hidden;
|
||||||
|
}
|
||||||
|
.dropdown-content {
|
||||||
|
display: none;
|
||||||
|
position: absolute;
|
||||||
|
z-index: 2;
|
||||||
|
|
||||||
|
background: rgb(18, 18, 19);
|
||||||
|
border: 2px solid rgb(37, 38, 41);
|
||||||
|
border-radius: 7px;
|
||||||
|
padding: 5px;
|
||||||
|
margin-bottom: 15px;
|
||||||
|
box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
|
}
|
||||||
|
.dropdown:hover .dropdown-content {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
|
||||||
|
.imageTaskContainer {
|
||||||
|
border: 1px solid #333;
|
||||||
|
margin-bottom: 10pt;
|
||||||
|
padding: 5pt;
|
||||||
|
border-radius: 5pt;
|
||||||
|
box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
|
}
|
||||||
|
.taskStatusLabel {
|
||||||
|
float: left;
|
||||||
|
font-size: 8pt;
|
||||||
|
background:rgb(44, 45, 48);
|
||||||
|
border: 1px solid rgb(61, 62, 66);
|
||||||
|
padding: 2pt 4pt;
|
||||||
|
border-radius: 2pt;
|
||||||
|
margin-right: 5pt;
|
||||||
|
}
|
||||||
|
.activeTaskLabel {
|
||||||
|
background:rgb(0, 90, 30);
|
||||||
|
border: 1px solid rgb(0, 75, 19);
|
||||||
|
color:rgb(204, 255, 217)
|
||||||
|
}
|
||||||
|
.secondaryButton {
|
||||||
|
background: rgb(132, 8, 0);
|
||||||
|
border: 1px solid rgb(122, 29, 0);
|
||||||
|
color: rgb(255, 221, 255);
|
||||||
|
padding: 3pt 6pt;
|
||||||
|
border-radius: 5px;
|
||||||
|
}
|
||||||
|
.secondaryButton:hover {
|
||||||
|
background: rgb(177, 27, 0);
|
||||||
|
}
|
||||||
|
.stopTask {
|
||||||
|
float: right;
|
||||||
|
}
|
||||||
|
#preview-tools {
|
||||||
|
display: none;
|
||||||
|
padding: 4pt;
|
||||||
|
}
|
||||||
|
.taskConfig {
|
||||||
|
font-size: 10pt;
|
||||||
|
color: #aaa;
|
||||||
|
margin-bottom: 5pt;
|
||||||
|
}
|
1351
ui/media/main.js
Normal file
216
ui/media/modifier-thumbnails.css
Normal file
@ -0,0 +1,216 @@
|
|||||||
|
.modifier-card {
|
||||||
|
box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
|
||||||
|
transition: 0.1s;
|
||||||
|
border-radius: 7px;
|
||||||
|
margin: 3pt 3pt;
|
||||||
|
float: left;
|
||||||
|
width: 8em;
|
||||||
|
height: 11.5em;
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
grid-template-rows: 8em 3.5em;
|
||||||
|
gap: 0px 0px;
|
||||||
|
grid-auto-flow: row;
|
||||||
|
grid-template-areas:
|
||||||
|
"modifier-card-image-container"
|
||||||
|
"modifier-card-container";
|
||||||
|
border: 2px solid rgba(255, 255, 255, .05);
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
.modifier-card-size_5 {
|
||||||
|
width: 18em;
|
||||||
|
grid-template-rows: 18em 3.5em;
|
||||||
|
height: 21.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_5 .modifier-card-image-overlay {
|
||||||
|
font-size: 8em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_4 {
|
||||||
|
width: 14em;
|
||||||
|
grid-template-rows: 14em 3.5em;
|
||||||
|
height: 17.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_4 .modifier-card-image-overlay {
|
||||||
|
font-size: 7em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_3 {
|
||||||
|
width: 11em;
|
||||||
|
grid-template-rows: 11em 3.5em;
|
||||||
|
height: 14.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_3 .modifier-card-image-overlay {
|
||||||
|
font-size: 6em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_2 {
|
||||||
|
width: 10em;
|
||||||
|
grid-template-rows: 10em 3.5em;
|
||||||
|
height: 13.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_2 .modifier-card-image-overlay {
|
||||||
|
font-size: 6em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_1 {
|
||||||
|
width: 9em;
|
||||||
|
grid-template-rows: 9em 3.5em;
|
||||||
|
height: 12.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_1 .modifier-card-image-overlay {
|
||||||
|
font-size: 5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-1 {
|
||||||
|
width: 7em;
|
||||||
|
grid-template-rows: 7em 3.5em;
|
||||||
|
height: 10.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-1 .modifier-card-image-overlay {
|
||||||
|
font-size: 4em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-2 {
|
||||||
|
width: 6em;
|
||||||
|
grid-template-rows: 6em 3.5em;
|
||||||
|
height: 9.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-2 .modifier-card-image-overlay {
|
||||||
|
font-size: 3em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-3 {
|
||||||
|
width: 5em;
|
||||||
|
grid-template-rows: 5em 3.5em;
|
||||||
|
height: 8.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-3 .modifier-card-image-overlay {
|
||||||
|
font-size: 3em;
|
||||||
|
}
|
||||||
|
.modifier-card-size_-3 .modifier-card-label {
|
||||||
|
font-size: 0.8em;
|
||||||
|
}
|
||||||
|
.modifier-card-tiny {
|
||||||
|
width: 6em;
|
||||||
|
height: 9.5em;
|
||||||
|
grid-template-rows: 6em 3.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-tiny .modifier-card-image-overlay {
|
||||||
|
font-size: 4em;
|
||||||
|
}
|
||||||
|
.modifier-card:hover {
|
||||||
|
transform: scale(1.05);
|
||||||
|
box-shadow: 0 5px 16px 5px rgba(0, 0, 0, 0.25);
|
||||||
|
}
|
||||||
|
.modifier-card-image-container {
|
||||||
|
border-radius: 5px 5px 0 0;
|
||||||
|
width: inherit;
|
||||||
|
height: 100%;
|
||||||
|
background-color: rgba(0, 0, 0, .2);
|
||||||
|
grid-area: modifier-card-image-container;
|
||||||
|
position: relative;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
color: rgb(255 255 255 / 8%);
|
||||||
|
}
|
||||||
|
.modifier-card-image-container img {
|
||||||
|
width: inherit;
|
||||||
|
height: 100%;
|
||||||
|
border-radius: 5px 5px 0 0;
|
||||||
|
}
|
||||||
|
.modifier-card-image-container * {
|
||||||
|
position: absolute;
|
||||||
|
}
|
||||||
|
.modifier-card-container {
|
||||||
|
text-align: center;
|
||||||
|
background-color: rgba(0,0,0,0.5);
|
||||||
|
border-radius: 0 0 5px 5px;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
grid-area: modifier-card-container;
|
||||||
|
font-weight: 100;
|
||||||
|
font-size: .9em;
|
||||||
|
width: inherit;
|
||||||
|
}
|
||||||
|
.modifier-card-label {
|
||||||
|
padding: 4px;
|
||||||
|
word-break: break-word;
|
||||||
|
}
|
||||||
|
.modifier-card-image-overlay {
|
||||||
|
width: inherit;
|
||||||
|
height: inherit;
|
||||||
|
background-color: rgb(0 0 0 / 50%);
|
||||||
|
z-index: 2;
|
||||||
|
position: absolute;
|
||||||
|
border-radius: 5px 5px 0 0;
|
||||||
|
opacity: 0;
|
||||||
|
font-size: 5em;
|
||||||
|
font-weight: 900;
|
||||||
|
color: rgb(255 255 255 / 50%);
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
.modifier-card-overlay {
|
||||||
|
width: inherit;
|
||||||
|
height: inherit;
|
||||||
|
position: absolute;
|
||||||
|
z-index: 3;
|
||||||
|
}
|
||||||
|
.modifier-card:hover > .modifier-card-image-container .modifier-card-image-overlay {
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
.modifier-card:hover > .modifier-card-image-container img {
|
||||||
|
filter: blur(.1em);
|
||||||
|
}
|
||||||
|
.modifier-card:active {
|
||||||
|
transform: scale(0.95);
|
||||||
|
box-shadow: 0 5px 16px 5px rgba(0, 0, 0, 0.5);
|
||||||
|
}
|
||||||
|
#preview-image {
|
||||||
|
margin-top: 0.5em;
|
||||||
|
margin-bottom: 0.5em;
|
||||||
|
}
|
||||||
|
.modifier-card-active {
|
||||||
|
border: 2px solid rgb(179 82 255 / 94%);
|
||||||
|
box-shadow: 0 0px 10px 0 rgb(170 0 229 / 58%);
|
||||||
|
}
|
||||||
|
.tooltip {
|
||||||
|
position: relative;
|
||||||
|
display: inline-block;
|
||||||
|
}
|
||||||
|
.tooltip .tooltip-text {
|
||||||
|
visibility: hidden;
|
||||||
|
width: 120px;
|
||||||
|
background: rgb(101,97,181);
|
||||||
|
background: linear-gradient(180deg, rgba(101,97,181,1) 0%, rgba(47,45,85,1) 100%);
|
||||||
|
color: #fff;
|
||||||
|
text-align: center;
|
||||||
|
border-radius: 6px;
|
||||||
|
padding: 5px;
|
||||||
|
position: absolute;
|
||||||
|
z-index: 1;
|
||||||
|
top: 105%;
|
||||||
|
left: 39%;
|
||||||
|
margin-left: -60px;
|
||||||
|
opacity: 0;
|
||||||
|
transition: opacity 0.3s;
|
||||||
|
border: 2px solid rgb(90 100 177 / 94%);
|
||||||
|
box-shadow: 0px 10px 20px 5px rgb(11 0 58 / 55%);
|
||||||
|
width: 10em;
|
||||||
|
}
|
||||||
|
.tooltip .tooltip-text::after {
|
||||||
|
content: "";
|
||||||
|
position: absolute;
|
||||||
|
top: -0.9em;
|
||||||
|
left: 50%;
|
||||||
|
margin-left: -5px;
|
||||||
|
border-width: 5px;
|
||||||
|
border-style: solid;
|
||||||
|
border-color: transparent transparent rgb(90 100 177 / 94%) transparent;
|
||||||
|
}
|
||||||
|
.tooltip:hover .tooltip-text {
|
||||||
|
visibility: visible;
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
#modifier-card-size-slider {
|
||||||
|
width: 6em;
|
||||||
|
margin-bottom: 0.5em;
|
||||||
|
vertical-align: middle;
|
||||||
|
}
|
BIN
ui/media/modifier-thumbnails/artist/artstation/landscape-0.jpg
Normal file
After Width: | Height: | Size: 38 KiB |
BIN
ui/media/modifier-thumbnails/artist/artstation/portrait-0.jpg
Normal file
After Width: | Height: | Size: 27 KiB |
After Width: | Height: | Size: 23 KiB |
After Width: | Height: | Size: 27 KiB |
After Width: | Height: | Size: 36 KiB |
After Width: | Height: | Size: 33 KiB |
BIN
ui/media/modifier-thumbnails/artist/by_alex_grey/landscape-0.jpg
Normal file
After Width: | Height: | Size: 90 KiB |
BIN
ui/media/modifier-thumbnails/artist/by_alex_grey/portrait-0.jpg
Normal file
After Width: | Height: | Size: 59 KiB |
After Width: | Height: | Size: 66 KiB |
After Width: | Height: | Size: 66 KiB |
After Width: | Height: | Size: 75 KiB |
After Width: | Height: | Size: 45 KiB |
After Width: | Height: | Size: 76 KiB |
After Width: | Height: | Size: 41 KiB |
BIN
ui/media/modifier-thumbnails/artist/by_artgerm/landscape-0.jpg
Normal file
After Width: | Height: | Size: 51 KiB |
BIN
ui/media/modifier-thumbnails/artist/by_artgerm/portrait-0.jpg
Normal file
After Width: | Height: | Size: 34 KiB |
After Width: | Height: | Size: 94 KiB |
After Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 77 KiB |