Compare commits: v1.22...installer_ (495 commits)
3  .github/FUNDING.yml (new file)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

ko_fi: cmdr2_stablediffusion_ui
38  .github/ISSUE_TEMPLATE/bug_report.md (new file)
@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS:
- Browser:
- Version:

**Smartphone (please complete the following information):**
- Device:
- OS:
- Browser:
- Version:

**Additional context**
Add any other context about the problem here.
20  .github/ISSUE_TEMPLATE/feature_request.md (new file)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
2  .gitignore
@@ -1 +1,3 @@
 __pycache__
+dist
+.idea/*
55  CONTRIBUTING.md (new file)
@@ -0,0 +1,55 @@
Hi there, these instructions are meant for the developers of this project.

If you only want to use the Stable Diffusion UI, you've downloaded the wrong file. In that case, please download and follow the instructions at https://github.com/cmdr2/stable-diffusion-ui#installation

Thanks

# For developers:

If you would like to contribute to this project, there is a discord for discussion:
[Discord Server](https://discord.com/invite/u9yhsFmEkB)

## Development environment for UI (frontend and server) changes
This is in flux, but one way to get a development environment running for editing the UI of this project is:
(swap `.sh` or `.bat` in these instructions depending on your environment, and be sure to adjust any paths to match where you're working)

1) `git clone` the repository, e.g. to `/projects/stable-diffusion-ui-repo`
2) Download the pre-built end-user archive from the link on GitHub, and extract it, e.g. to `/projects/stable-diffusion-ui-archive`
3) `cd /projects/stable-diffusion-ui-archive` and run the script to set up and start the project, e.g. `start.sh`
4) Check that you can view and generate images on `localhost:9000`
5) Close the server, and edit `/projects/stable-diffusion-ui-archive/scripts/on_env_start.sh`
6) Comment out the lines near the bottom that copy the `files/ui` folder, e.g.:

for `.sh`
```
# rm -rf ui
# cp -Rf sd-ui-files/ui .
# cp sd-ui-files/scripts/on_sd_start.sh scripts/
# cp sd-ui-files/scripts/start.sh .
```
for `.bat`
```
REM @xcopy sd-ui-files\ui ui /s /i /Y
REM @copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
REM @copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
```
7) Comment out the line at the top of `/projects/stable-diffusion-ui-archive/scripts/on_sd_start.sh` that copies `on_env_start`, e.g. `@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y`
8) Delete the current `ui` folder at `/projects/stable-diffusion-ui-archive/ui`
9) Now make a symlink between the repository clone (where you will be making changes) and this archive (where you will be running Stable Diffusion):
`ln -s /projects/stable-diffusion-ui-repo/ui /projects/stable-diffusion-ui-archive/ui`
or for Windows
`mklink /D \projects\stable-diffusion-ui-archive\ui \projects\stable-diffusion-ui-repo\ui` (link name first, source repo dir second)
10) Run the archive again with `start.sh` and ensure you can still use the UI.
11) Congrats, now any changes you make in your repo's `ui` folder are linked to this running archive of the app and can be previewed in the browser.

Check `ui/frontend/build/README.md` for instructions on running and building the React code.

## Development environment for Installer changes
Build the Windows installer using Windows, and the Linux installer using Linux. Don't mix the two, and don't use WSL. An Ubuntu VM is fine for building the Linux installer on a Windows host.

1. Install Miniconda 3 or Anaconda.
2. Install conda-pack: `conda install -c conda-forge -y conda-pack`
3. Open the Anaconda Prompt. Do not use WSL if you're building for Windows.
4. Run `build.bat` or `./build.sh` depending on whether you're on Windows or Linux.
5. Compress the `stable-diffusion-ui` folder created inside the `dist` folder. Make a `zip` for Windows, and a `tar.xz` for Linux (smaller files, and Linux users already have tar).
6. Make a new GitHub release and upload the Windows and Linux installer builds.
15  Developer Console.cmd (new file)
@@ -0,0 +1,15 @@
@echo off

echo "Opening Stable Diffusion UI - Developer Console.." & echo.

set SD_BASE_DIR=%cd%
set MAMBA_ROOT_PREFIX=%SD_BASE_DIR%\env\mamba
set INSTALL_ENV_DIR=%SD_BASE_DIR%\env\installer_env
set PROJECT_ENV_DIR=%SD_BASE_DIR%\env\project_env

call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"

call micromamba activate "%INSTALL_ENV_DIR%"
call micromamba activate "%PROJECT_ENV_DIR%"

cmd /k
15  Dockerfile (deleted)
@@ -1,15 +0,0 @@
FROM python:3.9

RUN mkdir /app
WORKDIR /app

RUN apt update

COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 9000

ENTRYPOINT ["uvicorn", "main:app", "--reload", "--host", "0.0.0.0", "--port", "9000"]
OldPortDockerfile (deleted)
@@ -1,15 +0,0 @@
FROM python:3.9

RUN mkdir /app
WORKDIR /app

RUN apt update

COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 8000

ENTRYPOINT ["uvicorn", "old_port_main:app", "--host", "0.0.0.0", "--port", "8000"]
103  README.md
@@ -1,41 +1,65 @@
-# Stable Diffusion UI
-### A simple way to install and use [Stable Diffusion](https://replicate.com/stability-ai/stable-diffusion) on your own computer
-
----
-
-🎉 **New!** `img2img` and `inpaint` (masking) are now supported! You can provide an image to generate new images based on it (and an optional text prompt). You can also use the generated image as the new input image in 1-click, to refine it further. (Thanks [Andreas](https://github.com/andreasjansson)!)
-
-# What does this do?
-Two things:
-1. Automatically downloads and installs Stable Diffusion on your own computer (no need to mess with conda or environments)
-2. Gives you a simple browser-based UI to talk to your local Stable Diffusion. Enter text prompts and view the generated image. No API keys required.
-
-All the processing will happen on your computer locally, it does not transmit your prompts or process on any remote server.
+# Stable Diffusion UI v2
+### A simple 1-click way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. No dependencies or technical knowledge required.

 <p float="left">
-  <img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/shot-v3a.jpg" height="500" />
-  <img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/shot-v6a.jpg" height="500" />
+  <a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-win.png" width="200" /></a>
+  <a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-linux.png" width="200" /></a>
 </p>

+[Discord Server](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](Troubleshooting.md)
+
+🔥🎉 **New!** Custom Weights, Task Queue, Negative Prompt, Live Preview, More Samplers, In-Painting, Face Correction (GFPGAN) and Upscaling (RealESRGAN) have been added!
+
+This distribution currently uses Stable Diffusion 1.4. Once the model for 1.5 becomes publicly available, the model in this distribution will be updated.
+
+# Features in the new v2 Version:
+- **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
+- **Face Correction (GFPGAN) and Upscaling (RealESRGAN)**
+- **In-Painting**
+- **Live Preview**: See the image as the AI is drawing it
+- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish
+- **Custom Weights**: Use your own `.ckpt` file, by placing it inside the `stable-diffusion` folder (rename it to `custom-model.ckpt`)
+- **Negative Prompt**: Specify aspects of the image to *remove*.
+- **Lots of Samplers:** ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms
+- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
+- **New UI**: with cleaner design
+- **Waifu Model Support**: Just replace the `stable-diffusion\sd-v1-4.ckpt` file after installation with the Waifu model
+- Supports "*Text to Image*" and "*Image to Image*"
+- **NSFW Setting**: A setting in the UI to control *NSFW content*
+- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
+- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
+- **Low Memory Usage**: Creates 512x512 images with less than 4GB of VRAM!
+
+## Live Preview

 # System Requirements
-1. Computer capable of running Stable Diffusion.
-2. Linux or Windows 11 (with [WSL](https://docs.microsoft.com/en-us/windows/wsl/install)) or Windows 10 v2004+ (Build 19041+) with [WSL](https://docs.microsoft.com/en-us/windows/wsl/install).
-3. Requires (a) [Docker](https://docs.docker.com/engine/install/), (b) [docker-compose v1.29](https://docs.docker.com/compose/install/), and (c) [nvidia-container-toolkit](https://stackoverflow.com/a/58432877).
+1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
+2. An NVIDIA graphics card, preferably with 4GB or more of VRAM. But if you don't have a compatible graphics card, you can still use it with a "Use CPU" setting. It'll be very slow, but it should still work.

-**Important:** If you're using Windows, please install docker inside your [WSL](https://docs.microsoft.com/en-us/windows/wsl/install)'s Linux. Install docker for the Linux distro in your WSL. **Don't install Docker for Windows.**
+You do not need anything else. You do not need WSL, Docker or Conda. The installer will take care of it.

 # Installation
-1. Clone this repository: `git clone https://github.com/cmdr2/stable-diffusion-ui.git` or [download the zip file](https://github.com/cmdr2/stable-diffusion-ui/archive/refs/heads/main.zip) and unzip.
-2. Open your terminal, and in the project directory run: `./server` (warning: this will take some time during the first run, since it'll download Stable Diffusion's [docker image](https://replicate.com/stability-ai/stable-diffusion), nearly 17 GiB)
-3. Open http://localhost:9000 in your browser. That's it!
+1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/stable-diffusion-ui-win64.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/stable-diffusion-ui-linux.tar.xz).

-If you're getting errors, please check the [Troubleshooting](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) page.
+2. **Extract**:
+  - For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
+  - For Linux: After extracting the .tar.xz file, please open a terminal, and go to the `stable-diffusion-ui` directory.
+
+3. **Run**:
+  - For Windows: `Start Stable Diffusion UI.cmd` by double-clicking it.
+  - For Linux: In the terminal, run `./start.sh` (or `bash start.sh`)
+
+This will automatically install Stable Diffusion, set it up, and start the interface. No additional steps are needed.
+
+**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.

-To stop the server, please run `./server stop`
-
 # Usage
-Open http://localhost:9000 in your browser (after running `./server` from step 2 previously).
+Open http://localhost:9000 in your browser (after running step 3 previously). It may take a few moments for the back-end to be ready.

 ## With a text description
 1. Enter a text prompt, like `a photograph of an astronaut riding a horse` in the textbox.
@@ -47,36 +71,37 @@ Open http://localhost:9000 in your browser (after running `./server` from step 2
 2. An optional text prompt can help you further describe the kind of image you want to generate.
 3. Press `Make Image`. See the image generated using your prompt.

 You can also set an `Image Mask` for telling Stable Diffusion to draw in only the black areas in your image mask. White areas in your mask will be ignored.
+You can use Face Correction or Upscaling to improve the image further.

 **Pro tip:** You can also click `Use as Input` on a generated image, to use it as the input image for your next generation. This can be useful for sequentially refining the generated image with a single click.

 **Another tip:** Images with the same aspect ratio as your generated image work best. E.g. 1:1 if you're generating images sized 512x512.

-## Problems?
-Please [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues) if this did not work for you (after trying the common [troubleshooting](#troubleshooting) steps)!
+## Problems? Troubleshooting
+Please try the common [troubleshooting](Troubleshooting.md) steps. If that doesn't fix it, please ask on the [discord server](https://discord.com/invite/u9yhsFmEkB), or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).

-# Advanced Settings
-You can also set the configuration like `seed`, `width`, `height`, `num_outputs`, `num_inference_steps` and `guidance_scale` using the 'show' button next to 'Advanced settings'.
+# Image Settings
+You can also set the configuration like `seed`, `width`, `height`, `num_outputs`, `num_inference_steps` and `guidance_scale` using the 'show' button next to 'Image settings'.

 Use the same `seed` number to get the same image for a certain prompt. This is useful for refining a prompt without losing the basic image design. Enable the `random images` checkbox to get random images.

-# Troubleshooting
-The [Troubleshooting wiki page](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) contains some common errors and their solutions. Please check that, and if it doesn't work, feel free to [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
+# System Settings
+The system settings are reachable via the cogwheel symbol on the top right. They can be used to configure whether all generated images should
+be saved automatically, or to tune the Stable Diffusion image generation.

-# Behind the scenes
-This project is a quick way to get started with Stable Diffusion. You do not need to have Stable Diffusion already installed, and do not need any API keys. This project will automatically download Stable Diffusion's docker image, the first time it is run.
-
-This project runs Stable Diffusion in a docker container behind the scenes, using Stable Diffusion's [Docker image](https://replicate.com/stability-ai/stable-diffusion) on replicate.com.
+# Image Modifiers

 # Bug reports and code contributions welcome
-If there are any problems or suggestions, please feel free to [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
+If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).

-Also, please feel free to submit a pull request, if you have any code contributions in mind.
+Also, please feel free to submit a pull request, if you have any code contributions in mind. Join the [discord server](https://discord.com/invite/u9yhsFmEkB) for development-related discussions, and for helping other users.

 # Disclaimer
 The authors of this project are not responsible for any content generated using this interface.

-This license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please read [the license](LICENSE).
+The license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation, or target vulnerable groups. For the full list of restrictions please read [the license](LICENSE). You agree to these terms by using this software.
25  Start Stable Diffusion UI.cmd (new file)
@@ -0,0 +1,25 @@
@echo off

echo. & echo "Stable Diffusion UI - v2.5" & echo.

set PATH=C:\Windows\System32;%PATH%

set SD_BASE_DIR=%cd%

@rem Confirm or change the installation dir
call installer\bootstrap\check-install-dir.bat

@rem set the vars again, if the installer dir has changed
set SD_BASE_DIR=%cd%

echo Working in %SD_BASE_DIR%

@rem Setup the packages required for the installer
call installer\bootstrap\bootstrap.bat

@rem Test the bootstrap
call git --version
call python --version

@rem Download the rest of the installer and UI
call installer\installer\start.bat
75  Troubleshooting.md (new file)
@@ -0,0 +1,75 @@
Common issues and their solutions. If these solutions don't work, please feel free to ask at the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).

## RuntimeError: CUDA out of memory
This can happen if your PC has less than 6GB of VRAM.

Try disabling the "Turbo mode" setting under "Advanced Settings", since it takes an additional 1 GB of VRAM (to increase the speed).

Additionally, a common reason for this error is an initial image larger than 768x768 pixels. Try using a smaller initial image.

Also try generating smaller-sized images.

## basicsr module not found
For Windows: Please download and extract basicsr from [here](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.16/basicsr-win64.zip), and place the `basicsr` folder inside the `stable-diffusion-ui\stable-diffusion\env\lib\site-packages` folder. Then run the `Start Stable Diffusion UI.cmd` file again.

For Linux: Please ask on the [discord server](https://discord.com/invite/u9yhsFmEkB).

## No ldm found, or antlr4 or any other missing module, or ClobberError: This transaction has incompatible packages due to a shared path
On Windows, please ensure that you placed the `stable-diffusion-ui` folder (after unzipping) at the root of C: or D: (or any drive), e.g. `C:\stable-diffusion-ui`. **Note:** This has to be done **before** you start the installation process. If you have already installed (and are facing this error), please delete the installed folder, and start fresh by unzipping and placing the folder at the top of your drive.

This error can also be caused if you already have conda/miniconda/anaconda installed, due to package conflicts. Please open your Anaconda Prompt, and run `conda clean --all` to clean up unused packages.

If nothing works, this could be due to a corrupted installation. Please try reinstalling, by deleting the installed folder and unzipping the downloaded zip file again.

## Killed uvicorn server:app --app-dir ... --port 9000 --host 0.0.0.0
This happens if your PC ran out of RAM. Stable Diffusion requires a lot of RAM, and needs at least 10 GB of RAM to work well. You can also try closing all other applications before running Stable Diffusion UI.

## Green image generated
This usually happens if you're running an NVIDIA 1650 or 1660 Super. To solve this, please close and re-run the Stable Diffusion command on your computer. If you're using the older Docker-based solution (v1), please upgrade to v2: https://github.com/cmdr2/stable-diffusion-ui/tree/v2#installation

If you're still seeing this error, please try enabling "Full Precision" under "Advanced Settings" in the Stable Diffusion UI.

## './docker-compose.yml' is invalid:
> ERROR: The Compose file './docker-compose.yml' is invalid because:
> services.stability-ai.deploy.resources.reservations value Additional properties are not allowed ('devices' was unexpected)

Please ensure you have `docker-compose` version 1.29 or higher. Check `docker-compose --version`, and if required [update it to 1.29](https://docs.docker.com/compose/install/). (Thanks [HVRyan](https://github.com/HVRyan))

## RuntimeError: Found no NVIDIA driver on your system:
If you have an NVIDIA GPU and the latest [NVIDIA driver](http://www.nvidia.com/Download/index.aspx), please ensure that you've installed [nvidia-container-toolkit](https://stackoverflow.com/a/58432877). (Thanks [u/exintrovert420](https://www.reddit.com/user/exintrovert420/))

## Some other process is already running at port 9000 / port 9000 could not be bound
You can override the port used. Please change `docker-compose.yml` inside the project directory, and update the line `9000:9000` to `1337:9000` (where 1337 is whichever port number you want).

After doing this, please restart your server, by running `./server restart`.

After this, you can access the server at `http://localhost:1337` (where 1337 is the new port you specified earlier).

## RuntimeError: CUDA error: unknown error
Please ensure that you have an NVIDIA GPU and the latest [NVIDIA driver](http://www.nvidia.com/Download/index.aspx), and that you've installed [nvidia-container-toolkit](https://stackoverflow.com/a/58432877).

Also, if you are using WSL (Windows), please ensure you have the latest WSL kernel by running `wsl --shutdown` and then `wsl --update`. (Thanks [AndrWeisR](https://github.com/AndrWeisR))

# For support queries
## Entering a conda environment in an existing installation
This will give you an activated conda environment in the terminal, so you can run commands and force-install any packages, if required.

Users no longer need to have the Anaconda Prompt installed to do this, since the installer bundles a portable version of conda inside it. Just follow these steps.

**Windows:**
1. Open the terminal: Press Win+R, type "cmd", and press Enter
2. Type `cd C:\stable-diffusion-ui` and press enter (or wherever you've installed it)
3. Type `installer\Scripts\activate.bat` and press enter
4. Type `cd stable-diffusion` and press enter
5. Type `conda activate .\env` and press enter
6. Type `python --version` and press enter. You should see 3.8.5.

**Linux:**
1. Open the terminal
2. Type `cd /path/to/stable-diffusion-ui` and press enter
3. Type `installer/bin/activate` and press enter
4. Type `cd stable-diffusion` and press enter
5. Type `conda activate ./env` and press enter
6. Type `python --version` and press enter. You should see 3.8.5.
18  developer_console.sh (new file)
@@ -0,0 +1,18 @@
#!/bin/bash

if [ "$0" == "bash" ]; then
    echo "Opening Stable Diffusion UI - Developer Console.."
    echo ""

    export SD_BASE_DIR=`pwd`
    export MAMBA_ROOT_PREFIX="$SD_BASE_DIR/env/mamba"
    export INSTALL_ENV_DIR="$SD_BASE_DIR/env/installer_env"
    export PROJECT_ENV_DIR="$SD_BASE_DIR/env/project_env"

    eval "$($MAMBA_ROOT_PREFIX/micromamba shell hook -s posix)"

    micromamba activate "$INSTALL_ENV_DIR"
    micromamba activate "$PROJECT_ENV_DIR"
else
    bash --init-file developer_console.sh
fi
docker-compose.yml (deleted)
@@ -1,38 +0,0 @@
version: '3.3'

services:
  stability-ai:
    container_name: sd
    ports:
      - '5000:5000'
    image: 'r8.im/stability-ai/stable-diffusion@sha256:be04660a5b93ef2aff61e3668dedb4cbeb14941e62a3fd5998364a32d613e35e'
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [gpu]

  stable-diffusion-ui:
    container_name: sd-ui
    ports:
      - '9000:9000'
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - .:/app
    depends_on:
      - stability-ai

  stable-diffusion-old-port-redirect:
    container_name: sd-old-port-redirect
    ports:
      - '8000:8000'
    build:
      context: .
      dockerfile: OldPortDockerfile
    volumes:
      - .:/app

networks:
  default:
101  engine/__init__.py (new file)
@@ -0,0 +1,101 @@
import json

class Request:
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    precision: str = "autocast" # or "full"
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    show_only_filtered_image: bool = False

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

    def json(self):
        return {
            "session_id": self.session_id,
            "prompt": self.prompt,
            "negative_prompt": self.negative_prompt,
            "num_outputs": self.num_outputs,
            "num_inference_steps": self.num_inference_steps,
            "guidance_scale": self.guidance_scale,
            "width": self.width,
            "height": self.height,
            "seed": self.seed,
            "prompt_strength": self.prompt_strength,
            "sampler": self.sampler,
            "use_face_correction": self.use_face_correction,
            "use_upscale": self.use_upscale,
        }

    def to_string(self):
        return f'''
    session_id: {self.session_id}
    prompt: {self.prompt}
    negative_prompt: {self.negative_prompt}
    seed: {self.seed}
    num_inference_steps: {self.num_inference_steps}
    sampler: {self.sampler}
    guidance_scale: {self.guidance_scale}
    w: {self.width}
    h: {self.height}
    precision: {self.precision}
    save_to_disk_path: {self.save_to_disk_path}
    turbo: {self.turbo}
    use_cpu: {self.use_cpu}
    use_full_precision: {self.use_full_precision}
    use_face_correction: {self.use_face_correction}
    use_upscale: {self.use_upscale}
    show_only_filtered_image: {self.show_only_filtered_image}

    stream_progress_updates: {self.stream_progress_updates}
    stream_image_progress: {self.stream_image_progress}'''

class Image:
    data: str # base64
    seed: int
    is_nsfw: bool
    path_abs: str = None

    def __init__(self, data, seed):
        self.data = data
        self.seed = seed

    def json(self):
        return {
            "data": self.data,
            "seed": self.seed,
            "path_abs": self.path_abs,
        }

class Response:
    request: Request
    images: list

    def json(self):
        res = {
            "status": 'succeeded',
            "request": self.request.json(),
            "output": [],
        }

        for image in self.images:
            res["output"].append(image.json())

        return res
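For orientation, here is a minimal sketch (not part of the diff) of how these request objects round-trip, using only the fields and methods defined above:

```python
from engine import Request

req = Request()
req.prompt = 'a photograph of an astronaut riding a horse'
req.seed = 42
req.num_inference_steps = 25

payload = req.json()       # dict in the wire format the UI sends/receives
print(payload['prompt'])   # 'a photograph of an astronaut riding a horse'
print(req.to_string())     # multi-line debug dump of all settings
```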
658
engine/runtime.py
Normal file
@ -0,0 +1,658 @@
|
||||
import json
|
||||
import os, re
|
||||
import traceback
|
||||
import torch
|
||||
import numpy as np
|
||||
from omegaconf import OmegaConf
|
||||
from PIL import Image, ImageOps
|
||||
from tqdm import tqdm, trange
|
||||
from itertools import islice
|
||||
from einops import rearrange
|
||||
import time
|
||||
from pytorch_lightning import seed_everything
|
||||
from torch import autocast
|
||||
from contextlib import nullcontext
|
||||
from einops import rearrange, repeat
|
||||
from ldm.util import instantiate_from_config
|
||||
from optimizedSD.optimUtils import split_weighted_subprompts
|
||||
from transformers import logging
|
||||
|
||||
from gfpgan import GFPGANer
|
||||
from basicsr.archs.rrdbnet_arch import RRDBNet
|
||||
from realesrgan import RealESRGANer
|
||||
|
||||
import uuid
|
||||
|
||||
logging.set_verbosity_error()
|
||||
|
||||
# consts
|
||||
config_yaml = "optimizedSD/v1-inference.yaml"
|
||||
filename_regex = re.compile('[^a-zA-Z0-9]')
|
||||
|
||||
# api stuff
|
||||
from . import Request, Response, Image as ResponseImage
|
||||
import base64
|
||||
from io import BytesIO
|
||||
#from colorama import Fore
|
||||
|
||||
# local
|
||||
stop_processing = False
|
||||
temp_images = {}
|
||||
|
||||
ckpt_file = None
|
||||
gfpgan_file = None
|
||||
real_esrgan_file = None
|
||||
|
||||
model = None
|
||||
modelCS = None
|
||||
modelFS = None
|
||||
model_gfpgan = None
|
||||
model_real_esrgan = None
|
||||
|
||||
model_is_half = False
|
||||
model_fs_is_half = False
|
||||
device = None
|
||||
unet_bs = 1
|
||||
precision = 'autocast'
|
||||
sampler_plms = None
|
||||
sampler_ddim = None
|
||||
|
||||
has_valid_gpu = False
|
||||
force_full_precision = False
|
||||
try:
|
||||
gpu = torch.cuda.current_device()
|
||||
gpu_name = torch.cuda.get_device_name(gpu)
|
||||
print('GPU detected: ', gpu_name)
|
||||
|
||||
force_full_precision = ('nvidia' in gpu_name.lower() or 'geforce' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images
|
||||
if force_full_precision:
|
||||
print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', gpu_name)
|
||||
|
||||
mem_free, mem_total = torch.cuda.mem_get_info(gpu)
|
||||
mem_total /= float(10**9)
|
||||
if mem_total < 3.0:
|
||||
print("GPUs with less than 3 GB of VRAM are not compatible with Stable Diffusion")
|
||||
raise Exception()
|
||||
|
||||
has_valid_gpu = True
|
||||
except:
|
||||
print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!')
|
||||
pass
|
||||
|
||||
def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast', half_model_fs=False):
|
||||
global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half
|
||||
|
||||
ckpt_file = ckpt_to_use
|
||||
device = device_to_use if has_valid_gpu else 'cpu'
|
||||
precision = precision_to_use if not force_full_precision else 'full'
|
||||
unet_bs = unet_bs_to_use
|
||||
|
||||
if device == 'cpu':
|
||||
precision = 'full'
|
||||
|
||||
sd = load_model_from_config(f"{ckpt_file}.ckpt")
|
||||
li, lo = [], []
|
||||
for key, value in sd.items():
|
||||
sp = key.split(".")
|
||||
if (sp[0]) == "model":
|
||||
if "input_blocks" in sp:
|
||||
li.append(key)
|
||||
elif "middle_block" in sp:
|
||||
li.append(key)
|
||||
elif "time_embed" in sp:
|
||||
li.append(key)
|
||||
else:
|
||||
lo.append(key)
|
||||
for key in li:
|
||||
sd["model1." + key[6:]] = sd.pop(key)
|
||||
for key in lo:
|
||||
sd["model2." + key[6:]] = sd.pop(key)
|
||||
|
||||
config = OmegaConf.load(f"{config_yaml}")
|
||||
|
||||
model = instantiate_from_config(config.modelUNet)
|
||||
_, _ = model.load_state_dict(sd, strict=False)
|
||||
model.eval()
|
||||
model.cdevice = device
|
||||
model.unet_bs = unet_bs
|
||||
model.turbo = turbo
|
||||
|
||||
modelCS = instantiate_from_config(config.modelCondStage)
|
||||
_, _ = modelCS.load_state_dict(sd, strict=False)
|
||||
modelCS.eval()
|
||||
modelCS.cond_stage_model.device = device
|
||||
|
||||
modelFS = instantiate_from_config(config.modelFirstStage)
|
||||
_, _ = modelFS.load_state_dict(sd, strict=False)
|
||||
modelFS.eval()
|
||||
del sd
|
||||
|
||||
if device != "cpu" and precision == "autocast":
|
||||
model.half()
|
||||
modelCS.half()
|
||||
model_is_half = True
|
||||
else:
|
||||
model_is_half = False
|
||||
|
||||
if half_model_fs:
|
||||
modelFS.half()
|
||||
model_fs_is_half = True
|
||||
else:
|
||||
model_fs_is_half = False
|
||||
|
||||
print('loaded ', ckpt_file, 'to', device, 'precision', precision)
|
||||
|
||||
def load_model_gfpgan(gfpgan_to_use):
|
||||
global gfpgan_file, model_gfpgan
|
||||
|
||||
if gfpgan_to_use is None:
|
||||
return
|
||||
|
||||
gfpgan_file = gfpgan_to_use
|
||||
model_path = gfpgan_to_use + ".pth"
|
||||
|
||||
if device == 'cpu':
|
||||
model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cpu'))
|
||||
else:
|
||||
model_gfpgan = GFPGANer(model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=torch.device('cuda'))
|
||||
|
||||
print('loaded ', gfpgan_to_use, 'to', device, 'precision', precision)
|
||||
|
||||
def load_model_real_esrgan(real_esrgan_to_use):
|
||||
global real_esrgan_file, model_real_esrgan
|
||||
|
||||
if real_esrgan_to_use is None:
|
||||
return
|
||||
|
||||
real_esrgan_file = real_esrgan_to_use
|
||||
model_path = real_esrgan_to_use + ".pth"
|
||||
|
||||
RealESRGAN_models = {
|
||||
'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4),
|
||||
'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
|
||||
}
|
||||
|
||||
model_to_use = RealESRGAN_models[real_esrgan_to_use]
|
||||
|
||||
if device == 'cpu':
|
||||
model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half
|
||||
model_real_esrgan.device = torch.device('cpu')
|
||||
model_real_esrgan.model.to('cpu')
|
||||
else:
|
||||
model_real_esrgan = RealESRGANer(scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=model_is_half)
|
||||
|
||||
model_real_esrgan.model.name = real_esrgan_to_use
|
||||
|
||||
print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision)
|
||||
|
||||
def mk_img(req: Request):
|
||||
try:
|
||||
yield from do_mk_img(req)
|
||||
except Exception as e:
|
||||
print(traceback.format_exc())
|
||||
|
||||
gc()
|
||||
|
||||
if device != "cpu":
|
||||
modelFS.to("cpu")
|
||||
modelCS.to("cpu")
|
||||
|
||||
model.model1.to("cpu")
|
||||
model.model2.to("cpu")
|
||||
|
||||
gc()
|
||||
|
||||
yield json.dumps({
|
||||
"status": 'failed',
|
||||
"detail": str(e)
|
||||
})
|
||||
|
||||
def do_mk_img(req: Request):
|
||||
global model, modelCS, modelFS, device
|
||||
global model_gfpgan, model_real_esrgan
|
||||
global stop_processing
|
||||
|
||||
stop_processing = False
|
||||
|
||||
res = Response()
|
||||
res.request = req
|
||||
res.images = []
|
||||
|
||||
temp_images.clear()
|
||||
|
||||
model.turbo = req.turbo
|
||||
if req.use_cpu:
|
||||
if device != 'cpu':
|
||||
device = 'cpu'
|
||||
|
||||
if model_is_half:
|
||||
del model, modelCS, modelFS
|
||||
load_model_ckpt(ckpt_file, device)
|
||||
|
||||
load_model_gfpgan(gfpgan_file)
|
||||
load_model_real_esrgan(real_esrgan_file)
|
||||
else:
|
||||
if has_valid_gpu:
|
||||
prev_device = device
|
||||
device = 'cuda'
|
||||
|
||||
if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
|
||||
(precision == 'full' and not req.use_full_precision and not force_full_precision) or \
|
||||
(req.init_image is None and model_fs_is_half) or \
|
||||
(req.init_image is not None and not model_fs_is_half and not force_full_precision):
|
||||
|
||||
del model, modelCS, modelFS
|
||||
load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))
|
||||
|
||||
if prev_device != device:
|
||||
load_model_gfpgan(gfpgan_file)
|
||||
load_model_real_esrgan(real_esrgan_file)
|
||||
|
||||
if req.use_face_correction != gfpgan_file:
|
||||
load_model_gfpgan(req.use_face_correction)
|
||||
|
||||
if req.use_upscale != real_esrgan_file:
|
||||
load_model_real_esrgan(req.use_upscale)
|
||||
|
||||
model.cdevice = device
|
||||
modelCS.cond_stage_model.device = device
|
||||
|
||||
opt_prompt = req.prompt
|
||||
opt_seed = req.seed
|
||||
opt_n_samples = req.num_outputs
|
||||
opt_n_iter = 1
|
||||
opt_scale = req.guidance_scale
|
||||
opt_C = 4
|
||||
opt_H = req.height
|
||||
opt_W = req.width
|
||||
opt_f = 8
|
||||
opt_ddim_steps = req.num_inference_steps
|
||||
opt_ddim_eta = 0.0
|
||||
opt_strength = req.prompt_strength
|
||||
opt_save_to_disk_path = req.save_to_disk_path
|
||||
opt_init_img = req.init_image
|
||||
opt_use_face_correction = req.use_face_correction
|
||||
opt_use_upscale = req.use_upscale
|
||||
opt_show_only_filtered = req.show_only_filtered_image
|
||||
opt_format = 'png'
|
||||
opt_sampler_name = req.sampler
|
||||
|
||||
print(req.to_string(), '\n device', device)
|
||||
|
||||
print('\n\n Using precision:', precision)
|
||||
|
||||
seed_everything(opt_seed)
|
||||
|
||||
batch_size = opt_n_samples
|
||||
prompt = opt_prompt
|
||||
assert prompt is not None
|
||||
data = [batch_size * [prompt]]
|
||||
|
||||
if precision == "autocast" and device != "cpu":
|
||||
precision_scope = autocast
|
||||
else:
|
||||
precision_scope = nullcontext
|
||||
|
||||
mask = None
|
||||
|
||||
if req.init_image is None:
|
||||
handler = _txt2img
|
||||
|
||||
init_latent = None
|
||||
t_enc = None
|
||||
else:
|
||||
handler = _img2img
|
||||
|
||||
init_image = load_img(req.init_image, opt_W, opt_H)
|
||||
init_image = init_image.to(device)
|
||||
|
||||
if device != "cpu" and precision == "autocast":
|
||||
init_image = init_image.half()
|
||||
|
||||
modelFS.to(device)
|
||||
|
||||
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
|
||||
init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space
|
||||
|
||||
if req.mask is not None:
|
||||
mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device)
|
||||
mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
|
||||
mask = repeat(mask, '1 ... -> b ...', b=batch_size)
|
||||
|
||||
if device != "cpu" and precision == "autocast":
|
||||
mask = mask.half()
|
||||
|
||||
move_fs_to_cpu()
|
||||
|
||||
assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
|
||||
t_enc = int(opt_strength * opt_ddim_steps)
|
||||
print(f"target t_enc is {t_enc} steps")
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
session_out_path = os.path.join(opt_save_to_disk_path, req.session_id)
|
||||
os.makedirs(session_out_path, exist_ok=True)
|
||||
else:
|
||||
session_out_path = None
|
||||
|
||||
seeds = ""
|
||||
with torch.no_grad():
|
||||
for n in trange(opt_n_iter, desc="Sampling"):
|
||||
for prompts in tqdm(data, desc="data"):
|
||||
|
||||
with precision_scope("cuda"):
|
||||
modelCS.to(device)
|
||||
uc = None
|
||||
if opt_scale != 1.0:
|
||||
uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
|
||||
if isinstance(prompts, tuple):
|
||||
prompts = list(prompts)
|
||||
|
||||
subprompts, weights = split_weighted_subprompts(prompts[0])
|
||||
if len(subprompts) > 1:
|
||||
c = torch.zeros_like(uc)
|
||||
totalWeight = sum(weights)
|
||||
# normalize each "sub prompt" and add it
|
||||
for i in range(len(subprompts)):
|
||||
weight = weights[i]
|
||||
# if not skip_normalize:
|
||||
weight = weight / totalWeight
|
||||
c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
|
||||
else:
|
||||
c = modelCS.get_learned_conditioning(prompts)
|
||||
|
||||
modelFS.to(device)
|
||||
|
||||
partial_x_samples = None
|
||||
def img_callback(x_samples, i):
|
||||
nonlocal partial_x_samples
|
||||
|
||||
partial_x_samples = x_samples
|
||||
|
||||
if req.stream_progress_updates:
|
||||
n_steps = opt_ddim_steps if req.init_image is None else t_enc
|
||||
progress = {"step": i, "total_steps": n_steps}
|
||||
|
||||
if req.stream_image_progress and i % 5 == 0:
|
||||
partial_images = []
|
||||
|
||||
for i in range(batch_size):
|
||||
x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
|
||||
x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
|
||||
x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
|
||||
x_sample = x_sample.astype(np.uint8)
|
||||
img = Image.fromarray(x_sample)
|
||||
buf = BytesIO()
|
||||
img.save(buf, format='JPEG')
|
||||
buf.seek(0)
|
||||
|
||||
del img, x_sample, x_samples_ddim
|
||||
# don't delete x_samples, it is used in the code that called this callback
|
||||
|
||||
temp_images[str(req.session_id) + '/' + str(i)] = buf
|
||||
partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})
|
||||
|
||||
progress['output'] = partial_images
|
||||
|
||||
yield json.dumps(progress)
|
||||
|
||||
if stop_processing:
|
||||
raise UserInitiatedStop("User requested that we stop processing")
|
||||
|
||||
# run the handler
|
||||
try:
|
||||
if handler == _txt2img:
|
||||
x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name)
|
||||
else:
|
||||
x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask)
|
||||
|
||||
yield from x_samples
|
||||
|
||||
x_samples = partial_x_samples
|
||||
except UserInitiatedStop:
|
||||
if partial_x_samples is None:
|
||||
continue
|
||||
|
||||
x_samples = partial_x_samples
|
||||
|
||||
print("saving images")
|
||||
for i in range(batch_size):
|
||||
|
||||
x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
|
||||
x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
|
||||
x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
|
||||
x_sample = x_sample.astype(np.uint8)
|
||||
img = Image.fromarray(x_sample)
|
||||
|
||||
has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
|
||||
(opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN'))
|
||||
|
||||
return_orig_img = not has_filters or not opt_show_only_filtered
|
||||
|
||||
if stop_processing:
|
||||
return_orig_img = True
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
prompt_flattened = filename_regex.sub('_', prompts[0])
|
||||
prompt_flattened = prompt_flattened[:50]
|
||||
|
||||
img_id = str(uuid.uuid4())[-8:]
|
||||
|
||||
file_path = f"{prompt_flattened}_{img_id}"
|
||||
img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}")
|
||||
meta_out_path = os.path.join(session_out_path, f"{file_path}.txt")
|
||||
|
||||
if return_orig_img:
|
||||
save_image(img, img_out_path)
|
||||
|
||||
save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt)
|
||||
|
||||
if return_orig_img:
|
||||
img_data = img_to_base64_str(img)
|
||||
res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
|
||||
res.images.append(res_image_orig)
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
res_image_orig.path_abs = img_out_path
|
||||
|
||||
del img
|
||||
|
||||
if has_filters and not stop_processing:
|
||||
print('Applying filters..')
|
||||
|
||||
gc()
|
||||
filters_applied = []
|
||||
|
||||
if opt_use_face_correction:
|
||||
_, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
|
||||
x_sample = output[:,:,::-1]
|
||||
filters_applied.append(opt_use_face_correction)
|
||||
|
||||
if opt_use_upscale:
|
||||
output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1])
|
||||
x_sample = output[:,:,::-1]
|
||||
filters_applied.append(opt_use_upscale)
|
||||
|
||||
filtered_image = Image.fromarray(x_sample)
|
||||
|
||||
filtered_img_data = img_to_base64_str(filtered_image)
|
||||
res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed)
|
||||
res.images.append(res_image_filtered)
|
||||
|
||||
filters_applied = "_".join(filters_applied)
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}")
|
||||
save_image(filtered_image, filtered_img_out_path)
|
||||
res_image_filtered.path_abs = filtered_img_out_path
|
||||
|
||||
del filtered_image
|
||||
|
||||
seeds += str(opt_seed) + ","
|
||||
opt_seed += 1
|
||||
|
||||
move_fs_to_cpu()
|
||||
gc()
|
||||
del x_samples, x_samples_ddim, x_sample
|
||||
print("memory_final = ", torch.cuda.memory_allocated() / 1e6)
|
||||
|
||||
print('Task completed')
|
||||
|
||||
yield json.dumps(res.json())
|
||||
|
||||
def save_image(img, img_out_path):
    try:
        img.save(img_out_path)
    except:
        print('could not save the file', traceback.format_exc())

def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt):
    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}"

    try:
        with open(meta_out_path, 'w') as f:
            f.write(metadata)
    except:
        print('could not save the file', traceback.format_exc())

def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name):
    shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]

    if device != "cpu":
        mem = torch.cuda.memory_allocated() / 1e6
        modelCS.to("cpu")
        while torch.cuda.memory_allocated() / 1e6 >= mem:
            time.sleep(1)

    if sampler_name == 'ddim':
        model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)

    samples_ddim = model.sample(
        S=opt_ddim_steps,
        conditioning=c,
        seed=opt_seed,
        shape=shape,
        verbose=False,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        eta=opt_ddim_eta,
        x_T=start_code,
        img_callback=img_callback,
        mask=mask,
        sampler=sampler_name,
    )

    yield from samples_ddim

def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
    # encode (scaled latent)
    z_enc = model.stochastic_encode(
        init_latent,
        torch.tensor([t_enc] * batch_size).to(device),
        opt_seed,
        opt_ddim_eta,
        opt_ddim_steps,
    )
    x_T = None if mask is None else init_latent

    # decode it
    samples_ddim = model.sample(
        t_enc,
        c,
        z_enc,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        img_callback=img_callback,
        mask=mask,
        x_T=x_T,
        sampler='ddim'
    )

    yield from samples_ddim

def move_fs_to_cpu():
    if device != "cpu":
        mem = torch.cuda.memory_allocated() / 1e6
        modelFS.to("cpu")
        while torch.cuda.memory_allocated() / 1e6 >= mem:
            time.sleep(1)

def gc():
    if device == 'cpu':
        return

    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

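# The CPU-offload pattern used by _txt2img() and move_fs_to_cpu() above, as a
# stand-alone sketch (offload_to_cpu is a hypothetical helper, not part of this
# file): move a module to system RAM, then poll until torch's CUDA allocator
# reports that the memory was actually released, since the release is not
# instantaneous.
#
#     import time
#     import torch
#
#     def offload_to_cpu(module):
#         if not torch.cuda.is_available():
#             return
#         before_mb = torch.cuda.memory_allocated() / 1e6
#         module.to("cpu")
#         while torch.cuda.memory_allocated() / 1e6 >= before_mb:
#             time.sleep(1)
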
# internal

def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())


def load_model_from_config(ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    return sd

# utils
class UserInitiatedStop(Exception):
    pass

def load_img(img_str, w0, h0):
    image = base64_str_to_img(img_str).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from base64")
    if h0 is not None and w0 is not None:
        h, w = h0, w0

    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
    image = image.resize((w, h), resample=Image.Resampling.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.*image - 1.

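# Shape/range sketch for load_img() (the data URL argument below is
# hypothetical): the helper returns a (1, 3, H, W) float tensor rescaled from
# [0, 1] to [-1, 1], with H and W snapped down to multiples of 64.
#
#     init_tensor = load_img(init_image_data_url, w0=512, h0=512)
#     assert init_tensor.shape == (1, 3, 512, 512)
#     assert -1.0 <= float(init_tensor.min()) and float(init_tensor.max()) <= 1.0
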
def load_mask(mask_str, h0, w0, newH, newW, invert=False):
    image = base64_str_to_img(mask_str).convert("RGB")
    w, h = image.size
    print(f"loaded input mask of size ({w}, {h})")

    if invert:
        print("inverted")
        image = ImageOps.invert(image)
        # where_0, where_1 = np.where(image == 0), np.where(image == 255)
        # image[where_0], image[where_1] = 255, 0

    if h0 is not None and w0 is not None:
        h, w = h0, w0

    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64

    print(f"New mask size ({w}, {h})")
    image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS)
    image = np.array(image)

    image = image.astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return image

# https://stackoverflow.com/a/61114178
def img_to_base64_str(img):
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    buffered.seek(0)
    img_byte = buffered.getvalue()
    img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()
    return img_str

def base64_str_to_img(img_str):
    img_str = img_str[len("data:image/png;base64,"):]
    data = base64.b64decode(img_str)
    buffered = BytesIO(data)
    img = Image.open(buffered)
    return img
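# Round-trip sketch for the two helpers above (the 8x8 test image is just an
# illustration; assumes Pillow is installed):
#
#     from PIL import Image
#     img = Image.new("RGB", (8, 8), color=(255, 0, 0))
#     data_url = img_to_base64_str(img)       # "data:image/png;base64,..."
#     img2 = base64_str_to_img(data_url)      # back to a PIL Image
#     assert img2.size == (8, 8)
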
237
engine/server.py
Normal file
@@ -0,0 +1,237 @@
import json
import traceback

import sys
import os

SCRIPT_DIR = os.getcwd()
print('started in ', SCRIPT_DIR)

SD_UI_DIR = os.getenv('SD_UI_PATH', None)
sys.path.append(os.path.dirname(SD_UI_DIR))

CONFIG_DIR = os.path.join(SD_UI_DIR, '..', 'scripts')

OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse, StreamingResponse
from pydantic import BaseModel
import logging

from sd_internal import Request, Response

app = FastAPI()

model_loaded = False
model_is_loading = False

modifiers_cache = None
outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)

# don't show access log entries for URLs that start with the given prefix
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/modifier-thumbnails']

app.mount('/media', StaticFiles(directory=os.path.join(SD_UI_DIR, 'media/')), name="media")

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    show_only_filtered_image: bool = False

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

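# Example request against this model (a sketch, not part of the server: it
# assumes the server is listening on localhost:9000, as opened by the
# webbrowser call at the bottom of this file, and that the `requests` package
# is available; all values are illustrative):
#
#     import requests
#
#     payload = {
#         "prompt": "a photograph of an astronaut riding a horse",
#         "num_outputs": 1,
#         "num_inference_steps": 25,
#         "guidance_scale": 7.5,
#         "width": 512,
#         "height": 512,
#         "seed": 42,
#         "stream_progress_updates": False,  # buffer and return only the final result
#     }
#     res = requests.post("http://localhost:9000/image", json=payload).json()
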
class SetAppConfigRequest(BaseModel):
    update_branch: str = "main"

@app.get('/')
def read_root():
    headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'), headers=headers)

@app.get('/ping')
async def ping():
    global model_loaded, model_is_loading

    try:
        if model_loaded:
            return {'OK'}

        if model_is_loading:
            return {'ERROR'}

        model_is_loading = True

        from sd_internal import runtime

        custom_weight_path = os.path.join(SCRIPT_DIR, 'custom-model.ckpt')
        ckpt_to_use = "sd-v1-4" if not os.path.exists(custom_weight_path) else "custom-model"
        runtime.load_model_ckpt(ckpt_to_use=ckpt_to_use)

        model_loaded = True
        model_is_loading = False

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.post('/image')
def image(req: ImageRequest):
    from sd_internal import runtime

    r = Request()
    r.session_id = req.session_id
    r.prompt = req.prompt
    r.negative_prompt = req.negative_prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale = req.use_upscale
    r.use_face_correction = req.use_face_correction
    r.show_only_filtered_image = req.show_only_filtered_image

    r.stream_progress_updates = True # the underlying implementation only supports streaming
    r.stream_image_progress = req.stream_image_progress

    try:
        if not req.stream_progress_updates:
            r.stream_image_progress = False

        res = runtime.mk_img(r)

        if req.stream_progress_updates:
            return StreamingResponse(res, media_type='application/json')
        else: # compatibility mode: buffer the streaming responses, and return the last one
            last_result = None

            for result in res:
                last_result = result

            return json.loads(last_result)
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

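# Streaming-consumption sketch (assumes the `requests` package; the endpoint
# yields one JSON blob per progress update, so a real client has to split the
# byte stream at blob boundaries):
#
#     with requests.post("http://localhost:9000/image",
#                        json={"prompt": "an astronaut", "stream_progress_updates": True},
#                        stream=True) as r:
#         for chunk in r.iter_content(chunk_size=None):
#             print(chunk.decode())  # raw progress/result JSON
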
@app.get('/image/stop')
def stop():
    try:
        if model_is_loading:
            return {'ERROR'}

        from sd_internal import runtime
        runtime.stop_processing = True

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.get('/image/tmp/{session_id}/{img_id}')
def get_image(session_id, img_id):
    from sd_internal import runtime
    buf = runtime.temp_images[session_id + '/' + img_id]
    buf.seek(0)
    return StreamingResponse(buf, media_type='image/jpeg')

@app.post('/app_config')
async def setAppConfig(req: SetAppConfigRequest):
    try:
        config = {
            'update_branch': req.update_branch
        }

        config_json_str = json.dumps(config)
        config_bat_str = f'@set update_branch={req.update_branch}'
        config_sh_str = f'export update_branch={req.update_branch}'

        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
        config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
        config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')

        with open(config_json_path, 'w') as f:
            f.write(config_json_str)

        with open(config_bat_path, 'w') as f:
            f.write(config_bat_str)

        with open(config_sh_path, 'w') as f:
            f.write(config_sh_str)

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.get('/app_config')
def getAppConfig():
    try:
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')

        if not os.path.exists(config_json_path):
            return HTTPException(status_code=500, detail="No config file")

        with open(config_json_path, 'r') as f:
            config_json_str = f.read()
            config = json.loads(config_json_str)
            return config
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.get('/modifiers.json')
def read_modifiers():
    headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
    return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=headers)

@app.get('/output_dir')
def read_home_dir():
    return {outpath}

# don't log certain requests
class LogSuppressFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        path = record.getMessage()
        for prefix in ACCESS_LOG_SUPPRESS_PATH_PREFIXES:
            if path.find(prefix) != -1:
                return False

        return True

logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())

# start the browser ui
import webbrowser; webbrowser.open('http://localhost:9000')
505
index.html
@@ -1,505 +0,0 @@
<!DOCTYPE html>
<html>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
    body {
        font-family: Arial, Helvetica, sans-serif;
        font-size: 11pt;
    }
    a {
        color: rgb(0, 102, 204);
    }
    a:visited {
        color: rgb(0, 102, 204);
    }
    @media (prefers-color-scheme: dark) {
        body {
            background-color: rgb(32, 33, 36);
            color: #eee;
        }
    }
    label {
        font-size: 10pt;
    }
    #prompt {
        width: 50vw;
        height: 50pt;
    }
    @media screen and (max-width: 600px) {
        #prompt {
            width: 95%;
        }
    }
    .image_preview_container {
        display: none;
    }
    .image_clear_btn {
        position: absolute;
        transform: translateX(-50%);
        background: black;
        color: white;
        border: 2pt solid #ccc;
        padding: 0;
        cursor: pointer;
        outline: inherit;
        border-radius: 8pt;
        width: 16pt;
        height: 16pt;
        font-size: 10pt;
    }
    #configHeader {
        margin-top: 5px;
        margin-bottom: 5px;
        font-size: 10pt;
    }
    #config {
        font-size: 9pt;
        margin-bottom: 5px;
        padding-left: 10px;
    }
    #outputMsg {
        font-size: small;
    }
    #footer {
        border-top: 1px solid #999;
        margin-top: 10px;
        padding-top: 10px;
        font-size: small;
    }
    .imgUseBtn {
        position: absolute;
        transform: translateX(-100%);
        margin-top: 5pt;
        margin-left: -5pt;
    }
    .imgSaveBtn {
        position: absolute;
        transform: translateX(-100%);
        margin-top: 30pt;
        margin-left: -5pt;
    }
    .imgItem {
        display: inline;
        padding-right: 10px;
    }
</style>
<body>
    <div id="status">Server status: <span id="serverStatus">checking..</span> | Request status: <span id="reqStatus">n/a</span></div>

    <br/>

    <b>Prompt:</b><br/>
    <textarea id="prompt">a photograph of an astronaut riding a horse</textarea><br/>

    <label for="init_image"><b>Initial Image:</b> (optional) </label> <input id="init_image" name="init_image" type="file" /><br/>
    <div id="init_image_preview_container" class="image_preview_container">
        <img id="init_image_preview" src="" width="100" height="100" />
        <button id="init_image_clear" class="image_clear_btn">X</button>
    </div><br/>

    <div id="mask_setting">
        <label for="mask"><b>Image Mask:</b> (optional) </label> <input id="mask" name="mask" type="file" /><br/>
        <div id="mask_preview_container" class="image_preview_container">
            <img id="mask_preview" src="" width="100" height="100" />
            <button id="mask_clear" class="image_clear_btn">X</button>
        </div>
    </div>
    <div id="configHeader"><b>Advanced settings:</b> [<a id="configToggleBtn" href="#">show</a>]</div>
    <div id="config">
        <label for="seed">Seed:</label> <input id="seed" name="seed" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label> <br/>
        <label for="num_outputs">Number of outputs:</label> <select id="num_outputs" name="num_outputs" value="1"><option value="1" selected>1</option><option value="4">4</option></select><br/>
        <label for="width">Width:</label> <select id="width" name="width" value="512"><option value="128">128</option><option value="256">256</option><option value="512" selected>512</option><option value="768">768</option><option value="1024">1024</option></select><br/>
        <label for="height">Height:</label> <select id="height" name="height" value="512"><option value="128">128</option><option value="256">256</option><option value="512" selected>512</option><option value="768">768</option></select><br/>
        <label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" value="50"><br/>
        <label for="guidance_scale">Guidance Scale:</label> <input id="guidance_scale" name="guidance_scale" value="75" type="range" min="10" max="200"> <span id="guidance_scale_value"></span><br/>
        <span id="prompt_strength_container"><label for="prompt_strength">Prompt Strength:</label> <input id="prompt_strength" name="prompt_strength" value="8" type="range" min="0" max="10"> <span id="prompt_strength_value"></span><br/></span><br/>
        <input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label><br/>
    </div>

    <button id="makeImage">Make Image</button> <br/><br/>

    <div id="outputMsg"></div>

    <div id="images"></div>
    <div id="footer">
        <p>Please feel free to <a href="https://github.com/cmdr2/stable-diffusion-ui/issues" target="_blank">file an issue</a> if you have any problems or suggestions in using this interface.</p>
        <p><b>Disclaimer:</b> The authors of this project are not responsible for any content generated using this interface.</p>
        <p>The license of this software forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information that would be meant for harm, <br/>spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read <a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/LICENSE" target="_blank">the license</a>.</p>
        <p>By using this software, you consent to the terms and conditions of the license.</p>
    </div>
</body>
<script>
const SOUND_ENABLED_KEY = "soundEnabled"
const HEALTH_PING_INTERVAL = 5 // seconds

let promptField = document.querySelector('#prompt')
let numOutputsField = document.querySelector('#num_outputs')
let numInferenceStepsField = document.querySelector('#num_inference_steps')
let guidanceScaleField = document.querySelector('#guidance_scale')
let guidanceScaleValueLabel = document.querySelector('#guidance_scale_value')
let randomSeedField = document.querySelector("#random_seed")
let seedField = document.querySelector('#seed')
let widthField = document.querySelector('#width')
let heightField = document.querySelector('#height')
let initImageSelector = document.querySelector("#init_image")
let initImagePreview = document.querySelector("#init_image_preview")
let maskImageSelector = document.querySelector("#mask")
let maskImagePreview = document.querySelector("#mask_preview")
let promptStrengthField = document.querySelector('#prompt_strength')
let promptStrengthValueLabel = document.querySelector('#prompt_strength_value')

let makeImageBtn = document.querySelector('#makeImage')

let imagesContainer = document.querySelector('#images')
let initImagePreviewContainer = document.querySelector('#init_image_preview_container')
let initImageClearBtn = document.querySelector('#init_image_clear')
let promptStrengthContainer = document.querySelector('#prompt_strength_container')

let maskSetting = document.querySelector('#mask_setting')
let maskImagePreviewContainer = document.querySelector('#mask_preview_container')
let maskImageClearBtn = document.querySelector('#mask_clear')

let showConfigToggle = document.querySelector('#configToggleBtn')
let configBox = document.querySelector('#config')
let outputMsg = document.querySelector('#outputMsg')

let soundToggle = document.querySelector('#sound_toggle')

let serverStatus = 'offline'

function isSoundEnabled() {
    if (localStorage.getItem(SOUND_ENABLED_KEY) === 'false') {
        return false
    }
    return true
}

function setStatus(statusType, msg, msgType) {
    let el = ''

    if (statusType === 'server') {
        el = '#serverStatus'
        serverStatus = msg
    } else if (statusType === 'request') {
        el = '#reqStatus'
    }

    if (msgType == 'error') {
        msg = '<span style="color: red">' + msg + '</span>'
    } else if (msgType == 'success') {
        msg = '<span style="color: green">' + msg + '</span>'
    }

    if (el) {
        document.querySelector(el).innerHTML = msg
    }
}

function playSound() {
    const audio = new Audio('/media/ding.mp3')
    audio.volume = 0.2
    audio.play()
}

async function healthCheck() {
    try {
        let res = await fetch('/ping')
        res = await res.json()

        if (res[0] == 'OK') {
            setStatus('server', 'online', 'success')
        } else {
            setStatus('server', 'offline', 'error')
        }
    } catch (e) {
        setStatus('server', 'offline', 'error')
    }
}
async function makeImage() {
    setStatus('request', 'fetching..')

    makeImageBtn.innerHTML = 'Processing..'
    makeImageBtn.disabled = true

    outputMsg.innerHTML = 'Fetching..'

    function logError(msg, res) {
        outputMsg.innerHTML = '<span style="color: red">Error: ' + msg + '</span>'
        console.log('request error', res)
        setStatus('request', 'error', 'error')
    }

    let seed = (randomSeedField.checked ? Math.floor(Math.random() * 10000) : seedField.value)

    let reqBody = {
        prompt: promptField.value,
        num_outputs: numOutputsField.value,
        num_inference_steps: numInferenceStepsField.value,
        guidance_scale: guidanceScaleField.value / 10,
        width: widthField.value,
        height: heightField.value,
        seed: seed,
    }

    if (initImagePreview.src.indexOf('data:image/png;base64') !== -1) {
        reqBody['init_image'] = initImagePreview.src
        reqBody['prompt_strength'] = promptStrengthField.value / 10

        if (maskImagePreview.src.indexOf('data:image/png;base64') !== -1) {
            reqBody['mask'] = maskImagePreview.src
        }
    }

    let res = ''
    let time = new Date().getTime()

    try {
        res = await fetch('/image', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(reqBody)
        })

        if (res.status != 200) {
            if (serverStatus === 'online') {
                logError('Stable Diffusion had an error: ' + await res.text() + '. This happens sometimes. Maybe modify the prompt or seed a little bit?', res)
            } else {
                logError("Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed.", res)
            }
            res = undefined
        } else {
            res = await res.json()

            if (res.status !== 'succeeded') {
                let msg = ''
                if (res.detail !== undefined) {
                    msg = res.detail[0].msg + " in " + JSON.stringify(res.detail[0].loc)
                } else {
                    msg = res
                }
                logError(msg, res)
                res = undefined
            }
        }
    } catch (e) {
        console.log('request error', e)
        setStatus('request', 'error', 'error')
    }

    makeImageBtn.innerHTML = 'Make Image'
    makeImageBtn.disabled = false

    if (isSoundEnabled()) {
        playSound()
    }

    if (!res) {
        return
    }

    time = new Date().getTime() - time
    time /= 1000

    outputMsg.innerHTML = 'Processed in ' + time + ' seconds. Seed: ' + seed

    imagesContainer.innerHTML = ''

    for (let idx in res.output) {
        let imgBody = ''

        try {
            imgBody = res.output[idx]
        } catch (e) {
            console.log(imgBody)
            setStatus('request', 'invalid image', 'error')
            return
        }

        let imgItem = document.createElement('div')
        imgItem.className = 'imgItem'

        let img = document.createElement('img')
        img.width = parseInt(reqBody.width)
        img.height = parseInt(reqBody.height)
        img.src = imgBody

        let imgUseBtn = document.createElement('button')
        imgUseBtn.className = 'imgUseBtn'
        imgUseBtn.innerHTML = 'Use as Input'

        let imgSaveBtn = document.createElement('button')
        imgSaveBtn.className = 'imgSaveBtn'
        imgSaveBtn.innerHTML = 'Download'

        imgItem.appendChild(img)
        imgItem.appendChild(imgUseBtn)
        imgItem.appendChild(imgSaveBtn)
        imagesContainer.appendChild(imgItem)

        imgUseBtn.addEventListener('click', function() {
            initImageSelector.value = null
            initImagePreview.src = imgBody

            initImagePreviewContainer.style.display = 'block'
            promptStrengthContainer.style.display = 'block'

            maskSetting.style.display = 'block'

            randomSeedField.checked = false
            seedField.value = seed
            seedField.disabled = false
        })

        imgSaveBtn.addEventListener('click', function() {
            let imgDownload = document.createElement('a')
            imgDownload.download = generateUUID() + '.png'
            imgDownload.href = imgBody
            imgDownload.click()
        })
    }

    setStatus('request', 'done', 'success')

    if (randomSeedField.checked) {
        seedField.value = seed
    }
}
function generateUUID() { // Public Domain/MIT
    var d = new Date().getTime(); // Timestamp
    var d2 = ((typeof performance !== 'undefined') && performance.now && (performance.now()*1000)) || 0; // Time in microseconds since page-load, or 0 if unsupported
    return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
        var r = Math.random() * 16; // random number between 0 and 16
        if (d > 0) { // Use timestamp until depleted
            r = (d + r)%16 | 0;
            d = Math.floor(d/16);
        } else { // Use microseconds since page-load if supported
            r = (d2 + r)%16 | 0;
            d2 = Math.floor(d2/16);
        }
        return (c === 'x' ? r : (r & 0x3 | 0x8)).toString(16);
    });
}

function handleAudioEnabledChange(e) {
    localStorage.setItem(SOUND_ENABLED_KEY, e.target.checked.toString())
}

soundToggle.addEventListener('click', handleAudioEnabledChange)
soundToggle.checked = isSoundEnabled()

makeImageBtn.addEventListener('click', makeImage)

configBox.style.display = 'none'

showConfigToggle.addEventListener('click', function() {
    configBox.style.display = (configBox.style.display === 'none' ? 'block' : 'none')
    showConfigToggle.innerHTML = (configBox.style.display === 'none' ? 'show' : 'hide')
    return false
})

function updateGuidanceScale() {
    guidanceScaleValueLabel.innerHTML = guidanceScaleField.value / 10
}

guidanceScaleField.addEventListener('input', updateGuidanceScale)
updateGuidanceScale()

function updatePromptStrength() {
    promptStrengthValueLabel.innerHTML = promptStrengthField.value / 10
}

promptStrengthField.addEventListener('input', updatePromptStrength)
updatePromptStrength()

function checkRandomSeed() {
    if (randomSeedField.checked) {
        seedField.disabled = true
        seedField.value = "random"
    } else {
        seedField.disabled = false
    }
}
randomSeedField.addEventListener('input', checkRandomSeed)
checkRandomSeed()

function showInitImagePreview() {
    if (initImageSelector.files.length === 0) {
        initImagePreviewContainer.style.display = 'none'
        promptStrengthContainer.style.display = 'none'
        maskSetting.style.display = 'none'
        return
    }

    let reader = new FileReader()
    let file = initImageSelector.files[0]

    reader.addEventListener('load', function() {
        // console.log(file.name, reader.result)
        initImagePreview.src = reader.result
        initImagePreviewContainer.style.display = 'block'
        promptStrengthContainer.style.display = 'block'

        maskSetting.style.display = 'block'
    })

    if (file) {
        reader.readAsDataURL(file)
    }
}
initImageSelector.addEventListener('change', showInitImagePreview)
showInitImagePreview()

initImageClearBtn.addEventListener('click', function() {
    initImageSelector.value = null
    maskImageSelector.value = null

    initImagePreview.src = ''
    maskImagePreview.src = ''

    initImagePreviewContainer.style.display = 'none'
    maskImagePreviewContainer.style.display = 'none'

    maskSetting.style.display = 'none'

    promptStrengthContainer.style.display = 'none'
})

function showMaskImagePreview() {
    if (maskImageSelector.files.length === 0) {
        maskImagePreviewContainer.style.display = 'none'
        return
    }

    let reader = new FileReader()
    let file = maskImageSelector.files[0]

    reader.addEventListener('load', function() {
        maskImagePreview.src = reader.result
        maskImagePreviewContainer.style.display = 'block'
    })

    if (file) {
        reader.readAsDataURL(file)
    }
}
maskImageSelector.addEventListener('change', showMaskImagePreview)
showMaskImagePreview()

maskImageClearBtn.addEventListener('click', function() {
    maskImageSelector.value = null
    maskImagePreview.src = ''
    maskImagePreviewContainer.style.display = 'none'
})

setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
</script>

</html>
BIN
installer/bin/micromamba_linux_arm64
Executable file
BIN
installer/bin/micromamba_linux_x64
Executable file
BIN
installer/bin/micromamba_mac_arm64
Executable file
BIN
installer/bin/micromamba_mac_x64
Executable file
BIN
installer/bin/micromamba_win_x64.exe
Normal file
34
installer/bootstrap/bootstrap.bat
Normal file
@@ -0,0 +1,34 @@
@echo off

@rem This file initializes micromamba and activates the env.
@rem A similar bootstrap file needs to exist for each platform (win, linux, macOS).
@rem After this, it is ready to hand over to the platform-independent installer (written in Python).

set MAMBA_ROOT_PREFIX=%SD_BASE_DIR%\env\mamba
set INSTALL_ENV_DIR=%SD_BASE_DIR%\env\installer_env
set INSTALLER_YAML_FILE=%SD_BASE_DIR%\installer\yaml\installer-environment.yaml
set MICROMAMBA_BINARY_FILE=%SD_BASE_DIR%\installer\bin\micromamba_win_x64.exe

@rem initialize the mamba dir
if not exist "%MAMBA_ROOT_PREFIX%" mkdir "%MAMBA_ROOT_PREFIX%"

copy "%MICROMAMBA_BINARY_FILE%" "%MAMBA_ROOT_PREFIX%\micromamba.exe"

@rem test the mamba binary
echo Micromamba version:
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version

@rem run the shell hook
if not exist "%MAMBA_ROOT_PREFIX%\Scripts" (
    call "%MAMBA_ROOT_PREFIX%\micromamba.exe" shell hook --log-level 4 -s cmd.exe
)

call "%MAMBA_ROOT_PREFIX%\condabin\mamba_hook.bat"

@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
    call micromamba create -y --prefix "%INSTALL_ENV_DIR%" -f "%INSTALLER_YAML_FILE%"
)

@rem activate
call micromamba activate "%INSTALL_ENV_DIR%"
44
installer/bootstrap/bootstrap.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

# This file initializes micromamba and activates the env.
# A similar bootstrap file needs to exist for each platform (win, linux, macOS).
# After this, it is ready to hand over to the platform-independent installer (written in Python).

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*)     OS_NAME="linux";;
    Darwin*)    OS_NAME="mac";;
    *)          echo "Unknown OS: $OS_NAME! This only runs on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*)    OS_ARCH="x64";;
    arm64*)     OS_ARCH="arm64";;
    *)          echo "Unknown system architecture: $OS_ARCH! This only runs on x86_64 or arm64" && exit
esac

export MAMBA_ROOT_PREFIX=$SD_BASE_DIR/env/mamba
INSTALL_ENV_DIR=$SD_BASE_DIR/env/installer_env
INSTALLER_YAML_FILE=$SD_BASE_DIR/installer/yaml/installer-environment.yaml
MICROMAMBA_BINARY_FILE=$SD_BASE_DIR/installer/bin/micromamba_${OS_NAME}_${OS_ARCH}

# initialize the mamba dir
mkdir -p "$MAMBA_ROOT_PREFIX"

cp "$MICROMAMBA_BINARY_FILE" "$MAMBA_ROOT_PREFIX/micromamba"

# test the mamba binary
echo "Micromamba version:"
"$MAMBA_ROOT_PREFIX/micromamba" --version

# run the shell hook
eval "$($MAMBA_ROOT_PREFIX/micromamba shell hook -s posix)"

# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
    micromamba create -y --prefix "$INSTALL_ENV_DIR" -f "$INSTALLER_YAML_FILE"
fi

# activate
micromamba activate "$INSTALL_ENV_DIR"
21
installer/bootstrap/check-install-dir.bat
Normal file
@@ -0,0 +1,21 @@
@echo off

if exist "%SD_BASE_DIR%\env" exit /b

set suggested_dir=%~d0\stable-diffusion-ui

echo "Please install Stable Diffusion UI at the root of your drive. This avoids problems with path length limits in Windows." & echo.
set /p answer="Press Enter to install at %suggested_dir%, or type 'c' (without quotes) to install at the current location: "

if /i "%answer:~,1%" NEQ "c" (
    if exist "%suggested_dir%" (
        echo. & echo "Sorry, %suggested_dir% already exists! Cannot overwrite that folder!" & echo.
        pause
        exit
    )

    xcopy "%SD_BASE_DIR%" "%suggested_dir%" /s /i /Y /Q
    echo Please run the %START_CMD_FILENAME% file inside %suggested_dir% . Do not use this folder anymore > "%SD_BASE_DIR%\READ_ME - DO_NOT_USE_THIS_FOLDER.txt"

    cd %suggested_dir%
)
78
installer/developer/enable_dev_mode.py
Normal file
@@ -0,0 +1,78 @@
import argparse
import subprocess
import sys
import json
import os
import platform
import shutil

config_path = os.path.join('config.json')

if not os.path.exists('LICENSE'):
    print('Error: This script needs to be run from the root of the stable-diffusion-ui folder! Please cd to the correct folder, and run this again.')
    exit(1)

parser = argparse.ArgumentParser()

parser.add_argument(
    "--symlink_dir", type=str, default=None, help="the absolute path to the project git repository (to link to)"
)
opt = parser.parse_args()

def run(cmd):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    for c in iter(lambda: p.stdout.read(1), b""):
        sys.stdout.buffer.write(c)
        sys.stdout.flush()

    p.wait()

    return p.returncode == 0

def get_config():
    if not os.path.exists(config_path):
        return {}

    with open(config_path, "r") as f:
        return json.load(f)

def save_config(config):
    with open(config_path, "w") as f:
        json.dump(config, f)

# set the `is_developer_mode` flag to `true` in the config
config = get_config()
config['is_developer_mode'] = True
save_config(config)

print('set is_developer_mode=true in config.json')

# make the symlink, if requested
if opt.symlink_dir is not None:
    if not os.path.exists(opt.symlink_dir):
        print(f'Symlink directory "{opt.symlink_dir}" was not found! Are you sure it has been escaped correctly?')
        exit(1)

    installer_target_path = os.path.join(opt.symlink_dir, 'installer')
    ui_target_path = os.path.join(opt.symlink_dir, 'ui')
    engine_target_path = os.path.join(opt.symlink_dir, 'engine')

    shutil.rmtree('installer', ignore_errors=True)
    shutil.rmtree('ui', ignore_errors=True)
    shutil.rmtree('engine', ignore_errors=True)

    if not os.path.exists(ui_target_path) or not os.path.exists(installer_target_path) or not os.path.exists(engine_target_path):
        print('The target symlink directory does not contain the required {ui, installer, engine} folders. Are you sure it is the correct git repo for the project?')
        exit(1)

    if platform.system() == 'Windows':
        run(f'mklink /J "installer" "{installer_target_path}"')
        run(f'mklink /J "ui" "{ui_target_path}"')
        run(f'mklink /J "engine" "{engine_target_path}"')
    elif platform.system() in ('Linux', 'Darwin'):
        run(f'ln -s "{installer_target_path}" "installer"')
        run(f'ln -s "{ui_target_path}" "ui"')
        run(f'ln -s "{engine_target_path}" "engine"')

    print(f'Created symlinks! Your installation will now automatically use the files present in the repository at {opt.symlink_dir}')
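A usage sketch for the script above (the repository path is illustrative): run it from the root of an existing installation to flip it into developer mode and symlink it to a local clone of the project.

    python installer/developer/enable_dev_mode.py --symlink_dir /home/me/code/stable-diffusion-ui
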
0
installer/installer/__init__.py
Normal file
70
installer/installer/app.py
Normal file
@@ -0,0 +1,70 @@
import os
import json
import platform

# config
PROJECT_REPO_URL = 'https://github.com/cmdr2/stable-diffusion-ui.git'
DEFAULT_PROJECT_BRANCH = 'installer_new'
PROJECT_REPO_DIR_NAME = 'project_repo'

STABLE_DIFFUSION_REPO_URL = 'https://github.com/basujindal/stable-diffusion.git'
DEFAULT_STABLE_DIFFUSION_COMMIT = 'f6cfebffa752ee11a7b07497b8529d5971de916c'
STABLE_DIFFUSION_REPO_DIR_NAME = 'stable-diffusion'

PROJECT_ENV_DIR_NAME = 'project_env'

START_CMD_FILE_NAME = "Start Stable Diffusion UI.cmd" if platform.system() == "Windows" else "start.sh"
DEV_CONSOLE_CMD_FILE_NAME = "Developer Console.cmd" if platform.system() == "Windows" else "developer_console.sh"
CONFIG_FILE_NAME = 'config.json'

# top-level folders
ENV_DIR_NAME = 'env'
MODELS_DIR_NAME = 'models'

INSTALLER_DIR_NAME = 'installer'
UI_DIR_NAME = 'ui'
ENGINE_DIR_NAME = 'engine'


# env
SD_BASE_DIR = os.environ['SD_BASE_DIR']


# model folders
STABLE_DIFFUSION_MODELS_DIR_NAME = "stable-diffusion"
GFPGAN_MODELS_DIR_NAME = "gfpgan"
RealESRGAN_MODELS_DIR_NAME = "realesrgan"

# create references to dirs
env_dir_path = os.path.join(SD_BASE_DIR, ENV_DIR_NAME)

installer_dir_path = os.path.join(SD_BASE_DIR, INSTALLER_DIR_NAME)
ui_dir_path = os.path.join(SD_BASE_DIR, UI_DIR_NAME)
engine_dir_path = os.path.join(SD_BASE_DIR, ENGINE_DIR_NAME)

project_repo_dir_path = os.path.join(env_dir_path, PROJECT_REPO_DIR_NAME)
stable_diffusion_repo_dir_path = os.path.join(env_dir_path, STABLE_DIFFUSION_REPO_DIR_NAME)

project_env_dir_path = os.path.join(env_dir_path, PROJECT_ENV_DIR_NAME)

patches_dir_path = os.path.join(installer_dir_path, 'patches')

models_dir_path = os.path.join(SD_BASE_DIR, MODELS_DIR_NAME)
stable_diffusion_models_dir_path = os.path.join(models_dir_path, STABLE_DIFFUSION_MODELS_DIR_NAME)
gfpgan_models_dir_path = os.path.join(models_dir_path, GFPGAN_MODELS_DIR_NAME)
realesrgan_models_dir_path = os.path.join(models_dir_path, RealESRGAN_MODELS_DIR_NAME)


# useful functions
def get_config():
    config_path = os.path.join(SD_BASE_DIR, CONFIG_FILE_NAME)
    if not os.path.exists(config_path):
        return {}

    with open(config_path, "r") as f:
        return json.load(f)


# app context
config = get_config()
activated_env_dir_path = None
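An illustrative config.json consumed by get_config() above (these keys are the ones read elsewhere in the installer; the values are placeholders):

    {
        "update_branch": "main",
        "is_developer_mode": false,
        "stable_diffusion_commit": "f6cfebffa752ee11a7b07497b8529d5971de916c",
        "host": "localhost",
        "port": "9000"
    }
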
18
installer/installer/check_modules.py
Normal file
@@ -0,0 +1,18 @@
'''
This script is run by the `installer.helpers.modules_exist_in_env()` function
'''

import sys
import pkgutil

modules = sys.argv[1:]
missing_modules = []
for m in modules:
    if pkgutil.find_loader(m) is None:
        missing_modules.append(m)

if len(missing_modules) == 0:
    print('42')
    exit()

print('Missing modules', missing_modules)
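# Note on the protocol above (inferred from modules_exist_in_env() in
# helpers.py): the caller only checks whether the word 'Missing' appears in the
# output, so the '42' printed on success is just a human-visible "all good"
# marker, not a value anything parses.
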
80
installer/installer/helpers.py
Normal file
@@ -0,0 +1,80 @@
import os
from os import path
import subprocess
import traceback

from installer import app

def run(cmd, run_in_folder=None, env=None, get_output=False, log_the_cmd=False):
    if app.activated_env_dir_path is not None and 'micromamba activate' not in cmd:
        cmd = f'micromamba activate "{app.activated_env_dir_path}" && {cmd}'

    if run_in_folder is not None:
        cmd = f'cd "{run_in_folder}" && {cmd}'

    if log_the_cmd:
        log('running: ' + cmd)

    if get_output:
        p = subprocess.Popen(cmd, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    else:
        p = subprocess.Popen(cmd, shell=True, env=env)

    out, err = p.communicate()

    out = out.decode('utf-8') if isinstance(out, bytes) else out
    err = err.decode('utf-8') if isinstance(err, bytes) else err

    if get_output:
        return out, err

def log(msg):
    print(msg)

def modules_exist_in_env(modules, env_dir_path=app.project_env_dir_path):
    if not path.exists(env_dir_path):
        return False

    check_modules_script_path = path.join(app.installer_dir_path, 'installer', 'check_modules.py')
    module_args = ' '.join(modules)
    check_modules_cmd = f'python "{check_modules_script_path}" {module_args}'

    env = os.environ.copy()
    env['PYTHONPATH'] = app.stable_diffusion_repo_dir_path + os.pathsep + os.path.join(app.project_env_dir_path, 'lib', 'site-packages')

    if app.activated_env_dir_path != env_dir_path:
        activate_cmd = f'micromamba activate "{env_dir_path}"'
        check_modules_cmd = f'{activate_cmd} && {check_modules_cmd}'

    # activate and run the modules checker
    output, _ = run(check_modules_cmd, get_output=True, env=env)
    if 'Missing' in output:
        log(output)
        return False

    return True

def fail_with_install_error(error_msg):
    try:
        log(traceback.format_stack())
        log(f'''

Error: {error_msg}. Sorry about that, please try to:
1. Run this installer again.
2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/blob/main/Troubleshooting.md
3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB
4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues
Thanks!''')
    except:
        pass

    exit(1)

def apply_git_patches(repo_dir_path, patch_file_names):
    is_developer_mode = app.config.get('is_developer_mode', False)
    if is_developer_mode:
        return

    for patch_file_name in patch_file_names:
        patch_file_path = path.join(app.patches_dir_path, patch_file_name)
        run(f'git apply "{patch_file_path}"', run_in_folder=repo_dir_path)
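A usage sketch for run() above (the folder path is illustrative): with get_output=True, stderr is folded into stdout, so the second value of the returned tuple is None.

    out, _ = run('git status', run_in_folder='/tmp/project_repo', get_output=True, log_the_cmd=True)
    log(out)
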
34
installer/installer/main.py
Normal file
@@ -0,0 +1,34 @@
import os
import sys
from datetime import datetime

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from installer import helpers
from installer.tasks import (
    fetch_project_repo,
    apply_project_update,
    fetch_stable_diffusion_repo,
    install_stable_diffusion_packages,
    install_ui_packages,
    download_weights,
    start_ui_server,
)

tasks = [
    fetch_project_repo,
    apply_project_update,
    fetch_stable_diffusion_repo,
    install_stable_diffusion_packages,
    install_ui_packages,
    download_weights,
    start_ui_server,
]

helpers.log(f'Starting Stable Diffusion UI at {datetime.now().strftime("%d/%m/%Y %H:%M:%S")}')

def run_tasks():
    for task in tasks:
        task.run()

run_tasks()
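Each task module in the list above follows the same minimal contract, sketched here (the body is a placeholder):

    # installer/installer/tasks/<task_name>.py
    def run():
        ...  # perform one installation step; call helpers.fail_with_install_error() if it cannot proceed
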
8
installer/installer/start.bat
Normal file
@@ -0,0 +1,8 @@
@echo off
rem Never edit this file. If you really, really have to, beware that a batch script misbehaves
rem if it is overwritten while it is running (and the auto-updater will do that).
rem The trick is to update this file while another script is running, and vice versa.

call python "%SD_BASE_DIR%\installer\installer\main.py"

pause
9
installer/installer/start.sh
Executable file
@@ -0,0 +1,9 @@
#!/bin/bash

# Never edit this file. If you really, really have to, beware that a script misbehaves
# if it is overwritten while it is running (and the auto-updater will do that).
# The trick is to update this file while another script is running, and vice versa.

python "$SD_BASE_DIR/installer/installer/main.py"

read -p "Press enter to continue"
0
installer/installer/tasks/__init__.py
Normal file
30
installer/installer/tasks/apply_project_update.py
Normal file
@@ -0,0 +1,30 @@
from os import path
import shutil

from installer import app

def run():
    is_developer_mode = app.config.get('is_developer_mode', False)
    if is_developer_mode:
        return

    installer_src_path = path.join(app.project_repo_dir_path, 'installer')
    ui_src_path = path.join(app.project_repo_dir_path, 'ui')
    engine_src_path = path.join(app.project_repo_dir_path, 'engine')

    start_cmd_src_path = path.join(app.project_repo_dir_path, app.START_CMD_FILE_NAME)
    start_cmd_dst_path = path.join(app.SD_BASE_DIR, app.START_CMD_FILE_NAME)

    dev_console_cmd_src_path = path.join(app.project_repo_dir_path, app.DEV_CONSOLE_CMD_FILE_NAME)
    dev_console_cmd_dst_path = path.join(app.SD_BASE_DIR, app.DEV_CONSOLE_CMD_FILE_NAME)

    shutil.rmtree(app.installer_dir_path, ignore_errors=True)
    shutil.rmtree(app.ui_dir_path, ignore_errors=True)
    shutil.rmtree(app.engine_dir_path, ignore_errors=True)

    shutil.copytree(installer_src_path, app.installer_dir_path, dirs_exist_ok=True)
    shutil.copytree(ui_src_path, app.ui_dir_path, dirs_exist_ok=True)
    shutil.copytree(engine_src_path, app.engine_dir_path, dirs_exist_ok=True)

    shutil.copy(start_cmd_src_path, start_cmd_dst_path)
    shutil.copy(dev_console_cmd_src_path, dev_console_cmd_dst_path)
46
installer/installer/tasks/download_weights.py
Normal file
@@ -0,0 +1,46 @@
import os

from installer import app, helpers

def run():
    fetch_model('Stable Diffusion', 'sd-v1-4.ckpt', model_dir_path=app.stable_diffusion_models_dir_path, download_url='https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt', expected_file_sizes=[4265380512, 7703807346, 7703810927])
    fetch_model('Face Correction (GFPGAN)', 'GFPGANv1.4.pth', model_dir_path=app.gfpgan_models_dir_path, download_url='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth', expected_file_sizes=[348632874])
    fetch_model('Resolution Upscale (RealESRGAN x4)', 'RealESRGAN_x4plus.pth', model_dir_path=app.realesrgan_models_dir_path, download_url='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth', expected_file_sizes=[67040989])
    fetch_model('Resolution Upscale (RealESRGAN x4_anime)', 'RealESRGAN_x4plus_anime_6B.pth', model_dir_path=app.realesrgan_models_dir_path, download_url='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth', expected_file_sizes=[17938799])

def fetch_model(model_type, file_name, model_dir_path, download_url, expected_file_sizes):
    os.makedirs(model_dir_path, exist_ok=True)

    file_path = os.path.join(model_dir_path, file_name)

    if model_exists(file_name, file_path, expected_file_sizes):
        helpers.log(f'Data files (weights) necessary for {model_type} were already downloaded')
        return

    helpers.log(f'Downloading data files (weights) for {model_type}..')

    helpers.run(f'curl -L -k "{download_url}" > "{file_path}"', log_the_cmd=True)

def model_exists(file_name, file_path, expected_file_sizes):
    legacy_file_path = os.path.join(app.stable_diffusion_repo_dir_path, file_name)

    file_exists = os.path.exists(file_path)
    legacy_file_exists = os.path.exists(legacy_file_path)

    if legacy_file_exists:
        file_size = os.path.getsize(legacy_file_path)
        if file_size in expected_file_sizes:
            return True

        helpers.log(f'{file_name} is invalid. Was only {file_size} bytes in size. Downloading again..')
        os.remove(legacy_file_path)

    if file_exists:
        file_size = os.path.getsize(file_path)
        if file_size in expected_file_sizes:
            return True

        helpers.log(f'{file_name} is invalid. Was only {file_size} bytes in size. Downloading again..')
        os.remove(file_path)

    return False
27
installer/installer/tasks/fetch_project_repo.py
Normal file
@@ -0,0 +1,27 @@
from os import path

from installer import app, helpers

project_repo_git_path = path.join(app.project_repo_dir_path, '.git')

def run():
    branch_name = app.config.get('update_branch', app.DEFAULT_PROJECT_BRANCH)

    if path.exists(project_repo_git_path):
        helpers.log(f"Stable Diffusion UI's git repository was already installed. Updating from {branch_name}..")

        helpers.run("git reset --hard", run_in_folder=app.project_repo_dir_path)
        helpers.run(f'git -c advice.detachedHead=false checkout "{branch_name}"', run_in_folder=app.project_repo_dir_path)
        helpers.run("git pull", run_in_folder=app.project_repo_dir_path)
    else:
        helpers.log("\nDownloading Stable Diffusion UI..\n")
        helpers.log(f"Using the {branch_name} channel\n")

        helpers.run(f'git clone {app.PROJECT_REPO_URL} "{app.project_repo_dir_path}"')

        if path.exists(project_repo_git_path):
            helpers.log("Downloaded Stable Diffusion UI")
        else:
            helpers.fail_with_install_error(error_msg="Could not download Stable Diffusion UI")

        helpers.run(f'git -c advice.detachedHead=false checkout "{branch_name}"', run_in_folder=app.project_repo_dir_path)
37
installer/installer/tasks/fetch_stable_diffusion_repo.py
Normal file
@@ -0,0 +1,37 @@
from os import path

from installer import app, helpers

stable_diffusion_repo_git_path = path.join(app.stable_diffusion_repo_dir_path, '.git')

is_developer_mode = app.config.get('is_developer_mode', False)

def run():
    fetch_repo()

    helpers.apply_git_patches(app.stable_diffusion_repo_dir_path, patch_file_names=(
        "sd_custom.patch",
    ))

def fetch_repo():
    commit_id = app.config.get('stable_diffusion_commit', app.DEFAULT_STABLE_DIFFUSION_COMMIT)

    if path.exists(stable_diffusion_repo_git_path):
        helpers.log(f"Stable Diffusion's git repository was already installed. Using commit: {commit_id}..")

        if not is_developer_mode:
            helpers.run("git reset --hard", run_in_folder=app.stable_diffusion_repo_dir_path)
            helpers.run("git fetch origin", run_in_folder=app.stable_diffusion_repo_dir_path)
            helpers.run(f'git -c advice.detachedHead=false checkout "{commit_id}"', run_in_folder=app.stable_diffusion_repo_dir_path)
    else:
        helpers.log("\nDownloading Stable Diffusion..\n")
        helpers.log(f"Using commit: {commit_id}\n")

        helpers.run(f'git clone {app.STABLE_DIFFUSION_REPO_URL} "{app.stable_diffusion_repo_dir_path}"')

        if path.exists(stable_diffusion_repo_git_path):
            helpers.log("Downloaded Stable Diffusion")
        else:
            helpers.fail_with_install_error(error_msg="Could not download Stable Diffusion")

        helpers.run(f'git -c advice.detachedHead=false checkout "{commit_id}"', run_in_folder=app.stable_diffusion_repo_dir_path)
@ -0,0 +1,59 @@
import os
import platform
import shutil

from installer import app, helpers


def run():
    environment_file_path = get_environment_file_path()
    local_env_file_path = os.path.join(app.stable_diffusion_repo_dir_path, 'environment.yaml')

    shutil.copy(environment_file_path, local_env_file_path)

    if is_valid_env():
        helpers.log("Packages necessary for Stable Diffusion were already installed")
        return

    log_installing_header()

    env = os.environ.copy()
    env['PYTHONNOUSERSITE'] = '1'

    if not os.path.exists(app.project_env_dir_path):
        helpers.run(f'micromamba create --prefix {app.project_env_dir_path}', log_the_cmd=True)

    helpers.run(f'micromamba install -y --prefix {app.project_env_dir_path} -f {local_env_file_path}', env=env, log_the_cmd=True, run_in_folder=app.stable_diffusion_repo_dir_path)

    if is_valid_env():
        helpers.log("Installed the packages necessary for Stable Diffusion")

        app.activated_env_dir_path = app.project_env_dir_path  # so that future `run()` invocations will run in the activated env
    else:
        helpers.fail_with_install_error(error_msg="Could not install the packages necessary for Stable Diffusion")

    apply_patches()


def apply_patches():
    gfpgan_repo_dir_path = os.path.join(app.stable_diffusion_repo_dir_path, 'src', 'gfpgan')
    helpers.apply_git_patches(gfpgan_repo_dir_path, patch_file_names=(
        "gfpgan_custom.patch",
    ))


def get_environment_file_path():
    environment_file_name = 'sd-environment-win-linux-nvidia.yaml'
    if platform.system() == 'Darwin':
        environment_file_name = 'sd-environment-mac-nvidia.yaml'

    return os.path.join(app.installer_dir_path, 'yaml', environment_file_name)


def log_installing_header():
    helpers.log('''

Downloading packages necessary for Stable Diffusion..

***** !! This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient *****

''')


def is_valid_env():
    return helpers.modules_exist_in_env(('torch', 'antlr4', 'transformers', 'numpy', 'gfpgan', 'realesrgan', 'basicsr'))
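`helpers.modules_exist_in_env`, used by `is_valid_env()` above, is also not part of this diff. A minimal sketch under the assumption that it probes the environment by importing each module with the env's interpreter:

# Hypothetical sketch of the import probe behind is_valid_env().
import subprocess

def modules_exist_in_env(module_names=()):
    imports = '; '.join(f'import {name}' for name in module_names)
    # 'python' is assumed to resolve to the activated env's interpreter
    result = subprocess.run(['python', '-c', imports])
    return result.returncode == 0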
39
installer/installer/tasks/install_ui_packages.py
Normal file
@ -0,0 +1,39 @@
import os
import shutil
import platform

from installer import app, helpers


def run():
    if is_valid_env():
        helpers.log("Packages necessary for Stable Diffusion UI were already installed")
        return

    log_installing_header()

    env = os.environ.copy()
    env['PYTHONNOUSERSITE'] = '1'

    helpers.run(f'micromamba install -y --prefix {app.project_env_dir_path} -c conda-forge uvicorn fastapi', env=env, log_the_cmd=True)

    if is_valid_env():
        helpers.log("Installed the packages necessary for Stable Diffusion UI")
    else:
        helpers.fail_with_install_error(error_msg="Could not install the packages necessary for Stable Diffusion UI")


def log_installing_header():
    helpers.log('''

Downloading packages necessary for Stable Diffusion UI..

''')


def is_valid_env():
    path = os.environ['PATH']
    # os.pathsep (';' on Windows, ':' elsewhere) instead of a hard-coded ';',
    # so the lookup also works on Linux and macOS
    path += os.pathsep + os.path.join(app.project_env_dir_path, 'Scripts' if platform.system() == 'Windows' else 'bin')

    if shutil.which("uvicorn", path=path) is None:
        helpers.log("uvicorn not found!")
        return False

    return helpers.modules_exist_in_env(('uvicorn', 'fastapi'))
23
installer/installer/tasks/start_ui_server.py
Normal file
@ -0,0 +1,23 @@
import os
import platform

from installer import app, helpers


def run():
    helpers.log("\nStable Diffusion is ready!\n")

    env = os.environ.copy()
    env['SD_DIR'] = app.stable_diffusion_repo_dir_path
    # os.pathsep instead of a hard-coded ';', so PYTHONPATH and PATH are also valid on Linux/macOS
    env['PYTHONPATH'] = app.stable_diffusion_repo_dir_path + os.pathsep + os.path.join(app.project_env_dir_path, 'lib', 'site-packages')
    env['SD_UI_PATH'] = app.ui_dir_path
    env['PATH'] += os.pathsep + os.path.join(app.project_env_dir_path, 'Scripts' if platform.system() == 'Windows' else 'bin')

    helpers.log(f'PYTHONPATH={env["PYTHONPATH"]}')
    helpers.run('python --version', log_the_cmd=True)

    host = app.config.get('host', 'localhost')
    port = app.config.get('port', '9000')

    ui_server_cmd = f'uvicorn server:app --app-dir "{app.ui_dir_path}" --port {port} --host {host}'

    helpers.run(ui_server_cmd, run_in_folder=app.stable_diffusion_repo_dir_path, log_the_cmd=True, env=env)
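From its call sites, `helpers.run` forwards a command string, an optional working folder, and an optional environment to the child process. A minimal sketch under those assumptions (the real implementation is not in this diff):

# Hypothetical sketch of helpers.run, inferred only from the call sites above.
import shlex
import subprocess

def run(cmd, run_in_folder=None, env=None, log_the_cmd=False):
    if log_the_cmd:
        print('running:', cmd)
    result = subprocess.run(shlex.split(cmd), cwd=run_in_folder, env=env)
    return result.returncode == 0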
22
installer/patches/gfpgan_custom.patch
Normal file
@ -0,0 +1,22 @@
diff --git a/gfpgan/utils.py b/gfpgan/utils.py
index 74ee5a8..1357f48 100644
--- a/gfpgan/utils.py
+++ b/gfpgan/utils.py
@@ -117,14 +117,14 @@ class GFPGANer():
         # face restoration
         for cropped_face in self.face_helper.cropped_faces:
             # prepare data
-            cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
+            cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=False, float32=True)
             normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
             cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)
 
             try:
-                output = self.gfpgan(cropped_face_t, return_rgb=False, weight=weight)[0]
+                output = self.gfpgan(cropped_face_t, return_rgb=True, weight=weight)[0]
                 # convert to image
-                restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1))
+                restored_face = tensor2img(output.squeeze(0), rgb2bgr=False, min_max=(-1, 1))
             except RuntimeError as error:
                 print(f'\tFailed inference for GFPGAN: {error}.')
                 restored_face = cropped_face
@ -0,0 +1,171 @@
{
  "_name_or_path": "clip-vit-large-patch14/",
  "architectures": [
    "CLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 768,
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 77,
    "min_length": 0,
    "model_type": "clip_text_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 768,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.16.0.dev0",
    "use_bfloat16": false,
    "vocab_size": 49408
  },
  "text_config_dict": {
    "hidden_size": 768,
    "intermediate_size": 3072,
    "num_attention_heads": 12,
    "num_hidden_layers": 12,
    "projection_dim": 768
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "clip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 768,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.16.0.dev0",
    "use_bfloat16": false
  },
  "vision_config_dict": {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
  }
}
332
installer/patches/sd_custom.patch
Normal file
@ -0,0 +1,332 @@
diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
index b967b55..35ef520 100644
--- a/optimizedSD/ddpm.py
+++ b/optimizedSD/ddpm.py
@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
 from ldm.modules.diffusionmodules.util import make_beta_schedule
 from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
 from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
 
 def disabled_train(self):
     """Overwrite model.train with this function to make sure train/eval mode
@@ -506,6 +506,8 @@ class UNet(DDPM):
 
         x_latent = noise if x0 is None else x0
         # sampling
+        if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
+            self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
 
         if sampler == "plms":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
@@ -528,39 +530,46 @@ class UNet(DDPM):
         elif sampler == "ddim":
             samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning,
-                                         mask = mask,init_latent=x_T,use_original_steps=False)
+                                         mask = mask,init_latent=x_T,use_original_steps=False,
+                                         callback=callback, img_callback=img_callback)
 
         elif sampler == "euler":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "euler_a":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale)
+                                                    unconditional_guidance_scale=unconditional_guidance_scale,
+                                                    img_callback=img_callback)
 
         elif sampler == "dpm2":
             samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "heun":
             samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                         unconditional_guidance_scale=unconditional_guidance_scale)
+                                         unconditional_guidance_scale=unconditional_guidance_scale,
+                                         img_callback=img_callback)
 
         elif sampler == "dpm2_a":
             samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale)
+                                                    unconditional_guidance_scale=unconditional_guidance_scale,
+                                                    img_callback=img_callback)
 
 
         elif sampler == "lms":
             samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
+
+        yield from samples
 
         if(self.turbo):
             self.model1.to("cpu")
             self.model2.to("cpu")
 
-        return samples
-
     @torch.no_grad()
     def plms_sampling(self, cond,b, img,
                       ddim_use_original_steps=False,
@@ -599,10 +608,10 @@ class UNet(DDPM):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)
 
-        return img
+        yield from img_callback(img, len(iterator)-1)
 
     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -706,7 +715,8 @@ class UNet(DDPM):
 
     @torch.no_grad()
     def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-                      mask = None,init_latent=None,use_original_steps=False):
+                      mask = None,init_latent=None,use_original_steps=False,
+                      callback=None, img_callback=None):
 
         timesteps = self.ddim_timesteps
         timesteps = timesteps[:t_start]
@@ -730,10 +740,13 @@ class UNet(DDPM):
                                        unconditional_guidance_scale=unconditional_guidance_scale,
                                        unconditional_conditioning=unconditional_conditioning)
 
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x_dec, i)
+
         if mask is not None:
-            return x0 * mask + (1. - mask) * x_dec
+            x_dec = x0 * mask + (1. - mask) * x_dec
 
-        return x_dec
+        yield from img_callback(x_dec, len(iterator)-1)
 
 
     @torch.no_grad()
@@ -779,13 +792,16 @@ class UNet(DDPM):
 
 
     @torch.no_grad()
-    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
         cvd = CompVisDenoiser(ac)
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -807,13 +823,18 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             # Euler method
             x = x + d * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
     @torch.no_grad()
-    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
+    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with Euler method steps."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -822,6 +843,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -837,17 +860,22 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Euler method
             dt = sigma_down - sigmas[i]
             x = x + d * dt
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
 
     @torch.no_grad()
-    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                      img_callback=None):
         """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -855,6 +883,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
+
 
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
@@ -876,6 +906,9 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             if sigmas[i + 1] == 0:
                 # Euler method
@@ -895,11 +928,13 @@ class UNet(DDPM):
                 d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
                 d_prime = (d + d_2) / 2
                 x = x + d_prime * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -907,6 +942,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -924,7 +961,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
-
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigma_hat, denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
@@ -945,11 +982,13 @@ class UNet(DDPM):
 
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
+    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with DPM-Solver inspired second-order steps."""
         extra_args = {} if extra_args is None else extra_args
 
@@ -957,6 +996,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -973,6 +1014,9 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
             sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
@@ -993,11 +1037,13 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
 
 
     @torch.no_grad()
-    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
+    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
+                     img_callback=None):
         extra_args = {} if extra_args is None else extra_args
         s_in = x.new_ones([x.shape[0]])
 
@@ -1005,6 +1051,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]
 
+        print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
+
         ds = []
         for i in trange(len(sigmas) - 1, disable=disable):
 
@@ -1017,6 +1065,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
 
+            if img_callback: yield from img_callback(x, i)
 
             d = to_d(x, sigmas[i], denoised)
             ds.append(d)
@@ -1027,4 +1076,5 @@ class UNet(DDPM):
             cur_order = min(i + 1, order)
             coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
             x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
index abc3098..7a32ffe 100644
--- a/optimizedSD/openaimodelSplit.py
+++ b/optimizedSD/openaimodelSplit.py
@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
     normalization,
     timestep_embedding,
 )
-from splitAttention import SpatialTransformer
+from .splitAttention import SpatialTransformer
 
 
 class AttentionPool2d(nn.Module):
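Taken together, this patch turns every sampler loop from a function that returns the final latent into a generator that yields whatever `img_callback` yields at each step (plus once more for the final image), so intermediate previews can be streamed to the browser. A sketch of how a caller might consume it; `decode_latent` and `send_frame` are illustrative names, not from this diff:

# Hypothetical consumer of the patched, generator-based sampler.
def img_callback(x_samples, step):
    image = decode_latent(x_samples)      # illustrative: latent -> displayable image
    yield {'step': step, 'image': image}  # hand a progress frame back to the caller

for frame in model.sample(S=50, conditioning=cond, x_T=noise,
                          sampler='euler', img_callback=img_callback):
    send_frame(frame)                     # illustrative: e.g. a streaming HTTP response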
13
installer/patches/sd_env_yaml.patch
Normal file
@ -0,0 +1,13 @@
diff --git a/environment.yaml b/environment.yaml
index 7f25da8..306750f 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -23,6 +23,8 @@ dependencies:
     - torch-fidelity==0.3.0
     - transformers==4.19.2
     - torchmetrics==0.6.0
+    - pywavelets==1.3.0
+    - pandas==1.4.4
     - kornia==0.6
     - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
     - -e git+https://github.com/openai/CLIP.git@main#egg=clip
7
installer/yaml/installer-environment.yaml
Normal file
@ -0,0 +1,7 @@
name: stable-diffusion-ui-installer
channels:
  - defaults
  - conda-forge
dependencies:
  - git
  - python=3.10.5
47
installer/yaml/sd-environment-mac-nvidia.yaml
Normal file
@ -0,0 +1,47 @@
name: ldm
channels:
  - pytorch
  - conda-forge
dependencies:
  - python==3.10.5
  - pip==22.2.2

  - pytorch
  - torchvision

  - albumentations==1.2.1
  - coloredlogs==15.0.1
  - einops==0.4.1
  - grpcio==1.46.4
  - humanfriendly==10.0
  - imageio==2.21.2
  - imageio-ffmpeg==0.4.7
  - imgaug==0.4.0
  - kornia==0.6.7
  - mpmath==1.2.1
  - nomkl
  - numpy==1.23.2
  - omegaconf==2.1.1
  - onnx==1.12.0
  - onnxruntime==1.12.1
  - pudb==2022.1
  - pytorch-lightning==1.6.5
  - scipy==1.9.1
  - streamlit==1.12.2
  - sympy==1.10.1
  - tensorboard==2.9.0
  - torchmetrics==0.9.3
  - antlr4-python3-runtime=4.8
  - pip:
    - opencv-python==4.6.0.66
    - realesrgan==0.2.5.0
    - test-tube==0.7.5
    - transformers==4.21.2
    - torch-fidelity==0.3.0
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN
    - -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan
    - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1
33
installer/yaml/sd-environment-win-linux-nvidia.yaml
Normal file
@ -0,0 +1,33 @@
name: ldm
channels:
  - pytorch
  - defaults
  - conda-forge
dependencies:
  - python=3.10.5
  - pip=20.3
  - cudatoolkit=11.3
  - pytorch=1.11.0
  - torchvision=0.12.0
  - numpy=1.23.2
  - antlr4-python3-runtime=4.8
  - pip:
    - albumentations==0.4.3
    - opencv-python==4.6.0.66
    - pudb==2019.2
    - imageio==2.9.0
    - imageio-ffmpeg==0.4.2
    - pytorch-lightning==1.4.2
    - omegaconf==2.1.1
    - test-tube>=0.7.5
    - streamlit>=0.73.1
    - einops==0.3.0
    - torch-fidelity==0.3.0
    - transformers==4.19.2
    - torchmetrics==0.6.0
    - kornia==0.6
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN
    - -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan
    - -e .
69
main.py
@ -1,69 +0,0 @@
from fastapi import FastAPI, HTTPException
from starlette.responses import FileResponse
from pydantic import BaseModel

import requests

LOCAL_SERVER_URL = 'http://stability-ai:5000'
PREDICT_URL = LOCAL_SERVER_URL + '/predictions'

app = FastAPI()

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
    prompt: str
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: str = "1"
    num_inference_steps: str = "50"
    guidance_scale: str = "7.5"
    width: str = "512"
    height: str = "512"
    seed: str = "30000"
    prompt_strength: str = "0.8"

@app.get('/')
def read_root():
    return FileResponse('index.html')

@app.get('/ping')
async def ping():
    try:
        requests.get(LOCAL_SERVER_URL)
        return {'OK'}
    except:
        return {'ERROR'}

@app.post('/image')
async def image(req : ImageRequest):
    data = {
        "input": {
            "prompt": req.prompt,
            "num_outputs": req.num_outputs,
            "num_inference_steps": req.num_inference_steps,
            "width": req.width,
            "height": req.height,
            "seed": req.seed,
            "guidance_scale": req.guidance_scale,
        }
    }

    if req.init_image is not None:
        data['input']['init_image'] = req.init_image
        data['input']['prompt_strength'] = req.prompt_strength

    if req.mask is not None:
        data['input']['mask'] = req.mask

    if req.seed == "-1":
        del data['input']['seed']

    res = requests.post(PREDICT_URL, json=data)
    if res.status_code != 200:
        raise HTTPException(status_code=500, detail=res.text)

    return res.json()

@app.get('/media/ding.mp3')
def read_root():
    return FileResponse('media/ding.mp3')
BIN
media/config-v6.jpg
Normal file
Size: 48 KiB
BIN
media/download buttons.xcf
Normal file
BIN
media/download-linux.png
Normal file
Size: 14 KiB
BIN
media/download-win.png
Normal file
Size: 13 KiB
BIN
media/modifiers-v1.jpg
Normal file
Size: 83 KiB
BIN
media/shot-v9.jpg
Normal file
Size: 199 KiB
BIN
media/system-settings-v2.jpg
Normal file
Size: 40 KiB
@ -1,38 +0,0 @@
from fastapi import FastAPI
from fastapi.responses import HTMLResponse

app = FastAPI()

@app.get('/', response_class=HTMLResponse)
def read_root():
    return '''
<style>
body {
    font-family: Arial;
    font-size: 11pt;
}
pre {
    display: inline;
    background: #aaa;
    padding: 2px;
    border: 1px solid #777;
    border-radius: 3px;
}
@media (prefers-color-scheme: dark) {
    body {
        background-color: rgb(32, 33, 36);
        color: #eee;
    }
    pre {
        background: #444;
    }
}
</style>
<h4>The UI has moved to <a href="http://localhost:9000">http://localhost:9000</a>. The current address that you used (ending with :8000) will be removed in the future, so please use <a href="http://localhost:9000">http://localhost:9000</a> going ahead (and in any bookmarks you've saved).</h4>

<h4>Also, please use <pre>./server</pre> instead of <pre>docker-compose up &</pre>. To stop, please use <pre>./server stop</pre>. This will help the project better manage the startup process in the future.</h4>

<h3>Why has the address changed?</h3>
<p>The previously used port (8000) is often used by other servers, which results in port conflicts. So the project's port number has been changed, while the project is still young. Otherwise port-conflicts with 8000 will be a common source of new-user issues in the future.</p>
<p>Sorry about this, and apologies for the inconvenience :)</p>
'''
@ -1,3 +0,0 @@
requests
fastapi==0.80.0
uvicorn==0.18.2
26
server
@ -1,26 +0,0 @@
#!/bin/bash

CMD="$1"
if [ -z "$1" ]; then
    CMD="start"
fi

start_server() {
    docker-compose up -d stable-diffusion-old-port-redirect # old port 8000 server, show redirect notice
    docker-compose up stability-ai stable-diffusion-ui
}

stop_server() {
    docker-compose down
}

if [ "$CMD" == "start" ]; then
    start_server
elif [ "$CMD" == "stop" ]; then
    stop_server
elif [ "$CMD" == "restart" ]; then
    stop_server
    start_server
else
    echo "Unknown option: $1 (Expected start or stop)"
fi
18
start.sh
Executable file
@ -0,0 +1,18 @@
#!/bin/bash

echo "Stable Diffusion UI - v2.5"
echo ""

export SD_BASE_DIR=$(pwd)

echo "Working in $SD_BASE_DIR"

# Setup the packages required for the installer
installer/bootstrap/bootstrap.sh

# Test the bootstrap
git --version
python --version

# Download the rest of the installer and UI
installer/installer/start.sh
231
ui/index.html
Normal file
@ -0,0 +1,231 @@
<!DOCTYPE html>
<html>
<head>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="icon" type="image/png" href="/media/favicon-16x16.png" sizes="16x16">
    <link rel="icon" type="image/png" href="/media/favicon-32x32.png" sizes="32x32">
    <link rel="stylesheet" href="/media/main.css?v=10">
    <link rel="stylesheet" href="/media/modifier-thumbnails.css?v=1">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.2.0/css/all.min.css">
    <link rel="stylesheet" href="/media/drawingboard.min.css">
    <script src="/media/jquery-3.6.1.min.js"></script>
    <script src="/media/drawingboard.min.js"></script>
</head>
<body>
<div id="container">
    <div id="top-nav">
        <div id="logo">
            <h1>Stable Diffusion UI <small>v2.195 <span id="updateBranchLabel"></span></small></h1>
        </div>
        <ul id="top-nav-items">
            <li class="dropdown">
                <span><i class="fa fa-comments icon"></i> Help & Community</span>
                <ul id="community-links" class="dropdown-content">
                    <li><a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/Troubleshooting.md" target="_blank"><i class="fa-solid fa-circle-question fa-fw"></i> Usual problems and solutions</a></li>
                    <li><a href="https://discord.com/invite/u9yhsFmEkB" target="_blank"><i class="fa-brands fa-discord fa-fw"></i> Discord user community</a></li>
                    <li><a href="https://www.reddit.com/r/StableDiffusionUI/" target="_blank"><i class="fa-brands fa-reddit fa-fw"></i> Reddit community</a></li>
                    <li><a href="https://github.com/cmdr2/stable-diffusion-ui" target="_blank"><i class="fa-brands fa-github fa-fw"></i> Source code on GitHub</a></li>
                </ul>
            </li>
            <li class="dropdown">
                <span><i class="fa fa-gear icon"></i> Settings</span>
                <div id="system-settings" class="panel-box settings-box dropdown-content">
                    <ul id="system-settings-entries">
                        <li><b class="settings-subheader">System Settings</b></li>
                        <br/>
                        <li><input id="save_to_disk" name="save_to_disk" type="checkbox"> <label for="save_to_disk">Automatically save to <input id="diskPath" name="diskPath" size="40" disabled></label></li>
                        <li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
                        <li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode <small>(generates images faster, but uses an additional 1 GB of GPU memory)</small></label></li>
                        <li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU <small>(warning: this will be *very* slow)</small></label></li>
                        <li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision <small>(for GPU-only. warning: this will consume more VRAM)</small></label></li>
                        <!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
                        <br/>
                        <li><input id="use_beta_channel" name="use_beta_channel" type="checkbox"> <label for="use_beta_channel">🔥Beta channel. Get the latest features immediately (but could be less stable). Please restart the program after changing this.</label></li>
                    </ul>
                </div>
            </li>
        </ul>
    </div>

    <div class="flex-container">
        <div id="editor" class="col-fixed-10">
            <div id="server-status">
                <div id="server-status-color">●</div>
                <span id="server-status-msg">Stable Diffusion is starting..</span>
            </div>
            <div id="editor-inputs">
                <div id="editor-inputs-prompt" class="row">
                    <label for="prompt">Prompt</label>
                    <textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
                </div>

                <div id="editor-inputs-init-image" class="row">
                    <label for="init_image"><b>Initial Image:</b> (optional) </label> <input id="init_image" name="init_image" type="file" /><br/>

                    <div id="init_image_preview_container" class="image_preview_container">
                        <img id="init_image_preview" src="" width="100" height="100" />
                        <button class="init_image_clear image_clear_btn">X</button>

                        <br/>
                        <input id="enable_mask" name="enable_mask" type="checkbox"> <label for="enable_mask">In-Painting (beta) <small>(select the area which the AI will paint into)</small></label>
                        <div id="inpaintingEditor"></div>
                    </div>
                </div>

                <div id="editor-inputs-tags-container" class="row">
                    <label>Image Modifiers: <small>(click an Image Modifier to remove it)</small></label>
                    <div id="editor-inputs-tags-list"></div>
                </div>

                <button id="makeImage">Make Image</button>
                <button id="stopImage" class="secondaryButton">Stop All</button>
            </div>

            <div class="line-separator"> </div>

            <div id="editor-settings" class="panel-box settings-box">
                <h4 class="collapsible">Image Settings</h4>
                <ul id="editor-settings-entries" class="collapsible-content">
                    <li><b class="settings-subheader">Image Settings</b></li>
                    <li class="pl-5"><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
                    <li class="pl-5"><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> (images at once)</li>
                    <li id="samplerSelection" class="pl-5"><label for="sampler">Sampler:</label>
                        <select id="sampler" name="sampler">
                            <option value="plms" selected>plms</option>
                            <option value="ddim">ddim</option>
                            <option value="heun">heun</option>
                            <option value="euler">euler</option>
                            <option value="euler_a">euler_a</option>
                            <option value="dpm2">dpm2</option>
                            <option value="dpm2_a">dpm2_a</option>
                            <option value="lms">lms</option>
                        </select>
                    </li>
                    <li class="pl-5"><label>Image Size: </label>
                        <select id="width" name="width" value="512">
                            <option value="128">128 (*)</option>
                            <option value="192">192</option>
                            <option value="256">256 (*)</option>
                            <option value="320">320</option>
                            <option value="384">384</option>
                            <option value="448">448</option>
                            <option value="512" selected>512 (*)</option>
                            <option value="576">576</option>
                            <option value="640">640</option>
                            <option value="704">704</option>
                            <option value="768">768 (*)</option>
                            <option value="832">832</option>
                            <option value="896">896</option>
                            <option value="960">960</option>
                            <option value="1024">1024 (*)</option>
                            <option value="1280">1280</option>
                            <option value="1536">1536</option>
                            <option value="1792">1792</option>
                            <option value="2048">2048</option>
                        </select>
                        <label for="width"><small>(width)</small></label>
                        <select id="height" name="height" value="512">
                            <option value="128">128 (*)</option>
                            <option value="192">192</option>
                            <option value="256">256 (*)</option>
                            <option value="320">320</option>
                            <option value="384">384</option>
                            <option value="448">448</option>
                            <option value="512" selected>512 (*)</option>
                            <option value="576">576</option>
                            <option value="640">640</option>
                            <option value="704">704</option>
                            <option value="768">768 (*)</option>
                            <option value="832">832</option>
                            <option value="896">896</option>
                            <option value="960">960</option>
                            <option value="1024">1024 (*)</option>
                            <option value="1280">1280</option>
                            <option value="1536">1536</option>
                            <option value="1792">1792</option>
                            <option value="2048">2048</option>
                        </select>
                        <label for="height"><small>(height)</small></label>
                    </li>
                    <li class="pl-5"><label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" size="4" value="50"></li>
                    <li class="pl-5"><label for="guidance_scale_slider">Guidance Scale:</label> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4"></li>
                    <li class="pl-5"><span id="prompt_strength_container"><label for="prompt_strength_slider">Prompt Strength:</label> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></span></li>

                    <br/>

                    <li><b class="settings-subheader">Prompt Settings</b></li>
                    <li class="pl-5"><label for="negative_prompt">Negative Prompt:</label> <input id="negative_prompt" name="negative_prompt" size="55"></li>

                    <br/>

                    <li><b class="settings-subheader">Render Settings</b></li>
                    <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview of the image <small>(uses more VRAM, slightly slower image creation)</small></label></li>
                    <li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox" checked> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
                    <li class="pl-5">
                        <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale the image to 4x resolution using </label>
                        <select id="upscale_model" name="upscale_model">
                            <option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
                            <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
                        </select>
                    </li>
                    <li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
                    <br/>
                    <li><small>The system-related settings have been moved to the top-right corner.</small></li>
                </ul>
            </div>

            <div id="editor-modifiers" class="panel-box">
                <h4 class="collapsible">Image Modifiers (art styles, tags etc)</h4>
                <div id="editor-modifiers-entries" class="collapsible-content">
                    <label for="preview-image">Image Style:</label>
                    <select id="preview-image" name="preview-image" value="portrait">
                        <option value="portrait" selected="">Face</option>
                        <option value="landscape">Landscape</option>
                    </select>

                    <label for="modifier-card-size-slider">Thumbnail Size:</label>
                    <input id="modifier-card-size-slider" name="modifier-card-size-slider" value="0" type="range" min="-3" max="5">
                </div>
            </div>
        </div>

        <div id="preview" class="col-free">
            <div id="initial-text">
                Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section and selecting the desired modifiers.<br/><br/>Click "Advanced Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
            </div>
            <div id="preview-tools">
                <button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can"></i> Clear All</button>
            </div>
        </div>
    </div>

    <div class="line-separator"> </div>

    <div id="footer" class="panel-box">
        <p>If you found this project useful and want to help keep it alive, please <a href="https://ko-fi.com/cmdr2_stablediffusion_ui" target="_blank"><img src="media/kofi.png" id="coffeeButton"></a> to help cover the cost of development and maintenance! Thank you for your support!</p>
        <p>Please feel free to join the <a href="https://discord.com/invite/u9yhsFmEkB" target="_blank">discord community</a> or <a href="https://github.com/cmdr2/stable-diffusion-ui/issues" target="_blank">file an issue</a> if you have any problems or suggestions in using this interface.</p>
        <div id="footer-legal">
            <p><b>Disclaimer:</b> The authors of this project are not responsible for any content generated using this interface.</p>
            <p>The license of this software forbids you from sharing any content that violates any laws, causes harm to a person, disseminates any personal information that would be meant for harm, <br/>spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read <a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/LICENSE" target="_blank">the license</a>.</p>
            <p>By using this software, you consent to the terms and conditions of the license.</p>
        </div>
    </div>
</div>
</body>

<script src="media/main.js?v=15"></script>
<script>
async function init() {
    await loadModifiers()
    await getDiskPath()
    await getAppConfig()

    setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
    healthCheck()

    playSound()
}

init()
</script>
</html>
5
ui/media/drawingboard.min.css
vendored
Normal file
4
ui/media/drawingboard.min.js
vendored
Normal file
BIN
ui/media/favicon-16x16.png
Normal file
Size: 466 B
BIN
ui/media/favicon-32x32.png
Normal file
Size: 973 B
2
ui/media/jquery-3.6.1.min.js
vendored
Normal file
BIN
ui/media/kofi.png
Normal file
Size: 11 KiB
412
ui/media/main.css
Normal file
@ -0,0 +1,412 @@
body {
    font-family: Arial, Helvetica, sans-serif;
    font-size: 11pt;
    background-color: rgb(32, 33, 36);
    color: #eee;
}
a {
    color: rgb(0, 102, 204);
}
a:visited {
    color: rgb(0, 102, 204);
}
label {
    font-size: 10pt;
}
#prompt {
    width: 100%;
    height: 65pt;
    box-sizing: border-box;
}
@media screen and (max-width: 600px) {
    #prompt {
        width: 95%;
    }
}
.image_preview_container {
    /* display: none; */
    margin-top: 10pt;
}
.image_clear_btn {
    position: absolute;
    transform: translateX(-50%) translateY(-35%);
    background: black;
    color: white;
    border: 2pt solid #ccc;
    padding: 0;
    cursor: pointer;
    outline: inherit;
    border-radius: 8pt;
    width: 16pt;
    height: 16pt;
    font-family: Verdana;
    font-size: 8pt;
}
.settings-box ul {
    font-size: 9pt;
    margin-bottom: 5px;
    padding-left: 10px;
    list-style-type: none;
}
.settings-box li {
    padding-bottom: 4pt;
}
.editor-slider {
    vertical-align: middle;
}
.outputMsg {
    font-size: small;
    padding-bottom: 3pt;
}
#progressBar {
    font-size: small;
}
#footer {
    font-size: small;
    padding-left: 10pt;
    background: none;
}
#footer-legal {
    font-size: 8pt;
}
.imgSeedLabel {
    position: absolute;
    transform: translateX(-100%);
    margin-top: 5pt;
    margin-left: -5pt;
    font-size: 10pt;

    background-color: #333;
    opacity: 0.8;
    color: #ddd;
    border-radius: 3pt;
    padding: 1pt 3pt;
}
.imgUseBtn {
    position: absolute;
    transform: translateX(-100%);
    margin-top: 30pt;
    margin-left: -5pt;
}
.imgSaveBtn {
    position: absolute;
    transform: translateX(-100%);
    margin-top: 55pt;
    margin-left: -5pt;
}
.imgItem {
    display: inline;
    padding-right: 10px;
}
.imgItemInfo {
    opacity: 0.5;
}

#container {
    width: 90%;
    margin-left: auto;
    margin-right: auto;
}
@media screen and (max-width: 1800px) {
    #container {
        width: 100%;
    }
}
#logo small {
    font-size: 11pt;
}
#editor {
    padding: 5px;
}
#editor label {
    font-weight: normal;
}
.settings-box label small {
    color: rgb(153, 153, 153);
}
#preview {
    padding: 5px;
}
#editor-inputs {
    margin-bottom: 20px;
}
#editor-inputs-prompt {
    flex: 1;
}
#editor-inputs .row {
    padding-bottom: 10px;
}
#makeImage {
    border-radius: 6px;
}
#editor-modifiers h5 {
    padding: 5pt 0;
    margin: 0;
}
#makeImage {
    flex: 0 0 70px;
    background: rgb(80, 0, 185);
    border: 2px solid rgb(40, 0, 78);
    color: rgb(255, 221, 255);
    width: 100%;
    height: 30pt;
}
#makeImage:hover {
    background: rgb(93, 0, 214);
}
#stopImage {
    flex: 0 0 70px;
    background: rgb(132, 8, 0);
    border: 2px solid rgb(122, 29, 0);
    color: rgb(255, 221, 255);
    width: 100%;
    height: 30pt;
    border-radius: 6px;
    display: none;
}
#stopImage:hover {
    background: rgb(177, 27, 0);
}
.flex-container {
    display: flex;
}
.col-50 {
    flex: 50%;
}
.col-fixed-10 {
    flex: 0 0 380pt;
}
.col-free {
    flex: 1;
}
.collapsible {
    cursor: pointer;
}
.collapsible-content {
    display: none;
    padding-left: 15px;
}
.collapsible-content h5 {
    padding: 5pt 0pt;
    margin: 0;
    font-size: 10pt;
}
.collapsible-handle {
    color: white;
    padding-right: 5px;
}
.panel-box {
    background: rgb(44, 45, 48);
    border: 1px solid rgb(47, 49, 53);
    border-radius: 7px;
    padding: 5px;
    margin-bottom: 15px;
    box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.panel-box h4 {
    margin: 0;
    padding: 2px 0;
}
#editor-modifiers .editor-modifiers-leaf {
    padding-top: 10pt;
    padding-bottom: 10pt;
}
#preview {
    margin-left: 10pt;
}
img {
    box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.line-separator {
    background: rgb(56, 56, 56);
    height: 1pt;
    margin: 15pt 0;
}
#editor-inputs-tags-container {
    margin-top: 5pt;
    display: none;
}
#server-status {
    display: inline;
    float: right;
    transform: translateY(-5pt);
}
#server-status-color {
    /* width: 8pt;
    height: 8pt;
    border-radius: 4pt; */
    font-size: 14pt;
    color: rgb(128, 87, 0);
    /* background-color: rgb(197, 1, 1); */
    /* transform: translateY(15%); */
    display: inline;
}
#server-status-msg {
    color: rgb(128, 87, 0);
    padding-left: 2pt;
    font-size: 10pt;
}
.preview-prompt {
    font-size: 16pt;
    margin-bottom: 10pt;
}
#coffeeButton {
    height: 23px;
    transform: translateY(25%);
}

#inpaintingEditor {
    width: 300pt;
    height: 300pt;
    margin-top: 5pt;
}
.drawing-board-canvas-wrapper {
    background-size: 100% 100%;
}
.drawing-board-control > button {
    background-color: #eee;
    border-radius: 3pt;
}
.drawing-board-control-inner {
    background-color: #eee;
    border-radius: 3pt;
}
#inpaintingEditor canvas {
    opacity: 0.6;
}
#enable_mask {
    margin-top: 8pt;
}

#top-nav {
    padding-top: 3pt;
    padding-bottom: 15pt;
}
#top-nav .icon {
    padding-right: 4pt;
    font-size: 14pt;
    transform: translateY(1pt);
}
#logo {
    display: inline;
}
#logo h1 {
    display: inline;
}
#top-nav-items {
    list-style-type: none;
    display: inline;
    float: right;
}
#top-nav-items > li {
    float: left;
    display: inline;
    padding-left: 20pt;
    cursor: default;
}
#initial-text {
    padding-top: 15pt;
    padding-left: 4pt;
}
.settings-subheader {
    font-size: 10pt;
    font-weight: bold;
}
.pl-5 {
    padding-left: 5pt;
}
#system-settings {
    width: 360pt;
    transform: translateX(-100%) translateX(70pt);

    padding-top: 10pt;
    padding-bottom: 10pt;
}
#system-settings ul {
    margin: 0;
    padding: 0;
}
#system-settings li {
    padding-left: 5pt;
}
#community-links {
    list-style-type: none;
    margin: 0;
    padding: 12pt;
    padding-bottom: 0pt;
    transform: translateX(-15%);
}
#community-links li {
    padding-bottom: 12pt;
    display: block;
    font-size: 10pt;
}
#community-links li .fa-fw {
    padding-right: 2pt;
}
#community-links li a {
    color: white;
    text-decoration: none;
}
.dropdown {
    overflow: hidden;
}
.dropdown-content {
    display: none;
    position: absolute;
    z-index: 2;

    background: rgb(18, 18, 19);
    border: 2px solid rgb(37, 38, 41);
    border-radius: 7px;
    padding: 5px;
    margin-bottom: 15px;
    box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.dropdown:hover .dropdown-content {
    display: block;
}

.imageTaskContainer {
    border: 1px solid #333;
    margin-bottom: 10pt;
    padding: 5pt;
    border-radius: 5pt;
    box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.taskStatusLabel {
    float: left;
    font-size: 8pt;
    background: rgb(44, 45, 48);
    border: 1px solid rgb(61, 62, 66);
    padding: 2pt 4pt;
    border-radius: 2pt;
    margin-right: 5pt;
}
.activeTaskLabel {
    background: rgb(0, 90, 30);
    border: 1px solid rgb(0, 75, 19);
    color: rgb(204, 255, 217)
}
.secondaryButton {
    background: rgb(132, 8, 0);
    border: 1px solid rgb(122, 29, 0);
    color: rgb(255, 221, 255);
    padding: 3pt 6pt;
    border-radius: 5px;
}
.secondaryButton:hover {
    background: rgb(177, 27, 0);
}
.stopTask {
    float: right;
}
#preview-tools {
    display: none;
    padding: 4pt;
}
.taskConfig {
    font-size: 10pt;
    color: #aaa;
    margin-bottom: 5pt;
}
1351 lines: ui/media/main.js (new file)
216 lines: ui/media/modifier-thumbnails.css (new file)
@@ -0,0 +1,216 @@
.modifier-card {
    box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2);
    transition: 0.1s;
    border-radius: 7px;
    margin: 3pt 3pt;
    float: left;
    width: 8em;
    height: 11.5em;
    display: grid;
    grid-template-columns: 1fr;
    grid-template-rows: 8em 3.5em;
    gap: 0px 0px;
    grid-auto-flow: row;
    grid-template-areas:
        "modifier-card-image-container"
        "modifier-card-container";
    border: 2px solid rgba(255, 255, 255, .05);
    cursor: pointer;
}
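The grid-template-areas above imply a two-row card: an image container stacked over a label container. A sketch of a matching DOM builder (the function, its arguments, and the overlay glyph are hypothetical, not part of this diff):

    // Hypothetical builder: the class names are the ones styled in this file;
    // the call signature and "+" overlay are illustrative guesses.
    function createModifierCard(label, imageUrl) {
        const card = document.createElement('div')
        card.className = 'modifier-card'
        card.innerHTML = `
            <div class="modifier-card-image-container">
                <div class="modifier-card-overlay"></div>
                <div class="modifier-card-image-overlay">+</div>
                <img src="${imageUrl}">
            </div>
            <div class="modifier-card-container">
                <div class="modifier-card-label">${label}</div>
            </div>`
        return card
    }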
.modifier-card-size_5 {
    width: 18em;
    grid-template-rows: 18em 3.5em;
    height: 21.5em;
}
.modifier-card-size_5 .modifier-card-image-overlay {
    font-size: 8em;
}
.modifier-card-size_4 {
    width: 14em;
    grid-template-rows: 14em 3.5em;
    height: 17.5em;
}
.modifier-card-size_4 .modifier-card-image-overlay {
    font-size: 7em;
}
.modifier-card-size_3 {
    width: 11em;
    grid-template-rows: 11em 3.5em;
    height: 14.5em;
}
.modifier-card-size_3 .modifier-card-image-overlay {
    font-size: 6em;
}
.modifier-card-size_2 {
    width: 10em;
    grid-template-rows: 10em 3.5em;
    height: 13.5em;
}
.modifier-card-size_2 .modifier-card-image-overlay {
    font-size: 6em;
}
.modifier-card-size_1 {
    width: 9em;
    grid-template-rows: 9em 3.5em;
    height: 12.5em;
}
.modifier-card-size_1 .modifier-card-image-overlay {
    font-size: 5em;
}
.modifier-card-size_-1 {
    width: 7em;
    grid-template-rows: 7em 3.5em;
    height: 10.5em;
}
.modifier-card-size_-1 .modifier-card-image-overlay {
    font-size: 4em;
}
.modifier-card-size_-2 {
    width: 6em;
    grid-template-rows: 6em 3.5em;
    height: 9.5em;
}
.modifier-card-size_-2 .modifier-card-image-overlay {
    font-size: 3em;
}
.modifier-card-size_-3 {
    width: 5em;
    grid-template-rows: 5em 3.5em;
    height: 8.5em;
}
.modifier-card-size_-3 .modifier-card-image-overlay {
    font-size: 3em;
}
.modifier-card-size_-3 .modifier-card-label {
    font-size: 0.8em;
}
.modifier-card-tiny {
    width: 6em;
    height: 9.5em;
    grid-template-rows: 6em 3.5em;
}
.modifier-card-tiny .modifier-card-image-overlay {
    font-size: 4em;
}
.modifier-card:hover {
    transform: scale(1.05);
    box-shadow: 0 5px 16px 5px rgba(0, 0, 0, 0.25);
}
.modifier-card-image-container {
    border-radius: 5px 5px 0 0;
    width: inherit;
    height: 100%;
    background-color: rgba(0, 0, 0, .2);
    grid-area: modifier-card-image-container;
    position: relative;
    display: flex;
    align-items: center;
    justify-content: center;
    color: rgb(255 255 255 / 8%);
}
.modifier-card-image-container img {
    width: inherit;
    height: 100%;
    border-radius: 5px 5px 0 0;
}
.modifier-card-image-container * {
    position: absolute;
}
.modifier-card-container {
    text-align: center;
    background-color: rgba(0, 0, 0, 0.5);
    border-radius: 0 0 5px 5px;
    display: flex;
    justify-content: center;
    align-items: center;
    grid-area: modifier-card-container;
    font-weight: 100;
    font-size: .9em;
    width: inherit;
}
.modifier-card-label {
    padding: 4px;
    word-break: break-word;
}
.modifier-card-image-overlay {
    width: inherit;
    height: inherit;
    background-color: rgb(0 0 0 / 50%);
    z-index: 2;
    position: absolute;
    border-radius: 5px 5px 0 0;
    opacity: 0;
    font-size: 5em;
    font-weight: 900;
    color: rgb(255 255 255 / 50%);
    display: flex;
    align-items: center;
    justify-content: center;
}
.modifier-card-overlay {
    width: inherit;
    height: inherit;
    position: absolute;
    z-index: 3;
}
.modifier-card:hover > .modifier-card-image-container .modifier-card-image-overlay {
    opacity: 1;
}
.modifier-card:hover > .modifier-card-image-container img {
    filter: blur(.1em);
}
.modifier-card:active {
    transform: scale(0.95);
    box-shadow: 0 5px 16px 5px rgba(0, 0, 0, 0.5);
}
#preview-image {
    margin-top: 0.5em;
    margin-bottom: 0.5em;
}
.modifier-card-active {
    border: 2px solid rgb(179 82 255 / 94%);
    box-shadow: 0 0px 10px 0 rgb(170 0 229 / 58%);
}
.tooltip {
    position: relative;
    display: inline-block;
}
.tooltip .tooltip-text {
    visibility: hidden;
    background: rgb(101, 97, 181);
    background: linear-gradient(180deg, rgba(101, 97, 181, 1) 0%, rgba(47, 45, 85, 1) 100%);
    color: #fff;
    text-align: center;
    border-radius: 6px;
    padding: 5px;
    position: absolute;
    z-index: 1;
    top: 105%;
    left: 39%;
    margin-left: -60px;
    opacity: 0;
    transition: opacity 0.3s;
    border: 2px solid rgb(90 100 177 / 94%);
    box-shadow: 0px 10px 20px 5px rgb(11 0 58 / 55%);
    width: 10em;
}
.tooltip .tooltip-text::after {
    content: "";
    position: absolute;
    top: -0.9em;
    left: 50%;
    margin-left: -5px;
    border-width: 5px;
    border-style: solid;
    border-color: transparent transparent rgb(90 100 177 / 94%) transparent;
}
.tooltip:hover .tooltip-text {
    visibility: visible;
    opacity: 1;
}
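These tooltip rules expect the bubble to be a nested element of the hover target, positioned below it with a small arrow drawn by the ::after border trick. A minimal sketch of matching markup (the label and hint text are placeholders, not from this diff):

    // Illustrative markup: hovering the wrapper fades in the nested bubble.
    const hint = document.createElement('span')
    hint.className = 'tooltip'
    hint.innerHTML = 'Seed <span class="tooltip-text">Example hint text</span>'
    document.body.appendChild(hint)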
#modifier-card-size-slider {
    width: 6em;
    margin-bottom: 0.5em;
    vertical-align: middle;
}
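The numeric suffixes on .modifier-card-size_<n> (from -3 up to 5) line up with this size slider. A hedged sketch of the wiring (the event handling and class swapping are assumptions; the actual handler would live in the new ui/media/main.js):

    // Sketch: resize every card as the slider moves. Naively resets the class
    // list, so it would clobber other modifier-card-* state in a real UI.
    const sizeSlider = document.querySelector('#modifier-card-size-slider')
    if (sizeSlider) {
        sizeSlider.addEventListener('input', () => {
            document.querySelectorAll('.modifier-card').forEach(card => {
                card.className = 'modifier-card modifier-card-size_' + sizeSlider.value
            })
        })
    }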
BIN  ui/media/modifier-thumbnails/artist/artstation/landscape-0.jpg (new file, 38 KiB)
BIN  ui/media/modifier-thumbnails/artist/artstation/portrait-0.jpg (new file, 27 KiB)
BIN  four more thumbnails (23, 27, 36, 33 KiB; file names not captured in this extract)
BIN  ui/media/modifier-thumbnails/artist/by_alex_grey/landscape-0.jpg (new file, 90 KiB)
BIN  ui/media/modifier-thumbnails/artist/by_alex_grey/portrait-0.jpg (new file, 59 KiB)
BIN  six more thumbnails (66, 66, 75, 45, 76, 41 KiB; file names not captured)
BIN  ui/media/modifier-thumbnails/artist/by_artgerm/landscape-0.jpg (new file, 51 KiB)
BIN  ui/media/modifier-thumbnails/artist/by_artgerm/portrait-0.jpg (new file, 34 KiB)
BIN  four more thumbnails (94, 56, 77, 44 KiB; file names not captured)
BIN  ui/media/modifier-thumbnails/artist/by_banksy/landscape-0.jpg (new file, 48 KiB)
BIN  ui/media/modifier-thumbnails/artist/by_banksy/portrait-0.jpg (new file, 54 KiB)
BIN  ui/media/modifier-thumbnails/artist/by_beeple/landscape-0.jpg (new file, 46 KiB)
BIN  ui/media/modifier-thumbnails/artist/by_beeple/portrait-0.jpg (new file, 31 KiB)
BIN  two more thumbnails (30, 32 KiB; file names not captured)