forked from extern/easydiffusion
Compare commits
200 Commits
Author | SHA1 | Date | |
---|---|---|---|
13721f160e | |||
102e5623f7 | |||
9a975321db | |||
6743ec14f1 | |||
daec5e5426 | |||
a2b55c0df7 | |||
01320ac735 | |||
84bddee2ce | |||
5f6b798e35 | |||
9137f3793e | |||
73e92a688f | |||
7a9f219037 | |||
a4728190c0 | |||
04d67a24b6 | |||
55049ba9d2 | |||
e0b33a4feb | |||
fb5c0a3db7 | |||
8154a5709b | |||
3a6780bd50 | |||
b7a76d4212 | |||
ba7cae683a | |||
243556656e | |||
6662dc66d5 | |||
107112d1c4 | |||
c5d343750c | |||
09b76dcd93 | |||
fb95d76e34 | |||
cf2408013e | |||
d8543d1358 | |||
d8b79d8b5c | |||
c2bcf89f9a | |||
5cb24f992c | |||
21394b7d45 | |||
768fb2583a | |||
6e07b2354f | |||
0cd0d6aadf | |||
d6c535c45c | |||
babdb5b718 | |||
0ea8d038be | |||
c804a9971e | |||
4d7f6e4236 | |||
6036ccdc1c | |||
5eeef41d8c | |||
bacf266f0d | |||
ba5c54043b | |||
e33c858829 | |||
e47e54de3f | |||
54f9e9bfe9 | |||
e1875c872c | |||
27b8e173e8 | |||
47e3884994 | |||
e483071894 | |||
af090cb289 | |||
9bbb25f16c | |||
3007f00c9b | |||
352dcfbe30 | |||
60b181a545 | |||
600482e2d7 | |||
39ccbbd72e | |||
6e69cbcdaf | |||
bf6c222a3b | |||
6afcf7570a | |||
c3126f7b4d | |||
cb3b542363 | |||
1a5e15608c | |||
64a751ad79 | |||
57efe31959 | |||
39350d554b | |||
8f4e03550c | |||
d03823fb20 | |||
00ec2b9d6f | |||
70e4bc4582 | |||
5e56a437ef | |||
22ffd25619 | |||
127949c56b | |||
cdfef16a0e | |||
1595f1ed05 | |||
1cae39b105 | |||
8189b38e6e | |||
c240d6932a | |||
c4548d9396 | |||
aea70e3dd4 | |||
3b01e65e11 | |||
341c810bbb | |||
85fd2dfaaa | |||
bf4bc38c6c | |||
aa8b50280b | |||
62553dc0fa | |||
25639cc3f8 | |||
7982a9ae25 | |||
aa01fd058e | |||
ef7e1575bd | |||
fb075a0013 | |||
d1738baf44 | |||
7eb29fa91b | |||
34c00fb77f | |||
7965318d9f | |||
e73a514e29 | |||
35ff4f439e | |||
12e0194c7f | |||
d1ac90e16d | |||
7dc7f70582 | |||
84d606408a | |||
d103693811 | |||
0dbce101ac | |||
cb81e2aacd | |||
6cd0b530c5 | |||
35571eb14d | |||
8e6102ad9a | |||
80bc80dc2c | |||
a483bd0800 | |||
47a39569bc | |||
f00e1a92d8 | |||
a289945e8e | |||
b750c0d7c3 | |||
a244a6873a | |||
ceff4f06c1 | |||
0307114c8e | |||
92030a3917 | |||
73ace121a4 | |||
44d5809e46 | |||
5c4e6f7e96 | |||
8c032579b8 | |||
b53935bfd4 | |||
d4db027cfa | |||
27963decc9 | |||
25f488c6e1 | |||
07bd580050 | |||
fb32a38d96 | |||
ac0961d7d4 | |||
6b943f88d1 | |||
4bbf683d15 | |||
d0e50584ea | |||
b57649828d | |||
1f44a283b3 | |||
9947c3bcfb | |||
8faf6b9f52 | |||
e45cbbf1ca | |||
1a5b6ef260 | |||
096556d8c9 | |||
97919c7e87 | |||
0aa7968503 | |||
bd1bc78953 | |||
6ce6dc3ff6 | |||
e6346775e7 | |||
d03eed3859 | |||
afb88616d8 | |||
543f13f9a3 | |||
af5c68051a | |||
5b7cd11de8 | |||
d3c3496e55 | |||
c08c8b2789 | |||
069315e434 | |||
7e4ad83a1c | |||
400f9fd680 | |||
38951f5581 | |||
b5329ee93d | |||
c568bca69e | |||
7b2be12587 | |||
099fde2652 | |||
83e5410945 | |||
b330c34b29 | |||
e3184622e8 | |||
28f822afe0 | |||
a2af811ad2 | |||
cde8c2d3bd | |||
79cc84b611 | |||
f1de0be679 | |||
854e3d3576 | |||
dbac2655f5 | |||
0f656dbf2f | |||
3fbb3f6773 | |||
8820814002 | |||
b40fb3a422 | |||
aa59575df3 | |||
accfec9007 | |||
16410d90b8 | |||
27c6113287 | |||
f4a6910ab4 | |||
bad89160cc | |||
5782966d63 | |||
ba2c966329 | |||
f8dee7e25f | |||
a8151176d7 | |||
9ee0b7fe2e | |||
fb6a7e04f5 | |||
bfdf487d52 | |||
b7aac1501d | |||
273525e6f9 | |||
064a4938c1 | |||
182236e742 | |||
75cb052cca | |||
d4a378827f | |||
592d5e8c40 | |||
733150111d | |||
cbe91251ac | |||
1283c6483d | |||
f24d3d69af | |||
7984327d81 | |||
ef90832aea |
31
CHANGES.md
31
CHANGES.md
@ -1,11 +1,30 @@
|
|||||||
# What's new?
|
# What's new?
|
||||||
|
|
||||||
|
## v2.5
|
||||||
|
### Major Changes
|
||||||
|
- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
|
||||||
|
- **Full support for Stable Diffusion 2.1** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names.
|
||||||
|
- **Memory optimized Stable Diffusion 2.1** - you can now use 768x768 models for SD 2.1, with the same low VRAM optimizations that we've always had for SD 1.4.
|
||||||
|
- **6 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps!
|
||||||
|
- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
|
||||||
|
- **Fast loading/unloading of VAEs** - No longer needs to reload the entire Stable Diffusion model, each time you change the VAE
|
||||||
|
- **Database of known models** - automatically picks the right configuration for known models. E.g. we automatically detect and apply "v" parameterization (required for some SD 2.0 models), and "fp32" attention precision (required for some SD 2.1 models).
|
||||||
|
- **Color correction for img2img** - an option to preserve the color profile (histogram) of the initial image. This is especially useful if you're getting red-tinted images after inpainting/masking.
|
||||||
|
- **Three GPU Memory Usage Settings** - `High` (fastest, maximum VRAM usage), `Balanced` (default - almost as fast, significantly lower VRAM usage), `Low` (slowest, very low VRAM usage). The `Low` setting is applied automatically for GPUs with less than 4 GB of VRAM.
|
||||||
|
- **Save metadata as JSON** - You can now save the metadata files as either text or json files (choose in the Settings tab).
|
||||||
|
- **Major rewrite of the code** - Most of the codebase has been reorganized and rewritten, to make it more manageable and easier for new developers to contribute features. We've separated our core engine into a new project called `sdkit`, which allows anyone to easily integrate Stable Diffusion (and related modules like GFPGAN etc) into their programming projects (via a simple `pip install sdkit`): https://github.com/easydiffusion/sdkit/
|
||||||
|
- **Name change** - Last, and probably the least, the UI is now called "Easy Diffusion". It indicates the focus of this project - an easy way for people to play with Stable Diffusion.
|
||||||
|
|
||||||
|
Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
|
||||||
|
|
||||||
## v2.4
|
## v2.4
|
||||||
### Major Changes
|
### Major Changes
|
||||||
|
- **Allow reordering the task queue** (by dragging and dropping tasks). Thanks @madrang
|
||||||
- **Automatic scanning for malicious model files** - using `picklescan`, and support for `safetensor` model format. Thanks @JeLuf
|
- **Automatic scanning for malicious model files** - using `picklescan`, and support for `safetensor` model format. Thanks @JeLuf
|
||||||
|
- **Image Editor** - for drawing simple images for guiding the AI. Thanks @mdiller
|
||||||
|
- **Use pre-trained hypernetworks** - for improving the quality of images. Thanks @C0bra5
|
||||||
- **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder
|
- **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder
|
||||||
- **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang . More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs
|
- **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang . More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs
|
||||||
- **Image Editor** - for drawing simple images for guiding the AI. Thanks @mdiller
|
|
||||||
- **Cleaner UI design** - Show settings and help in new tabs, instead of dropdown popups (which were buggy). Thanks @mdiller
|
- **Cleaner UI design** - Show settings and help in new tabs, instead of dropdown popups (which were buggy). Thanks @mdiller
|
||||||
- **Progress bar.** Thanks @mdiller
|
- **Progress bar.** Thanks @mdiller
|
||||||
- **Custom Image Modifiers** - You can now save your custom image modifiers! Your saved modifiers can include special characters like `{}, (), [], |`
|
- **Custom Image Modifiers** - You can now save your custom image modifiers! Your saved modifiers can include special characters like `{}, (), [], |`
|
||||||
@ -25,6 +44,16 @@
|
|||||||
- Support loading models in the safetensor format, for improved safety
|
- Support loading models in the safetensor format, for improved safety
|
||||||
|
|
||||||
### Detailed changelog
|
### Detailed changelog
|
||||||
|
* 2.4.21 - 23 Dec 2022 - Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking the `Make Image` button and calling the server.
|
||||||
|
* 2.4.20 - 22 Dec 2022 - `Pause All` button to pause all the pending tasks. Thanks @JeLuf
|
||||||
|
* 2.4.20 - 22 Dec 2022 - `Undo`/`Redo` buttons in the image editor. Thanks @JeLuf
|
||||||
|
* 2.4.20 - 22 Dec 2022 - Drag handle to reorder the tasks. This fixed a bug where the metadata was no longer selectable (for copying). Thanks @JeLuf
|
||||||
|
* 2.4.19 - 17 Dec 2022 - Add Undo/Redo buttons in the Image Editor. Thanks @JeLuf
|
||||||
|
* 2.4.19 - 10 Dec 2022 - Show init img in task list
|
||||||
|
* 2.4.19 - 7 Dec 2022 - Use pre-trained hypernetworks while generating images. Thanks @C0bra5
|
||||||
|
* 2.4.19 - 6 Dec 2022 - Allow processing new tasks first. Thanks @madrang
|
||||||
|
* 2.4.19 - 6 Dec 2022 - Allow reordering the task queue (by dragging tasks). Thanks @madrang
|
||||||
|
* 2.4.19 - 6 Dec 2022 - Re-organize the code, to make it easier to write user plugins. Thanks @madrang
|
||||||
* 2.4.18 - 5 Dec 2022 - Make JPEG Output quality user controllable. Thanks @JeLuf
|
* 2.4.18 - 5 Dec 2022 - Make JPEG Output quality user controllable. Thanks @JeLuf
|
||||||
* 2.4.18 - 5 Dec 2022 - Support loading models in the safetensor format, for improved safety. Thanks @JeLuf
|
* 2.4.18 - 5 Dec 2022 - Support loading models in the safetensor format, for improved safety. Thanks @JeLuf
|
||||||
* 2.4.18 - 1 Dec 2022 - Image Editor, for drawing simple images for guiding the AI. Thanks @mdiller
|
* 2.4.18 - 1 Dec 2022 - Image Editor, for drawing simple images for guiding the AI. Thanks @mdiller
|
||||||
|
@ -6,7 +6,7 @@ Thanks
|
|||||||
|
|
||||||
# For developers:
|
# For developers:
|
||||||
|
|
||||||
If you would like to contribute to this project, there is a discord for dicussion:
|
If you would like to contribute to this project, there is a discord for discussion:
|
||||||
[](https://discord.com/invite/u9yhsFmEkB)
|
[](https://discord.com/invite/u9yhsFmEkB)
|
||||||
|
|
||||||
## Development environment for UI (frontend and server) changes
|
## Development environment for UI (frontend and server) changes
|
||||||
|
139
README.md
139
README.md
@ -1,70 +1,107 @@
|
|||||||
# Stable Diffusion UI
|
# Stable Diffusion UI
|
||||||
### Easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. No dependencies or technical knowledge required. 1-click install, powerful features.
|
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
|
||||||
|
|
||||||
[](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](Troubleshooting.md)
|
[](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting)
|
||||||
|
|
||||||
New! Experimental support for Stable Diffusion 2.0 is available in beta!
|
### New:
|
||||||
|
Experimental support for Stable Diffusion 2.0 is available in beta!
|
||||||
|
|
||||||
----
|
----
|
||||||
|
|
||||||
## Step 1: Download the installer
|
# Step 1: Download and prepare the installer
|
||||||
|
Click the download button for your operating system:
|
||||||
|
|
||||||
<p float="left">
|
<p float="left">
|
||||||
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-win.png" width="200" /></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
|
||||||
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-linux.png" width="200" /></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
## Step 2: Run the program
|
## On Windows:
|
||||||
- On Windows: Double-click `Start Stable Diffusion UI.cmd`
|
1. Unzip/extract the folder `stable-diffusion-ui` which should be in your downloads folder, unless you changed your default downloads destination.
|
||||||
- On Linux: Run `./start.sh` in a terminal
|
2. Move the `stable-diffusion-ui` folder to your `C:` drive (or any other drive like `D:`, at the top root level). `C:\stable-diffusion-ui` or `D:\stable-diffusion-ui` as examples. This will avoid a common problem with Windows (file path length limits).
|
||||||
|
## On Linux:
|
||||||
|
1. Unzip/extract the folder `stable-diffusion-ui` which should be in your downloads folder, unless you changed your default downloads destination.
|
||||||
|
2. Open a terminal window, and navigate to the `stable-diffusion-ui` directory.
|
||||||
|
|
||||||
## Step 3: There is no step 3!
|
# Step 2: Run the program
|
||||||
It's simple to get started. You don't need to install or struggle with Python, Anaconda, Docker etc.
|
## On Windows:
|
||||||
|
Double-click `Start Stable Diffusion UI.cmd`.
|
||||||
|
If Windows SmartScreen prevents you from running the program click `More info` and then `Run anyway`.
|
||||||
|
## On Linux:
|
||||||
|
Run `./start.sh` (or `bash start.sh`) in a terminal.
|
||||||
|
|
||||||
The installer will take care of whatever is needed. A friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) will help you if you face any problems.
|
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
|
||||||
|
|
||||||
|
# Step 3: There is no Step 3. It's that simple!
|
||||||
|
|
||||||
|
**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.
|
||||||
|
|
||||||
----
|
----
|
||||||
|
|
||||||
# Easy for new users, powerful features for advanced users
|
# Easy for new users, powerful features for advanced users
|
||||||
### Features:
|
## Features:
|
||||||
- **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
|
|
||||||
- **Clutter-free UI**: a friendly and simple UI, while providing a lot of powerful features
|
### User experience
|
||||||
- Supports "*Text to Image*" and "*Image to Image*"
|
- **Hassle-free installation**: Does not require technical knowledge, does not require pre-installed software. Just download and run!
|
||||||
- **Stable Diffusion 2.0 support (experimental)** - available in beta channel
|
- **Clutter-free UI**: A friendly and simple UI, while providing a lot of powerful features.
|
||||||
- **Custom Models**: Use your own `.ckpt` file, by placing it inside the `models/stable-diffusion` folder!
|
|
||||||
- **Auto scan for malicious models** - uses picklescan to prevent malicious models
|
### Image generation
|
||||||
- **Live Preview**: See the image as the AI is drawing it
|
- **Supports**: "*Text to Image*" and "*Image to Image*".
|
||||||
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish
|
- **In-Painting**: Specify areas of your image to paint into.
|
||||||
- **In-Painting**: Specify areas of your image to paint into
|
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
|
||||||
- **Face Correction (GFPGAN) and Upscaling (RealESRGAN)**
|
- **Face Correction (GFPGAN)**
|
||||||
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
- **Upscaling (RealESRGAN)**
|
||||||
- **Loopback**: Use the output image as the input image for the next img2img task
|
- **Loopback**: Use the output image as the input image for the next img2img task.
|
||||||
- **Negative Prompt**: Specify aspects of the image to *remove*.
|
- **Negative Prompt**: Specify aspects of the image to *remove*.
|
||||||
- **Attention/Emphasis:** () in the prompt increases the model's attention to enclosed words, and [] decreases it
|
- **Attention/Emphasis**: () in the prompt increases the model's attention to enclosed words, and [] decreases it.
|
||||||
- **Weighted Prompts:** Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`
|
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`.
|
||||||
- **Prompt Matrix:** (in beta) Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`
|
- **Prompt Matrix**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`.
|
||||||
- **Lots of Samplers:** ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms
|
- **Lots of Samplers**: ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms.
|
||||||
- **Multiple Prompts File:** Queue multiple prompts by entering one prompt per line, or by running a text file
|
- **1-click Upscale/Face Correction**: Upscale or correct an image after it has been generated.
|
||||||
- **NSFW Setting**: A setting in the UI to control *NSFW content*
|
- **Make Similar Images**: Click to generate multiple variations of a generated image.
|
||||||
- **JPEG/PNG output**
|
- **NSFW Setting**: A setting in the UI to control *NSFW content*.
|
||||||
- **Save generated images to disk**
|
- **JPEG/PNG output**: Multiple file formats.
|
||||||
|
|
||||||
|
### Advanced features
|
||||||
|
- **Custom Models**: Use your own `.ckpt` or `.safetensors` file, by placing it inside the `models/stable-diffusion` folder!
|
||||||
|
- **Stable Diffusion 2.0 support (experimental)**: available in beta channel.
|
||||||
|
- **Use custom VAE models**
|
||||||
|
- **Use pre-trained Hypernetworks**
|
||||||
|
- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Plugins), or write your own plugin to add features to the project!
|
||||||
|
|
||||||
|
### Performance and security
|
||||||
|
- **Low Memory Usage**: Creates 512x512 images with less than 4GB of GPU RAM!
|
||||||
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
||||||
|
- **Multi-GPU support**: Automatically spreads your tasks across multiple GPUs (if available), for faster performance!
|
||||||
|
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
|
||||||
|
- **Safetensors support**: Support loading models in the safetensor format, for improved safety.
|
||||||
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
||||||
- **Low Memory Usage**: Creates 512x512 images with less than 4GB of VRAM!
|
|
||||||
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
|
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
|
||||||
|
|
||||||
### Easy for new users:
|
### Usability:
|
||||||
|
- **Live Preview**: See the image as the AI is drawing it.
|
||||||
|
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish.
|
||||||
|
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
||||||
|
- **Multiple Prompts File**: Queue multiple prompts by entering one prompt per line, or by running a text file.
|
||||||
|
- **Save generated images to disk**: Save your images to your PC!
|
||||||
|
- **UI Themes**: Customize the program to your liking.
|
||||||
|
|
||||||
|
**(and a lot more)**
|
||||||
|
|
||||||
|
----
|
||||||
|
|
||||||
|
## Easy for new users:
|
||||||

|

|
||||||
|
|
||||||
### Powerful features for advanced users:
|
## Powerful features for advanced users:
|
||||||

|

|
||||||
|
|
||||||
### Live Preview
|
## Live Preview
|
||||||
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.
|
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### Task Queue
|
## Task Queue
|
||||||

|

|
||||||
|
|
||||||
# System Requirements
|
# System Requirements
|
||||||
@ -74,23 +111,10 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
|
|||||||
|
|
||||||
You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.
|
You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.
|
||||||
|
|
||||||
# Installation
|
----
|
||||||
1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-linux.zip).
|
|
||||||
|
|
||||||
2. **Extract**:
|
|
||||||
- For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
|
|
||||||
- For Linux: After extracting the .tar.xz file, please open a terminal, and go to the `stable-diffusion-ui` directory.
|
|
||||||
|
|
||||||
3. **Run**:
|
|
||||||
- For Windows: `Start Stable Diffusion UI.cmd` by double-clicking it.
|
|
||||||
- For Linux: In the terminal, run `./start.sh` (or `bash start.sh`)
|
|
||||||
|
|
||||||
This will automatically install Stable Diffusion, set it up, and start the interface. No additional steps are needed.
|
|
||||||
|
|
||||||
**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.
|
|
||||||
|
|
||||||
# How to use?
|
# How to use?
|
||||||
Please use our [guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use) to understand how to use the features in this UI.
|
Please refer to our [guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use) to understand how to use the features in this UI.
|
||||||
|
|
||||||
# Bugs reports and code contributions welcome
|
# Bugs reports and code contributions welcome
|
||||||
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
||||||
@ -106,4 +130,11 @@ If you have any code contributions in mind, please feel free to say Hi to us on
|
|||||||
# Disclaimer
|
# Disclaimer
|
||||||
The authors of this project are not responsible for any content generated using this interface.
|
The authors of this project are not responsible for any content generated using this interface.
|
||||||
|
|
||||||
The license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation, or target vulnerable groups. For the full list of restrictions please read [the license](LICENSE). You agree to these terms by using this software.
|
The license of this software forbids you from sharing any content that:
|
||||||
|
- Violates any laws.
|
||||||
|
- Produces any harm to a person or persons.
|
||||||
|
- Disseminates (spreads) any personal information that would be meant for harm.
|
||||||
|
- Spreads misinformation.
|
||||||
|
- Target vulnerable groups.
|
||||||
|
|
||||||
|
For the full list of restrictions please read [the License](LICENSE). You agree to these terms by using this software.
|
||||||
|
@ -1 +0,0 @@
|
|||||||
Moved to https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
|
|
@ -23,23 +23,20 @@ call conda --version
|
|||||||
|
|
||||||
echo.
|
echo.
|
||||||
|
|
||||||
@rem activate the environment
|
@rem activate the legacy environment (if present) and set PYTHONPATH
|
||||||
call conda activate .\stable-diffusion\env
|
if exist "installer_files\env" (
|
||||||
|
set PYTHONPATH=%cd%\installer_files\env\lib\site-packages
|
||||||
|
)
|
||||||
|
if exist "stable-diffusion\env" (
|
||||||
|
call conda activate .\stable-diffusion\env
|
||||||
|
set PYTHONPATH=%cd%\stable-diffusion\env\lib\site-packages
|
||||||
|
)
|
||||||
|
|
||||||
call where python
|
call where python
|
||||||
call python --version
|
call python --version
|
||||||
|
|
||||||
@rem set the PYTHONPATH
|
|
||||||
cd stable-diffusion
|
|
||||||
set SD_DIR=%cd%
|
|
||||||
|
|
||||||
cd env\lib\site-packages
|
|
||||||
set PYTHONPATH=%SD_DIR%;%cd%
|
|
||||||
cd ..\..\..
|
|
||||||
echo PYTHONPATH=%PYTHONPATH%
|
echo PYTHONPATH=%PYTHONPATH%
|
||||||
|
|
||||||
cd ..
|
|
||||||
|
|
||||||
@rem done
|
@rem done
|
||||||
echo.
|
echo.
|
||||||
|
|
||||||
|
@ -24,7 +24,7 @@ if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Librar
|
|||||||
set PACKAGES_TO_INSTALL=
|
set PACKAGES_TO_INSTALL=
|
||||||
|
|
||||||
if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
|
if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
|
||||||
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda python=3.8.5
|
||||||
)
|
)
|
||||||
|
|
||||||
call git --version >.tmp1 2>.tmp2
|
call git --version >.tmp1 2>.tmp2
|
||||||
|
@ -39,7 +39,7 @@ if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
|||||||
|
|
||||||
PACKAGES_TO_INSTALL=""
|
PACKAGES_TO_INSTALL=""
|
||||||
|
|
||||||
if [ ! -e "$LEGACY_INSTALL_ENV_DIR/etc/profile.d/conda.sh" ] && [ ! -e "$INSTALL_ENV_DIR/etc/profile.d/conda.sh" ]; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
if [ ! -e "$LEGACY_INSTALL_ENV_DIR/etc/profile.d/conda.sh" ] && [ ! -e "$INSTALL_ENV_DIR/etc/profile.d/conda.sh" ]; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda python=3.8.5"; fi
|
||||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||||
|
|
||||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||||
|
13
scripts/check_modules.py
Normal file
13
scripts/check_modules.py
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
'''
|
||||||
|
This script checks if the given modules exist
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import pkgutil
|
||||||
|
|
||||||
|
modules = sys.argv[1:]
|
||||||
|
missing_modules = []
|
||||||
|
for m in modules:
|
||||||
|
if pkgutil.find_loader(m) is None:
|
||||||
|
print('module', m, 'not found')
|
||||||
|
exit(1)
|
@ -26,21 +26,23 @@ if [ "$0" == "bash" ]; then
|
|||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
# activate the environment
|
# activate the legacy environment (if present) and set PYTHONPATH
|
||||||
CONDA_BASEPATH=$(conda info --base)
|
if [ -e "installer_files/env" ]; then
|
||||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
export PYTHONPATH="$(pwd)/installer_files/env/lib/python3.8/site-packages"
|
||||||
|
fi
|
||||||
|
if [ -e "stable-diffusion/env" ]; then
|
||||||
|
CONDA_BASEPATH=$(conda info --base)
|
||||||
|
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||||
|
|
||||||
conda activate ./stable-diffusion/env
|
conda activate ./stable-diffusion/env
|
||||||
|
|
||||||
|
export PYTHONPATH="$(pwd)/stable-diffusion/env/lib/python3.8/site-packages"
|
||||||
|
fi
|
||||||
|
|
||||||
which python
|
which python
|
||||||
python --version
|
python --version
|
||||||
|
|
||||||
# set the PYTHONPATH
|
|
||||||
cd stable-diffusion
|
|
||||||
SD_PATH=`pwd`
|
|
||||||
export PYTHONPATH="$SD_PATH:$SD_PATH/env/lib/python3.8/site-packages"
|
|
||||||
echo "PYTHONPATH=$PYTHONPATH"
|
echo "PYTHONPATH=$PYTHONPATH"
|
||||||
cd ..
|
|
||||||
|
|
||||||
# done
|
# done
|
||||||
|
|
||||||
|
@ -53,6 +53,7 @@ if "%update_branch%"=="" (
|
|||||||
@xcopy sd-ui-files\ui ui /s /i /Y /q
|
@xcopy sd-ui-files\ui ui /s /i /Y /q
|
||||||
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
|
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
|
||||||
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
||||||
|
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
|
||||||
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
|
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
|
||||||
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
|
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
|
||||||
|
|
||||||
|
@ -37,6 +37,7 @@ rm -rf ui
|
|||||||
cp -Rf sd-ui-files/ui .
|
cp -Rf sd-ui-files/ui .
|
||||||
cp sd-ui-files/scripts/on_sd_start.sh scripts/
|
cp sd-ui-files/scripts/on_sd_start.sh scripts/
|
||||||
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
||||||
|
cp sd-ui-files/scripts/check_modules.py scripts/
|
||||||
cp sd-ui-files/scripts/start.sh .
|
cp sd-ui-files/scripts/start.sh .
|
||||||
cp sd-ui-files/scripts/developer_console.sh .
|
cp sd-ui-files/scripts/developer_console.sh .
|
||||||
|
|
||||||
|
@ -5,11 +5,20 @@
|
|||||||
|
|
||||||
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
|
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
|
||||||
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
||||||
|
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
|
||||||
|
|
||||||
if exist "%cd%\profile" (
|
if exist "%cd%\profile" (
|
||||||
set USERPROFILE=%cd%\profile
|
set USERPROFILE=%cd%\profile
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@rem set the correct installer path (current vs legacy)
|
||||||
|
if exist "%cd%\installer_files\env" (
|
||||||
|
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||||
|
)
|
||||||
|
if exist "%cd%\stable-diffusion\env" (
|
||||||
|
set INSTALL_ENV_DIR=%cd%\stable-diffusion\env
|
||||||
|
)
|
||||||
|
|
||||||
@mkdir tmp
|
@mkdir tmp
|
||||||
@set TMP=%cd%\tmp
|
@set TMP=%cd%\tmp
|
||||||
@set TEMP=%cd%\tmp
|
@set TEMP=%cd%\tmp
|
||||||
@ -27,137 +36,92 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
|
|||||||
|
|
||||||
@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
|
@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
|
||||||
|
|
||||||
if NOT DEFINED test_sd2 set test_sd2=N
|
@rem create the stable-diffusion folder, to work with legacy installations
|
||||||
|
if not exist "stable-diffusion" mkdir stable-diffusion
|
||||||
|
cd stable-diffusion
|
||||||
|
|
||||||
@>nul findstr /m "sd_git_cloned" scripts\install_status.txt
|
@rem activate the old stable-diffusion env, if it exists
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
if exist "env" (
|
||||||
@echo "Stable Diffusion's git repository was already installed. Updating.."
|
call conda activate .\env
|
||||||
|
|
||||||
@cd stable-diffusion
|
|
||||||
|
|
||||||
@call git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git
|
|
||||||
|
|
||||||
@call git reset --hard
|
|
||||||
@call git pull
|
|
||||||
|
|
||||||
if "%test_sd2%" == "N" (
|
|
||||||
@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
|
|
||||||
)
|
|
||||||
if "%test_sd2%" == "Y" (
|
|
||||||
@call git -c advice.detachedHead=false checkout b1a80dfc75388914252ce363f923103185eaf48f
|
|
||||||
)
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading Stable Diffusion.." & echo.
|
|
||||||
|
|
||||||
@call git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion && (
|
|
||||||
@echo sd_git_cloned >> scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@echo "Error downloading Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
|
||||||
pause
|
|
||||||
@exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@cd stable-diffusion
|
|
||||||
@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@cd stable-diffusion
|
@rem disable the legacy src and ldm folder (otherwise this prevents installing gfpgan and realesrgan)
|
||||||
|
if exist src rename src src-old
|
||||||
|
if exist ldm rename ldm ldm-old
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_env_created" ..\scripts\install_status.txt
|
@rem install torch and torchvision
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
call python ..\scripts\check_modules.py torch torchvision
|
||||||
@echo "Packages necessary for Stable Diffusion were already installed"
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "torch and torchvision have already been installed."
|
||||||
@call conda activate .\env
|
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion.." & echo. & echo "***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** .." & echo.
|
echo "Installing torch and torchvision.."
|
||||||
|
|
||||||
@rmdir /s /q .\env
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
@REM prevent conda from using packages from the user's home directory, to avoid conflicts
|
call pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 || (
|
||||||
@set PYTHONNOUSERSITE=1
|
echo "Error installing torch. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call conda env create --prefix env -f environment.yaml || (
|
|
||||||
@echo. & echo "Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
)
|
||||||
|
|
||||||
@call conda activate .\env
|
@rem install/upgrade sdkit
|
||||||
|
call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan
|
||||||
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "sdkit is already installed."
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"') do if "%%a" NEQ "42" (
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
|
call >nul pip install --upgrade sdkit || (
|
||||||
|
echo "Error updating sdkit"
|
||||||
|
)
|
||||||
|
) else (
|
||||||
|
echo "Installing sdkit: https://pypi.org/project/sdkit/"
|
||||||
|
|
||||||
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
|
call pip install sdkit || (
|
||||||
|
echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
)
|
||||||
|
|
||||||
@echo conda_sd_env_created >> ..\scripts\install_status.txt
|
@rem install rich
|
||||||
|
call python ..\scripts\check_modules.py rich
|
||||||
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "rich has already been installed."
|
||||||
|
) else (
|
||||||
|
echo "Installing rich.."
|
||||||
|
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
|
call pip install rich || (
|
||||||
|
echo "Error installing rich. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
|
pause
|
||||||
|
exit /b
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
set PATH=C:\Windows\System32;%PATH%
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_gfpgan_deps_installed" ..\scripts\install_status.txt
|
call python ..\scripts\check_modules.py uvicorn fastapi
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Packages necessary for GFPGAN (Face Correction) were already installed"
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading packages necessary for GFPGAN (Face Correction).." & echo.
|
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "from gfpgan import GFPGANer; print(42)"') do if "%%a" NEQ "42" (
|
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo conda_sd_gfpgan_deps_installed >> ..\scripts\install_status.txt
|
|
||||||
)
|
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_esrgan_deps_installed" ..\scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Packages necessary for ESRGAN (Resolution Upscaling) were already installed"
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading packages necessary for ESRGAN (Resolution Upscaling).." & echo.
|
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"') do if "%%a" NEQ "42" (
|
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo conda_sd_esrgan_deps_installed >> ..\scripts\install_status.txt
|
|
||||||
)
|
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
@if "%ERRORLEVEL%" EQU "0" (
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
echo "Packages necessary for Stable Diffusion UI were already installed"
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion UI.." & echo.
|
@echo. & echo "Downloading packages necessary for Stable Diffusion UI.." & echo.
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
@call conda install -c conda-forge -y uvicorn fastapi || (
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call conda install -c conda-forge -y --prefix env uvicorn fastapi || (
|
|
||||||
echo "Error installing the packages necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
echo "Error installing the packages necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
@ -172,26 +136,6 @@ call WHERE uvicorn > .tmp
|
|||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
|
||||||
@>nul 2>nul call python -m picklescan --help
|
|
||||||
@if "%ERRORLEVEL%" NEQ "0" (
|
|
||||||
@echo. & echo Picklescan not found. Installing
|
|
||||||
@call pip install picklescan || (
|
|
||||||
echo "Error installing the picklescan package necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@>nul 2>nul call python -c "import safetensors"
|
|
||||||
@if "%ERRORLEVEL%" NEQ "0" (
|
|
||||||
@echo. & echo SafeTensors not found. Installing
|
|
||||||
@call pip install safetensors || (
|
|
||||||
echo "Error installing the safetensors package necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
||||||
@if "%ERRORLEVEL%" NEQ "0" (
|
@if "%ERRORLEVEL%" NEQ "0" (
|
||||||
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
||||||
@ -199,10 +143,7 @@ call WHERE uvicorn > .tmp
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
|
|
||||||
if not exist "..\models\vae" mkdir "..\models\vae"
|
if not exist "..\models\vae" mkdir "..\models\vae"
|
||||||
echo. > "..\models\stable-diffusion\Put your custom ckpt files here.txt"
|
|
||||||
echo. > "..\models\vae\Put your VAE files here.txt"
|
|
||||||
|
|
||||||
@if exist "sd-v1-4.ckpt" (
|
@if exist "sd-v1-4.ckpt" (
|
||||||
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
|
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
|
||||||
@ -360,10 +301,6 @@ echo. > "..\models\vae\Put your VAE files here.txt"
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
if "%test_sd2%" == "Y" (
|
|
||||||
@call pip install open_clip_torch==2.0.2
|
|
||||||
)
|
|
||||||
|
|
||||||
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
|
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
|
||||||
@if "%ERRORLEVEL%" NEQ "0" (
|
@if "%ERRORLEVEL%" NEQ "0" (
|
||||||
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
||||||
@ -374,10 +311,8 @@ if "%test_sd2%" == "Y" (
|
|||||||
|
|
||||||
@set SD_DIR=%cd%
|
@set SD_DIR=%cd%
|
||||||
|
|
||||||
@cd env\lib\site-packages
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
@set PYTHONPATH=%SD_DIR%;%cd%
|
echo PYTHONPATH=%PYTHONPATH%
|
||||||
@cd ..\..\..
|
|
||||||
@echo PYTHONPATH=%PYTHONPATH%
|
|
||||||
|
|
||||||
call where python
|
call where python
|
||||||
call python --version
|
call python --version
|
||||||
@ -386,17 +321,9 @@ call python --version
|
|||||||
@set SD_UI_PATH=%cd%\ui
|
@set SD_UI_PATH=%cd%\ui
|
||||||
@cd stable-diffusion
|
@cd stable-diffusion
|
||||||
|
|
||||||
@rem
|
|
||||||
@rem Rewrite easy-install.pth. This fixes the installation if the user has relocated the SDUI installation
|
|
||||||
@rem
|
|
||||||
>env\Lib\site-packages\easy-install.pth echo %cd%\src\taming-transformers
|
|
||||||
>>env\Lib\site-packages\easy-install.pth echo %cd%\src\clip
|
|
||||||
>>env\Lib\site-packages\easy-install.pth echo %cd%\src\gfpgan
|
|
||||||
>>env\Lib\site-packages\easy-install.pth echo %cd%\src\realesrgan
|
|
||||||
|
|
||||||
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
|
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
|
||||||
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
|
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
|
||||||
@uvicorn server:app --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP%
|
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error
|
||||||
|
|
||||||
|
|
||||||
@pause
|
@pause
|
||||||
|
@ -4,6 +4,7 @@ source ./scripts/functions.sh
|
|||||||
|
|
||||||
cp sd-ui-files/scripts/on_env_start.sh scripts/
|
cp sd-ui-files/scripts/on_env_start.sh scripts/
|
||||||
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
||||||
|
cp sd-ui-files/scripts/check_modules.py scripts/
|
||||||
|
|
||||||
# activate the installer env
|
# activate the installer env
|
||||||
CONDA_BASEPATH=$(conda info --base)
|
CONDA_BASEPATH=$(conda info --base)
|
||||||
@ -21,116 +22,89 @@ python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d
|
|||||||
# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
||||||
# Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
# Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
||||||
|
|
||||||
if [ "$test_sd2" == "" ]; then
|
# set the correct installer path (current vs legacy)
|
||||||
export test_sd2="N"
|
if [ -e "installer_files/env" ]; then
|
||||||
fi
|
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||||
|
fi
|
||||||
if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
if [ -e "stable-diffusion/env" ]; then
|
||||||
echo "Stable Diffusion's git repository was already installed. Updating.."
|
export INSTALL_ENV_DIR="$(pwd)/stable-diffusion/env"
|
||||||
|
|
||||||
cd stable-diffusion
|
|
||||||
|
|
||||||
git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git
|
|
||||||
|
|
||||||
git reset --hard
|
|
||||||
git pull
|
|
||||||
|
|
||||||
if [ "$test_sd2" == "N" ]; then
|
|
||||||
git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
|
|
||||||
elif [ "$test_sd2" == "Y" ]; then
|
|
||||||
git -c advice.detachedHead=false checkout b1a80dfc75388914252ce363f923103185eaf48f
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading Stable Diffusion..\n\n"
|
|
||||||
|
|
||||||
if git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion ; then
|
|
||||||
echo sd_git_cloned >> scripts/install_status.txt
|
|
||||||
else
|
|
||||||
fail "git clone of basujindal/stable-diffusion.git failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd stable-diffusion
|
|
||||||
git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# create the stable-diffusion folder, to work with legacy installations
|
||||||
|
if [ ! -e "stable-diffusion" ]; then mkdir stable-diffusion; fi
|
||||||
cd stable-diffusion
|
cd stable-diffusion
|
||||||
|
|
||||||
if [ `grep -c conda_sd_env_created ../scripts/install_status.txt` -gt "0" ]; then
|
# activate the old stable-diffusion env, if it exists
|
||||||
echo "Packages necessary for Stable Diffusion were already installed"
|
if [ -e "env" ]; then
|
||||||
|
|
||||||
conda activate ./env || fail "conda activate failed"
|
conda activate ./env || fail "conda activate failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# disable the legacy src and ldm folder (otherwise this prevents installing gfpgan and realesrgan)
|
||||||
|
if [ -e "src" ]; then mv src src-old; fi
|
||||||
|
if [ -e "ldm" ]; then mv ldm ldm-old; fi
|
||||||
|
|
||||||
|
# install torch and torchvision
|
||||||
|
if python ../scripts/check_modules.py torch torchvision; then
|
||||||
|
echo "torch and torchvision have already been installed."
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion..\n"
|
echo "Installing torch and torchvision.."
|
||||||
printf "\n\n***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** ..\n\n"
|
|
||||||
|
|
||||||
# prevent conda from using packages from the user's home directory, to avoid conflicts
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
if conda env create --prefix env --force -f environment.yaml ; then
|
if pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 ; then
|
||||||
echo "Installed. Testing.."
|
echo "Installed."
|
||||||
else
|
else
|
||||||
fail "'conda env create' failed"
|
fail "torch install failed"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
conda activate ./env || fail "conda activate failed"
|
|
||||||
|
|
||||||
out_test=`python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"`
|
|
||||||
if [ "$out_test" != "42" ]; then
|
|
||||||
fail "Dependency test failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo conda_sd_env_created >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ `grep -c conda_sd_gfpgan_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
# install/upgrade sdkit
|
||||||
echo "Packages necessary for GFPGAN (Face Correction) were already installed"
|
if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
|
||||||
else
|
echo "sdkit is already installed."
|
||||||
printf "\n\nDownloading packages necessary for GFPGAN (Face Correction)..\n"
|
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
out_test=`python -c "from gfpgan import GFPGANer; print(42)"`
|
pip install --upgrade sdkit > /dev/null
|
||||||
if [ "$out_test" != "42" ]; then
|
|
||||||
echo "EE The dependency check has failed. This usually means that some system libraries are missing."
|
|
||||||
echo "EE On Debian/Ubuntu systems, this are often these packages: libsm6 libxext6 libxrender-dev"
|
|
||||||
echo "EE Other Linux distributions might have different package names for these libraries."
|
|
||||||
fail "GFPGAN dependency test failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo conda_sd_gfpgan_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ `grep -c conda_sd_esrgan_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
|
||||||
echo "Packages necessary for ESRGAN (Resolution Upscaling) were already installed"
|
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for ESRGAN (Resolution Upscaling)..\n"
|
echo "Installing sdkit: https://pypi.org/project/sdkit/"
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
out_test=`python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"`
|
if pip install sdkit ; then
|
||||||
if [ "$out_test" != "42" ]; then
|
echo "Installed."
|
||||||
fail "ESRGAN dependency test failed"
|
else
|
||||||
|
fail "sdkit install failed"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo conda_sd_esrgan_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ `grep -c conda_sd_ui_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
# install rich
|
||||||
|
if python ../scripts/check_modules.py rich; then
|
||||||
|
echo "rich has already been installed."
|
||||||
|
else
|
||||||
|
echo "Installing rich.."
|
||||||
|
|
||||||
|
export PYTHONNOUSERSITE=1
|
||||||
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
|
if pip install rich ; then
|
||||||
|
echo "Installed."
|
||||||
|
else
|
||||||
|
fail "Install failed for rich"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if python ../scripts/check_modules.py uvicorn fastapi ; then
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
echo "Packages necessary for Stable Diffusion UI were already installed"
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion UI..\n\n"
|
printf "\n\nDownloading packages necessary for Stable Diffusion UI..\n\n"
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
if conda install -c conda-forge --prefix ./env -y uvicorn fastapi ; then
|
if conda install -c conda-forge -y uvicorn fastapi ; then
|
||||||
echo "Installed. Testing.."
|
echo "Installed. Testing.."
|
||||||
else
|
else
|
||||||
fail "'conda install uvicorn' failed"
|
fail "'conda install uvicorn' failed"
|
||||||
@ -139,30 +113,9 @@ else
|
|||||||
if ! command -v uvicorn &> /dev/null; then
|
if ! command -v uvicorn &> /dev/null; then
|
||||||
fail "UI packages not found!"
|
fail "UI packages not found!"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo conda_sd_ui_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if python -m picklescan --help >/dev/null 2>&1; then
|
|
||||||
echo "Picklescan is already installed."
|
|
||||||
else
|
|
||||||
echo "Picklescan not found, installing."
|
|
||||||
pip install picklescan || fail "Picklescan installation failed."
|
|
||||||
fi
|
|
||||||
|
|
||||||
if python -c "import safetensors" --help >/dev/null 2>&1; then
|
|
||||||
echo "SafeTensors is already installed."
|
|
||||||
else
|
|
||||||
echo "SafeTensors not found, installing."
|
|
||||||
pip install safetensors || fail "SafeTensors installation failed."
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
mkdir -p "../models/stable-diffusion"
|
|
||||||
mkdir -p "../models/vae"
|
mkdir -p "../models/vae"
|
||||||
echo "" > "../models/stable-diffusion/Put your custom ckpt files here.txt"
|
|
||||||
echo "" > "../models/vae/Put your VAE files here.txt"
|
|
||||||
|
|
||||||
if [ -f "sd-v1-4.ckpt" ]; then
|
if [ -f "sd-v1-4.ckpt" ]; then
|
||||||
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
|
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
|
||||||
@ -303,10 +256,6 @@ if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$test_sd2" == "Y" ]; then
|
|
||||||
pip install open_clip_torch==2.0.2
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
|
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
|
||||||
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
||||||
echo sd_install_complete >> ../scripts/install_status.txt
|
echo sd_install_complete >> ../scripts/install_status.txt
|
||||||
@ -315,7 +264,8 @@ fi
|
|||||||
printf "\n\nStable Diffusion is ready!\n\n"
|
printf "\n\nStable Diffusion is ready!\n\n"
|
||||||
|
|
||||||
SD_PATH=`pwd`
|
SD_PATH=`pwd`
|
||||||
export PYTHONPATH="$SD_PATH:$SD_PATH/env/lib/python3.8/site-packages"
|
|
||||||
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
echo "PYTHONPATH=$PYTHONPATH"
|
echo "PYTHONPATH=$PYTHONPATH"
|
||||||
|
|
||||||
which python
|
which python
|
||||||
@ -325,6 +275,6 @@ cd ..
|
|||||||
export SD_UI_PATH=`pwd`/ui
|
export SD_UI_PATH=`pwd`/ui
|
||||||
cd stable-diffusion
|
cd stable-diffusion
|
||||||
|
|
||||||
uvicorn server:app --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0}
|
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error
|
||||||
|
|
||||||
read -p "Press any key to continue"
|
read -p "Press any key to continue"
|
||||||
|
0
ui/easydiffusion/__init__.py
Normal file
0
ui/easydiffusion/__init__.py
Normal file
165
ui/easydiffusion/app.py
Normal file
165
ui/easydiffusion/app.py
Normal file
@ -0,0 +1,165 @@
|
|||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import json
|
||||||
|
import traceback
|
||||||
|
import logging
|
||||||
|
from rich.logging import RichHandler
|
||||||
|
|
||||||
|
from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
|
||||||
|
|
||||||
|
from easydiffusion import task_manager
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
# Remove all handlers associated with the root logger object.
|
||||||
|
for handler in logging.root.handlers[:]:
|
||||||
|
logging.root.removeHandler(handler)
|
||||||
|
|
||||||
|
LOG_FORMAT = '%(asctime)s.%(msecs)03d %(levelname)s %(threadName)s %(message)s'
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format=LOG_FORMAT,
|
||||||
|
datefmt="%X",
|
||||||
|
handlers=[RichHandler(markup=True, rich_tracebacks=True, show_time=False, show_level=False)]
|
||||||
|
)
|
||||||
|
|
||||||
|
SD_DIR = os.getcwd()
|
||||||
|
|
||||||
|
SD_UI_DIR = os.getenv('SD_UI_PATH', None)
|
||||||
|
sys.path.append(os.path.dirname(SD_UI_DIR))
|
||||||
|
|
||||||
|
CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
|
||||||
|
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))
|
||||||
|
|
||||||
|
USER_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'plugins', 'ui'))
|
||||||
|
CORE_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, 'plugins', 'ui'))
|
||||||
|
UI_PLUGINS_SOURCES = ((CORE_UI_PLUGINS_DIR, 'core'), (USER_UI_PLUGINS_DIR, 'user'))
|
||||||
|
|
||||||
|
OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
|
||||||
|
TASK_TTL = 15 * 60 # Discard last session's task timeout
|
||||||
|
APP_CONFIG_DEFAULTS = {
|
||||||
|
# auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
|
||||||
|
'render_devices': 'auto', # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
|
||||||
|
'update_branch': 'main',
|
||||||
|
'ui': {
|
||||||
|
'open_browser_on_start': True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def init():
|
||||||
|
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
|
||||||
|
|
||||||
|
update_render_threads()
|
||||||
|
|
||||||
|
def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||||
|
try:
|
||||||
|
config_json_path = os.path.join(CONFIG_DIR, 'config.json')
|
||||||
|
if not os.path.exists(config_json_path):
|
||||||
|
return default_val
|
||||||
|
with open(config_json_path, 'r', encoding='utf-8') as f:
|
||||||
|
config = json.load(f)
|
||||||
|
if 'net' not in config:
|
||||||
|
config['net'] = {}
|
||||||
|
if os.getenv('SD_UI_BIND_PORT') is not None:
|
||||||
|
config['net']['listen_port'] = int(os.getenv('SD_UI_BIND_PORT'))
|
||||||
|
if os.getenv('SD_UI_BIND_IP') is not None:
|
||||||
|
config['net']['listen_to_network'] = (os.getenv('SD_UI_BIND_IP') == '0.0.0.0')
|
||||||
|
return config
|
||||||
|
except Exception as e:
|
||||||
|
log.warn(traceback.format_exc())
|
||||||
|
return default_val
|
||||||
|
|
||||||
|
def setConfig(config):
|
||||||
|
try: # config.json
|
||||||
|
config_json_path = os.path.join(CONFIG_DIR, 'config.json')
|
||||||
|
with open(config_json_path, 'w', encoding='utf-8') as f:
|
||||||
|
json.dump(config, f)
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
try: # config.bat
|
||||||
|
config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
|
||||||
|
config_bat = []
|
||||||
|
|
||||||
|
if 'update_branch' in config:
|
||||||
|
config_bat.append(f"@set update_branch={config['update_branch']}")
|
||||||
|
|
||||||
|
config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
|
||||||
|
bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
|
||||||
|
config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
|
||||||
|
|
||||||
|
if len(config_bat) > 0:
|
||||||
|
with open(config_bat_path, 'w', encoding='utf-8') as f:
|
||||||
|
f.write('\r\n'.join(config_bat))
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
try: # config.sh
|
||||||
|
config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')
|
||||||
|
config_sh = ['#!/bin/bash']
|
||||||
|
|
||||||
|
if 'update_branch' in config:
|
||||||
|
config_sh.append(f"export update_branch={config['update_branch']}")
|
||||||
|
|
||||||
|
config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
|
||||||
|
bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
|
||||||
|
config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
|
||||||
|
|
||||||
|
if len(config_sh) > 1:
|
||||||
|
with open(config_sh_path, 'w', encoding='utf-8') as f:
|
||||||
|
f.write('\n'.join(config_sh))
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
|
||||||
|
config = getConfig()
|
||||||
|
if 'model' not in config:
|
||||||
|
config['model'] = {}
|
||||||
|
|
||||||
|
config['model']['stable-diffusion'] = ckpt_model_name
|
||||||
|
config['model']['vae'] = vae_model_name
|
||||||
|
config['model']['hypernetwork'] = hypernetwork_model_name
|
||||||
|
|
||||||
|
if vae_model_name is None or vae_model_name == "":
|
||||||
|
del config['model']['vae']
|
||||||
|
if hypernetwork_model_name is None or hypernetwork_model_name == "":
|
||||||
|
del config['model']['hypernetwork']
|
||||||
|
|
||||||
|
config['vram_usage_level'] = vram_usage_level
|
||||||
|
|
||||||
|
setConfig(config)
|
||||||
|
|
||||||
|
def update_render_threads():
|
||||||
|
config = getConfig()
|
||||||
|
render_devices = config.get('render_devices', 'auto')
|
||||||
|
active_devices = task_manager.get_devices()['active'].keys()
|
||||||
|
|
||||||
|
log.debug(f'requesting for render_devices: {render_devices}')
|
||||||
|
task_manager.update_render_threads(render_devices, active_devices)
|
||||||
|
|
||||||
|
def getUIPlugins():
|
||||||
|
plugins = []
|
||||||
|
|
||||||
|
for plugins_dir, dir_prefix in UI_PLUGINS_SOURCES:
|
||||||
|
for file in os.listdir(plugins_dir):
|
||||||
|
if file.endswith('.plugin.js'):
|
||||||
|
plugins.append(f'/plugins/{dir_prefix}/{file}')
|
||||||
|
|
||||||
|
return plugins
|
||||||
|
|
||||||
|
def getIPConfig():
|
||||||
|
try:
|
||||||
|
ips = socket.gethostbyname_ex(socket.gethostname())
|
||||||
|
ips[2].append(ips[0])
|
||||||
|
return ips[2]
|
||||||
|
except Exception as e:
|
||||||
|
log.exception(e)
|
||||||
|
return []
|
||||||
|
|
||||||
|
def open_browser():
|
||||||
|
config = getConfig()
|
||||||
|
ui = config.get('ui', {})
|
||||||
|
net = config.get('net', {'listen_port':9000})
|
||||||
|
port = net.get('listen_port', 9000)
|
||||||
|
if ui.get('open_browser_on_start', True):
|
||||||
|
import webbrowser; webbrowser.open(f"http://localhost:{port}")
|
@ -3,6 +3,15 @@ import torch
|
|||||||
import traceback
|
import traceback
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
'''
|
||||||
|
Set `FORCE_FULL_PRECISION` in the environment variables, or in `config.bat`/`config.sh` to set full precision (i.e. float32).
|
||||||
|
Otherwise the models will load at half-precision (i.e. float16).
|
||||||
|
|
||||||
|
Half-precision is fine most of the time. Full precision is only needed for working around GPU bugs (like NVIDIA 16xx GPUs).
|
||||||
|
'''
|
||||||
|
|
||||||
COMPARABLE_GPU_PERCENTILE = 0.65 # if a GPU's free_mem is within this % of the GPU with the most free_mem, it will be picked
|
COMPARABLE_GPU_PERCENTILE = 0.65 # if a GPU's free_mem is within this % of the GPU with the most free_mem, it will be picked
|
||||||
|
|
||||||
mem_free_threshold = 0
|
mem_free_threshold = 0
|
||||||
@ -34,7 +43,7 @@ def get_device_delta(render_devices, active_devices):
|
|||||||
if 'auto' in render_devices:
|
if 'auto' in render_devices:
|
||||||
render_devices = auto_pick_devices(active_devices)
|
render_devices = auto_pick_devices(active_devices)
|
||||||
if 'cpu' in render_devices:
|
if 'cpu' in render_devices:
|
||||||
print('WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!')
|
log.warn('WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!')
|
||||||
|
|
||||||
active_devices = set(active_devices)
|
active_devices = set(active_devices)
|
||||||
render_devices = set(render_devices)
|
render_devices = set(render_devices)
|
||||||
@ -53,7 +62,7 @@ def auto_pick_devices(currently_active_devices):
|
|||||||
if device_count == 1:
|
if device_count == 1:
|
||||||
return ['cuda:0'] if is_device_compatible('cuda:0') else ['cpu']
|
return ['cuda:0'] if is_device_compatible('cuda:0') else ['cpu']
|
||||||
|
|
||||||
print('Autoselecting GPU. Using most free memory.')
|
log.debug('Autoselecting GPU. Using most free memory.')
|
||||||
devices = []
|
devices = []
|
||||||
for device in range(device_count):
|
for device in range(device_count):
|
||||||
device = f'cuda:{device}'
|
device = f'cuda:{device}'
|
||||||
@ -64,7 +73,7 @@ def auto_pick_devices(currently_active_devices):
|
|||||||
mem_free /= float(10**9)
|
mem_free /= float(10**9)
|
||||||
mem_total /= float(10**9)
|
mem_total /= float(10**9)
|
||||||
device_name = torch.cuda.get_device_name(device)
|
device_name = torch.cuda.get_device_name(device)
|
||||||
print(f'{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb')
|
log.debug(f'{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb')
|
||||||
devices.append({'device': device, 'device_name': device_name, 'mem_free': mem_free})
|
devices.append({'device': device, 'device_name': device_name, 'mem_free': mem_free})
|
||||||
|
|
||||||
devices.sort(key=lambda x:x['mem_free'], reverse=True)
|
devices.sort(key=lambda x:x['mem_free'], reverse=True)
|
||||||
@ -82,7 +91,7 @@ def auto_pick_devices(currently_active_devices):
|
|||||||
devices = list(map(lambda x: x['device'], devices))
|
devices = list(map(lambda x: x['device'], devices))
|
||||||
return devices
|
return devices
|
||||||
|
|
||||||
def device_init(thread_data, device):
|
def device_init(context, device):
|
||||||
'''
|
'''
|
||||||
This function assumes the 'device' has already been verified to be compatible.
|
This function assumes the 'device' has already been verified to be compatible.
|
||||||
`get_device_delta()` has already filtered out incompatible devices.
|
`get_device_delta()` has already filtered out incompatible devices.
|
||||||
@ -91,27 +100,45 @@ def device_init(thread_data, device):
|
|||||||
validate_device_id(device, log_prefix='device_init')
|
validate_device_id(device, log_prefix='device_init')
|
||||||
|
|
||||||
if device == 'cpu':
|
if device == 'cpu':
|
||||||
thread_data.device = 'cpu'
|
context.device = 'cpu'
|
||||||
thread_data.device_name = get_processor_name()
|
context.device_name = get_processor_name()
|
||||||
print('Render device CPU available as', thread_data.device_name)
|
context.half_precision = False
|
||||||
|
log.debug(f'Render device CPU available as {context.device_name}')
|
||||||
return
|
return
|
||||||
|
|
||||||
thread_data.device_name = torch.cuda.get_device_name(device)
|
context.device_name = torch.cuda.get_device_name(device)
|
||||||
thread_data.device = device
|
context.device = device
|
||||||
|
|
||||||
# Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
|
# Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
|
||||||
device_name = thread_data.device_name.lower()
|
if needs_to_force_full_precision(context):
|
||||||
thread_data.force_full_precision = (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)) or ('Quadro T2000' in device_name)
|
log.warn(f'forcing full precision on this GPU, to avoid green images. GPU detected: {context.device_name}')
|
||||||
if thread_data.force_full_precision:
|
|
||||||
print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', thread_data.device_name)
|
|
||||||
# Apply force_full_precision now before models are loaded.
|
# Apply force_full_precision now before models are loaded.
|
||||||
thread_data.precision = 'full'
|
context.half_precision = False
|
||||||
|
|
||||||
print(f'Setting {device} as active')
|
log.info(f'Setting {device} as active, with precision: {"half" if context.half_precision else "full"}')
|
||||||
torch.cuda.device(device)
|
torch.cuda.device(device)
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
|
def needs_to_force_full_precision(context):
|
||||||
|
if 'FORCE_FULL_PRECISION' in os.environ:
|
||||||
|
return True
|
||||||
|
|
||||||
|
device_name = context.device_name.lower()
|
||||||
|
return (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)) or ('Quadro T2000' in device_name)
|
||||||
|
|
||||||
|
def get_max_vram_usage_level(device):
|
||||||
|
if device != 'cpu':
|
||||||
|
_, mem_total = torch.cuda.mem_get_info(device)
|
||||||
|
mem_total /= float(10**9)
|
||||||
|
|
||||||
|
if mem_total < 4.5:
|
||||||
|
return 'low'
|
||||||
|
elif mem_total < 6.5:
|
||||||
|
return 'balanced'
|
||||||
|
|
||||||
|
return 'high'
|
||||||
|
|
||||||
def validate_device_id(device, log_prefix=''):
|
def validate_device_id(device, log_prefix=''):
|
||||||
def is_valid():
|
def is_valid():
|
||||||
if not isinstance(device, str):
|
if not isinstance(device, str):
|
||||||
@ -132,7 +159,7 @@ def is_device_compatible(device):
|
|||||||
try:
|
try:
|
||||||
validate_device_id(device, log_prefix='is_device_compatible')
|
validate_device_id(device, log_prefix='is_device_compatible')
|
||||||
except:
|
except:
|
||||||
print(str(e))
|
log.error(str(e))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if device == 'cpu': return True
|
if device == 'cpu': return True
|
||||||
@ -141,10 +168,10 @@ def is_device_compatible(device):
|
|||||||
_, mem_total = torch.cuda.mem_get_info(device)
|
_, mem_total = torch.cuda.mem_get_info(device)
|
||||||
mem_total /= float(10**9)
|
mem_total /= float(10**9)
|
||||||
if mem_total < 3.0:
|
if mem_total < 3.0:
|
||||||
print(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
|
log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
|
||||||
return False
|
return False
|
||||||
except RuntimeError as e:
|
except RuntimeError as e:
|
||||||
print(str(e))
|
log.error(str(e))
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -164,5 +191,5 @@ def get_processor_name():
|
|||||||
if "model name" in line:
|
if "model name" in line:
|
||||||
return re.sub(".*model name.*:", "", line, 1).strip()
|
return re.sub(".*model name.*:", "", line, 1).strip()
|
||||||
except:
|
except:
|
||||||
print(traceback.format_exc())
|
log.error(traceback.format_exc())
|
||||||
return "cpu"
|
return "cpu"
|
223
ui/easydiffusion/model_manager.py
Normal file
223
ui/easydiffusion/model_manager.py
Normal file
@ -0,0 +1,223 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
from easydiffusion import app, device_manager
|
||||||
|
from easydiffusion.types import TaskData
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
from sdkit import Context
|
||||||
|
from sdkit.models import load_model, unload_model, get_model_info_from_db, scan_model
|
||||||
|
from sdkit.utils import hash_file_quick
|
||||||
|
|
||||||
|
KNOWN_MODEL_TYPES = ['stable-diffusion', 'vae', 'hypernetwork', 'gfpgan', 'realesrgan']
|
||||||
|
MODEL_EXTENSIONS = {
|
||||||
|
'stable-diffusion': ['.ckpt', '.safetensors'],
|
||||||
|
'vae': ['.vae.pt', '.ckpt', '.safetensors'],
|
||||||
|
'hypernetwork': ['.pt', '.safetensors'],
|
||||||
|
'gfpgan': ['.pth'],
|
||||||
|
'realesrgan': ['.pth'],
|
||||||
|
}
|
||||||
|
DEFAULT_MODELS = {
|
||||||
|
'stable-diffusion': [ # needed to support the legacy installations
|
||||||
|
'custom-model', # only one custom model file was supported initially, creatively named 'custom-model'
|
||||||
|
'sd-v1-4', # Default fallback.
|
||||||
|
],
|
||||||
|
'gfpgan': ['GFPGANv1.3'],
|
||||||
|
'realesrgan': ['RealESRGAN_x4plus'],
|
||||||
|
}
|
||||||
|
VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
|
||||||
|
'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
|
||||||
|
'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
|
||||||
|
'high': {},
|
||||||
|
}
|
||||||
|
MODELS_TO_LOAD_ON_START = ['stable-diffusion', 'vae', 'hypernetwork']
|
||||||
|
|
||||||
|
known_models = {}
|
||||||
|
|
||||||
|
def init():
|
||||||
|
make_model_folders()
|
||||||
|
getModels() # run this once, to cache the picklescan results
|
||||||
|
|
||||||
|
def load_default_models(context: Context):
|
||||||
|
set_vram_optimizations(context)
|
||||||
|
|
||||||
|
# init default model paths
|
||||||
|
for model_type in MODELS_TO_LOAD_ON_START:
|
||||||
|
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
|
||||||
|
load_model(context, model_type)
|
||||||
|
|
||||||
|
def unload_all(context: Context):
|
||||||
|
for model_type in KNOWN_MODEL_TYPES:
|
||||||
|
unload_model(context, model_type)
|
||||||
|
|
||||||
|
def resolve_model_to_use(model_name:str=None, model_type:str=None):
|
||||||
|
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
|
||||||
|
default_models = DEFAULT_MODELS.get(model_type, [])
|
||||||
|
config = app.getConfig()
|
||||||
|
|
||||||
|
model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
|
||||||
|
if not model_name: # When None try user configured model.
|
||||||
|
# config = getConfig()
|
||||||
|
if 'model' in config and model_type in config['model']:
|
||||||
|
model_name = config['model'][model_type]
|
||||||
|
|
||||||
|
if model_name:
|
||||||
|
# Check models directory
|
||||||
|
models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
|
||||||
|
for model_extension in model_extensions:
|
||||||
|
if os.path.exists(models_dir_path + model_extension):
|
||||||
|
return models_dir_path + model_extension
|
||||||
|
if os.path.exists(model_name + model_extension):
|
||||||
|
return os.path.abspath(model_name + model_extension)
|
||||||
|
|
||||||
|
# Default locations
|
||||||
|
if model_name in default_models:
|
||||||
|
default_model_path = os.path.join(app.SD_DIR, model_name)
|
||||||
|
for model_extension in model_extensions:
|
||||||
|
if os.path.exists(default_model_path + model_extension):
|
||||||
|
return default_model_path + model_extension
|
||||||
|
|
||||||
|
# Can't find requested model, check the default paths.
|
||||||
|
for default_model in default_models:
|
||||||
|
for model_dir in model_dirs:
|
||||||
|
default_model_path = os.path.join(model_dir, default_model)
|
||||||
|
for model_extension in model_extensions:
|
||||||
|
if os.path.exists(default_model_path + model_extension):
|
||||||
|
if model_name is not None:
|
||||||
|
log.warn(f'Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}')
|
||||||
|
return default_model_path + model_extension
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def reload_models_if_necessary(context: Context, task_data: TaskData):
|
||||||
|
model_paths_in_req = {
|
||||||
|
'stable-diffusion': task_data.use_stable_diffusion_model,
|
||||||
|
'vae': task_data.use_vae_model,
|
||||||
|
'hypernetwork': task_data.use_hypernetwork_model,
|
||||||
|
'gfpgan': task_data.use_face_correction,
|
||||||
|
'realesrgan': task_data.use_upscale,
|
||||||
|
}
|
||||||
|
models_to_reload = {model_type: path for model_type, path in model_paths_in_req.items() if context.model_paths.get(model_type) != path}
|
||||||
|
|
||||||
|
if set_vram_optimizations(context): # reload SD
|
||||||
|
models_to_reload['stable-diffusion'] = model_paths_in_req['stable-diffusion']
|
||||||
|
|
||||||
|
if 'stable-diffusion' in models_to_reload:
|
||||||
|
quick_hash = hash_file_quick(models_to_reload['stable-diffusion'])
|
||||||
|
known_model_info = get_model_info_from_db(quick_hash=quick_hash)
|
||||||
|
|
||||||
|
for model_type, model_path_in_req in models_to_reload.items():
|
||||||
|
context.model_paths[model_type] = model_path_in_req
|
||||||
|
|
||||||
|
action_fn = unload_model if context.model_paths[model_type] is None else load_model
|
||||||
|
action_fn(context, model_type, scan_model=False) # we've scanned them already
|
||||||
|
|
||||||
|
def resolve_model_paths(task_data: TaskData):
|
||||||
|
task_data.use_stable_diffusion_model = resolve_model_to_use(task_data.use_stable_diffusion_model, model_type='stable-diffusion')
|
||||||
|
task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type='vae')
|
||||||
|
task_data.use_hypernetwork_model = resolve_model_to_use(task_data.use_hypernetwork_model, model_type='hypernetwork')
|
||||||
|
|
||||||
|
if task_data.use_face_correction: task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, 'gfpgan')
|
||||||
|
if task_data.use_upscale: task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, 'realesrgan')
|
||||||
|
|
||||||
|
def set_vram_optimizations(context: Context):
|
||||||
|
config = app.getConfig()
|
||||||
|
|
||||||
|
max_usage_level = device_manager.get_max_vram_usage_level(context.device)
|
||||||
|
vram_usage_level = config.get('vram_usage_level', 'balanced')
|
||||||
|
|
||||||
|
v = {'low': 0, 'balanced': 1, 'high': 2}
|
||||||
|
if v[vram_usage_level] > v[max_usage_level]:
|
||||||
|
log.error(f'Requested GPU Memory Usage level ({vram_usage_level}) is higher than what is ' + \
|
||||||
|
f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
|
||||||
|
vram_usage_level = max_usage_level
|
||||||
|
|
||||||
|
vram_optimizations = VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS[vram_usage_level]
|
||||||
|
|
||||||
|
if vram_optimizations != context.vram_optimizations:
|
||||||
|
context.vram_optimizations = vram_optimizations
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
def make_model_folders():
|
||||||
|
for model_type in KNOWN_MODEL_TYPES:
|
||||||
|
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
|
||||||
|
|
||||||
|
os.makedirs(model_dir_path, exist_ok=True)
|
||||||
|
|
||||||
|
help_file_name = f'Place your {model_type} model files here.txt'
|
||||||
|
help_file_contents = f'Supported extensions: {" or ".join(MODEL_EXTENSIONS.get(model_type))}'
|
||||||
|
|
||||||
|
with open(os.path.join(model_dir_path, help_file_name), 'w', encoding='utf-8') as f:
|
||||||
|
f.write(help_file_contents)
|
||||||
|
|
||||||
|
def is_malicious_model(file_path):
|
||||||
|
try:
|
||||||
|
scan_result = scan_model(file_path)
|
||||||
|
if scan_result.issues_count > 0 or scan_result.infected_files > 0:
|
||||||
|
log.warn(":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]" % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
log.debug("Scan %s: [green]%d scanned, %d issue, %d infected.[/green]" % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files))
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
log.error(f'error while scanning: {file_path}, error: {e}')
|
||||||
|
return False
|
||||||
|
|
||||||
|
def getModels():
|
||||||
|
models = {
|
||||||
|
'active': {
|
||||||
|
'stable-diffusion': 'sd-v1-4',
|
||||||
|
'vae': '',
|
||||||
|
'hypernetwork': '',
|
||||||
|
},
|
||||||
|
'options': {
|
||||||
|
'stable-diffusion': ['sd-v1-4'],
|
||||||
|
'vae': [],
|
||||||
|
'hypernetwork': [],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
models_scanned = 0
|
||||||
|
def listModels(model_type):
|
||||||
|
nonlocal models_scanned
|
||||||
|
|
||||||
|
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
|
||||||
|
models_dir = os.path.join(app.MODELS_DIR, model_type)
|
||||||
|
if not os.path.exists(models_dir):
|
||||||
|
os.makedirs(models_dir)
|
||||||
|
|
||||||
|
for file in os.listdir(models_dir):
|
||||||
|
for model_extension in model_extensions:
|
||||||
|
if not file.endswith(model_extension):
|
||||||
|
continue
|
||||||
|
|
||||||
|
model_path = os.path.join(models_dir, file)
|
||||||
|
mtime = os.path.getmtime(model_path)
|
||||||
|
mod_time = known_models[model_path] if model_path in known_models else -1
|
||||||
|
if mod_time != mtime:
|
||||||
|
models_scanned += 1
|
||||||
|
if is_malicious_model(model_path):
|
||||||
|
models['scan-error'] = file
|
||||||
|
return
|
||||||
|
known_models[model_path] = mtime
|
||||||
|
|
||||||
|
model_name = file[:-len(model_extension)]
|
||||||
|
models['options'][model_type].append(model_name)
|
||||||
|
|
||||||
|
models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates
|
||||||
|
models['options'][model_type].sort()
|
||||||
|
|
||||||
|
# custom models
|
||||||
|
listModels(model_type='stable-diffusion')
|
||||||
|
listModels(model_type='vae')
|
||||||
|
listModels(model_type='hypernetwork')
|
||||||
|
|
||||||
|
if models_scanned > 0: log.info(f'[green]Scanned {models_scanned} models. Nothing infected[/]')
|
||||||
|
|
||||||
|
# legacy
|
||||||
|
custom_weight_path = os.path.join(app.SD_DIR, 'custom-model.ckpt')
|
||||||
|
if os.path.exists(custom_weight_path):
|
||||||
|
models['options']['stable-diffusion'].append('custom-model')
|
||||||
|
|
||||||
|
return models
|
124
ui/easydiffusion/renderer.py
Normal file
124
ui/easydiffusion/renderer.py
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
import queue
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
|
||||||
|
from easydiffusion import device_manager
|
||||||
|
from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
|
||||||
|
from easydiffusion.utils import get_printable_request, save_images_to_disk, log
|
||||||
|
|
||||||
|
from sdkit import Context
|
||||||
|
from sdkit.generate import generate_images
|
||||||
|
from sdkit.filter import apply_filters
|
||||||
|
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, gc
|
||||||
|
|
||||||
|
context = Context() # thread-local
|
||||||
|
'''
|
||||||
|
runtime data (bound locally to this thread), for e.g. device, references to loaded models, optimization flags etc
|
||||||
|
'''
|
||||||
|
|
||||||
|
def init(device):
|
||||||
|
'''
|
||||||
|
Initializes the fields that will be bound to this runtime's context, and sets the current torch device
|
||||||
|
'''
|
||||||
|
context.stop_processing = False
|
||||||
|
context.temp_images = {}
|
||||||
|
context.partial_x_samples = None
|
||||||
|
|
||||||
|
device_manager.device_init(context, device)
|
||||||
|
|
||||||
|
def make_images(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback):
|
||||||
|
context.stop_processing = False
|
||||||
|
log.info(f'request: {get_printable_request(req)}')
|
||||||
|
log.info(f'task data: {task_data.dict()}')
|
||||||
|
|
||||||
|
images = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
|
||||||
|
|
||||||
|
res = Response(req, task_data, images=construct_response(images, task_data, base_seed=req.seed))
|
||||||
|
res = res.json()
|
||||||
|
data_queue.put(json.dumps(res))
|
||||||
|
log.info('Task completed')
|
||||||
|
|
||||||
|
return res
|
||||||
|
|
||||||
|
def make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback):
|
||||||
|
images, user_stopped = generate_images_internal(req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress)
|
||||||
|
filtered_images = filter_images(task_data, images, user_stopped)
|
||||||
|
|
||||||
|
if task_data.save_to_disk_path is not None:
|
||||||
|
save_images_to_disk(images, filtered_images, req, task_data)
|
||||||
|
|
||||||
|
return filtered_images if task_data.show_only_filtered_image else images + filtered_images
|
||||||
|
|
||||||
|
def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
|
||||||
|
context.temp_images.clear()
|
||||||
|
|
||||||
|
callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress)
|
||||||
|
|
||||||
|
try:
|
||||||
|
images = generate_images(context, callback=callback, **req.dict())
|
||||||
|
user_stopped = False
|
||||||
|
except UserInitiatedStop:
|
||||||
|
images = []
|
||||||
|
user_stopped = True
|
||||||
|
if context.partial_x_samples is not None:
|
||||||
|
images = latent_samples_to_images(context, context.partial_x_samples)
|
||||||
|
context.partial_x_samples = None
|
||||||
|
finally:
|
||||||
|
gc(context)
|
||||||
|
|
||||||
|
return images, user_stopped
|
||||||
|
|
||||||
|
def filter_images(task_data: TaskData, images: list, user_stopped):
|
||||||
|
if user_stopped or (task_data.use_face_correction is None and task_data.use_upscale is None):
|
||||||
|
return images
|
||||||
|
|
||||||
|
filters_to_apply = []
|
||||||
|
if task_data.use_face_correction and 'gfpgan' in task_data.use_face_correction.lower(): filters_to_apply.append('gfpgan')
|
||||||
|
if task_data.use_upscale and 'realesrgan' in task_data.use_upscale.lower(): filters_to_apply.append('realesrgan')
|
||||||
|
|
||||||
|
return apply_filters(context, filters_to_apply, images)
|
||||||
|
|
||||||
|
def construct_response(images: list, task_data: TaskData, base_seed: int):
|
||||||
|
return [
|
||||||
|
ResponseImage(
|
||||||
|
data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
|
||||||
|
seed=base_seed + i
|
||||||
|
) for i, img in enumerate(images)
|
||||||
|
]
|
||||||
|
|
||||||
|
def make_step_callback(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
    """Build the per-step callback handed to the sampler.

    The returned callback publishes a JSON progress record on `data_queue`,
    optionally streams partial preview images, invokes `step_callback`, and
    raises UserInitiatedStop when the user cancels the task.
    """
    # img2img effectively runs fewer steps, scaled by the prompt strength.
    if req.init_image is None:
        n_steps = req.num_inference_steps
    else:
        n_steps = int(req.num_inference_steps * req.prompt_strength)
    last_callback_time = -1  # sentinel: no step has completed yet

    def update_temp_img(x_samples, task_temp_images: list):
        # Decode the current latents and expose them as in-memory JPEG previews.
        partial_images = []
        preview_images = latent_samples_to_images(context, x_samples)
        for idx, preview in enumerate(preview_images):
            buf = img_to_buffer(preview, output_format='JPEG')

            context.temp_images[f"{task_data.request_id}/{idx}"] = buf
            task_temp_images[idx] = buf
            partial_images.append({'path': f"/image/tmp/{task_data.request_id}/{idx}"})
        del preview_images
        return partial_images

    def on_image_step(x_samples, i):
        nonlocal last_callback_time

        # Keep the latest latents around so a stopped task can still render them.
        context.partial_x_samples = x_samples

        now = time.time()
        step_time = now - last_callback_time if last_callback_time != -1 else -1
        last_callback_time = now

        progress = {"step": i, "step_time": step_time, "total_steps": n_steps}

        # Stream a preview every 5th step when live image progress was requested.
        if stream_image_progress and i % 5 == 0:
            progress['output'] = update_temp_img(x_samples, task_temp_images)

        data_queue.put(json.dumps(progress))

        step_callback()

        if context.stop_processing:
            raise UserInitiatedStop("User requested that we stop processing")

    return on_image_step
|
219
ui/easydiffusion/server.py
Normal file
219
ui/easydiffusion/server.py
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
"""server.py: FastAPI SD-UI Web Host.
|
||||||
|
Notes:
|
||||||
|
async endpoints always run on the main thread. Without they run on the thread pool.
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import traceback
|
||||||
|
import datetime
|
||||||
|
from typing import List, Union
|
||||||
|
|
||||||
|
from fastapi import FastAPI, HTTPException
|
||||||
|
from fastapi.staticfiles import StaticFiles
|
||||||
|
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
from easydiffusion import app, model_manager, task_manager
|
||||||
|
from easydiffusion.types import TaskData, GenerateImageRequest
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
log.info(f'started in {app.SD_DIR}')
|
||||||
|
log.info(f'started at {datetime.datetime.now():%x %X}')
|
||||||
|
|
||||||
|
server_api = FastAPI()
|
||||||
|
|
||||||
|
NOCACHE_HEADERS={"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
|
||||||
|
|
||||||
|
class NoCacheStaticFiles(StaticFiles):
    """StaticFiles variant that disables browser caching for scripts and styles."""

    def is_not_modified(self, response_headers, request_headers) -> bool:
        # JS and CSS change on every release; force re-fetch instead of 304.
        content_type = response_headers.get('content-type', '')
        if 'javascript' in content_type or 'css' in content_type:
            response_headers.update(NOCACHE_HEADERS)
            return False

        return super().is_not_modified(response_headers, request_headers)
|
||||||
|
|
||||||
|
class SetAppConfigRequest(BaseModel):
    """Payload for POST /app_config. Every field is optional; only the
    fields present in the request are merged into the saved config."""
    update_branch: str = None
    render_devices: Union[List[str], List[int], str, int] = None
    model_vae: str = None
    ui_open_browser_on_start: bool = None
    listen_to_network: bool = None
    listen_port: int = None
|
||||||
|
|
||||||
|
def init():
    """Mount the static directories and register every HTTP route on `server_api`."""
    server_api.mount('/media', NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, 'media')), name="media")

    for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
        server_api.mount(f'/plugins/{dir_prefix}', NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}")

    @server_api.post('/app_config')
    async def set_app_config(req : SetAppConfigRequest):
        return set_app_config_internal(req)

    @server_api.get('/get/{key:path}')
    def read_web_data(key:str=None):
        return read_web_data_internal(key)

    @server_api.get('/ping')  # Get server and optionally session status.
    def ping(session_id:str=None):
        return ping_internal(session_id)

    @server_api.post('/render')
    def render(req: dict):
        return render_internal(req)

    @server_api.get('/image/stream/{task_id:int}')
    def stream(task_id:int):
        return stream_internal(task_id)

    @server_api.get('/image/stop')
    def stop(task: int):
        return stop_internal(task)

    @server_api.get('/image/tmp/{task_id:int}/{img_id:int}')
    def get_image(task_id: int, img_id: int):
        return get_image_internal(task_id, img_id)

    @server_api.get('/')
    def read_root():
        return FileResponse(os.path.join(app.SD_UI_DIR, 'index.html'), headers=NOCACHE_HEADERS)

    @server_api.on_event("shutdown")
    def shutdown_event():  # Signal render thread to close on shutdown
        task_manager.current_state_error = SystemExit('Application shutting down.')
|
||||||
|
|
||||||
|
# API implementations
|
||||||
|
def set_app_config_internal(req : SetAppConfigRequest):
    """Merge the non-None fields of `req` into the persisted app config.

    Restarts the render threads when the device selection changed. Returns a
    no-cache JSON OK response, or HTTPException(500) when saving fails.
    """
    config = app.getConfig()

    if req.update_branch is not None:
        config['update_branch'] = req.update_branch
    if req.render_devices is not None:
        update_render_devices_in_config(config, req.render_devices)
    if req.ui_open_browser_on_start is not None:
        config.setdefault('ui', {})['open_browser_on_start'] = req.ui_open_browser_on_start
    if req.listen_to_network is not None:
        config.setdefault('net', {})['listen_to_network'] = bool(req.listen_to_network)
    if req.listen_port is not None:
        config.setdefault('net', {})['listen_port'] = int(req.listen_port)

    try:
        app.setConfig(config)

        if req.render_devices:
            app.update_render_threads()

        return JSONResponse({'status': 'OK'}, headers=NOCACHE_HEADERS)
    except Exception as e:
        log.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
def update_render_devices_in_config(config, render_devices):
    """Validate `render_devices` and store it under config['render_devices'].

    Accepts 'cpu', 'auto', a 'cuda:N[,cuda:M,...]' string (split into a list),
    or a non-string value (list/int) as permitted by SetAppConfigRequest.
    Raises HTTPException(400) for an unrecognized device string.
    """
    # Fix: SetAppConfigRequest.render_devices also allows lists and ints; the
    # original unconditionally called .startswith and crashed with
    # AttributeError (HTTP 500) on any non-string value. Only validate/split
    # strings; store other shapes as-is.
    if isinstance(render_devices, str):
        if render_devices not in ('cpu', 'auto') and not render_devices.startswith('cuda:'):
            raise HTTPException(status_code=400, detail=f'Invalid render device requested: {render_devices}')

        if render_devices.startswith('cuda:'):
            # A comma-separated cuda string selects multiple devices.
            render_devices = render_devices.split(',')

    config['render_devices'] = render_devices
|
||||||
|
|
||||||
|
def read_web_data_internal(key:str=None):
    """Serve read-only application data for GET /get/{key}."""
    if not key:  # /get without parameters, stable-diffusion easter egg.
        raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!")  # HTTP418 I'm a teapot

    if key == 'app_config':
        return JSONResponse(app.getConfig(), headers=NOCACHE_HEADERS)

    if key == 'system_info':
        config = app.getConfig()
        system_info = {
            'devices': task_manager.get_devices(),
            'hosts': app.getIPConfig(),
            'default_output_dir': os.path.join(os.path.expanduser("~"), app.OUTPUT_DIRNAME),
        }
        system_info['devices']['config'] = config.get('render_devices', "auto")
        return JSONResponse(system_info, headers=NOCACHE_HEADERS)

    if key == 'models':
        return JSONResponse(model_manager.getModels(), headers=NOCACHE_HEADERS)

    if key == 'modifiers':
        return FileResponse(os.path.join(app.SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)

    if key == 'ui_plugins':
        return JSONResponse(app.getUIPlugins(), headers=NOCACHE_HEADERS)

    raise HTTPException(status_code=404, detail=f'Request for unknown {key}')  # HTTP404 Not Found
|
||||||
|
|
||||||
|
def ping_internal(session_id:str=None):
    """Health check: report server state, devices, and (optionally) the
    status of every task in the given session."""
    if task_manager.is_alive() <= 0:  # Check that render threads are alive.
        if task_manager.current_state_error:
            raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
        raise HTTPException(status_code=500, detail='Render thread is dead.')

    # A pending StopAsyncIteration is a user cancel, not a server failure.
    if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration):
        raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))

    # Alive
    response = {'status': str(task_manager.current_state)}
    if session_id:
        # Touch the session so its cache TTL is extended while the UI polls.
        session = task_manager.get_cached_session(session_id, update_ttl=True)
        response['tasks'] = {id(t): t.status for t in session.tasks}
    response['devices'] = task_manager.get_devices()
    return JSONResponse(response, headers=NOCACHE_HEADERS)
|
||||||
|
|
||||||
|
def render_internal(req: dict):
    """Parse a raw render request, enqueue it, and return its task handles.

    The response contains the task id and the /image/stream URL the client
    polls for output. Maps queue-full to 503 and a dead render thread to 500.
    """
    try:
        # separate out the request data into rendering and task-specific data
        render_req: GenerateImageRequest = GenerateImageRequest.parse_obj(req)
        task_data: TaskData = TaskData.parse_obj(req)

        render_req.init_image_mask = req.get('mask')  # hack: will rename this in the HTTP API in a future revision

        app.save_to_config(task_data.use_stable_diffusion_model, task_data.use_vae_model, task_data.use_hypernetwork_model, task_data.vram_usage_level)

        # enqueue the task
        new_task = task_manager.render(render_req, task_data)
        response = {
            'status': str(task_manager.current_state),
            'queue': len(task_manager.tasks_queue),
            'stream': f'/image/stream/{id(new_task)}',
            'task': id(new_task)
        }
        return JSONResponse(response, headers=NOCACHE_HEADERS)
    except ChildProcessError:  # Render thread is dead
        # Fix: dropped the unused `as e` binding and the placeholder-free
        # f-string prefix on the detail message.
        raise HTTPException(status_code=500, detail='Rendering thread has died.')  # HTTP500 Internal Server Error
    except ConnectionRefusedError as e:  # Unstarted task pending limit reached, deny queueing too many.
        raise HTTPException(status_code=503, detail=str(e))  # HTTP503 Service Unavailable
    except Exception as e:
        log.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
|
def stream_internal(task_id:int):
    """Stream a task's buffered JSON output; serve the cached final response
    when the task has already finished."""
    #TODO Move to WebSockets ??
    task = task_manager.get_cached_task(task_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=404, detail=f'Request {task_id} not found.')  # HTTP404 NotFound

    # Not running and nothing buffered: either finished (cached response) or
    # not started yet.
    if task.buffer_queue.empty() and not task.lock.locked():
        if task.response:
            return JSONResponse(task.response, headers=NOCACHE_HEADERS)
        raise HTTPException(status_code=425, detail='Too Early, task not started yet.')  # HTTP425 Too Early

    return StreamingResponse(task.read_buffer_generator(), media_type='application/json')
|
||||||
|
|
||||||
|
def stop_internal(task: int):
    """Stop a task by id; with a falsy id, cancel whatever is currently rendering."""
    if not task:
        # No specific task: signal the active render via the global error slot.
        if task_manager.current_state == task_manager.ServerStates.Online or task_manager.current_state == task_manager.ServerStates.Unavailable:
            raise HTTPException(status_code=409, detail='Not currently running any tasks.')  # HTTP409 Conflict
        task_manager.current_state_error = StopAsyncIteration('')
        return {'OK'}

    task_id = task
    task = task_manager.get_cached_task(task_id, update_ttl=False)
    if not task:
        raise HTTPException(status_code=404, detail=f'Task {task_id} was not found.')  # HTTP404 Not Found
    if isinstance(task.error, StopAsyncIteration):
        raise HTTPException(status_code=409, detail=f'Task {task_id} is already stopped.')  # HTTP409 Conflict

    # StopAsyncIteration on the task is the cancel signal its render loop checks.
    task.error = StopAsyncIteration(f'Task {task_id} stop requested.')
    return {'OK'}
|
||||||
|
|
||||||
|
def get_image_internal(task_id: int, img_id: int):
    """Serve one in-progress preview image of a task as a JPEG stream."""
    task = task_manager.get_cached_task(task_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=410, detail=f'Task {task_id} could not be found.')  # HTTP410 Gone
    if not task.temp_images[img_id]:
        raise HTTPException(status_code=425, detail='Too Early, task data is not available yet.')  # HTTP425 Too Early
    try:
        img_data = task.temp_images[img_id]
        img_data.seek(0)  # rewind the shared buffer before each streaming read
        return StreamingResponse(img_data, media_type='image/jpeg')
    except KeyError as e:
        raise HTTPException(status_code=500, detail=str(e))
|
@ -11,12 +11,13 @@ TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
|
|||||||
|
|
||||||
import torch
|
import torch
|
||||||
import queue, threading, time, weakref
|
import queue, threading, time, weakref
|
||||||
from typing import Any, Generator, Hashable, Optional, Union
|
from typing import Any, Hashable
|
||||||
|
|
||||||
from pydantic import BaseModel
|
from easydiffusion import device_manager
|
||||||
from sd_internal import Request, Response, runtime, device_manager
|
from easydiffusion.types import TaskData, GenerateImageRequest
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
THREAD_NAME_PREFIX = 'Runtime-Render/'
|
THREAD_NAME_PREFIX = ''
|
||||||
ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
|
ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
|
||||||
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
|
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
|
||||||
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.
|
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.
|
||||||
@ -36,11 +37,13 @@ class ServerStates:
|
|||||||
class Unavailable(Symbol): pass
|
class Unavailable(Symbol): pass
|
||||||
|
|
||||||
class RenderTask(): # Task with output queue and completion lock.
|
class RenderTask(): # Task with output queue and completion lock.
|
||||||
def __init__(self, req: Request):
|
def __init__(self, req: GenerateImageRequest, task_data: TaskData):
|
||||||
self.request: Request = req # Initial Request
|
task_data.request_id = id(self)
|
||||||
|
self.render_request: GenerateImageRequest = req # Initial Request
|
||||||
|
self.task_data: TaskData = task_data
|
||||||
self.response: Any = None # Copy of the last reponse
|
self.response: Any = None # Copy of the last reponse
|
||||||
self.render_device = None # Select the task affinity. (Not used to change active devices).
|
self.render_device = None # Select the task affinity. (Not used to change active devices).
|
||||||
self.temp_images:list = [None] * req.num_outputs * (1 if req.show_only_filtered_image else 2)
|
self.temp_images:list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
|
||||||
self.error: Exception = None
|
self.error: Exception = None
|
||||||
self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
|
self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
|
||||||
self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
|
self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
|
||||||
@ -51,55 +54,25 @@ class RenderTask(): # Task with output queue and completion lock.
|
|||||||
self.buffer_queue.task_done()
|
self.buffer_queue.task_done()
|
||||||
yield res
|
yield res
|
||||||
except queue.Empty as e: yield
|
except queue.Empty as e: yield
|
||||||
|
@property
|
||||||
# defaults from https://huggingface.co/blog/stable_diffusion
|
def status(self):
|
||||||
class ImageRequest(BaseModel):
|
if self.lock.locked():
|
||||||
session_id: str = "session"
|
return 'running'
|
||||||
prompt: str = ""
|
if isinstance(self.error, StopAsyncIteration):
|
||||||
negative_prompt: str = ""
|
return 'stopped'
|
||||||
init_image: str = None # base64
|
if self.error:
|
||||||
mask: str = None # base64
|
return 'error'
|
||||||
num_outputs: int = 1
|
if not self.buffer_queue.empty():
|
||||||
num_inference_steps: int = 50
|
return 'buffer'
|
||||||
guidance_scale: float = 7.5
|
if self.response:
|
||||||
width: int = 512
|
return 'completed'
|
||||||
height: int = 512
|
return 'pending'
|
||||||
seed: int = 42
|
@property
|
||||||
prompt_strength: float = 0.8
|
def is_pending(self):
|
||||||
sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
return bool(not self.response and not self.error)
|
||||||
# allow_nsfw: bool = False
|
|
||||||
save_to_disk_path: str = None
|
|
||||||
turbo: bool = True
|
|
||||||
use_cpu: bool = False ##TODO Remove after UI and plugins transition.
|
|
||||||
render_device: str = None # Select the task affinity. (Not used to change active devices).
|
|
||||||
use_full_precision: bool = False
|
|
||||||
use_face_correction: str = None # or "GFPGANv1.3"
|
|
||||||
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
|
|
||||||
use_stable_diffusion_model: str = "sd-v1-4"
|
|
||||||
use_vae_model: str = None
|
|
||||||
show_only_filtered_image: bool = False
|
|
||||||
output_format: str = "jpeg" # or "png"
|
|
||||||
output_quality: int = 75
|
|
||||||
|
|
||||||
stream_progress_updates: bool = False
|
|
||||||
stream_image_progress: bool = False
|
|
||||||
|
|
||||||
class FilterRequest(BaseModel):
|
|
||||||
session_id: str = "session"
|
|
||||||
model: str = None
|
|
||||||
name: str = ""
|
|
||||||
init_image: str = None # base64
|
|
||||||
width: int = 512
|
|
||||||
height: int = 512
|
|
||||||
save_to_disk_path: str = None
|
|
||||||
turbo: bool = True
|
|
||||||
render_device: str = None
|
|
||||||
use_full_precision: bool = False
|
|
||||||
output_format: str = "jpeg" # or "png"
|
|
||||||
output_quality: int = 75
|
|
||||||
|
|
||||||
# Temporary cache to allow to query tasks results for a short time after they are completed.
|
# Temporary cache to allow to query tasks results for a short time after they are completed.
|
||||||
class TaskCache():
|
class DataCache():
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._base = dict()
|
self._base = dict()
|
||||||
self._lock: threading.Lock = threading.Lock()
|
self._lock: threading.Lock = threading.Lock()
|
||||||
@ -108,7 +81,7 @@ class TaskCache():
|
|||||||
def _is_expired(self, timestamp: int) -> bool:
|
def _is_expired(self, timestamp: int) -> bool:
|
||||||
return int(time.time()) >= timestamp
|
return int(time.time()) >= timestamp
|
||||||
def clean(self) -> None:
|
def clean(self) -> None:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.clean' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.clean' + ERR_LOCK_FAILED)
|
||||||
try:
|
try:
|
||||||
# Create a list of expired keys to delete
|
# Create a list of expired keys to delete
|
||||||
to_delete = []
|
to_delete = []
|
||||||
@ -118,16 +91,22 @@ class TaskCache():
|
|||||||
to_delete.append(key)
|
to_delete.append(key)
|
||||||
# Remove Items
|
# Remove Items
|
||||||
for key in to_delete:
|
for key in to_delete:
|
||||||
|
(_, val) = self._base[key]
|
||||||
|
if isinstance(val, RenderTask):
|
||||||
|
log.debug(f'RenderTask {key} expired. Data removed.')
|
||||||
|
elif isinstance(val, SessionState):
|
||||||
|
log.debug(f'Session {key} expired. Data removed.')
|
||||||
|
else:
|
||||||
|
log.debug(f'Key {key} expired. Data removed.')
|
||||||
del self._base[key]
|
del self._base[key]
|
||||||
print(f'Session {key} expired. Data removed.')
|
|
||||||
finally:
|
finally:
|
||||||
self._lock.release()
|
self._lock.release()
|
||||||
def clear(self) -> None:
|
def clear(self) -> None:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.clear' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.clear' + ERR_LOCK_FAILED)
|
||||||
try: self._base.clear()
|
try: self._base.clear()
|
||||||
finally: self._lock.release()
|
finally: self._lock.release()
|
||||||
def delete(self, key: Hashable) -> bool:
|
def delete(self, key: Hashable) -> bool:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.delete' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.delete' + ERR_LOCK_FAILED)
|
||||||
try:
|
try:
|
||||||
if key not in self._base:
|
if key not in self._base:
|
||||||
return False
|
return False
|
||||||
@ -136,7 +115,7 @@ class TaskCache():
|
|||||||
finally:
|
finally:
|
||||||
self._lock.release()
|
self._lock.release()
|
||||||
def keep(self, key: Hashable, ttl: int) -> bool:
|
def keep(self, key: Hashable, ttl: int) -> bool:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.keep' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.keep' + ERR_LOCK_FAILED)
|
||||||
try:
|
try:
|
||||||
if key in self._base:
|
if key in self._base:
|
||||||
_, value = self._base.get(key)
|
_, value = self._base.get(key)
|
||||||
@ -146,25 +125,24 @@ class TaskCache():
|
|||||||
finally:
|
finally:
|
||||||
self._lock.release()
|
self._lock.release()
|
||||||
def put(self, key: Hashable, value: Any, ttl: int) -> bool:
|
def put(self, key: Hashable, value: Any, ttl: int) -> bool:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.put' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.put' + ERR_LOCK_FAILED)
|
||||||
try:
|
try:
|
||||||
self._base[key] = (
|
self._base[key] = (
|
||||||
self._get_ttl_time(ttl), value
|
self._get_ttl_time(ttl), value
|
||||||
)
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(str(e))
|
log.error(traceback.format_exc())
|
||||||
print(traceback.format_exc())
|
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
return True
|
return True
|
||||||
finally:
|
finally:
|
||||||
self._lock.release()
|
self._lock.release()
|
||||||
def tryGet(self, key: Hashable) -> Any:
|
def tryGet(self, key: Hashable) -> Any:
|
||||||
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.tryGet' + ERR_LOCK_FAILED)
|
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.tryGet' + ERR_LOCK_FAILED)
|
||||||
try:
|
try:
|
||||||
ttl, value = self._base.get(key, (None, None))
|
ttl, value = self._base.get(key, (None, None))
|
||||||
if ttl is not None and self._is_expired(ttl):
|
if ttl is not None and self._is_expired(ttl):
|
||||||
print(f'Session {key} expired. Discarding data.')
|
log.debug(f'Session {key} expired. Discarding data.')
|
||||||
del self._base[key]
|
del self._base[key]
|
||||||
return None
|
return None
|
||||||
return value
|
return value
|
||||||
@ -175,43 +153,40 @@ manager_lock = threading.RLock()
|
|||||||
render_threads = []
|
render_threads = []
|
||||||
current_state = ServerStates.Init
|
current_state = ServerStates.Init
|
||||||
current_state_error:Exception = None
|
current_state_error:Exception = None
|
||||||
current_model_path = None
|
|
||||||
current_vae_path = None
|
|
||||||
tasks_queue = []
|
tasks_queue = []
|
||||||
task_cache = TaskCache()
|
session_cache = DataCache()
|
||||||
default_model_to_load = None
|
task_cache = DataCache()
|
||||||
default_vae_to_load = None
|
|
||||||
weak_thread_data = weakref.WeakKeyDictionary()
|
weak_thread_data = weakref.WeakKeyDictionary()
|
||||||
|
idle_event: threading.Event = threading.Event()
|
||||||
|
|
||||||
def preload_model(ckpt_file_path=None, vae_file_path=None):
|
class SessionState():
|
||||||
global current_state, current_state_error, current_model_path, current_vae_path
|
def __init__(self, id: str):
|
||||||
if ckpt_file_path == None:
|
self._id = id
|
||||||
ckpt_file_path = default_model_to_load
|
self._tasks_ids = []
|
||||||
if vae_file_path == None:
|
@property
|
||||||
vae_file_path = default_vae_to_load
|
def id(self):
|
||||||
if ckpt_file_path == current_model_path and vae_file_path == current_vae_path:
|
return self._id
|
||||||
return
|
@property
|
||||||
current_state = ServerStates.LoadingModel
|
def tasks(self):
|
||||||
try:
|
tasks = []
|
||||||
from . import runtime
|
for task_id in self._tasks_ids:
|
||||||
runtime.thread_data.ckpt_file = ckpt_file_path
|
task = task_cache.tryGet(task_id)
|
||||||
runtime.thread_data.vae_file = vae_file_path
|
if task:
|
||||||
runtime.load_model_ckpt()
|
tasks.append(task)
|
||||||
current_model_path = ckpt_file_path
|
return tasks
|
||||||
current_vae_path = vae_file_path
|
def put(self, task, ttl=TASK_TTL):
|
||||||
current_state_error = None
|
task_id = id(task)
|
||||||
current_state = ServerStates.Online
|
self._tasks_ids.append(task_id)
|
||||||
except Exception as e:
|
if not task_cache.put(task_id, task, ttl):
|
||||||
current_model_path = None
|
return False
|
||||||
current_vae_path = None
|
while len(self._tasks_ids) > len(render_threads) * 2:
|
||||||
current_state_error = e
|
self._tasks_ids.pop(0)
|
||||||
current_state = ServerStates.Unavailable
|
return True
|
||||||
print(traceback.format_exc())
|
|
||||||
|
|
||||||
def thread_get_next_task():
|
def thread_get_next_task():
|
||||||
from . import runtime
|
from easydiffusion import renderer
|
||||||
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
||||||
print('Render thread on device', runtime.thread_data.device, 'failed to acquire manager lock.')
|
log.warn(f'Render thread on device: {renderer.context.device} failed to acquire manager lock.')
|
||||||
return None
|
return None
|
||||||
if len(tasks_queue) <= 0:
|
if len(tasks_queue) <= 0:
|
||||||
manager_lock.release()
|
manager_lock.release()
|
||||||
@ -219,7 +194,7 @@ def thread_get_next_task():
|
|||||||
task = None
|
task = None
|
||||||
try: # Select a render task.
|
try: # Select a render task.
|
||||||
for queued_task in tasks_queue:
|
for queued_task in tasks_queue:
|
||||||
if queued_task.render_device and runtime.thread_data.device != queued_task.render_device:
|
if queued_task.render_device and renderer.context.device != queued_task.render_device:
|
||||||
# Is asking for a specific render device.
|
# Is asking for a specific render device.
|
||||||
if is_alive(queued_task.render_device) > 0:
|
if is_alive(queued_task.render_device) > 0:
|
||||||
continue # requested device alive, skip current one.
|
continue # requested device alive, skip current one.
|
||||||
@ -228,7 +203,7 @@ def thread_get_next_task():
|
|||||||
queued_task.error = Exception(queued_task.render_device + ' is not currently active.')
|
queued_task.error = Exception(queued_task.render_device + ' is not currently active.')
|
||||||
task = queued_task
|
task = queued_task
|
||||||
break
|
break
|
||||||
if not queued_task.render_device and runtime.thread_data.device == 'cpu' and is_alive() > 1:
|
if not queued_task.render_device and renderer.context.device == 'cpu' and is_alive() > 1:
|
||||||
# not asking for any specific devices, cpu want to grab task but other render devices are alive.
|
# not asking for any specific devices, cpu want to grab task but other render devices are alive.
|
||||||
continue # Skip Tasks, don't run on CPU unless there is nothing else or user asked for it.
|
continue # Skip Tasks, don't run on CPU unless there is nothing else or user asked for it.
|
||||||
task = queued_task
|
task = queued_task
|
||||||
@ -240,40 +215,47 @@ def thread_get_next_task():
|
|||||||
manager_lock.release()
|
manager_lock.release()
|
||||||
|
|
||||||
def thread_render(device):
|
def thread_render(device):
|
||||||
global current_state, current_state_error, current_model_path, current_vae_path
|
global current_state, current_state_error
|
||||||
from . import runtime
|
|
||||||
|
from easydiffusion import renderer, model_manager
|
||||||
try:
|
try:
|
||||||
runtime.thread_init(device)
|
renderer.init(device)
|
||||||
except Exception as e:
|
|
||||||
print(traceback.format_exc())
|
|
||||||
weak_thread_data[threading.current_thread()] = {
|
weak_thread_data[threading.current_thread()] = {
|
||||||
'error': e
|
'device': renderer.context.device,
|
||||||
|
'device_name': renderer.context.device_name,
|
||||||
|
'alive': True
|
||||||
|
}
|
||||||
|
|
||||||
|
current_state = ServerStates.LoadingModel
|
||||||
|
model_manager.load_default_models(renderer.context)
|
||||||
|
|
||||||
|
current_state = ServerStates.Online
|
||||||
|
except Exception as e:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
weak_thread_data[threading.current_thread()] = {
|
||||||
|
'error': e,
|
||||||
|
'alive': False
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
weak_thread_data[threading.current_thread()] = {
|
|
||||||
'device': runtime.thread_data.device,
|
|
||||||
'device_name': runtime.thread_data.device_name,
|
|
||||||
'alive': True
|
|
||||||
}
|
|
||||||
if runtime.thread_data.device != 'cpu' or is_alive() == 1:
|
|
||||||
preload_model()
|
|
||||||
current_state = ServerStates.Online
|
|
||||||
while True:
|
while True:
|
||||||
|
session_cache.clean()
|
||||||
task_cache.clean()
|
task_cache.clean()
|
||||||
if not weak_thread_data[threading.current_thread()]['alive']:
|
if not weak_thread_data[threading.current_thread()]['alive']:
|
||||||
print(f'Shutting down thread for device {runtime.thread_data.device}')
|
log.info(f'Shutting down thread for device {renderer.context.device}')
|
||||||
runtime.unload_models()
|
model_manager.unload_all(renderer.context)
|
||||||
runtime.unload_filters()
|
|
||||||
return
|
return
|
||||||
if isinstance(current_state_error, SystemExit):
|
if isinstance(current_state_error, SystemExit):
|
||||||
current_state = ServerStates.Unavailable
|
current_state = ServerStates.Unavailable
|
||||||
return
|
return
|
||||||
task = thread_get_next_task()
|
task = thread_get_next_task()
|
||||||
if task is None:
|
if task is None:
|
||||||
time.sleep(0.05)
|
idle_event.clear()
|
||||||
|
idle_event.wait(timeout=1)
|
||||||
continue
|
continue
|
||||||
if task.error is not None:
|
if task.error is not None:
|
||||||
print(task.error)
|
log.error(task.error)
|
||||||
task.response = {"status": 'failed', "detail": str(task.error)}
|
task.response = {"status": 'failed', "detail": str(task.error)}
|
||||||
task.buffer_queue.put(json.dumps(task.response))
|
task.buffer_queue.put(json.dumps(task.response))
|
||||||
continue
|
continue
|
||||||
@ -282,51 +264,62 @@ def thread_render(device):
|
|||||||
task.response = {"status": 'failed', "detail": str(task.error)}
|
task.response = {"status": 'failed', "detail": str(task.error)}
|
||||||
task.buffer_queue.put(json.dumps(task.response))
|
task.buffer_queue.put(json.dumps(task.response))
|
||||||
continue
|
continue
|
||||||
print(f'Session {task.request.session_id} starting task {id(task)} on {runtime.thread_data.device_name}')
|
log.info(f'Session {task.task_data.session_id} starting task {id(task)} on {renderer.context.device_name}')
|
||||||
if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
|
if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
|
||||||
try:
|
try:
|
||||||
if runtime.is_model_reload_necessary(task.request):
|
|
||||||
current_state = ServerStates.LoadingModel
|
|
||||||
runtime.reload_model()
|
|
||||||
current_model_path = task.request.use_stable_diffusion_model
|
|
||||||
current_vae_path = task.request.use_vae_model
|
|
||||||
|
|
||||||
def step_callback():
|
def step_callback():
|
||||||
global current_state_error
|
global current_state_error
|
||||||
|
|
||||||
if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
|
if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
|
||||||
runtime.thread_data.stop_processing = True
|
renderer.context.stop_processing = True
|
||||||
if isinstance(current_state_error, StopAsyncIteration):
|
if isinstance(current_state_error, StopAsyncIteration):
|
||||||
task.error = current_state_error
|
task.error = current_state_error
|
||||||
current_state_error = None
|
current_state_error = None
|
||||||
print(f'Session {task.request.session_id} sent cancel signal for task {id(task)}')
|
log.info(f'Session {task.task_data.session_id} sent cancel signal for task {id(task)}')
|
||||||
|
|
||||||
task_cache.keep(task.request.session_id, TASK_TTL)
|
current_state = ServerStates.LoadingModel
|
||||||
|
model_manager.resolve_model_paths(task.task_data)
|
||||||
|
model_manager.reload_models_if_necessary(renderer.context, task.task_data)
|
||||||
|
|
||||||
current_state = ServerStates.Rendering
|
current_state = ServerStates.Rendering
|
||||||
task.response = runtime.mk_img(task.request, task.buffer_queue, task.temp_images, step_callback)
|
task.response = renderer.make_images(task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback)
|
||||||
|
# Before looping back to the generator, mark cache as still alive.
|
||||||
|
task_cache.keep(id(task), TASK_TTL)
|
||||||
|
session_cache.keep(task.task_data.session_id, TASK_TTL)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
task.error = e
|
task.error = e
|
||||||
print(traceback.format_exc())
|
task.response = {"status": 'failed', "detail": str(task.error)}
|
||||||
|
task.buffer_queue.put(json.dumps(task.response))
|
||||||
|
log.error(traceback.format_exc())
|
||||||
continue
|
continue
|
||||||
finally:
|
finally:
|
||||||
# Task completed
|
# Task completed
|
||||||
task.lock.release()
|
task.lock.release()
|
||||||
task_cache.keep(task.request.session_id, TASK_TTL)
|
task_cache.keep(id(task), TASK_TTL)
|
||||||
|
session_cache.keep(task.task_data.session_id, TASK_TTL)
|
||||||
if isinstance(task.error, StopAsyncIteration):
|
if isinstance(task.error, StopAsyncIteration):
|
||||||
print(f'Session {task.request.session_id} task {id(task)} cancelled!')
|
log.info(f'Session {task.task_data.session_id} task {id(task)} cancelled!')
|
||||||
elif task.error is not None:
|
elif task.error is not None:
|
||||||
print(f'Session {task.request.session_id} task {id(task)} failed!')
|
log.info(f'Session {task.task_data.session_id} task {id(task)} failed!')
|
||||||
else:
|
else:
|
||||||
print(f'Session {task.request.session_id} task {id(task)} completed by {runtime.thread_data.device_name}.')
|
log.info(f'Session {task.task_data.session_id} task {id(task)} completed by {renderer.context.device_name}.')
|
||||||
current_state = ServerStates.Online
|
current_state = ServerStates.Online
|
||||||
|
|
||||||
def get_cached_task(session_id:str, update_ttl:bool=False):
|
def get_cached_task(task_id:str, update_ttl:bool=False):
|
||||||
# By calling keep before tryGet, wont discard if was expired.
|
# By calling keep before tryGet, wont discard if was expired.
|
||||||
if update_ttl and not task_cache.keep(session_id, TASK_TTL):
|
if update_ttl and not task_cache.keep(task_id, TASK_TTL):
|
||||||
# Failed to keep task, already gone.
|
# Failed to keep task, already gone.
|
||||||
return None
|
return None
|
||||||
return task_cache.tryGet(session_id)
|
return task_cache.tryGet(task_id)
|
||||||
|
|
||||||
|
def get_cached_session(session_id:str, update_ttl:bool=False):
|
||||||
|
if update_ttl:
|
||||||
|
session_cache.keep(session_id, TASK_TTL)
|
||||||
|
session = session_cache.tryGet(session_id)
|
||||||
|
if not session:
|
||||||
|
session = SessionState(session_id)
|
||||||
|
session_cache.put(session_id, session, TASK_TTL)
|
||||||
|
return session
|
||||||
|
|
||||||
def get_devices():
|
def get_devices():
|
||||||
devices = {
|
devices = {
|
||||||
@ -346,6 +339,7 @@ def get_devices():
|
|||||||
'name': torch.cuda.get_device_name(device),
|
'name': torch.cuda.get_device_name(device),
|
||||||
'mem_free': mem_free,
|
'mem_free': mem_free,
|
||||||
'mem_total': mem_total,
|
'mem_total': mem_total,
|
||||||
|
'max_vram_usage_level': device_manager.get_max_vram_usage_level(device),
|
||||||
}
|
}
|
||||||
|
|
||||||
# list the compatible devices
|
# list the compatible devices
|
||||||
@ -395,7 +389,7 @@ def is_alive(device=None):
|
|||||||
|
|
||||||
def start_render_thread(device):
|
def start_render_thread(device):
|
||||||
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('start_render_thread' + ERR_LOCK_FAILED)
|
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('start_render_thread' + ERR_LOCK_FAILED)
|
||||||
print('Start new Rendering Thread on device', device)
|
log.info(f'Start new Rendering Thread on device: {device}')
|
||||||
try:
|
try:
|
||||||
rthread = threading.Thread(target=thread_render, kwargs={'device': device})
|
rthread = threading.Thread(target=thread_render, kwargs={'device': device})
|
||||||
rthread.daemon = True
|
rthread.daemon = True
|
||||||
@ -407,7 +401,7 @@ def start_render_thread(device):
|
|||||||
timeout = DEVICE_START_TIMEOUT
|
timeout = DEVICE_START_TIMEOUT
|
||||||
while not rthread.is_alive() or not rthread in weak_thread_data or not 'device' in weak_thread_data[rthread]:
|
while not rthread.is_alive() or not rthread in weak_thread_data or not 'device' in weak_thread_data[rthread]:
|
||||||
if rthread in weak_thread_data and 'error' in weak_thread_data[rthread]:
|
if rthread in weak_thread_data and 'error' in weak_thread_data[rthread]:
|
||||||
print(rthread, device, 'error:', weak_thread_data[rthread]['error'])
|
log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
|
||||||
return False
|
return False
|
||||||
if timeout <= 0:
|
if timeout <= 0:
|
||||||
return False
|
return False
|
||||||
@ -419,11 +413,11 @@ def stop_render_thread(device):
|
|||||||
try:
|
try:
|
||||||
device_manager.validate_device_id(device, log_prefix='stop_render_thread')
|
device_manager.validate_device_id(device, log_prefix='stop_render_thread')
|
||||||
except:
|
except:
|
||||||
print(traceback.format_exc())
|
log.error(traceback.format_exc())
|
||||||
return False
|
return False
|
||||||
|
|
||||||
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('stop_render_thread' + ERR_LOCK_FAILED)
|
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('stop_render_thread' + ERR_LOCK_FAILED)
|
||||||
print('Stopping Rendering Thread on device', device)
|
log.info(f'Stopping Rendering Thread on device: {device}')
|
||||||
|
|
||||||
try:
|
try:
|
||||||
thread_to_remove = None
|
thread_to_remove = None
|
||||||
@ -446,82 +440,51 @@ def stop_render_thread(device):
|
|||||||
|
|
||||||
def update_render_threads(render_devices, active_devices):
|
def update_render_threads(render_devices, active_devices):
|
||||||
devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
|
devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
|
||||||
print('devices_to_start', devices_to_start)
|
log.debug(f'devices_to_start: {devices_to_start}')
|
||||||
print('devices_to_stop', devices_to_stop)
|
log.debug(f'devices_to_stop: {devices_to_stop}')
|
||||||
|
|
||||||
for device in devices_to_stop:
|
for device in devices_to_stop:
|
||||||
if is_alive(device) <= 0:
|
if is_alive(device) <= 0:
|
||||||
print(device, 'is not alive')
|
log.debug(f'{device} is not alive')
|
||||||
continue
|
continue
|
||||||
if not stop_render_thread(device):
|
if not stop_render_thread(device):
|
||||||
print(device, 'could not stop render thread')
|
log.warn(f'{device} could not stop render thread')
|
||||||
|
|
||||||
for device in devices_to_start:
|
for device in devices_to_start:
|
||||||
if is_alive(device) >= 1:
|
if is_alive(device) >= 1:
|
||||||
print(device, 'already registered.')
|
log.debug(f'{device} already registered.')
|
||||||
continue
|
continue
|
||||||
if not start_render_thread(device):
|
if not start_render_thread(device):
|
||||||
print(device, 'failed to start.')
|
log.warn(f'{device} failed to start.')
|
||||||
|
|
||||||
if is_alive() <= 0: # No running devices, probably invalid user config.
|
if is_alive() <= 0: # No running devices, probably invalid user config.
|
||||||
raise EnvironmentError('ERROR: No active render devices! Please verify the "render_devices" value in config.json')
|
raise EnvironmentError('ERROR: No active render devices! Please verify the "render_devices" value in config.json')
|
||||||
|
|
||||||
print('active devices', get_devices()['active'])
|
log.debug(f"active devices: {get_devices()['active']}")
|
||||||
|
|
||||||
def shutdown_event(): # Signal render thread to close on shutdown
|
def shutdown_event(): # Signal render thread to close on shutdown
|
||||||
global current_state_error
|
global current_state_error
|
||||||
current_state_error = SystemExit('Application shutting down.')
|
current_state_error = SystemExit('Application shutting down.')
|
||||||
|
|
||||||
def render(req : ImageRequest):
|
def render(render_req: GenerateImageRequest, task_data: TaskData):
|
||||||
if is_alive() <= 0: # Render thread is dead
|
current_thread_count = is_alive()
|
||||||
|
if current_thread_count <= 0: # Render thread is dead
|
||||||
raise ChildProcessError('Rendering thread has died.')
|
raise ChildProcessError('Rendering thread has died.')
|
||||||
|
|
||||||
# Alive, check if task in cache
|
# Alive, check if task in cache
|
||||||
task = task_cache.tryGet(req.session_id)
|
session = get_cached_session(task_data.session_id, update_ttl=True)
|
||||||
if task and not task.response and not task.error and not task.lock.locked():
|
pending_tasks = list(filter(lambda t: t.is_pending, session.tasks))
|
||||||
# Unstarted task pending, deny queueing more than one.
|
if current_thread_count < len(pending_tasks):
|
||||||
raise ConnectionRefusedError(f'Session {req.session_id} has an already pending task.')
|
raise ConnectionRefusedError(f'Session {task_data.session_id} already has {len(pending_tasks)} pending tasks out of {current_thread_count}.')
|
||||||
#
|
|
||||||
from . import runtime
|
|
||||||
r = Request()
|
|
||||||
r.session_id = req.session_id
|
|
||||||
r.prompt = req.prompt
|
|
||||||
r.negative_prompt = req.negative_prompt
|
|
||||||
r.init_image = req.init_image
|
|
||||||
r.mask = req.mask
|
|
||||||
r.num_outputs = req.num_outputs
|
|
||||||
r.num_inference_steps = req.num_inference_steps
|
|
||||||
r.guidance_scale = req.guidance_scale
|
|
||||||
r.width = req.width
|
|
||||||
r.height = req.height
|
|
||||||
r.seed = req.seed
|
|
||||||
r.prompt_strength = req.prompt_strength
|
|
||||||
r.sampler = req.sampler
|
|
||||||
# r.allow_nsfw = req.allow_nsfw
|
|
||||||
r.turbo = req.turbo
|
|
||||||
r.use_full_precision = req.use_full_precision
|
|
||||||
r.save_to_disk_path = req.save_to_disk_path
|
|
||||||
r.use_upscale: str = req.use_upscale
|
|
||||||
r.use_face_correction = req.use_face_correction
|
|
||||||
r.use_stable_diffusion_model = req.use_stable_diffusion_model
|
|
||||||
r.use_vae_model = req.use_vae_model
|
|
||||||
r.show_only_filtered_image = req.show_only_filtered_image
|
|
||||||
r.output_format = req.output_format
|
|
||||||
r.output_quality = req.output_quality
|
|
||||||
|
|
||||||
r.stream_progress_updates = True # the underlying implementation only supports streaming
|
new_task = RenderTask(render_req, task_data)
|
||||||
r.stream_image_progress = req.stream_image_progress
|
if session.put(new_task, TASK_TTL):
|
||||||
|
|
||||||
if not req.stream_progress_updates:
|
|
||||||
r.stream_image_progress = False
|
|
||||||
|
|
||||||
new_task = RenderTask(r)
|
|
||||||
|
|
||||||
if task_cache.put(r.session_id, new_task, TASK_TTL):
|
|
||||||
# Use twice the normal timeout for adding user requests.
|
# Use twice the normal timeout for adding user requests.
|
||||||
# Tries to force task_cache.put to fail before tasks_queue.put would.
|
# Tries to force session.put to fail before tasks_queue.put would.
|
||||||
if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
|
if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
|
||||||
try:
|
try:
|
||||||
tasks_queue.append(new_task)
|
tasks_queue.append(new_task)
|
||||||
|
idle_event.set()
|
||||||
return new_task
|
return new_task
|
||||||
finally:
|
finally:
|
||||||
manager_lock.release()
|
manager_lock.release()
|
87
ui/easydiffusion/types.py
Normal file
87
ui/easydiffusion/types.py
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
from pydantic import BaseModel
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
class GenerateImageRequest(BaseModel):
|
||||||
|
prompt: str = ""
|
||||||
|
negative_prompt: str = ""
|
||||||
|
|
||||||
|
seed: int = 42
|
||||||
|
width: int = 512
|
||||||
|
height: int = 512
|
||||||
|
|
||||||
|
num_outputs: int = 1
|
||||||
|
num_inference_steps: int = 50
|
||||||
|
guidance_scale: float = 7.5
|
||||||
|
|
||||||
|
init_image: Any = None
|
||||||
|
init_image_mask: Any = None
|
||||||
|
prompt_strength: float = 0.8
|
||||||
|
preserve_init_image_color_profile = False
|
||||||
|
|
||||||
|
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
||||||
|
hypernetwork_strength: float = 0
|
||||||
|
|
||||||
|
class TaskData(BaseModel):
|
||||||
|
request_id: str = None
|
||||||
|
session_id: str = "session"
|
||||||
|
save_to_disk_path: str = None
|
||||||
|
vram_usage_level: str = "balanced" # or "low" or "medium"
|
||||||
|
|
||||||
|
use_face_correction: str = None # or "GFPGANv1.3"
|
||||||
|
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
|
||||||
|
use_stable_diffusion_model: str = "sd-v1-4"
|
||||||
|
use_stable_diffusion_config: str = "v1-inference"
|
||||||
|
use_vae_model: str = None
|
||||||
|
use_hypernetwork_model: str = None
|
||||||
|
|
||||||
|
show_only_filtered_image: bool = False
|
||||||
|
output_format: str = "jpeg" # or "png"
|
||||||
|
output_quality: int = 75
|
||||||
|
metadata_output_format: str = "txt" # or "json"
|
||||||
|
stream_image_progress: bool = False
|
||||||
|
|
||||||
|
class Image:
|
||||||
|
data: str # base64
|
||||||
|
seed: int
|
||||||
|
is_nsfw: bool
|
||||||
|
path_abs: str = None
|
||||||
|
|
||||||
|
def __init__(self, data, seed):
|
||||||
|
self.data = data
|
||||||
|
self.seed = seed
|
||||||
|
|
||||||
|
def json(self):
|
||||||
|
return {
|
||||||
|
"data": self.data,
|
||||||
|
"seed": self.seed,
|
||||||
|
"path_abs": self.path_abs,
|
||||||
|
}
|
||||||
|
|
||||||
|
class Response:
|
||||||
|
render_request: GenerateImageRequest
|
||||||
|
task_data: TaskData
|
||||||
|
images: list
|
||||||
|
|
||||||
|
def __init__(self, render_request: GenerateImageRequest, task_data: TaskData, images: list):
|
||||||
|
self.render_request = render_request
|
||||||
|
self.task_data = task_data
|
||||||
|
self.images = images
|
||||||
|
|
||||||
|
def json(self):
|
||||||
|
del self.render_request.init_image
|
||||||
|
del self.render_request.init_image_mask
|
||||||
|
|
||||||
|
res = {
|
||||||
|
"status": 'succeeded',
|
||||||
|
"render_request": self.render_request.dict(),
|
||||||
|
"task_data": self.task_data.dict(),
|
||||||
|
"output": [],
|
||||||
|
}
|
||||||
|
|
||||||
|
for image in self.images:
|
||||||
|
res["output"].append(image.json())
|
||||||
|
|
||||||
|
return res
|
||||||
|
|
||||||
|
class UserInitiatedStop(Exception):
|
||||||
|
pass
|
8
ui/easydiffusion/utils/__init__.py
Normal file
8
ui/easydiffusion/utils/__init__.py
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
import logging
|
||||||
|
|
||||||
|
log = logging.getLogger('easydiffusion')
|
||||||
|
|
||||||
|
from .save_utils import (
|
||||||
|
save_images_to_disk,
|
||||||
|
get_printable_request,
|
||||||
|
)
|
79
ui/easydiffusion/utils/save_utils.py
Normal file
79
ui/easydiffusion/utils/save_utils.py
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
import os
|
||||||
|
import time
|
||||||
|
import base64
|
||||||
|
import re
|
||||||
|
|
||||||
|
from easydiffusion.types import TaskData, GenerateImageRequest
|
||||||
|
|
||||||
|
from sdkit.utils import save_images, save_dicts
|
||||||
|
|
||||||
|
filename_regex = re.compile('[^a-zA-Z0-9]')
|
||||||
|
|
||||||
|
# keep in sync with `ui/media/js/dnd.js`
|
||||||
|
TASK_TEXT_MAPPING = {
|
||||||
|
'prompt': 'Prompt',
|
||||||
|
'width': 'Width',
|
||||||
|
'height': 'Height',
|
||||||
|
'seed': 'Seed',
|
||||||
|
'num_inference_steps': 'Steps',
|
||||||
|
'guidance_scale': 'Guidance Scale',
|
||||||
|
'prompt_strength': 'Prompt Strength',
|
||||||
|
'use_face_correction': 'Use Face Correction',
|
||||||
|
'use_upscale': 'Use Upscaling',
|
||||||
|
'sampler_name': 'Sampler',
|
||||||
|
'negative_prompt': 'Negative Prompt',
|
||||||
|
'use_stable_diffusion_model': 'Stable Diffusion model',
|
||||||
|
'use_hypernetwork_model': 'Hypernetwork model',
|
||||||
|
'hypernetwork_strength': 'Hypernetwork Strength'
|
||||||
|
}
|
||||||
|
|
||||||
|
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
|
||||||
|
save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub('_', task_data.session_id))
|
||||||
|
metadata_entries = get_metadata_entries_for_request(req, task_data)
|
||||||
|
|
||||||
|
if task_data.show_only_filtered_image or filtered_images == images:
|
||||||
|
save_images(filtered_images, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.output_format, output_quality=task_data.output_quality)
|
||||||
|
save_dicts(metadata_entries, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.metadata_output_format)
|
||||||
|
else:
|
||||||
|
save_images(images, save_dir_path, file_name=make_filename_callback(req), output_format=task_data.output_format, output_quality=task_data.output_quality)
|
||||||
|
save_images(filtered_images, save_dir_path, file_name=make_filename_callback(req, suffix='filtered'), output_format=task_data.output_format, output_quality=task_data.output_quality)
|
||||||
|
save_dicts(metadata_entries, save_dir_path, file_name=make_filename_callback(req, suffix='filtered'), output_format=task_data.metadata_output_format)
|
||||||
|
|
||||||
|
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
|
||||||
|
metadata = get_printable_request(req)
|
||||||
|
metadata.update({
|
||||||
|
'use_stable_diffusion_model': task_data.use_stable_diffusion_model,
|
||||||
|
'use_vae_model': task_data.use_vae_model,
|
||||||
|
'use_hypernetwork_model': task_data.use_hypernetwork_model,
|
||||||
|
'use_face_correction': task_data.use_face_correction,
|
||||||
|
'use_upscale': task_data.use_upscale,
|
||||||
|
})
|
||||||
|
|
||||||
|
# if text, format it in the text format expected by the UI
|
||||||
|
is_txt_format = (task_data.metadata_output_format.lower() == 'txt')
|
||||||
|
if is_txt_format:
|
||||||
|
metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}
|
||||||
|
|
||||||
|
entries = [metadata.copy() for _ in range(req.num_outputs)]
|
||||||
|
for i, entry in enumerate(entries):
|
||||||
|
entry['Seed' if is_txt_format else 'seed'] = req.seed + i
|
||||||
|
|
||||||
|
return entries
|
||||||
|
|
||||||
|
def get_printable_request(req: GenerateImageRequest):
|
||||||
|
metadata = req.dict()
|
||||||
|
del metadata['init_image']
|
||||||
|
del metadata['init_image_mask']
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
def make_filename_callback(req: GenerateImageRequest, suffix=None):
|
||||||
|
def make_filename(i):
|
||||||
|
img_id = base64.b64encode(int(time.time()+i).to_bytes(8, 'big')).decode() # Generate unique ID based on time.
|
||||||
|
img_id = img_id.translate({43:None, 47:None, 61:None})[-8:] # Remove + / = and keep last 8 chars.
|
||||||
|
|
||||||
|
prompt_flattened = filename_regex.sub('_', req.prompt)[:50]
|
||||||
|
name = f"{prompt_flattened}_{img_id}"
|
||||||
|
name = name if suffix is None else f'{name}_{suffix}'
|
||||||
|
return name
|
||||||
|
|
||||||
|
return make_filename
|
@ -24,8 +24,8 @@
|
|||||||
<div id="top-nav">
|
<div id="top-nav">
|
||||||
<div id="logo">
|
<div id="logo">
|
||||||
<h1>
|
<h1>
|
||||||
Stable Diffusion UI
|
Easy Diffusion
|
||||||
<small>v2.4.18 <span id="updateBranchLabel"></span></small>
|
<small>v2.5.0 <span id="updateBranchLabel"></span></small>
|
||||||
</h1>
|
</h1>
|
||||||
</div>
|
</div>
|
||||||
<div id="server-status">
|
<div id="server-status">
|
||||||
@ -55,7 +55,7 @@
|
|||||||
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->
|
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->
|
||||||
<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
|
<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
|
||||||
Negative Prompt
|
Negative Prompt
|
||||||
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about Negative Prompts</span></i></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Negative Prompts</span></i></a>
|
||||||
<small>(optional)</small>
|
<small>(optional)</small>
|
||||||
</label>
|
</label>
|
||||||
<div class="collapsible-content">
|
<div class="collapsible-content">
|
||||||
@ -95,12 +95,16 @@
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div id="editor-inputs-tags-container" class="row">
|
<div id="editor-inputs-tags-container" class="row">
|
||||||
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
|
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">click an Image Modifier to remove it, use Ctrl+Mouse Wheel to adjust its weight</span></i>:</label>
|
||||||
<div id="editor-inputs-tags-list"></div>
|
<div id="editor-inputs-tags-list"></div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<button id="makeImage" class="primaryButton">Make Image</button>
|
<button id="makeImage" class="primaryButton">Make Image</button>
|
||||||
<button id="stopImage" class="secondaryButton">Stop All</button>
|
<div id="render-buttons">
|
||||||
|
<button id="stopImage" class="secondaryButton">Stop All</button>
|
||||||
|
<button id="pause"><i class="fa-solid fa-pause"></i> Pause All</button>
|
||||||
|
<button id="resume"><i class="fa-solid fa-play"></i> Resume</button>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<span class="line-separator"></span>
|
<span class="line-separator"></span>
|
||||||
@ -109,7 +113,7 @@
|
|||||||
<h4 class="collapsible">
|
<h4 class="collapsible">
|
||||||
Image Settings
|
Image Settings
|
||||||
<i id="reset-image-settings" class="fa-solid fa-arrow-rotate-left section-button">
|
<i id="reset-image-settings" class="fa-solid fa-arrow-rotate-left section-button">
|
||||||
<span class="simple-tooltip right">
|
<span class="simple-tooltip top-left">
|
||||||
Reset Image Settings
|
Reset Image Settings
|
||||||
</span>
|
</span>
|
||||||
</i>
|
</i>
|
||||||
@ -123,26 +127,36 @@
|
|||||||
<select id="stable_diffusion_model" name="stable_diffusion_model">
|
<select id="stable_diffusion_model" name="stable_diffusion_model">
|
||||||
<!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
|
<!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
|
||||||
</select>
|
</select>
|
||||||
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about custom models</span></i></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about custom models</span></i></a>
|
||||||
</td></tr>
|
</td></tr>
|
||||||
|
<!-- <tr id="modelConfigSelection" class="pl-5"><td><label for="model_config">Model Config:</i></label></td><td>
|
||||||
|
<select id="model_config" name="model_config">
|
||||||
|
</select>
|
||||||
|
</td></tr> -->
|
||||||
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
|
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
|
||||||
<select id="vae_model" name="vae_model">
|
<select id="vae_model" name="vae_model">
|
||||||
<!-- <option value="" selected>None</option> -->
|
<!-- <option value="" selected>None</option> -->
|
||||||
</select>
|
</select>
|
||||||
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about VAEs</span></i></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
|
||||||
</td></tr>
|
</td></tr>
|
||||||
<tr id="samplerSelection" class="pl-5"><td><label for="sampler">Sampler:</label></td><td>
|
<tr id="samplerSelection" class="pl-5"><td><label for="sampler_name">Sampler:</label></td><td>
|
||||||
<select id="sampler" name="sampler">
|
<select id="sampler_name" name="sampler_name">
|
||||||
<option value="plms">plms</option>
|
<option value="plms">PLMS</option>
|
||||||
<option value="ddim">ddim</option>
|
<option value="ddim">DDIM</option>
|
||||||
<option value="heun">heun</option>
|
<option value="heun">Heun</option>
|
||||||
<option value="euler">euler</option>
|
<option value="euler">Euler</option>
|
||||||
<option value="euler_a" selected>euler_a</option>
|
<option value="euler_a" selected>Euler Ancestral</option>
|
||||||
<option value="dpm2">dpm2</option>
|
<option value="dpm2">DPM2</option>
|
||||||
<option value="dpm2_a">dpm2_a</option>
|
<option value="dpm2_a">DPM2 Ancestral</option>
|
||||||
<option value="lms">lms</option>
|
<option value="lms">LMS</option>
|
||||||
|
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
|
||||||
|
<option value="dpmpp_2s_a" selected>DPM++ 2s Ancestral</option>
|
||||||
|
<option value="dpmpp_2m">DPM++ 2m</option>
|
||||||
|
<option value="dpmpp_sde">DPM++ SDE</option>
|
||||||
|
<option value="dpm_fast">DPM Fast</option>
|
||||||
|
<option value="dpm_adaptive">DPM Adaptive</option>
|
||||||
</select>
|
</select>
|
||||||
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about samplers</span></i></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
|
||||||
</td></tr>
|
</td></tr>
|
||||||
<tr class="pl-5"><td><label>Image Size: </label></td><td>
|
<tr class="pl-5"><td><label>Image Size: </label></td><td>
|
||||||
<select id="width" name="width" value="512">
|
<select id="width" name="width" value="512">
|
||||||
@ -191,8 +205,17 @@
|
|||||||
<label for="height"><small>(height)</small></label>
|
<label for="height"><small>(height)</small></label>
|
||||||
</td></tr>
|
</td></tr>
|
||||||
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||||
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||||
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr></span>
|
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
|
||||||
|
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
|
||||||
|
<select id="hypernetwork_model" name="hypernetwork_model">
|
||||||
|
<!-- <option value="" selected>None</option> -->
|
||||||
|
</select>
|
||||||
|
</td></tr>
|
||||||
|
<tr id="hypernetwork_strength_container" class="pl-5">
|
||||||
|
<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
|
||||||
|
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
|
||||||
|
</tr>
|
||||||
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
|
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
|
||||||
<select id="output_format" name="output_format">
|
<select id="output_format" name="output_format">
|
||||||
<option value="jpeg" selected>jpeg</option>
|
<option value="jpeg" selected>jpeg</option>
|
||||||
@ -200,13 +223,14 @@
|
|||||||
</select>
|
</select>
|
||||||
</td></tr>
|
</td></tr>
|
||||||
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">JPEG Quality:</label></td><td>
|
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">JPEG Quality:</label></td><td>
|
||||||
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
|
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
|
||||||
</td></tr>
|
</td></tr>
|
||||||
</table></div>
|
</table></div>
|
||||||
|
|
||||||
<div><ul>
|
<div><ul>
|
||||||
<li><b class="settings-subheader">Render Settings</b></li>
|
<li><b class="settings-subheader">Render Settings</b></li>
|
||||||
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
|
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
|
||||||
|
<li id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></li>
|
||||||
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
|
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
|
||||||
<li class="pl-5">
|
<li class="pl-5">
|
||||||
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
|
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
|
||||||
@ -272,7 +296,7 @@
|
|||||||
<tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
|
<tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
|
||||||
<tr><td></td><td> </td></tr>
|
<tr><td></td><td> </td></tr>
|
||||||
<tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
|
<tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
|
||||||
<tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
|
<tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
|
||||||
</table>
|
</table>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@ -388,10 +412,13 @@
|
|||||||
</div>
|
</div>
|
||||||
</body>
|
</body>
|
||||||
<script src="media/js/utils.js"></script>
|
<script src="media/js/utils.js"></script>
|
||||||
|
<script src="media/js/engine.js"></script>
|
||||||
<script src="media/js/parameters.js"></script>
|
<script src="media/js/parameters.js"></script>
|
||||||
<script src="media/js/plugins.js"></script>
|
<script src="media/js/plugins.js"></script>
|
||||||
|
|
||||||
<script src="media/js/image-modifiers.js"></script>
|
<script src="media/js/image-modifiers.js"></script>
|
||||||
<script src="media/js/auto-save.js"></script>
|
<script src="media/js/auto-save.js"></script>
|
||||||
|
|
||||||
<script src="media/js/main.js"></script>
|
<script src="media/js/main.js"></script>
|
||||||
<script src="media/js/themes.js"></script>
|
<script src="media/js/themes.js"></script>
|
||||||
<script src="media/js/dnd.js"></script>
|
<script src="media/js/dnd.js"></script>
|
||||||
@ -400,14 +427,17 @@
|
|||||||
async function init() {
|
async function init() {
|
||||||
await initSettings()
|
await initSettings()
|
||||||
await getModels()
|
await getModels()
|
||||||
await getDiskPath()
|
|
||||||
await getAppConfig()
|
await getAppConfig()
|
||||||
await loadUIPlugins()
|
await loadUIPlugins()
|
||||||
await loadModifiers()
|
await loadModifiers()
|
||||||
await getSystemInfo()
|
await getSystemInfo()
|
||||||
|
|
||||||
setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
|
SD.init({
|
||||||
healthCheck()
|
events: {
|
||||||
|
statusChange: setServerStatus
|
||||||
|
, idle: onIdle
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
playSound()
|
playSound()
|
||||||
}
|
}
|
||||||
|
10
ui/main.py
Normal file
10
ui/main.py
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
from easydiffusion import model_manager, app, server
|
||||||
|
from easydiffusion.server import server_api # required for uvicorn
|
||||||
|
|
||||||
|
# Init the app
|
||||||
|
model_manager.init()
|
||||||
|
app.init()
|
||||||
|
server.init()
|
||||||
|
|
||||||
|
# start the browser ui
|
||||||
|
app.open_browser()
|
@ -136,6 +136,10 @@
|
|||||||
background: var(--background-color3);
|
background: var(--background-color3);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.editor-controls-right .image-editor-button {
|
||||||
|
margin-bottom: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
#init_image_button_inpaint .input-toggle {
|
#init_image_button_inpaint .input-toggle {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
left: 16px;
|
left: 16px;
|
||||||
@ -208,4 +212,4 @@
|
|||||||
}
|
}
|
||||||
.image-editor-popup h4 {
|
.image-editor-popup h4 {
|
||||||
text-align: left;
|
text-align: left;
|
||||||
}
|
}
|
||||||
|
@ -139,7 +139,7 @@ code {
|
|||||||
padding: 16px;
|
padding: 16px;
|
||||||
display: flex;
|
display: flex;
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
flex: 0 0 370pt;
|
flex: 0 0 380pt;
|
||||||
}
|
}
|
||||||
#editor label {
|
#editor label {
|
||||||
font-weight: normal;
|
font-weight: normal;
|
||||||
@ -191,15 +191,29 @@ code {
|
|||||||
background: rgb(132, 8, 0);
|
background: rgb(132, 8, 0);
|
||||||
border: 2px solid rgb(122, 29, 0);
|
border: 2px solid rgb(122, 29, 0);
|
||||||
color: rgb(255, 221, 255);
|
color: rgb(255, 221, 255);
|
||||||
width: 100%;
|
|
||||||
height: 30pt;
|
height: 30pt;
|
||||||
border-radius: 6px;
|
border-radius: 6px;
|
||||||
display: none;
|
flex-grow: 2;
|
||||||
margin-top: 2pt;
|
|
||||||
}
|
}
|
||||||
#stopImage:hover {
|
#stopImage:hover {
|
||||||
background: rgb(177, 27, 0);
|
background: rgb(177, 27, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div#render-buttons {
|
||||||
|
gap: 3px;
|
||||||
|
margin-top: 4px;
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
button#pause {
|
||||||
|
flex-grow: 1;
|
||||||
|
background: var(--accent-color);
|
||||||
|
}
|
||||||
|
button#resume {
|
||||||
|
flex-grow: 1;
|
||||||
|
background: var(--accent-color);
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
.flex-container {
|
.flex-container {
|
||||||
display: flex;
|
display: flex;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
@ -265,7 +279,7 @@ img {
|
|||||||
}
|
}
|
||||||
.preview-prompt {
|
.preview-prompt {
|
||||||
font-size: 13pt;
|
font-size: 13pt;
|
||||||
margin-bottom: 10pt;
|
display: inline;
|
||||||
}
|
}
|
||||||
#coffeeButton {
|
#coffeeButton {
|
||||||
height: 23px;
|
height: 23px;
|
||||||
@ -391,14 +405,34 @@ img {
|
|||||||
.imageTaskContainer > div > .collapsible-handle {
|
.imageTaskContainer > div > .collapsible-handle {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
.dropTargetBefore::before{
|
||||||
|
content: "";
|
||||||
|
border: 1px solid #fff;
|
||||||
|
margin-bottom: -2px;
|
||||||
|
display: block;
|
||||||
|
box-shadow: 0 0 5px #fff;
|
||||||
|
transform: translate(0px, -14px);
|
||||||
|
}
|
||||||
|
.dropTargetAfter::after{
|
||||||
|
content: "";
|
||||||
|
border: 1px solid #fff;
|
||||||
|
margin-bottom: -2px;
|
||||||
|
display: block;
|
||||||
|
box-shadow: 0 0 5px #fff;
|
||||||
|
transform: translate(0px, 14px);
|
||||||
|
}
|
||||||
|
.drag-handle {
|
||||||
|
margin-right: 6px;
|
||||||
|
cursor: move;
|
||||||
|
}
|
||||||
.taskStatusLabel {
|
.taskStatusLabel {
|
||||||
float: left;
|
|
||||||
font-size: 8pt;
|
font-size: 8pt;
|
||||||
background:var(--background-color2);
|
background:var(--background-color2);
|
||||||
border: 1px solid rgb(61, 62, 66);
|
border: 1px solid rgb(61, 62, 66);
|
||||||
padding: 2pt 4pt;
|
padding: 2pt 4pt;
|
||||||
border-radius: 2pt;
|
border-radius: 2pt;
|
||||||
margin-right: 5pt;
|
margin-right: 5pt;
|
||||||
|
display: inline;
|
||||||
}
|
}
|
||||||
.activeTaskLabel {
|
.activeTaskLabel {
|
||||||
background:rgb(0, 90, 30);
|
background:rgb(0, 90, 30);
|
||||||
@ -448,6 +482,7 @@ img {
|
|||||||
font-size: 10pt;
|
font-size: 10pt;
|
||||||
color: #aaa;
|
color: #aaa;
|
||||||
margin-bottom: 5pt;
|
margin-bottom: 5pt;
|
||||||
|
margin-top: 5pt;
|
||||||
}
|
}
|
||||||
.img-batch {
|
.img-batch {
|
||||||
display: inline;
|
display: inline;
|
||||||
@ -893,6 +928,15 @@ input::file-selector-button {
|
|||||||
transform: translate(-50%, 100%);
|
transform: translate(-50%, 100%);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.simple-tooltip.top-left {
|
||||||
|
top: 0px;
|
||||||
|
left: 0px;
|
||||||
|
transform: translate(calc(-100% + 15%), calc(-100% + 15%));
|
||||||
|
}
|
||||||
|
:hover > .simple-tooltip.top-left {
|
||||||
|
transform: translate(-80%, -100%);
|
||||||
|
}
|
||||||
|
|
||||||
/* PROGRESS BAR */
|
/* PROGRESS BAR */
|
||||||
.progress-bar {
|
.progress-bar {
|
||||||
background: var(--background-color3);
|
background: var(--background-color3);
|
||||||
@ -901,6 +945,7 @@ input::file-selector-button {
|
|||||||
height: 16px;
|
height: 16px;
|
||||||
position: relative;
|
position: relative;
|
||||||
transition: 0.25s 1s border, 0.25s 1s height;
|
transition: 0.25s 1s border, 0.25s 1s height;
|
||||||
|
clear: both;
|
||||||
}
|
}
|
||||||
.progress-bar > div {
|
.progress-bar > div {
|
||||||
background: var(--accent-color);
|
background: var(--accent-color);
|
||||||
@ -1052,6 +1097,27 @@ button:active {
|
|||||||
left: 1px;
|
left: 1px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div.task-initimg > img {
|
||||||
|
margin-right: 6px;
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
div.task-fs-initimage {
|
||||||
|
display: none;
|
||||||
|
# position: absolute;
|
||||||
|
}
|
||||||
|
div.task-initimg:hover div.task-fs-initimage {
|
||||||
|
display: block;
|
||||||
|
position: absolute;
|
||||||
|
z-index: 9999;
|
||||||
|
box-shadow: 0 0 30px #000;
|
||||||
|
margin-top:-64px;
|
||||||
|
}
|
||||||
|
div.top-right {
|
||||||
|
position: absolute;
|
||||||
|
top: 8px;
|
||||||
|
right: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
button#save-system-settings-btn {
|
button#save-system-settings-btn {
|
||||||
padding: 4pt 8pt;
|
padding: 4pt 8pt;
|
||||||
}
|
}
|
||||||
@ -1061,3 +1127,50 @@ button#save-system-settings-btn {
|
|||||||
#ip-info div {
|
#ip-info div {
|
||||||
line-height: 200%;
|
line-height: 200%;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* SCROLLBARS */
|
||||||
|
:root {
|
||||||
|
--scrollbar-width: 14px;
|
||||||
|
--scrollbar-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar {
|
||||||
|
width: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar-track {
|
||||||
|
border-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar-thumb {
|
||||||
|
background: --background-color2;
|
||||||
|
border-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar {
|
||||||
|
width: var(--scrollbar-width);
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-track {
|
||||||
|
box-shadow: inset 0 0 5px var(--input-border-color);
|
||||||
|
border-radius: var(--input-border-radius);
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-thumb {
|
||||||
|
background: var(--background-color2);
|
||||||
|
border-radius: var(--scrollbar-radius);
|
||||||
|
}
|
||||||
|
|
||||||
|
body.pause {
|
||||||
|
border: solid 12px var(--accent-color);
|
||||||
|
}
|
||||||
|
|
||||||
|
body.wait-pause {
|
||||||
|
animation: blinker 2s linear infinite;
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes blinker {
|
||||||
|
0% { border: solid 12px var(--accent-color); }
|
||||||
|
50% { border: solid 12px var(--background-color1); }
|
||||||
|
100% { border: solid 12px var(--accent-color); }
|
||||||
|
}
|
||||||
|
@ -14,12 +14,14 @@ const SETTINGS_IDS_LIST = [
|
|||||||
"num_outputs_parallel",
|
"num_outputs_parallel",
|
||||||
"stable_diffusion_model",
|
"stable_diffusion_model",
|
||||||
"vae_model",
|
"vae_model",
|
||||||
"sampler",
|
"hypernetwork_model",
|
||||||
|
"sampler_name",
|
||||||
"width",
|
"width",
|
||||||
"height",
|
"height",
|
||||||
"num_inference_steps",
|
"num_inference_steps",
|
||||||
"guidance_scale",
|
"guidance_scale",
|
||||||
"prompt_strength",
|
"prompt_strength",
|
||||||
|
"hypernetwork_strength",
|
||||||
"output_format",
|
"output_format",
|
||||||
"output_quality",
|
"output_quality",
|
||||||
"negative_prompt",
|
"negative_prompt",
|
||||||
@ -34,10 +36,11 @@ const SETTINGS_IDS_LIST = [
|
|||||||
"save_to_disk",
|
"save_to_disk",
|
||||||
"diskPath",
|
"diskPath",
|
||||||
"sound_toggle",
|
"sound_toggle",
|
||||||
"turbo",
|
"vram_usage_level",
|
||||||
"use_full_precision",
|
|
||||||
"confirm_dangerous_actions",
|
"confirm_dangerous_actions",
|
||||||
"auto_save_settings"
|
"metadata_output_format",
|
||||||
|
"auto_save_settings",
|
||||||
|
"apply_color_correction"
|
||||||
]
|
]
|
||||||
|
|
||||||
const IGNORE_BY_DEFAULT = [
|
const IGNORE_BY_DEFAULT = [
|
||||||
@ -129,7 +132,7 @@ function loadSettings() {
|
|||||||
var saved_settings_text = localStorage.getItem(SETTINGS_KEY)
|
var saved_settings_text = localStorage.getItem(SETTINGS_KEY)
|
||||||
if (saved_settings_text) {
|
if (saved_settings_text) {
|
||||||
var saved_settings = JSON.parse(saved_settings_text)
|
var saved_settings = JSON.parse(saved_settings_text)
|
||||||
if (saved_settings.find(s => s.key == "auto_save_settings").value == false) {
|
if (saved_settings.find(s => s.key == "auto_save_settings")?.value == false) {
|
||||||
setSetting("auto_save_settings", false)
|
setSetting("auto_save_settings", false)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -275,7 +278,6 @@ function tryLoadOldSettings() {
|
|||||||
"soundEnabled": "sound_toggle",
|
"soundEnabled": "sound_toggle",
|
||||||
"saveToDisk": "save_to_disk",
|
"saveToDisk": "save_to_disk",
|
||||||
"useCPU": "use_cpu",
|
"useCPU": "use_cpu",
|
||||||
"useFullPrecision": "use_full_precision",
|
|
||||||
"useTurboMode": "turbo",
|
"useTurboMode": "turbo",
|
||||||
"diskPath": "diskPath",
|
"diskPath": "diskPath",
|
||||||
"useFaceCorrection": "use_face_correction",
|
"useFaceCorrection": "use_face_correction",
|
||||||
|
@ -25,6 +25,7 @@ function parseBoolean(stringValue) {
|
|||||||
case "no":
|
case "no":
|
||||||
case "off":
|
case "off":
|
||||||
case "0":
|
case "0":
|
||||||
|
case "none":
|
||||||
case null:
|
case null:
|
||||||
case undefined:
|
case undefined:
|
||||||
return false;
|
return false;
|
||||||
@ -160,9 +161,9 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
|
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
sampler: { name: 'Sampler',
|
sampler_name: { name: 'Sampler',
|
||||||
setUI: (sampler) => {
|
setUI: (sampler_name) => {
|
||||||
samplerField.value = sampler
|
samplerField.value = sampler_name
|
||||||
},
|
},
|
||||||
readUI: () => samplerField.value,
|
readUI: () => samplerField.value,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
@ -171,7 +172,7 @@ const TASK_MAPPING = {
|
|||||||
setUI: (use_stable_diffusion_model) => {
|
setUI: (use_stable_diffusion_model) => {
|
||||||
const oldVal = stableDiffusionModelField.value
|
const oldVal = stableDiffusionModelField.value
|
||||||
|
|
||||||
use_stable_diffusion_model = getModelPath(use_stable_diffusion_model, ['.ckpt'])
|
use_stable_diffusion_model = getModelPath(use_stable_diffusion_model, ['.ckpt', '.safetensors'])
|
||||||
stableDiffusionModelField.value = use_stable_diffusion_model
|
stableDiffusionModelField.value = use_stable_diffusion_model
|
||||||
|
|
||||||
if (!stableDiffusionModelField.value) {
|
if (!stableDiffusionModelField.value) {
|
||||||
@ -184,6 +185,7 @@ const TASK_MAPPING = {
|
|||||||
use_vae_model: { name: 'VAE model',
|
use_vae_model: { name: 'VAE model',
|
||||||
setUI: (use_vae_model) => {
|
setUI: (use_vae_model) => {
|
||||||
const oldVal = vaeModelField.value
|
const oldVal = vaeModelField.value
|
||||||
|
use_vae_model = (use_vae_model === undefined || use_vae_model === null || use_vae_model === 'None' ? '' : use_vae_model)
|
||||||
|
|
||||||
if (use_vae_model !== '') {
|
if (use_vae_model !== '') {
|
||||||
use_vae_model = getModelPath(use_vae_model, ['.vae.pt', '.ckpt'])
|
use_vae_model = getModelPath(use_vae_model, ['.vae.pt', '.ckpt'])
|
||||||
@ -194,6 +196,29 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => vaeModelField.value,
|
readUI: () => vaeModelField.value,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
|
use_hypernetwork_model: { name: 'Hypernetwork model',
|
||||||
|
setUI: (use_hypernetwork_model) => {
|
||||||
|
const oldVal = hypernetworkModelField.value
|
||||||
|
use_hypernetwork_model = (use_hypernetwork_model === undefined || use_hypernetwork_model === null || use_hypernetwork_model === 'None' ? '' : use_hypernetwork_model)
|
||||||
|
|
||||||
|
if (use_hypernetwork_model !== '') {
|
||||||
|
use_hypernetwork_model = getModelPath(use_hypernetwork_model, ['.pt'])
|
||||||
|
use_hypernetwork_model = use_hypernetwork_model !== '' ? use_hypernetwork_model : oldVal
|
||||||
|
}
|
||||||
|
hypernetworkModelField.value = use_hypernetwork_model
|
||||||
|
hypernetworkModelField.dispatchEvent(new Event('change'))
|
||||||
|
},
|
||||||
|
readUI: () => hypernetworkModelField.value,
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
|
hypernetwork_strength: { name: 'Hypernetwork Strength',
|
||||||
|
setUI: (hypernetwork_strength) => {
|
||||||
|
hypernetworkStrengthField.value = hypernetwork_strength
|
||||||
|
updateHypernetworkStrengthSlider()
|
||||||
|
},
|
||||||
|
readUI: () => parseFloat(hypernetworkStrengthField.value),
|
||||||
|
parse: (val) => parseFloat(val)
|
||||||
|
},
|
||||||
|
|
||||||
num_outputs: { name: 'Parallel Images',
|
num_outputs: { name: 'Parallel Images',
|
||||||
setUI: (num_outputs) => {
|
setUI: (num_outputs) => {
|
||||||
@ -217,13 +242,6 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => turboField.checked,
|
readUI: () => turboField.checked,
|
||||||
parse: (val) => Boolean(val)
|
parse: (val) => Boolean(val)
|
||||||
},
|
},
|
||||||
use_full_precision: { name: 'Use Full Precision',
|
|
||||||
setUI: (use_full_precision) => {
|
|
||||||
useFullPrecisionField.checked = use_full_precision
|
|
||||||
},
|
|
||||||
readUI: () => useFullPrecisionField.checked,
|
|
||||||
parse: (val) => Boolean(val)
|
|
||||||
},
|
|
||||||
|
|
||||||
stream_image_progress: { name: 'Stream Image Progress',
|
stream_image_progress: { name: 'Stream Image Progress',
|
||||||
setUI: (stream_image_progress) => {
|
setUI: (stream_image_progress) => {
|
||||||
@ -328,6 +346,7 @@ function getModelPath(filename, extensions)
|
|||||||
}
|
}
|
||||||
|
|
||||||
const TASK_TEXT_MAPPING = {
|
const TASK_TEXT_MAPPING = {
|
||||||
|
prompt: 'Prompt',
|
||||||
width: 'Width',
|
width: 'Width',
|
||||||
height: 'Height',
|
height: 'Height',
|
||||||
seed: 'Seed',
|
seed: 'Seed',
|
||||||
@ -336,9 +355,11 @@ const TASK_TEXT_MAPPING = {
|
|||||||
prompt_strength: 'Prompt Strength',
|
prompt_strength: 'Prompt Strength',
|
||||||
use_face_correction: 'Use Face Correction',
|
use_face_correction: 'Use Face Correction',
|
||||||
use_upscale: 'Use Upscaling',
|
use_upscale: 'Use Upscaling',
|
||||||
sampler: 'Sampler',
|
sampler_name: 'Sampler',
|
||||||
negative_prompt: 'Negative Prompt',
|
negative_prompt: 'Negative Prompt',
|
||||||
use_stable_diffusion_model: 'Stable Diffusion model'
|
use_stable_diffusion_model: 'Stable Diffusion model',
|
||||||
|
use_hypernetwork_model: 'Hypernetwork model',
|
||||||
|
hypernetwork_strength: 'Hypernetwork Strength'
|
||||||
}
|
}
|
||||||
const afterPromptRe = /^\s*Width\s*:\s*\d+\s*(?:\r\n|\r|\n)+\s*Height\s*:\s*\d+\s*(\r\n|\r|\n)+Seed\s*:\s*\d+\s*$/igm
|
const afterPromptRe = /^\s*Width\s*:\s*\d+\s*(?:\r\n|\r|\n)+\s*Height\s*:\s*\d+\s*(\r\n|\r|\n)+Seed\s*:\s*\d+\s*$/igm
|
||||||
function parseTaskFromText(str) {
|
function parseTaskFromText(str) {
|
||||||
@ -386,6 +407,9 @@ async function parseContent(text) {
|
|||||||
if (text.startsWith('{') && text.endsWith('}')) {
|
if (text.startsWith('{') && text.endsWith('}')) {
|
||||||
try {
|
try {
|
||||||
const task = JSON.parse(text)
|
const task = JSON.parse(text)
|
||||||
|
if (!('reqBody' in task)) { // support the format saved to the disk, by the UI
|
||||||
|
task.reqBody = Object.assign({}, task)
|
||||||
|
}
|
||||||
restoreTaskToUI(task)
|
restoreTaskToUI(task)
|
||||||
return true
|
return true
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
@ -453,7 +477,6 @@ document.addEventListener("dragover", dragOverHandler)
|
|||||||
const TASK_REQ_NO_EXPORT = [
|
const TASK_REQ_NO_EXPORT = [
|
||||||
"use_cpu",
|
"use_cpu",
|
||||||
"turbo",
|
"turbo",
|
||||||
"use_full_precision",
|
|
||||||
"save_to_disk_path"
|
"save_to_disk_path"
|
||||||
]
|
]
|
||||||
const resetSettings = document.getElementById('reset-image-settings')
|
const resetSettings = document.getElementById('reset-image-settings')
|
||||||
@ -465,7 +488,7 @@ function checkReadTextClipboardPermission (result) {
|
|||||||
// PASTE ICON
|
// PASTE ICON
|
||||||
const pasteIcon = document.createElement('i')
|
const pasteIcon = document.createElement('i')
|
||||||
pasteIcon.className = 'fa-solid fa-paste section-button'
|
pasteIcon.className = 'fa-solid fa-paste section-button'
|
||||||
pasteIcon.innerHTML = `<span class="simple-tooltip right">Paste Image Settings</span>`
|
pasteIcon.innerHTML = `<span class="simple-tooltip top-left">Paste Image Settings</span>`
|
||||||
pasteIcon.addEventListener('click', async (event) => {
|
pasteIcon.addEventListener('click', async (event) => {
|
||||||
event.stopPropagation()
|
event.stopPropagation()
|
||||||
// Add css class 'active'
|
// Add css class 'active'
|
||||||
@ -505,7 +528,7 @@ function checkWriteToClipboardPermission (result) {
|
|||||||
// COPY ICON
|
// COPY ICON
|
||||||
const copyIcon = document.createElement('i')
|
const copyIcon = document.createElement('i')
|
||||||
copyIcon.className = 'fa-solid fa-clipboard section-button'
|
copyIcon.className = 'fa-solid fa-clipboard section-button'
|
||||||
copyIcon.innerHTML = `<span class="simple-tooltip right">Copy Image Settings</span>`
|
copyIcon.innerHTML = `<span class="simple-tooltip top-left">Copy Image Settings</span>`
|
||||||
copyIcon.addEventListener('click', (event) => {
|
copyIcon.addEventListener('click', (event) => {
|
||||||
event.stopPropagation()
|
event.stopPropagation()
|
||||||
// Add css class 'active'
|
// Add css class 'active'
|
||||||
|
1310
ui/media/js/engine.js
Normal file
1310
ui/media/js/engine.js
Normal file
File diff suppressed because it is too large
Load Diff
@ -105,7 +105,26 @@ const IMAGE_EDITOR_ACTIONS = [
|
|||||||
icon: "fa-solid fa-xmark",
|
icon: "fa-solid fa-xmark",
|
||||||
handler: (editor) => {
|
handler: (editor) => {
|
||||||
editor.ctx_current.clearRect(0, 0, editor.width, editor.height)
|
editor.ctx_current.clearRect(0, 0, editor.width, editor.height)
|
||||||
}
|
},
|
||||||
|
trackHistory: true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "undo",
|
||||||
|
name: "Undo",
|
||||||
|
icon: "fa-solid fa-rotate-left",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.history.undo()
|
||||||
|
},
|
||||||
|
trackHistory: false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "redo",
|
||||||
|
name: "Redo",
|
||||||
|
icon: "fa-solid fa-rotate-right",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.history.redo()
|
||||||
|
},
|
||||||
|
trackHistory: false
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -166,11 +185,12 @@ var IMAGE_EDITOR_SECTIONS = [
|
|||||||
name: "brush_size",
|
name: "brush_size",
|
||||||
title: "Brush Size",
|
title: "Brush Size",
|
||||||
default: 48,
|
default: 48,
|
||||||
options: [ 16, 24, 32, 48, 64 ],
|
options: [ 6, 12, 16, 24, 30, 40, 48, 64 ],
|
||||||
initElement: (element, option) => {
|
initElement: (element, option) => {
|
||||||
element.parentElement.style.flex = option
|
element.parentElement.style.flex = option
|
||||||
element.style.width = option + "px"
|
element.style.width = option + "px"
|
||||||
element.style.height = option + "px"
|
element.style.height = option + "px"
|
||||||
|
element.style['margin-right'] = '2px'
|
||||||
element.style["border-radius"] = (option / 2).toFixed() + "px"
|
element.style["border-radius"] = (option / 2).toFixed() + "px"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -384,6 +404,7 @@ class ImageEditor {
|
|||||||
|
|
||||||
if (this.inpainter) {
|
if (this.inpainter) {
|
||||||
this.selectOption("color", IMAGE_EDITOR_SECTIONS.find(s => s.name == "color").options.indexOf("#ffffff"))
|
this.selectOption("color", IMAGE_EDITOR_SECTIONS.find(s => s.name == "color").options.indexOf("#ffffff"))
|
||||||
|
this.selectOption("opacity", IMAGE_EDITOR_SECTIONS.find(s => s.name == "opacity").options.indexOf(0.4))
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize the right-side controls
|
// initialize the right-side controls
|
||||||
@ -434,13 +455,14 @@ class ImageEditor {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var max_size = Math.min(parseInt(window.innerWidth * 0.9), width, 768)
|
|
||||||
if (width > height) {
|
if (width > height) {
|
||||||
|
var max_size = Math.min(parseInt(window.innerWidth * 0.9), width, 768)
|
||||||
var multiplier = max_size / width
|
var multiplier = max_size / width
|
||||||
width = (multiplier * width).toFixed()
|
width = (multiplier * width).toFixed()
|
||||||
height = (multiplier * height).toFixed()
|
height = (multiplier * height).toFixed()
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
|
var max_size = Math.min(parseInt(window.innerHeight * 0.9), height, 768)
|
||||||
var multiplier = max_size / height
|
var multiplier = max_size / height
|
||||||
width = (multiplier * width).toFixed()
|
width = (multiplier * width).toFixed()
|
||||||
height = (multiplier * height).toFixed()
|
height = (multiplier * height).toFixed()
|
||||||
@ -523,7 +545,9 @@ class ImageEditor {
|
|||||||
}
|
}
|
||||||
runAction(action_id) {
|
runAction(action_id) {
|
||||||
var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == action_id)
|
var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == action_id)
|
||||||
this.history.pushAction(action_id)
|
if (action.trackHistory) {
|
||||||
|
this.history.pushAction(action_id)
|
||||||
|
}
|
||||||
action.handler(this)
|
action.handler(this)
|
||||||
}
|
}
|
||||||
setBrush(layer = null, options = null) {
|
setBrush(layer = null, options = null) {
|
||||||
@ -678,3 +702,5 @@ document.getElementById("init_image_button_draw").addEventListener("click", () =
|
|||||||
document.getElementById("init_image_button_inpaint").addEventListener("click", () => {
|
document.getElementById("init_image_button_inpaint").addEventListener("click", () => {
|
||||||
imageInpainter.show()
|
imageInpainter.show()
|
||||||
})
|
})
|
||||||
|
|
||||||
|
img2imgUnload() // no init image when the app starts
|
||||||
|
1121
ui/media/js/main.js
1121
ui/media/js/main.js
File diff suppressed because it is too large
Load Diff
@ -53,6 +53,23 @@ var PARAMETERS = [
|
|||||||
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
|
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: "metadata_output_format",
|
||||||
|
type: ParameterType.select,
|
||||||
|
label: "Metadata format",
|
||||||
|
note: "will be saved to disk in this format",
|
||||||
|
default: "txt",
|
||||||
|
options: [
|
||||||
|
{
|
||||||
|
value: "txt",
|
||||||
|
label: "txt"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
value: "json",
|
||||||
|
label: "json"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: "sound_toggle",
|
id: "sound_toggle",
|
||||||
type: ParameterType.checkbox,
|
type: ParameterType.checkbox,
|
||||||
@ -61,6 +78,13 @@ var PARAMETERS = [
|
|||||||
icon: "fa-volume-low",
|
icon: "fa-volume-low",
|
||||||
default: true,
|
default: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: "process_order_toggle",
|
||||||
|
type: ParameterType.checkbox,
|
||||||
|
label: "Process newest jobs first",
|
||||||
|
note: "reverse the normal processing order",
|
||||||
|
default: false,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: "ui_open_browser_on_start",
|
id: "ui_open_browser_on_start",
|
||||||
type: ParameterType.checkbox,
|
type: ParameterType.checkbox,
|
||||||
@ -70,12 +94,20 @@ var PARAMETERS = [
|
|||||||
default: true,
|
default: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: "turbo",
|
id: "vram_usage_level",
|
||||||
type: ParameterType.checkbox,
|
type: ParameterType.select,
|
||||||
label: "Turbo Mode",
|
label: "GPU Memory Usage",
|
||||||
note: "generates images faster, but uses an additional 1 GB of GPU memory",
|
note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
|
||||||
|
"<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
|
||||||
|
"<b>High:</b> fastest, maximum GPU memory usage</br>" +
|
||||||
|
"<b>Low:</b> slowest, force-used for GPUs with 4 GB (or less) memory",
|
||||||
icon: "fa-forward",
|
icon: "fa-forward",
|
||||||
default: true,
|
default: "balanced",
|
||||||
|
options: [
|
||||||
|
{value: "balanced", label: "Balanced"},
|
||||||
|
{value: "high", label: "High"},
|
||||||
|
{value: "low", label: "Low"}
|
||||||
|
],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: "use_cpu",
|
id: "use_cpu",
|
||||||
@ -98,14 +130,6 @@ var PARAMETERS = [
|
|||||||
note: "to process in parallel",
|
note: "to process in parallel",
|
||||||
default: false,
|
default: false,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
id: "use_full_precision",
|
|
||||||
type: ParameterType.checkbox,
|
|
||||||
label: "Use Full Precision",
|
|
||||||
note: "for GPU-only. warning: this will consume more VRAM",
|
|
||||||
icon: "fa-crosshairs",
|
|
||||||
default: false,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
id: "auto_save_settings",
|
id: "auto_save_settings",
|
||||||
type: ParameterType.checkbox,
|
type: ParameterType.checkbox,
|
||||||
@ -140,14 +164,6 @@ var PARAMETERS = [
|
|||||||
return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
|
return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
|
||||||
id: "test_sd2",
|
|
||||||
type: ParameterType.checkbox,
|
|
||||||
label: "Test SD 2.0",
|
|
||||||
note: "Experimental! High memory usage! GPU-only! Not the final version! Please restart the program after changing this.",
|
|
||||||
icon: "fa-fire",
|
|
||||||
default: false,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
id: "use_beta_channel",
|
id: "use_beta_channel",
|
||||||
type: ParameterType.checkbox,
|
type: ParameterType.checkbox,
|
||||||
@ -203,16 +219,14 @@ function initParameters() {
|
|||||||
|
|
||||||
initParameters()
|
initParameters()
|
||||||
|
|
||||||
let turboField = document.querySelector('#turbo')
|
let vramUsageLevelField = document.querySelector('#vram_usage_level')
|
||||||
let useCPUField = document.querySelector('#use_cpu')
|
let useCPUField = document.querySelector('#use_cpu')
|
||||||
let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
|
let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
|
||||||
let useGPUsField = document.querySelector('#use_gpus')
|
let useGPUsField = document.querySelector('#use_gpus')
|
||||||
let useFullPrecisionField = document.querySelector('#use_full_precision')
|
|
||||||
let saveToDiskField = document.querySelector('#save_to_disk')
|
let saveToDiskField = document.querySelector('#save_to_disk')
|
||||||
let diskPathField = document.querySelector('#diskPath')
|
let diskPathField = document.querySelector('#diskPath')
|
||||||
let listenToNetworkField = document.querySelector("#listen_to_network")
|
let listenToNetworkField = document.querySelector("#listen_to_network")
|
||||||
let listenPortField = document.querySelector("#listen_port")
|
let listenPortField = document.querySelector("#listen_port")
|
||||||
let testSD2Field = document.querySelector("#test_sd2")
|
|
||||||
let useBetaChannelField = document.querySelector("#use_beta_channel")
|
let useBetaChannelField = document.querySelector("#use_beta_channel")
|
||||||
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
|
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
|
||||||
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
|
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
|
||||||
@ -249,12 +263,6 @@ async function getAppConfig() {
|
|||||||
if (config.ui && config.ui.open_browser_on_start === false) {
|
if (config.ui && config.ui.open_browser_on_start === false) {
|
||||||
uiOpenBrowserOnStartField.checked = false
|
uiOpenBrowserOnStartField.checked = false
|
||||||
}
|
}
|
||||||
if ('test_sd2' in config) {
|
|
||||||
testSD2Field.checked = config['test_sd2']
|
|
||||||
}
|
|
||||||
|
|
||||||
let testSD2SettingEntry = getParameterSettingsEntry('test_sd2')
|
|
||||||
testSD2SettingEntry.style.display = (config.update_branch === 'beta' ? '' : 'none')
|
|
||||||
if (config.net && config.net.listen_to_network === false) {
|
if (config.net && config.net.listen_to_network === false) {
|
||||||
listenToNetworkField.checked = false
|
listenToNetworkField.checked = false
|
||||||
}
|
}
|
||||||
@ -320,20 +328,10 @@ autoPickGPUsField.addEventListener('click', function() {
|
|||||||
gpuSettingEntry.style.display = (this.checked ? 'none' : '')
|
gpuSettingEntry.style.display = (this.checked ? 'none' : '')
|
||||||
})
|
})
|
||||||
|
|
||||||
async function getDiskPath() {
|
async function setDiskPath(defaultDiskPath) {
|
||||||
try {
|
var diskPath = getSetting("diskPath")
|
||||||
var diskPath = getSetting("diskPath")
|
if (diskPath == '' || diskPath == undefined || diskPath == "undefined") {
|
||||||
if (diskPath == '' || diskPath == undefined || diskPath == "undefined") {
|
setSetting("diskPath", defaultDiskPath)
|
||||||
let res = await fetch('/get/output_dir')
|
|
||||||
if (res.status === 200) {
|
|
||||||
res = await res.json()
|
|
||||||
res = res.output_dir
|
|
||||||
|
|
||||||
setSetting("diskPath", res)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
console.log('error fetching output dir path', e)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -368,73 +366,69 @@ function setHostInfo(hosts) {
|
|||||||
|
|
||||||
async function getSystemInfo() {
|
async function getSystemInfo() {
|
||||||
try {
|
try {
|
||||||
let res = await fetch('/get/system_info')
|
const res = await SD.getSystemInfo()
|
||||||
if (res.status === 200) {
|
let devices = res['devices']
|
||||||
res = await res.json()
|
|
||||||
let devices = res['devices']
|
|
||||||
let hosts = res['hosts']
|
|
||||||
|
|
||||||
let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
|
let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
|
||||||
let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')
|
let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')
|
||||||
|
|
||||||
if (activeDeviceIds.length === 0) {
|
if (activeDeviceIds.length === 0) {
|
||||||
useCPUField.checked = true
|
useCPUField.checked = true
|
||||||
}
|
|
||||||
|
|
||||||
if (allDeviceIds.length < MIN_GPUS_TO_SHOW_SELECTION || useCPUField.checked) {
|
|
||||||
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
|
|
||||||
gpuSettingEntry.style.display = 'none'
|
|
||||||
let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
|
|
||||||
autoPickGPUSettingEntry.style.display = 'none'
|
|
||||||
}
|
|
||||||
|
|
||||||
if (allDeviceIds.length === 0) {
|
|
||||||
useCPUField.checked = true
|
|
||||||
useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
|
|
||||||
}
|
|
||||||
|
|
||||||
autoPickGPUsField.checked = (devices['config'] === 'auto')
|
|
||||||
|
|
||||||
useGPUsField.innerHTML = ''
|
|
||||||
allDeviceIds.forEach(device => {
|
|
||||||
let deviceName = devices['all'][device]['name']
|
|
||||||
let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
|
|
||||||
useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
|
|
||||||
})
|
|
||||||
|
|
||||||
if (autoPickGPUsField.checked) {
|
|
||||||
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
|
|
||||||
gpuSettingEntry.style.display = 'none'
|
|
||||||
} else {
|
|
||||||
$('#use_gpus').val(activeDeviceIds)
|
|
||||||
}
|
|
||||||
|
|
||||||
setDeviceInfo(devices)
|
|
||||||
setHostInfo(hosts)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (allDeviceIds.length < MIN_GPUS_TO_SHOW_SELECTION || useCPUField.checked) {
|
||||||
|
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
|
||||||
|
gpuSettingEntry.style.display = 'none'
|
||||||
|
let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
|
||||||
|
autoPickGPUSettingEntry.style.display = 'none'
|
||||||
|
}
|
||||||
|
|
||||||
|
if (allDeviceIds.length === 0) {
|
||||||
|
useCPUField.checked = true
|
||||||
|
useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
|
||||||
|
}
|
||||||
|
|
||||||
|
autoPickGPUsField.checked = (devices['config'] === 'auto')
|
||||||
|
|
||||||
|
useGPUsField.innerHTML = ''
|
||||||
|
allDeviceIds.forEach(device => {
|
||||||
|
let deviceName = devices['all'][device]['name']
|
||||||
|
let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
|
||||||
|
useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
|
||||||
|
})
|
||||||
|
|
||||||
|
if (autoPickGPUsField.checked) {
|
||||||
|
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
|
||||||
|
gpuSettingEntry.style.display = 'none'
|
||||||
|
} else {
|
||||||
|
$('#use_gpus').val(activeDeviceIds)
|
||||||
|
}
|
||||||
|
|
||||||
|
setDeviceInfo(devices)
|
||||||
|
setHostInfo(res['hosts'])
|
||||||
|
setDiskPath(res['default_output_dir'])
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
console.log('error fetching devices', e)
|
console.log('error fetching devices', e)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
saveSettingsBtn.addEventListener('click', function() {
|
saveSettingsBtn.addEventListener('click', function() {
|
||||||
let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
|
|
||||||
|
|
||||||
if (listenPortField.value == '') {
|
if (listenPortField.value == '') {
|
||||||
alert('The network port field must not be empty.')
|
alert('The network port field must not be empty.')
|
||||||
} else if (listenPortField.value<1 || listenPortField.value>65535) {
|
return
|
||||||
alert('The network port must be a number from 1 to 65535')
|
|
||||||
} else {
|
|
||||||
changeAppConfig({
|
|
||||||
'render_devices': getCurrentRenderDeviceSelection(),
|
|
||||||
'update_branch': updateBranch,
|
|
||||||
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
|
|
||||||
'listen_to_network': listenToNetworkField.checked,
|
|
||||||
'listen_port': listenPortField.value,
|
|
||||||
'test_sd2': testSD2Field.checked
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
if (listenPortField.value < 1 || listenPortField.value > 65535) {
|
||||||
|
alert('The network port must be a number from 1 to 65535')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
|
||||||
|
changeAppConfig({
|
||||||
|
'render_devices': getCurrentRenderDeviceSelection(),
|
||||||
|
'update_branch': updateBranch,
|
||||||
|
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
|
||||||
|
'listen_to_network': listenToNetworkField.checked,
|
||||||
|
'listen_port': listenPortField.value
|
||||||
|
})
|
||||||
saveSettingsBtn.classList.add('active')
|
saveSettingsBtn.classList.add('active')
|
||||||
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
|
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
|
||||||
})
|
})
|
||||||
|
@ -25,23 +25,47 @@ const PLUGINS = {
|
|||||||
* })
|
* })
|
||||||
*/
|
*/
|
||||||
IMAGE_INFO_BUTTONS: [],
|
IMAGE_INFO_BUTTONS: [],
|
||||||
MODIFIERS_LOAD: []
|
MODIFIERS_LOAD: [],
|
||||||
|
TASK_CREATE: [],
|
||||||
|
OUTPUTS_FORMATS: new ServiceContainer(
|
||||||
|
function png() { return (reqBody) => new SD.RenderTask(reqBody) }
|
||||||
|
, function jpeg() { return (reqBody) => new SD.RenderTask(reqBody) }
|
||||||
|
),
|
||||||
|
}
|
||||||
|
PLUGINS.OUTPUTS_FORMATS.register = function(...args) {
|
||||||
|
const service = ServiceContainer.prototype.register.apply(this, args)
|
||||||
|
if (typeof outputFormatField !== 'undefined') {
|
||||||
|
const newOption = document.createElement("option")
|
||||||
|
newOption.setAttribute("value", service.name)
|
||||||
|
newOption.innerText = service.name
|
||||||
|
outputFormatField.appendChild(newOption)
|
||||||
|
}
|
||||||
|
return service
|
||||||
|
}
|
||||||
|
|
||||||
|
function loadScript(url) {
|
||||||
|
const script = document.createElement('script')
|
||||||
|
const promiseSrc = new PromiseSource()
|
||||||
|
script.addEventListener('error', () => promiseSrc.reject(new Error(`Script "${url}" couldn't be loaded.`)))
|
||||||
|
script.addEventListener('load', () => promiseSrc.resolve(url))
|
||||||
|
script.src = url + '?t=' + Date.now()
|
||||||
|
|
||||||
|
console.log('loading script', url)
|
||||||
|
document.head.appendChild(script)
|
||||||
|
|
||||||
|
return promiseSrc.promise
|
||||||
}
|
}
|
||||||
|
|
||||||
async function loadUIPlugins() {
|
async function loadUIPlugins() {
|
||||||
try {
|
try {
|
||||||
let res = await fetch('/get/ui_plugins')
|
const res = await fetch('/get/ui_plugins')
|
||||||
if (res.status === 200) {
|
if (!res.ok) {
|
||||||
res = await res.json()
|
console.error(`Error HTTP${res.status} while loading plugins list. - ${res.statusText}`)
|
||||||
res.forEach(pluginPath => {
|
return
|
||||||
let script = document.createElement('script')
|
|
||||||
script.src = pluginPath + '?t=' + Date.now()
|
|
||||||
|
|
||||||
console.log('loading plugin', pluginPath)
|
|
||||||
|
|
||||||
document.head.appendChild(script)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
const plugins = await res.json()
|
||||||
|
const loadingPromises = plugins.map(loadScript)
|
||||||
|
return await Promise.allSettled(loadingPromises)
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
console.log('error fetching plugin paths', e)
|
console.log('error fetching plugin paths', e)
|
||||||
}
|
}
|
||||||
|
@ -1,32 +1,37 @@
|
|||||||
|
"use strict";
|
||||||
|
|
||||||
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
||||||
function getNextSibling(elem, selector) {
|
function getNextSibling(elem, selector) {
|
||||||
// Get the next sibling element
|
// Get the next sibling element
|
||||||
var sibling = elem.nextElementSibling
|
let sibling = elem.nextElementSibling
|
||||||
|
|
||||||
// If there's no selector, return the first sibling
|
// If there's no selector, return the first sibling
|
||||||
if (!selector) return sibling
|
if (!selector) {
|
||||||
|
return sibling
|
||||||
|
}
|
||||||
|
|
||||||
// If the sibling matches our selector, use it
|
// If the sibling matches our selector, use it
|
||||||
// If not, jump to the next sibling and continue the loop
|
// If not, jump to the next sibling and continue the loop
|
||||||
while (sibling) {
|
while (sibling) {
|
||||||
if (sibling.matches(selector)) return sibling
|
if (sibling.matches(selector)) {
|
||||||
|
return sibling
|
||||||
|
}
|
||||||
sibling = sibling.nextElementSibling
|
sibling = sibling.nextElementSibling
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/* Panel Stuff */
|
/* Panel Stuff */
|
||||||
|
|
||||||
// true = open
|
// true = open
|
||||||
var COLLAPSIBLES_INITIALIZED = false;
|
let COLLAPSIBLES_INITIALIZED = false;
|
||||||
const COLLAPSIBLES_KEY = "collapsibles";
|
const COLLAPSIBLES_KEY = "collapsibles";
|
||||||
const COLLAPSIBLE_PANELS = []; // filled in by createCollapsibles with all the elements matching .collapsible
|
const COLLAPSIBLE_PANELS = []; // filled in by createCollapsibles with all the elements matching .collapsible
|
||||||
|
|
||||||
// on-init call this for any panels that are marked open
|
// on-init call this for any panels that are marked open
|
||||||
function toggleCollapsible(element) {
|
function toggleCollapsible(element) {
|
||||||
var collapsibleHeader = element.querySelector(".collapsible");
|
const collapsibleHeader = element.querySelector(".collapsible");
|
||||||
var handle = element.querySelector(".collapsible-handle");
|
const handle = element.querySelector(".collapsible-handle");
|
||||||
collapsibleHeader.classList.toggle("active")
|
collapsibleHeader.classList.toggle("active")
|
||||||
let content = getNextSibling(collapsibleHeader, '.collapsible-content')
|
let content = getNextSibling(collapsibleHeader, '.collapsible-content')
|
||||||
if (!collapsibleHeader.classList.contains("active")) {
|
if (!collapsibleHeader.classList.contains("active")) {
|
||||||
@ -40,6 +45,7 @@ function toggleCollapsible(element) {
|
|||||||
handle.innerHTML = '➖' // minus
|
handle.innerHTML = '➖' // minus
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
document.dispatchEvent(new CustomEvent('collapsibleClick', { detail: collapsibleHeader }))
|
||||||
|
|
||||||
if (COLLAPSIBLES_INITIALIZED && COLLAPSIBLE_PANELS.includes(element)) {
|
if (COLLAPSIBLES_INITIALIZED && COLLAPSIBLE_PANELS.includes(element)) {
|
||||||
saveCollapsibles()
|
saveCollapsibles()
|
||||||
@ -47,16 +53,16 @@ function toggleCollapsible(element) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function saveCollapsibles() {
|
function saveCollapsibles() {
|
||||||
var values = {}
|
let values = {}
|
||||||
COLLAPSIBLE_PANELS.forEach(element => {
|
COLLAPSIBLE_PANELS.forEach(element => {
|
||||||
var value = element.querySelector(".collapsible").className.indexOf("active") !== -1
|
let value = element.querySelector(".collapsible").className.indexOf("active") !== -1
|
||||||
values[element.id] = value
|
values[element.id] = value
|
||||||
})
|
})
|
||||||
localStorage.setItem(COLLAPSIBLES_KEY, JSON.stringify(values))
|
localStorage.setItem(COLLAPSIBLES_KEY, JSON.stringify(values))
|
||||||
}
|
}
|
||||||
|
|
||||||
function createCollapsibles(node) {
|
function createCollapsibles(node) {
|
||||||
var save = false
|
let save = false
|
||||||
if (!node) {
|
if (!node) {
|
||||||
node = document
|
node = document
|
||||||
save = true
|
save = true
|
||||||
@ -81,7 +87,7 @@ function createCollapsibles(node) {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
if (save) {
|
if (save) {
|
||||||
var saved = localStorage.getItem(COLLAPSIBLES_KEY)
|
let saved = localStorage.getItem(COLLAPSIBLES_KEY)
|
||||||
if (!saved) {
|
if (!saved) {
|
||||||
saved = tryLoadOldCollapsibles();
|
saved = tryLoadOldCollapsibles();
|
||||||
}
|
}
|
||||||
@ -89,9 +95,9 @@ function createCollapsibles(node) {
|
|||||||
saveCollapsibles()
|
saveCollapsibles()
|
||||||
saved = localStorage.getItem(COLLAPSIBLES_KEY)
|
saved = localStorage.getItem(COLLAPSIBLES_KEY)
|
||||||
}
|
}
|
||||||
var values = JSON.parse(saved)
|
let values = JSON.parse(saved)
|
||||||
COLLAPSIBLE_PANELS.forEach(element => {
|
COLLAPSIBLE_PANELS.forEach(element => {
|
||||||
var value = element.querySelector(".collapsible").className.indexOf("active") !== -1
|
let value = element.querySelector(".collapsible").className.indexOf("active") !== -1
|
||||||
if (values[element.id] != value) {
|
if (values[element.id] != value) {
|
||||||
toggleCollapsible(element)
|
toggleCollapsible(element)
|
||||||
}
|
}
|
||||||
@ -101,17 +107,17 @@ function createCollapsibles(node) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function tryLoadOldCollapsibles() {
|
function tryLoadOldCollapsibles() {
|
||||||
var old_map = {
|
const old_map = {
|
||||||
"advancedPanelOpen": "editor-settings",
|
"advancedPanelOpen": "editor-settings",
|
||||||
"modifiersPanelOpen": "editor-modifiers",
|
"modifiersPanelOpen": "editor-modifiers",
|
||||||
"negativePromptPanelOpen": "editor-inputs-prompt"
|
"negativePromptPanelOpen": "editor-inputs-prompt"
|
||||||
};
|
};
|
||||||
if (localStorage.getItem(Object.keys(old_map)[0])) {
|
if (localStorage.getItem(Object.keys(old_map)[0])) {
|
||||||
var result = {};
|
let result = {};
|
||||||
Object.keys(old_map).forEach(key => {
|
Object.keys(old_map).forEach(key => {
|
||||||
var value = localStorage.getItem(key);
|
const value = localStorage.getItem(key);
|
||||||
if (value !== null) {
|
if (value !== null) {
|
||||||
result[old_map[key]] = value == true || value == "true"
|
result[old_map[key]] = (value == true || value == "true")
|
||||||
localStorage.removeItem(key)
|
localStorage.removeItem(key)
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@ -150,17 +156,17 @@ function millisecondsToStr(milliseconds) {
|
|||||||
return (number > 1) ? 's' : ''
|
return (number > 1) ? 's' : ''
|
||||||
}
|
}
|
||||||
|
|
||||||
var temp = Math.floor(milliseconds / 1000)
|
let temp = Math.floor(milliseconds / 1000)
|
||||||
var hours = Math.floor((temp %= 86400) / 3600)
|
let hours = Math.floor((temp %= 86400) / 3600)
|
||||||
var s = ''
|
let s = ''
|
||||||
if (hours) {
|
if (hours) {
|
||||||
s += hours + ' hour' + numberEnding(hours) + ' '
|
s += hours + ' hour' + numberEnding(hours) + ' '
|
||||||
}
|
}
|
||||||
var minutes = Math.floor((temp %= 3600) / 60)
|
let minutes = Math.floor((temp %= 3600) / 60)
|
||||||
if (minutes) {
|
if (minutes) {
|
||||||
s += minutes + ' minute' + numberEnding(minutes) + ' '
|
s += minutes + ' minute' + numberEnding(minutes) + ' '
|
||||||
}
|
}
|
||||||
var seconds = temp % 60
|
let seconds = temp % 60
|
||||||
if (!hours && minutes < 4 && seconds) {
|
if (!hours && minutes < 4 && seconds) {
|
||||||
s += seconds + ' second' + numberEnding(seconds)
|
s += seconds + ' second' + numberEnding(seconds)
|
||||||
}
|
}
|
||||||
@ -178,7 +184,7 @@ function BraceExpander() {
|
|||||||
function bracePair(tkns, iPosn, iNest, lstCommas) {
|
function bracePair(tkns, iPosn, iNest, lstCommas) {
|
||||||
if (iPosn >= tkns.length || iPosn < 0) return null;
|
if (iPosn >= tkns.length || iPosn < 0) return null;
|
||||||
|
|
||||||
var t = tkns[iPosn],
|
let t = tkns[iPosn],
|
||||||
n = (t === '{') ? (
|
n = (t === '{') ? (
|
||||||
iNest + 1
|
iNest + 1
|
||||||
) : (t === '}' ? (
|
) : (t === '}' ? (
|
||||||
@ -198,7 +204,7 @@ function BraceExpander() {
|
|||||||
function andTree(dctSofar, tkns) {
|
function andTree(dctSofar, tkns) {
|
||||||
if (!tkns.length) return [dctSofar, []];
|
if (!tkns.length) return [dctSofar, []];
|
||||||
|
|
||||||
var dctParse = dctSofar ? dctSofar : {
|
let dctParse = dctSofar ? dctSofar : {
|
||||||
fn: and,
|
fn: and,
|
||||||
args: []
|
args: []
|
||||||
},
|
},
|
||||||
@ -231,14 +237,14 @@ function BraceExpander() {
|
|||||||
// Parse of a PARADIGM subtree
|
// Parse of a PARADIGM subtree
|
||||||
function orTree(dctSofar, tkns, lstCommas) {
|
function orTree(dctSofar, tkns, lstCommas) {
|
||||||
if (!tkns.length) return [dctSofar, []];
|
if (!tkns.length) return [dctSofar, []];
|
||||||
var iLast = lstCommas.length;
|
let iLast = lstCommas.length;
|
||||||
|
|
||||||
return {
|
return {
|
||||||
fn: or,
|
fn: or,
|
||||||
args: splitsAt(
|
args: splitsAt(
|
||||||
lstCommas, tkns
|
lstCommas, tkns
|
||||||
).map(function (x, i) {
|
).map(function (x, i) {
|
||||||
var ts = x.slice(
|
let ts = x.slice(
|
||||||
1, i === iLast ? (
|
1, i === iLast ? (
|
||||||
-1
|
-1
|
||||||
) : void 0
|
) : void 0
|
||||||
@ -256,7 +262,7 @@ function BraceExpander() {
|
|||||||
// List of unescaped braces and commas, and remaining strings
|
// List of unescaped braces and commas, and remaining strings
|
||||||
function tokens(str) {
|
function tokens(str) {
|
||||||
// Filter function excludes empty splitting artefacts
|
// Filter function excludes empty splitting artefacts
|
||||||
var toS = function (x) {
|
let toS = function (x) {
|
||||||
return x.toString();
|
return x.toString();
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -270,7 +276,7 @@ function BraceExpander() {
|
|||||||
// PARSE TREE OPERATOR (1 of 2)
|
// PARSE TREE OPERATOR (1 of 2)
|
||||||
// Each possible head * each possible tail
|
// Each possible head * each possible tail
|
||||||
function and(args) {
|
function and(args) {
|
||||||
var lng = args.length,
|
let lng = args.length,
|
||||||
head = lng ? args[0] : null,
|
head = lng ? args[0] : null,
|
||||||
lstHead = "string" === typeof head ? (
|
lstHead = "string" === typeof head ? (
|
||||||
[head]
|
[head]
|
||||||
@ -330,7 +336,7 @@ function BraceExpander() {
|
|||||||
// s -> [s]
|
// s -> [s]
|
||||||
this.expand = function(s) {
|
this.expand = function(s) {
|
||||||
// BRACE EXPRESSION PARSED
|
// BRACE EXPRESSION PARSED
|
||||||
var dctParse = andTree(null, tokens(s))[0];
|
let dctParse = andTree(null, tokens(s))[0];
|
||||||
|
|
||||||
// ABSTRACT SYNTAX TREE LOGGED
|
// ABSTRACT SYNTAX TREE LOGGED
|
||||||
// console.log(pp(dctParse));
|
// console.log(pp(dctParse));
|
||||||
@ -341,21 +347,75 @@ function BraceExpander() {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/** Pause the execution of an async function until timer elapse.
|
||||||
|
* @Returns a promise that will resolve after the specified timeout.
|
||||||
|
*/
|
||||||
function asyncDelay(timeout) {
|
function asyncDelay(timeout) {
|
||||||
return new Promise(function(resolve, reject) {
|
return new Promise(function(resolve, reject) {
|
||||||
setTimeout(resolve, timeout, true)
|
setTimeout(resolve, timeout, true)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Simple debounce function, placeholder for the one in engine.js for simple use cases */
|
function PromiseSource() {
|
||||||
function debounce(func, timeout = 300){
|
const srcPromise = new Promise((resolve, reject) => {
|
||||||
let timer;
|
Object.defineProperties(this, {
|
||||||
return (...args) => {
|
resolve: { value: resolve, writable: false }
|
||||||
clearTimeout(timer);
|
, reject: { value: reject, writable: false }
|
||||||
timer = setTimeout(() => { func.apply(this, args); }, timeout);
|
})
|
||||||
};
|
})
|
||||||
|
Object.defineProperties(this, {
|
||||||
|
promise: {value: makeQuerablePromise(srcPromise), writable: false}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** A debounce is a higher-order function, which is a function that returns another function
|
||||||
|
* that, as long as it continues to be invoked, will not be triggered.
|
||||||
|
* The function will be called after it stops being called for N milliseconds.
|
||||||
|
* If `immediate` is passed, trigger the function on the leading edge, instead of the trailing.
|
||||||
|
* @Returns a promise that will resolve to func return value.
|
||||||
|
*/
|
||||||
|
function debounce (func, wait, immediate) {
|
||||||
|
if (typeof wait === "undefined") {
|
||||||
|
wait = 40
|
||||||
|
}
|
||||||
|
if (typeof wait !== "number") {
|
||||||
|
throw new Error("wait is not an number.")
|
||||||
|
}
|
||||||
|
let timeout = null
|
||||||
|
let lastPromiseSrc = new PromiseSource()
|
||||||
|
const applyFn = function(context, args) {
|
||||||
|
let result = undefined
|
||||||
|
try {
|
||||||
|
result = func.apply(context, args)
|
||||||
|
} catch (err) {
|
||||||
|
lastPromiseSrc.reject(err)
|
||||||
|
}
|
||||||
|
if (result instanceof Promise) {
|
||||||
|
result.then(lastPromiseSrc.resolve, lastPromiseSrc.reject)
|
||||||
|
} else {
|
||||||
|
lastPromiseSrc.resolve(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return function(...args) {
|
||||||
|
const callNow = Boolean(immediate && !timeout)
|
||||||
|
const context = this;
|
||||||
|
if (timeout) {
|
||||||
|
clearTimeout(timeout)
|
||||||
|
}
|
||||||
|
timeout = setTimeout(function () {
|
||||||
|
if (!immediate) {
|
||||||
|
applyFn(context, args)
|
||||||
|
}
|
||||||
|
lastPromiseSrc = new PromiseSource()
|
||||||
|
timeout = null
|
||||||
|
}, wait)
|
||||||
|
if (callNow) {
|
||||||
|
applyFn(context, args)
|
||||||
|
}
|
||||||
|
return lastPromiseSrc.promise
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function preventNonNumericalInput(e) {
|
function preventNonNumericalInput(e) {
|
||||||
e = e || window.event;
|
e = e || window.event;
|
||||||
@ -369,6 +429,83 @@ function preventNonNumericalInput(e) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Returns the global object for the current execution environement.
|
||||||
|
* @Returns window in a browser, global in node and self in a ServiceWorker.
|
||||||
|
* @Notes Allows unit testing and use of the engine outside of a browser.
|
||||||
|
*/
|
||||||
|
function getGlobal() {
|
||||||
|
if (typeof globalThis === 'object') {
|
||||||
|
return globalThis
|
||||||
|
} else if (typeof global === 'object') {
|
||||||
|
return global
|
||||||
|
} else if (typeof self === 'object') {
|
||||||
|
return self
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
return Function('return this')()
|
||||||
|
} catch {
|
||||||
|
// If the Function constructor fails, we're in a browser with eval disabled by CSP headers.
|
||||||
|
return window
|
||||||
|
} // Returns undefined if global can't be found.
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Check if x is an Array or a TypedArray.
|
||||||
|
* @Returns true if x is an Array or a TypedArray, false otherwise.
|
||||||
|
*/
|
||||||
|
function isArrayOrTypedArray(x) {
|
||||||
|
return Boolean(typeof x === 'object' && (Array.isArray(x) || (ArrayBuffer.isView(x) && !(x instanceof DataView))))
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeQuerablePromise(promise) {
|
||||||
|
if (typeof promise !== 'object') {
|
||||||
|
throw new Error('promise is not an object.')
|
||||||
|
}
|
||||||
|
if (!(promise instanceof Promise)) {
|
||||||
|
throw new Error('Argument is not a promise.')
|
||||||
|
}
|
||||||
|
// Don't modify a promise that's been already modified.
|
||||||
|
if ('isResolved' in promise || 'isRejected' in promise || 'isPending' in promise) {
|
||||||
|
return promise
|
||||||
|
}
|
||||||
|
let isPending = true
|
||||||
|
let isRejected = false
|
||||||
|
let rejectReason = undefined
|
||||||
|
let isResolved = false
|
||||||
|
let resolvedValue = undefined
|
||||||
|
const qurPro = promise.then(
|
||||||
|
function(val){
|
||||||
|
isResolved = true
|
||||||
|
isPending = false
|
||||||
|
resolvedValue = val
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
, function(reason) {
|
||||||
|
rejectReason = reason
|
||||||
|
isRejected = true
|
||||||
|
isPending = false
|
||||||
|
throw reason
|
||||||
|
}
|
||||||
|
)
|
||||||
|
Object.defineProperties(qurPro, {
|
||||||
|
'isResolved': {
|
||||||
|
get: () => isResolved
|
||||||
|
}
|
||||||
|
, 'resolvedValue': {
|
||||||
|
get: () => resolvedValue
|
||||||
|
}
|
||||||
|
, 'isPending': {
|
||||||
|
get: () => isPending
|
||||||
|
}
|
||||||
|
, 'isRejected': {
|
||||||
|
get: () => isRejected
|
||||||
|
}
|
||||||
|
, 'rejectReason': {
|
||||||
|
get: () => rejectReason
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return qurPro
|
||||||
|
}
|
||||||
|
|
||||||
/* inserts custom html to allow prettifying of inputs */
|
/* inserts custom html to allow prettifying of inputs */
|
||||||
function prettifyInputs(root_element) {
|
function prettifyInputs(root_element) {
|
||||||
root_element.querySelectorAll(`input[type="checkbox"]`).forEach(element => {
|
root_element.querySelectorAll(`input[type="checkbox"]`).forEach(element => {
|
||||||
@ -384,3 +521,156 @@ function prettifyInputs(root_element) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class GenericEventSource {
|
||||||
|
#events = {};
|
||||||
|
#types = []
|
||||||
|
constructor(...eventsTypes) {
|
||||||
|
if (Array.isArray(eventsTypes) && eventsTypes.length === 1 && Array.isArray(eventsTypes[0])) {
|
||||||
|
eventsTypes = eventsTypes[0]
|
||||||
|
}
|
||||||
|
this.#types.push(...eventsTypes)
|
||||||
|
}
|
||||||
|
get eventTypes() {
|
||||||
|
return this.#types
|
||||||
|
}
|
||||||
|
/** Add a new event listener
|
||||||
|
*/
|
||||||
|
addEventListener(name, handler) {
|
||||||
|
if (!this.#types.includes(name)) {
|
||||||
|
throw new Error('Invalid event name.')
|
||||||
|
}
|
||||||
|
if (this.#events.hasOwnProperty(name)) {
|
||||||
|
this.#events[name].push(handler)
|
||||||
|
} else {
|
||||||
|
this.#events[name] = [handler]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/** Remove the event listener
|
||||||
|
*/
|
||||||
|
removeEventListener(name, handler) {
|
||||||
|
if (!this.#events.hasOwnProperty(name)) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const index = this.#events[name].indexOf(handler)
|
||||||
|
if (index != -1) {
|
||||||
|
this.#events[name].splice(index, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fireEvent(name, ...args) {
|
||||||
|
if (!this.#types.includes(name)) {
|
||||||
|
throw new Error(`Event ${String(name)} missing from Events.types`)
|
||||||
|
}
|
||||||
|
if (!this.#events.hasOwnProperty(name)) {
|
||||||
|
return Promise.resolve()
|
||||||
|
}
|
||||||
|
if (!args || !args.length) {
|
||||||
|
args = []
|
||||||
|
}
|
||||||
|
const evs = this.#events[name]
|
||||||
|
if (evs.length <= 0) {
|
||||||
|
return Promise.resolve()
|
||||||
|
}
|
||||||
|
return Promise.allSettled(evs.map((callback) => {
|
||||||
|
try {
|
||||||
|
return Promise.resolve(callback.apply(SD, args))
|
||||||
|
} catch (ex) {
|
||||||
|
return Promise.reject(ex)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * A tiny dependency-injection container.
 *
 * Services are registered under a string name with a `definition` that is
 * either a plain value (returned as-is), a factory function (called per
 * lookup), or a class (instantiated with `new`). Optional `dependencies`
 * (service names) are resolved recursively and passed as arguments.
 * Marking a service `singleton: true` caches its first instance.
 */
class ServiceContainer {
    // name -> {definition, dependencies?, singleton?}
    #services = new Map()
    // name -> cached instance, for services registered as singletons
    #singletons = new Map()

    /** Each constructor argument is passed straight to register(). */
    constructor(...servicesParams) {
        servicesParams.forEach(this.register.bind(this))
    }

    /** The raw registration map (name -> options). */
    get services () {
        return this.#services
    }

    /** The singleton instance cache (name -> instance). */
    get singletons() {
        return this.#singletons
    }

    /**
     * Register a service.
     * @param params a {name, definition, dependencies?, singleton?} record,
     *     or a bare constructor (its function name becomes the service name).
     * @returns {Object} the stored options plus the service name.
     * @throws {Error} on malformed registrations.
     */
    register(params) {
        // Allow passing a constructor directly; normalize to a record.
        if (ServiceContainer.isConstructor(params)) {
            if (typeof params.name !== 'string') {
                throw new Error('params.name is not a string.')
            }
            params = {name: params.name, definition: params}
        }
        if (typeof params !== 'object') {
            throw new Error('params is not an object.')
        }
        for (const key of ['name', 'definition']) {
            if (!(key in params)) {
                console.error('Invalid service %o registration.', params)
                throw new Error(`params.${key} is not defined.`)
            }
        }
        const entry = {definition: params.definition}
        if ('dependencies' in params) {
            if (!Array.isArray(params.dependencies)) {
                throw new Error('params.dependencies is not an array.')
            }
            params.dependencies.forEach((dep) => {
                if (typeof dep !== 'string') {
                    throw new Error('dependency name is not a string.')
                }
            })
            entry.dependencies = params.dependencies
        }
        if (params.singleton) {
            entry.singleton = true
        }
        this.#services.set(params.name, entry)
        return Object.assign({name: params.name}, entry)
    }

    /**
     * Resolve a service by name.
     * @returns the plain value, a fresh instance, the cached singleton, or
     *     undefined when `name` was never registered.
     */
    get(name) {
        const entry = this.#services.get(name)
        if (!entry) {
            return
        }
        // Non-function definitions are stored values; hand them back as-is.
        if (!ServiceContainer.isConstructor(entry.definition)) {
            return entry.definition
        }
        if (!entry.singleton) {
            return this._createInstance(entry)
        }
        let instance = this.#singletons.get(name)
        if (!instance) {
            instance = this._createInstance(entry)
            this.#singletons.set(name, instance)
        }
        return instance
    }

    /** Resolve each declared dependency name through get(). */
    _getResolvedDependencies(service) {
        const names = service.dependencies
        return names ? names.map(this.get.bind(this)) : []
    }

    /** Build an instance: classes via `new`, plain factories via a call. */
    _createInstance(service) {
        const resolved = this._getResolvedDependencies(service)
        return ServiceContainer.isClass(service.definition)
            ? new service.definition(...resolved)
            : service.definition(...resolved)
    }

    /** True for functions with their own prototype (classes / classic fns). */
    static isClass(definition) {
        return typeof definition === 'function'
            && Boolean(definition.prototype)
            && definition.prototype.constructor === definition
    }

    /** True for any callable (includes arrow functions). */
    static isConstructor(definition) {
        return typeof definition === 'function'
    }
}
|
||||||
|
@ -16,18 +16,12 @@
|
|||||||
clearAllPreviewsBtn.parentNode.insertBefore(autoScrollControl, clearAllPreviewsBtn.nextSibling)
|
clearAllPreviewsBtn.parentNode.insertBefore(autoScrollControl, clearAllPreviewsBtn.nextSibling)
|
||||||
prettifyInputs(document);
|
prettifyInputs(document);
|
||||||
let autoScroll = document.querySelector("#auto_scroll")
|
let autoScroll = document.querySelector("#auto_scroll")
|
||||||
|
|
||||||
/**
|
// save/restore the toggle state
|
||||||
* the use of initSettings() in the autoscroll plugin seems to be breaking the models dropdown and the save-to-disk folder field
|
autoScroll.addEventListener('click', (e) => {
|
||||||
* in the settings tab. They're both blank, because they're being re-initialized. Their earlier values came from the API call,
|
localStorage.setItem('auto_scroll', autoScroll.checked)
|
||||||
* but those values aren't stored in localStorage, since they aren't user-specified.
|
})
|
||||||
* So when initSettings() is called a second time, it overwrites the values with an empty string.
|
autoScroll.checked = localStorage.getItem('auto_scroll') == "true"
|
||||||
*
|
|
||||||
* We could either rework how new components can register themselves to be auto-saved, without having to call initSettings() again.
|
|
||||||
* Or we could move the autoscroll code into the main code, and include it in the list of fields in auto-save.js
|
|
||||||
*/
|
|
||||||
// SETTINGS_IDS_LIST.push("auto_scroll")
|
|
||||||
// initSettings()
|
|
||||||
|
|
||||||
// observe for changes in the preview pane
|
// observe for changes in the preview pane
|
||||||
var observer = new MutationObserver(function (mutations) {
|
var observer = new MutationObserver(function (mutations) {
|
||||||
|
@ -1,7 +1,10 @@
|
|||||||
(function () {
|
(function () { "use strict"
|
||||||
"use strict"
|
if (typeof editorModifierTagsList !== 'object') {
|
||||||
|
console.error('editorModifierTagsList missing...')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
var styleSheet = document.createElement("style");
|
const styleSheet = document.createElement("style");
|
||||||
styleSheet.textContent = `
|
styleSheet.textContent = `
|
||||||
.modifier-card-tiny.drag-sort-active {
|
.modifier-card-tiny.drag-sort-active {
|
||||||
background: transparent;
|
background: transparent;
|
||||||
@ -12,7 +15,7 @@
|
|||||||
document.head.appendChild(styleSheet);
|
document.head.appendChild(styleSheet);
|
||||||
|
|
||||||
// observe for changes in tag list
|
// observe for changes in tag list
|
||||||
var observer = new MutationObserver(function (mutations) {
|
const observer = new MutationObserver(function (mutations) {
|
||||||
// mutations.forEach(function (mutation) {
|
// mutations.forEach(function (mutation) {
|
||||||
if (editorModifierTagsList.childNodes.length > 0) {
|
if (editorModifierTagsList.childNodes.length > 0) {
|
||||||
ModifierDragAndDrop(editorModifierTagsList)
|
ModifierDragAndDrop(editorModifierTagsList)
|
||||||
|
@ -1,8 +1,11 @@
|
|||||||
(function () {
|
(function () { "use strict"
|
||||||
"use strict"
|
if (typeof editorModifierTagsList !== 'object') {
|
||||||
|
console.error('editorModifierTagsList missing...')
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// observe for changes in tag list
|
// observe for changes in tag list
|
||||||
var observer = new MutationObserver(function (mutations) {
|
const observer = new MutationObserver(function (mutations) {
|
||||||
// mutations.forEach(function (mutation) {
|
// mutations.forEach(function (mutation) {
|
||||||
if (editorModifierTagsList.childNodes.length > 0) {
|
if (editorModifierTagsList.childNodes.length > 0) {
|
||||||
ModifierMouseWheel(editorModifierTagsList)
|
ModifierMouseWheel(editorModifierTagsList)
|
||||||
|
29
ui/plugins/ui/SpecRunner.html
Normal file
29
ui/plugins/ui/SpecRunner.html
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Jasmine Spec Runner v4.5.0</title>
|
||||||
|
|
||||||
|
<link rel="shortcut icon" type="image/png" href="./jasmine/jasmine_favicon.png">
|
||||||
|
<link rel="stylesheet" href="./jasmine/jasmine.css">
|
||||||
|
|
||||||
|
<script src="./jasmine/jasmine.js"></script>
|
||||||
|
<script src="./jasmine/jasmine-html.js"></script>
|
||||||
|
<script src="./jasmine/boot0.js"></script>
|
||||||
|
<!-- optional: include a file here that configures the Jasmine env -->
|
||||||
|
<script src="./jasmine/boot1.js"></script>
|
||||||
|
|
||||||
|
<!-- include source files here... -->
|
||||||
|
<script src="/media/js/utils.js?v=4"></script>
|
||||||
|
<script src="/media/js/engine.js?v=1"></script>
|
||||||
|
<!-- <script src="./engine.js?v=1"></script> -->
|
||||||
|
<script src="/media/js/plugins.js?v=1"></script>
|
||||||
|
|
||||||
|
<!-- include spec files here... -->
|
||||||
|
<script src="./jasmineSpec.js"></script>
|
||||||
|
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body>
|
||||||
|
</body>
|
||||||
|
</html>
|
64
ui/plugins/ui/jasmine/boot0.js
Normal file
64
ui/plugins/ui/jasmine/boot0.js
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
/*
Copyright (c) 2008-2022 Pivotal Labs

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
 This file starts the process of "booting" Jasmine. It initializes Jasmine,
 makes its globals available, and creates the env. This file should be loaded
 after `jasmine.js` and `jasmine_html.js`, but before `boot1.js` or any project
 source files or spec files are loaded.
 */
// NOTE(review): vendored Jasmine 4.5.0 boot file (see SpecRunner.html);
// keep byte-identical to upstream rather than editing locally.
(function() {
  const jasmineRequire = window.jasmineRequire || require('./jasmine.js');

  /**
   * ## Require & Instantiate
   *
   * Require Jasmine's core files. Specifically, this requires and attaches all of Jasmine's code to the `jasmine` reference.
   */
  const jasmine = jasmineRequire.core(jasmineRequire),
    global = jasmine.getGlobal();
  global.jasmine = jasmine;

  /**
   * Since this is being run in a browser and the results should populate to an HTML page, require the HTML-specific Jasmine code, injecting the same reference.
   */
  jasmineRequire.html(jasmine);

  /**
   * Create the Jasmine environment. This is used to run all specs in a project.
   */
  const env = jasmine.getEnv();

  /**
   * ## The Global Interface
   *
   * Build up the functions that will be exposed as the Jasmine public interface. A project can customize, rename or alias any of these functions as desired, provided the implementation remains unchanged.
   */
  const jasmineInterface = jasmineRequire.interface(jasmine, env);

  /**
   * Add all of the Jasmine global/public interface to the global scope, so a project can use the public interface directly. For example, calling `describe` in specs instead of `jasmine.getEnv().describe`.
   */
  for (const property in jasmineInterface) {
    global[property] = jasmineInterface[property];
  }
})();
|
132
ui/plugins/ui/jasmine/boot1.js
Normal file
132
ui/plugins/ui/jasmine/boot1.js
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
/*
Copyright (c) 2008-2022 Pivotal Labs

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
 This file finishes 'booting' Jasmine, performing all of the necessary
 initialization before executing the loaded environment and all of a project's
 specs. This file should be loaded after `boot0.js` but before any project
 source files or spec files are loaded. Thus this file can also be used to
 customize Jasmine for a project.

 If a project is using Jasmine via the standalone distribution, this file can
 be customized directly. If you only wish to configure the Jasmine env, you
 can load another file that calls `jasmine.getEnv().configure({...})`
 after `boot0.js` is loaded and before this file is loaded.
 */
// NOTE(review): vendored Jasmine 4.5.0 boot file; keep byte-identical to
// upstream rather than editing locally.

(function() {
  const env = jasmine.getEnv();

  /**
   * ## Runner Parameters
   *
   * More browser specific code - wrap the query string in an object and to allow for getting/setting parameters from the runner user interface.
   */

  const queryString = new jasmine.QueryString({
    getWindowLocation: function() {
      return window.location;
    }
  });

  const filterSpecs = !!queryString.getParam('spec');

  const config = {
    stopOnSpecFailure: queryString.getParam('stopOnSpecFailure'),
    stopSpecOnExpectationFailure: queryString.getParam(
      'stopSpecOnExpectationFailure'
    ),
    hideDisabled: queryString.getParam('hideDisabled')
  };

  const random = queryString.getParam('random');

  if (random !== undefined && random !== '') {
    config.random = random;
  }

  const seed = queryString.getParam('seed');
  if (seed) {
    config.seed = seed;
  }

  /**
   * ## Reporters
   * The `HtmlReporter` builds all of the HTML UI for the runner page. This reporter paints the dots, stars, and x's for specs, as well as all spec names and all failures (if any).
   */
  const htmlReporter = new jasmine.HtmlReporter({
    env: env,
    navigateWithNewParam: function(key, value) {
      return queryString.navigateWithNewParam(key, value);
    },
    addToExistingQueryString: function(key, value) {
      return queryString.fullStringWithNewParam(key, value);
    },
    getContainer: function() {
      return document.body;
    },
    createElement: function() {
      return document.createElement.apply(document, arguments);
    },
    createTextNode: function() {
      return document.createTextNode.apply(document, arguments);
    },
    timer: new jasmine.Timer(),
    filterSpecs: filterSpecs
  });

  /**
   * The `jsApiReporter` also receives spec results, and is used by any environment that needs to extract the results from JavaScript.
   */
  env.addReporter(jsApiReporter);
  env.addReporter(htmlReporter);

  /**
   * Filter which specs will be run by matching the start of the full name against the `spec` query param.
   */
  const specFilter = new jasmine.HtmlSpecFilter({
    filterString: function() {
      return queryString.getParam('spec');
    }
  });

  config.specFilter = function(spec) {
    return specFilter.matches(spec.getFullName());
  };

  env.configure(config);

  /**
   * ## Execution
   *
   * Replace the browser window's `onload`, ensure it's called, and then run all of the loaded specs. This includes initializing the `HtmlReporter` instance and then executing the loaded Jasmine environment. All of this will happen after all of the specs are loaded.
   */
  const currentWindowOnload = window.onload;

  window.onload = function() {
    if (currentWindowOnload) {
      currentWindowOnload();
    }
    htmlReporter.initialize();
    env.execute();
  };
})();
|
964
ui/plugins/ui/jasmine/jasmine-html.js
Normal file
964
ui/plugins/ui/jasmine/jasmine-html.js
Normal file
@ -0,0 +1,964 @@
|
|||||||
|
/*
Copyright (c) 2008-2022 Pivotal Labs

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// NOTE(review): vendored Jasmine 4.5.0 HTML reporter; keep byte-identical to
// upstream rather than editing locally.
// eslint-disable-next-line no-var
var jasmineRequire = window.jasmineRequire || require('./jasmine.js');

// Attaches the browser/HTML-specific pieces onto the Jasmine namespace.
jasmineRequire.html = function(j$) {
  j$.ResultsNode = jasmineRequire.ResultsNode();
  j$.HtmlReporter = jasmineRequire.HtmlReporter(j$);
  j$.QueryString = jasmineRequire.QueryString();
  j$.HtmlSpecFilter = jasmineRequire.HtmlSpecFilter();
};
|
||||||
|
|
||||||
|
jasmineRequire.HtmlReporter = function(j$) {
|
||||||
|
function ResultsStateBuilder() {
|
||||||
|
this.topResults = new j$.ResultsNode({}, '', null);
|
||||||
|
this.currentParent = this.topResults;
|
||||||
|
this.specsExecuted = 0;
|
||||||
|
this.failureCount = 0;
|
||||||
|
this.pendingSpecCount = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
ResultsStateBuilder.prototype.suiteStarted = function(result) {
|
||||||
|
this.currentParent.addChild(result, 'suite');
|
||||||
|
this.currentParent = this.currentParent.last();
|
||||||
|
};
|
||||||
|
|
||||||
|
ResultsStateBuilder.prototype.suiteDone = function(result) {
|
||||||
|
this.currentParent.updateResult(result);
|
||||||
|
if (this.currentParent !== this.topResults) {
|
||||||
|
this.currentParent = this.currentParent.parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.status === 'failed') {
|
||||||
|
this.failureCount++;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
ResultsStateBuilder.prototype.specStarted = function(result) {};
|
||||||
|
|
||||||
|
ResultsStateBuilder.prototype.specDone = function(result) {
|
||||||
|
this.currentParent.addChild(result, 'spec');
|
||||||
|
|
||||||
|
if (result.status !== 'excluded') {
|
||||||
|
this.specsExecuted++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.status === 'failed') {
|
||||||
|
this.failureCount++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.status == 'pending') {
|
||||||
|
this.pendingSpecCount++;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
ResultsStateBuilder.prototype.jasmineDone = function(result) {
|
||||||
|
if (result.failedExpectations) {
|
||||||
|
this.failureCount += result.failedExpectations.length;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
function HtmlReporter(options) {
|
||||||
|
function config() {
|
||||||
|
return (options.env && options.env.configuration()) || {};
|
||||||
|
}
|
||||||
|
|
||||||
|
const getContainer = options.getContainer;
|
||||||
|
const createElement = options.createElement;
|
||||||
|
const createTextNode = options.createTextNode;
|
||||||
|
const navigateWithNewParam = options.navigateWithNewParam || function() {};
|
||||||
|
const addToExistingQueryString =
|
||||||
|
options.addToExistingQueryString || defaultQueryString;
|
||||||
|
const filterSpecs = options.filterSpecs;
|
||||||
|
let htmlReporterMain;
|
||||||
|
let symbols;
|
||||||
|
const deprecationWarnings = [];
|
||||||
|
const failures = [];
|
||||||
|
|
||||||
|
this.initialize = function() {
|
||||||
|
clearPrior();
|
||||||
|
htmlReporterMain = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine_html-reporter' },
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-banner' },
|
||||||
|
createDom('a', {
|
||||||
|
className: 'jasmine-title',
|
||||||
|
href: 'http://jasmine.github.io/',
|
||||||
|
target: '_blank'
|
||||||
|
}),
|
||||||
|
createDom('span', { className: 'jasmine-version' }, j$.version)
|
||||||
|
),
|
||||||
|
createDom('ul', { className: 'jasmine-symbol-summary' }),
|
||||||
|
createDom('div', { className: 'jasmine-alert' }),
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-results' },
|
||||||
|
createDom('div', { className: 'jasmine-failures' })
|
||||||
|
)
|
||||||
|
);
|
||||||
|
getContainer().appendChild(htmlReporterMain);
|
||||||
|
};
|
||||||
|
|
||||||
|
let totalSpecsDefined;
|
||||||
|
this.jasmineStarted = function(options) {
|
||||||
|
totalSpecsDefined = options.totalSpecsDefined || 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
const summary = createDom('div', { className: 'jasmine-summary' });
|
||||||
|
|
||||||
|
const stateBuilder = new ResultsStateBuilder();
|
||||||
|
|
||||||
|
this.suiteStarted = function(result) {
|
||||||
|
stateBuilder.suiteStarted(result);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.suiteDone = function(result) {
|
||||||
|
stateBuilder.suiteDone(result);
|
||||||
|
|
||||||
|
if (result.status === 'failed') {
|
||||||
|
failures.push(failureDom(result));
|
||||||
|
}
|
||||||
|
addDeprecationWarnings(result, 'suite');
|
||||||
|
};
|
||||||
|
|
||||||
|
this.specStarted = function(result) {
|
||||||
|
stateBuilder.specStarted(result);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.specDone = function(result) {
|
||||||
|
stateBuilder.specDone(result);
|
||||||
|
|
||||||
|
if (noExpectations(result)) {
|
||||||
|
const noSpecMsg = "Spec '" + result.fullName + "' has no expectations.";
|
||||||
|
if (result.status === 'failed') {
|
||||||
|
console.error(noSpecMsg);
|
||||||
|
} else {
|
||||||
|
console.warn(noSpecMsg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!symbols) {
|
||||||
|
symbols = find('.jasmine-symbol-summary');
|
||||||
|
}
|
||||||
|
|
||||||
|
symbols.appendChild(
|
||||||
|
createDom('li', {
|
||||||
|
className: this.displaySpecInCorrectFormat(result),
|
||||||
|
id: 'spec_' + result.id,
|
||||||
|
title: result.fullName
|
||||||
|
})
|
||||||
|
);
|
||||||
|
|
||||||
|
if (result.status === 'failed') {
|
||||||
|
failures.push(failureDom(result));
|
||||||
|
}
|
||||||
|
|
||||||
|
addDeprecationWarnings(result, 'spec');
|
||||||
|
};
|
||||||
|
|
||||||
|
this.displaySpecInCorrectFormat = function(result) {
|
||||||
|
return noExpectations(result) && result.status === 'passed'
|
||||||
|
? 'jasmine-empty'
|
||||||
|
: this.resultStatus(result.status);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.resultStatus = function(status) {
|
||||||
|
if (status === 'excluded') {
|
||||||
|
return config().hideDisabled
|
||||||
|
? 'jasmine-excluded-no-display'
|
||||||
|
: 'jasmine-excluded';
|
||||||
|
}
|
||||||
|
return 'jasmine-' + status;
|
||||||
|
};
|
||||||
|
|
||||||
|
this.jasmineDone = function(doneResult) {
|
||||||
|
stateBuilder.jasmineDone(doneResult);
|
||||||
|
const banner = find('.jasmine-banner');
|
||||||
|
const alert = find('.jasmine-alert');
|
||||||
|
const order = doneResult && doneResult.order;
|
||||||
|
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-duration' },
|
||||||
|
'finished in ' + doneResult.totalTime / 1000 + 's'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
banner.appendChild(optionsMenu(config()));
|
||||||
|
|
||||||
|
if (stateBuilder.specsExecuted < totalSpecsDefined) {
|
||||||
|
const skippedMessage =
|
||||||
|
'Ran ' +
|
||||||
|
stateBuilder.specsExecuted +
|
||||||
|
' of ' +
|
||||||
|
totalSpecsDefined +
|
||||||
|
' specs - run all';
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
const skippedLink =
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('spec', '');
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-bar jasmine-skipped' },
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ href: skippedLink, title: 'Run all specs' },
|
||||||
|
skippedMessage
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
let statusBarMessage = '';
|
||||||
|
let statusBarClassName = 'jasmine-overall-result jasmine-bar ';
|
||||||
|
const globalFailures =
|
||||||
|
(doneResult && doneResult.failedExpectations) || [];
|
||||||
|
const failed = stateBuilder.failureCount + globalFailures.length > 0;
|
||||||
|
|
||||||
|
if (totalSpecsDefined > 0 || failed) {
|
||||||
|
statusBarMessage +=
|
||||||
|
pluralize('spec', stateBuilder.specsExecuted) +
|
||||||
|
', ' +
|
||||||
|
pluralize('failure', stateBuilder.failureCount);
|
||||||
|
if (stateBuilder.pendingSpecCount) {
|
||||||
|
statusBarMessage +=
|
||||||
|
', ' + pluralize('pending spec', stateBuilder.pendingSpecCount);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (doneResult.overallStatus === 'passed') {
|
||||||
|
statusBarClassName += ' jasmine-passed ';
|
||||||
|
} else if (doneResult.overallStatus === 'incomplete') {
|
||||||
|
statusBarClassName += ' jasmine-incomplete ';
|
||||||
|
statusBarMessage =
|
||||||
|
'Incomplete: ' +
|
||||||
|
doneResult.incompleteReason +
|
||||||
|
', ' +
|
||||||
|
statusBarMessage;
|
||||||
|
} else {
|
||||||
|
statusBarClassName += ' jasmine-failed ';
|
||||||
|
}
|
||||||
|
|
||||||
|
let seedBar;
|
||||||
|
if (order && order.random) {
|
||||||
|
seedBar = createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-seed-bar' },
|
||||||
|
', randomized with seed ',
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{
|
||||||
|
title: 'randomized with seed ' + order.seed,
|
||||||
|
href: seedHref(order.seed)
|
||||||
|
},
|
||||||
|
order.seed
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: statusBarClassName },
|
||||||
|
statusBarMessage,
|
||||||
|
seedBar
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
const errorBarClassName = 'jasmine-bar jasmine-errored';
|
||||||
|
const afterAllMessagePrefix = 'AfterAll ';
|
||||||
|
|
||||||
|
for (let i = 0; i < globalFailures.length; i++) {
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: errorBarClassName },
|
||||||
|
globalFailureMessage(globalFailures[i])
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function globalFailureMessage(failure) {
|
||||||
|
if (failure.globalErrorType === 'load') {
|
||||||
|
const prefix = 'Error during loading: ' + failure.message;
|
||||||
|
|
||||||
|
if (failure.filename) {
|
||||||
|
return (
|
||||||
|
prefix + ' in ' + failure.filename + ' line ' + failure.lineno
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
return prefix;
|
||||||
|
}
|
||||||
|
} else if (failure.globalErrorType === 'afterAll') {
|
||||||
|
return afterAllMessagePrefix + failure.message;
|
||||||
|
} else {
|
||||||
|
return failure.message;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addDeprecationWarnings(doneResult);
|
||||||
|
|
||||||
|
for (let i = 0; i < deprecationWarnings.length; i++) {
|
||||||
|
const children = [];
|
||||||
|
let context;
|
||||||
|
|
||||||
|
switch (deprecationWarnings[i].runnableType) {
|
||||||
|
case 'spec':
|
||||||
|
context = '(in spec: ' + deprecationWarnings[i].runnableName + ')';
|
||||||
|
break;
|
||||||
|
case 'suite':
|
||||||
|
context = '(in suite: ' + deprecationWarnings[i].runnableName + ')';
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
context = '';
|
||||||
|
}
|
||||||
|
|
||||||
|
deprecationWarnings[i].message.split('\n').forEach(function(line) {
|
||||||
|
children.push(line);
|
||||||
|
children.push(createDom('br'));
|
||||||
|
});
|
||||||
|
|
||||||
|
children[0] = 'DEPRECATION: ' + children[0];
|
||||||
|
children.push(context);
|
||||||
|
|
||||||
|
if (deprecationWarnings[i].stack) {
|
||||||
|
children.push(createExpander(deprecationWarnings[i].stack));
|
||||||
|
}
|
||||||
|
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-bar jasmine-warning' },
|
||||||
|
children
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const results = find('.jasmine-results');
|
||||||
|
results.appendChild(summary);
|
||||||
|
|
||||||
|
summaryList(stateBuilder.topResults, summary);
|
||||||
|
|
||||||
|
if (failures.length) {
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-menu jasmine-bar jasmine-spec-list' },
|
||||||
|
createDom('span', {}, 'Spec List | '),
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ className: 'jasmine-failures-menu', href: '#' },
|
||||||
|
'Failures'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
alert.appendChild(
|
||||||
|
createDom(
|
||||||
|
'span',
|
||||||
|
{ className: 'jasmine-menu jasmine-bar jasmine-failure-list' },
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ className: 'jasmine-spec-list-menu', href: '#' },
|
||||||
|
'Spec List'
|
||||||
|
),
|
||||||
|
createDom('span', {}, ' | Failures ')
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
find('.jasmine-failures-menu').onclick = function() {
|
||||||
|
setMenuModeTo('jasmine-failure-list');
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
find('.jasmine-spec-list-menu').onclick = function() {
|
||||||
|
setMenuModeTo('jasmine-spec-list');
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
setMenuModeTo('jasmine-failure-list');
|
||||||
|
|
||||||
|
const failureNode = find('.jasmine-failures');
|
||||||
|
for (let i = 0; i < failures.length; i++) {
|
||||||
|
failureNode.appendChild(failures[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return this;
|
||||||
|
|
||||||
|
function failureDom(result) {
|
||||||
|
const failure = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-spec-detail jasmine-failed' },
|
||||||
|
failureDescription(result, stateBuilder.currentParent),
|
||||||
|
createDom('div', { className: 'jasmine-messages' })
|
||||||
|
);
|
||||||
|
const messages = failure.childNodes[1];
|
||||||
|
|
||||||
|
for (let i = 0; i < result.failedExpectations.length; i++) {
|
||||||
|
const expectation = result.failedExpectations[i];
|
||||||
|
messages.appendChild(
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-result-message' },
|
||||||
|
expectation.message
|
||||||
|
)
|
||||||
|
);
|
||||||
|
messages.appendChild(
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-stack-trace' },
|
||||||
|
expectation.stack
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.failedExpectations.length === 0) {
|
||||||
|
messages.appendChild(
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-result-message' },
|
||||||
|
'Spec has no expectations'
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.debugLogs) {
|
||||||
|
messages.appendChild(debugLogTable(result.debugLogs));
|
||||||
|
}
|
||||||
|
|
||||||
|
return failure;
|
||||||
|
}
|
||||||
|
|
||||||
|
function debugLogTable(debugLogs) {
|
||||||
|
const tbody = createDom('tbody');
|
||||||
|
|
||||||
|
debugLogs.forEach(function(entry) {
|
||||||
|
tbody.appendChild(
|
||||||
|
createDom(
|
||||||
|
'tr',
|
||||||
|
{},
|
||||||
|
createDom('td', {}, entry.timestamp.toString()),
|
||||||
|
createDom('td', {}, entry.message)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
return createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-debug-log' },
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-debug-log-header' },
|
||||||
|
'Debug logs'
|
||||||
|
),
|
||||||
|
createDom(
|
||||||
|
'table',
|
||||||
|
{},
|
||||||
|
createDom(
|
||||||
|
'thead',
|
||||||
|
{},
|
||||||
|
createDom(
|
||||||
|
'tr',
|
||||||
|
{},
|
||||||
|
createDom('th', {}, 'Time (ms)'),
|
||||||
|
createDom('th', {}, 'Message')
|
||||||
|
)
|
||||||
|
),
|
||||||
|
tbody
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function summaryList(resultsTree, domParent) {
|
||||||
|
let specListNode;
|
||||||
|
for (let i = 0; i < resultsTree.children.length; i++) {
|
||||||
|
const resultNode = resultsTree.children[i];
|
||||||
|
if (filterSpecs && !hasActiveSpec(resultNode)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (resultNode.type === 'suite') {
|
||||||
|
const suiteListNode = createDom(
|
||||||
|
'ul',
|
||||||
|
{ className: 'jasmine-suite', id: 'suite-' + resultNode.result.id },
|
||||||
|
createDom(
|
||||||
|
'li',
|
||||||
|
{
|
||||||
|
className:
|
||||||
|
'jasmine-suite-detail jasmine-' + resultNode.result.status
|
||||||
|
},
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ href: specHref(resultNode.result) },
|
||||||
|
resultNode.result.description
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
summaryList(resultNode, suiteListNode);
|
||||||
|
domParent.appendChild(suiteListNode);
|
||||||
|
}
|
||||||
|
if (resultNode.type === 'spec') {
|
||||||
|
if (domParent.getAttribute('class') !== 'jasmine-specs') {
|
||||||
|
specListNode = createDom('ul', { className: 'jasmine-specs' });
|
||||||
|
domParent.appendChild(specListNode);
|
||||||
|
}
|
||||||
|
let specDescription = resultNode.result.description;
|
||||||
|
if (noExpectations(resultNode.result)) {
|
||||||
|
specDescription = 'SPEC HAS NO EXPECTATIONS ' + specDescription;
|
||||||
|
}
|
||||||
|
if (
|
||||||
|
resultNode.result.status === 'pending' &&
|
||||||
|
resultNode.result.pendingReason !== ''
|
||||||
|
) {
|
||||||
|
specDescription =
|
||||||
|
specDescription +
|
||||||
|
' PENDING WITH MESSAGE: ' +
|
||||||
|
resultNode.result.pendingReason;
|
||||||
|
}
|
||||||
|
specListNode.appendChild(
|
||||||
|
createDom(
|
||||||
|
'li',
|
||||||
|
{
|
||||||
|
className: 'jasmine-' + resultNode.result.status,
|
||||||
|
id: 'spec-' + resultNode.result.id
|
||||||
|
},
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ href: specHref(resultNode.result) },
|
||||||
|
specDescription
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function optionsMenu(config) {
|
||||||
|
const optionsMenuDom = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-run-options' },
|
||||||
|
createDom('span', { className: 'jasmine-trigger' }, 'Options'),
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-payload' },
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-stop-on-failure' },
|
||||||
|
createDom('input', {
|
||||||
|
className: 'jasmine-fail-fast',
|
||||||
|
id: 'jasmine-fail-fast',
|
||||||
|
type: 'checkbox'
|
||||||
|
}),
|
||||||
|
createDom(
|
||||||
|
'label',
|
||||||
|
{ className: 'jasmine-label', for: 'jasmine-fail-fast' },
|
||||||
|
'stop execution on spec failure'
|
||||||
|
)
|
||||||
|
),
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-throw-failures' },
|
||||||
|
createDom('input', {
|
||||||
|
className: 'jasmine-throw',
|
||||||
|
id: 'jasmine-throw-failures',
|
||||||
|
type: 'checkbox'
|
||||||
|
}),
|
||||||
|
createDom(
|
||||||
|
'label',
|
||||||
|
{ className: 'jasmine-label', for: 'jasmine-throw-failures' },
|
||||||
|
'stop spec on expectation failure'
|
||||||
|
)
|
||||||
|
),
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-random-order' },
|
||||||
|
createDom('input', {
|
||||||
|
className: 'jasmine-random',
|
||||||
|
id: 'jasmine-random-order',
|
||||||
|
type: 'checkbox'
|
||||||
|
}),
|
||||||
|
createDom(
|
||||||
|
'label',
|
||||||
|
{ className: 'jasmine-label', for: 'jasmine-random-order' },
|
||||||
|
'run tests in random order'
|
||||||
|
)
|
||||||
|
),
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-hide-disabled' },
|
||||||
|
createDom('input', {
|
||||||
|
className: 'jasmine-disabled',
|
||||||
|
id: 'jasmine-hide-disabled',
|
||||||
|
type: 'checkbox'
|
||||||
|
}),
|
||||||
|
createDom(
|
||||||
|
'label',
|
||||||
|
{ className: 'jasmine-label', for: 'jasmine-hide-disabled' },
|
||||||
|
'hide disabled tests'
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
const failFastCheckbox = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-fail-fast'
|
||||||
|
);
|
||||||
|
failFastCheckbox.checked = config.stopOnSpecFailure;
|
||||||
|
failFastCheckbox.onclick = function() {
|
||||||
|
navigateWithNewParam('stopOnSpecFailure', !config.stopOnSpecFailure);
|
||||||
|
};
|
||||||
|
|
||||||
|
const throwCheckbox = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-throw-failures'
|
||||||
|
);
|
||||||
|
throwCheckbox.checked = config.stopSpecOnExpectationFailure;
|
||||||
|
throwCheckbox.onclick = function() {
|
||||||
|
navigateWithNewParam(
|
||||||
|
'stopSpecOnExpectationFailure',
|
||||||
|
!config.stopSpecOnExpectationFailure
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const randomCheckbox = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-random-order'
|
||||||
|
);
|
||||||
|
randomCheckbox.checked = config.random;
|
||||||
|
randomCheckbox.onclick = function() {
|
||||||
|
navigateWithNewParam('random', !config.random);
|
||||||
|
};
|
||||||
|
|
||||||
|
const hideDisabled = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-hide-disabled'
|
||||||
|
);
|
||||||
|
hideDisabled.checked = config.hideDisabled;
|
||||||
|
hideDisabled.onclick = function() {
|
||||||
|
navigateWithNewParam('hideDisabled', !config.hideDisabled);
|
||||||
|
};
|
||||||
|
|
||||||
|
const optionsTrigger = optionsMenuDom.querySelector('.jasmine-trigger'),
|
||||||
|
optionsPayload = optionsMenuDom.querySelector('.jasmine-payload'),
|
||||||
|
isOpen = /\bjasmine-open\b/;
|
||||||
|
|
||||||
|
optionsTrigger.onclick = function() {
|
||||||
|
if (isOpen.test(optionsPayload.className)) {
|
||||||
|
optionsPayload.className = optionsPayload.className.replace(
|
||||||
|
isOpen,
|
||||||
|
''
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
optionsPayload.className += ' jasmine-open';
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return optionsMenuDom;
|
||||||
|
}
|
||||||
|
|
||||||
|
function failureDescription(result, suite) {
|
||||||
|
const wrapper = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-description' },
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ title: result.description, href: specHref(result) },
|
||||||
|
result.description
|
||||||
|
)
|
||||||
|
);
|
||||||
|
let suiteLink;
|
||||||
|
|
||||||
|
while (suite && suite.parent) {
|
||||||
|
wrapper.insertBefore(createTextNode(' > '), wrapper.firstChild);
|
||||||
|
suiteLink = createDom(
|
||||||
|
'a',
|
||||||
|
{ href: suiteHref(suite) },
|
||||||
|
suite.result.description
|
||||||
|
);
|
||||||
|
wrapper.insertBefore(suiteLink, wrapper.firstChild);
|
||||||
|
|
||||||
|
suite = suite.parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
return wrapper;
|
||||||
|
}
|
||||||
|
|
||||||
|
function suiteHref(suite) {
|
||||||
|
const els = [];
|
||||||
|
|
||||||
|
while (suite && suite.parent) {
|
||||||
|
els.unshift(suite.result.description);
|
||||||
|
suite = suite.parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('spec', els.join(' '))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function addDeprecationWarnings(result, runnableType) {
|
||||||
|
if (result && result.deprecationWarnings) {
|
||||||
|
for (let i = 0; i < result.deprecationWarnings.length; i++) {
|
||||||
|
const warning = result.deprecationWarnings[i].message;
|
||||||
|
deprecationWarnings.push({
|
||||||
|
message: warning,
|
||||||
|
stack: result.deprecationWarnings[i].stack,
|
||||||
|
runnableName: result.fullName,
|
||||||
|
runnableType: runnableType
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function createExpander(stackTrace) {
|
||||||
|
const expandLink = createDom('a', { href: '#' }, 'Show stack trace');
|
||||||
|
const root = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-expander' },
|
||||||
|
expandLink,
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-expander-contents jasmine-stack-trace' },
|
||||||
|
stackTrace
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
expandLink.addEventListener('click', function(e) {
|
||||||
|
e.preventDefault();
|
||||||
|
|
||||||
|
if (root.classList.contains('jasmine-expanded')) {
|
||||||
|
root.classList.remove('jasmine-expanded');
|
||||||
|
expandLink.textContent = 'Show stack trace';
|
||||||
|
} else {
|
||||||
|
root.classList.add('jasmine-expanded');
|
||||||
|
expandLink.textContent = 'Hide stack trace';
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return root;
|
||||||
|
}
|
||||||
|
|
||||||
|
function find(selector) {
|
||||||
|
return getContainer().querySelector('.jasmine_html-reporter ' + selector);
|
||||||
|
}
|
||||||
|
|
||||||
|
function clearPrior() {
|
||||||
|
const oldReporter = find('');
|
||||||
|
|
||||||
|
if (oldReporter) {
|
||||||
|
getContainer().removeChild(oldReporter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function createDom(type, attrs, childrenArrayOrVarArgs) {
|
||||||
|
const el = createElement(type);
|
||||||
|
let children;
|
||||||
|
|
||||||
|
if (j$.isArray_(childrenArrayOrVarArgs)) {
|
||||||
|
children = childrenArrayOrVarArgs;
|
||||||
|
} else {
|
||||||
|
children = [];
|
||||||
|
|
||||||
|
for (let i = 2; i < arguments.length; i++) {
|
||||||
|
children.push(arguments[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = 0; i < children.length; i++) {
|
||||||
|
const child = children[i];
|
||||||
|
|
||||||
|
if (typeof child === 'string') {
|
||||||
|
el.appendChild(createTextNode(child));
|
||||||
|
} else {
|
||||||
|
if (child) {
|
||||||
|
el.appendChild(child);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const attr in attrs) {
|
||||||
|
if (attr == 'className') {
|
||||||
|
el[attr] = attrs[attr];
|
||||||
|
} else {
|
||||||
|
el.setAttribute(attr, attrs[attr]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return el;
|
||||||
|
}
|
||||||
|
|
||||||
|
function pluralize(singular, count) {
|
||||||
|
const word = count == 1 ? singular : singular + 's';
|
||||||
|
|
||||||
|
return '' + count + ' ' + word;
|
||||||
|
}
|
||||||
|
|
||||||
|
function specHref(result) {
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('spec', result.fullName)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function seedHref(seed) {
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('seed', seed)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function defaultQueryString(key, value) {
|
||||||
|
return '?' + key + '=' + value;
|
||||||
|
}
|
||||||
|
|
||||||
|
function setMenuModeTo(mode) {
|
||||||
|
htmlReporterMain.setAttribute('class', 'jasmine_html-reporter ' + mode);
|
||||||
|
}
|
||||||
|
|
||||||
|
function noExpectations(result) {
|
||||||
|
const allExpectations =
|
||||||
|
result.failedExpectations.length + result.passedExpectations.length;
|
||||||
|
|
||||||
|
return (
|
||||||
|
allExpectations === 0 &&
|
||||||
|
(result.status === 'passed' || result.status === 'failed')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function hasActiveSpec(resultNode) {
|
||||||
|
if (resultNode.type == 'spec' && resultNode.result.status != 'excluded') {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (resultNode.type == 'suite') {
|
||||||
|
for (let i = 0, j = resultNode.children.length; i < j; i++) {
|
||||||
|
if (hasActiveSpec(resultNode.children[i])) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return HtmlReporter;
|
||||||
|
};
|
||||||
|
|
||||||
|
jasmineRequire.HtmlSpecFilter = function() {
|
||||||
|
function HtmlSpecFilter(options) {
|
||||||
|
const filterString =
|
||||||
|
options &&
|
||||||
|
options.filterString() &&
|
||||||
|
options.filterString().replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&');
|
||||||
|
const filterPattern = new RegExp(filterString);
|
||||||
|
|
||||||
|
this.matches = function(specName) {
|
||||||
|
return filterPattern.test(specName);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return HtmlSpecFilter;
|
||||||
|
};
|
||||||
|
|
||||||
|
jasmineRequire.ResultsNode = function() {
|
||||||
|
function ResultsNode(result, type, parent) {
|
||||||
|
this.result = result;
|
||||||
|
this.type = type;
|
||||||
|
this.parent = parent;
|
||||||
|
|
||||||
|
this.children = [];
|
||||||
|
|
||||||
|
this.addChild = function(result, type) {
|
||||||
|
this.children.push(new ResultsNode(result, type, this));
|
||||||
|
};
|
||||||
|
|
||||||
|
this.last = function() {
|
||||||
|
return this.children[this.children.length - 1];
|
||||||
|
};
|
||||||
|
|
||||||
|
this.updateResult = function(result) {
|
||||||
|
this.result = result;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return ResultsNode;
|
||||||
|
};
|
||||||
|
|
||||||
|
jasmineRequire.QueryString = function() {
|
||||||
|
function QueryString(options) {
|
||||||
|
this.navigateWithNewParam = function(key, value) {
|
||||||
|
options.getWindowLocation().search = this.fullStringWithNewParam(
|
||||||
|
key,
|
||||||
|
value
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.fullStringWithNewParam = function(key, value) {
|
||||||
|
const paramMap = queryStringToParamMap();
|
||||||
|
paramMap[key] = value;
|
||||||
|
return toQueryString(paramMap);
|
||||||
|
};
|
||||||
|
|
||||||
|
this.getParam = function(key) {
|
||||||
|
return queryStringToParamMap()[key];
|
||||||
|
};
|
||||||
|
|
||||||
|
return this;
|
||||||
|
|
||||||
|
function toQueryString(paramMap) {
|
||||||
|
const qStrPairs = [];
|
||||||
|
for (const prop in paramMap) {
|
||||||
|
qStrPairs.push(
|
||||||
|
encodeURIComponent(prop) + '=' + encodeURIComponent(paramMap[prop])
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return '?' + qStrPairs.join('&');
|
||||||
|
}
|
||||||
|
|
||||||
|
function queryStringToParamMap() {
|
||||||
|
const paramStr = options.getWindowLocation().search.substring(1);
|
||||||
|
let params = [];
|
||||||
|
const paramMap = {};
|
||||||
|
|
||||||
|
if (paramStr.length > 0) {
|
||||||
|
params = paramStr.split('&');
|
||||||
|
for (let i = 0; i < params.length; i++) {
|
||||||
|
const p = params[i].split('=');
|
||||||
|
let value = decodeURIComponent(p[1]);
|
||||||
|
if (value === 'true' || value === 'false') {
|
||||||
|
value = JSON.parse(value);
|
||||||
|
}
|
||||||
|
paramMap[decodeURIComponent(p[0])] = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return paramMap;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return QueryString;
|
||||||
|
};
|
301
ui/plugins/ui/jasmine/jasmine.css
Normal file
301
ui/plugins/ui/jasmine/jasmine.css
Normal file
File diff suppressed because one or more lines are too long
10468
ui/plugins/ui/jasmine/jasmine.js
Normal file
10468
ui/plugins/ui/jasmine/jasmine.js
Normal file
File diff suppressed because it is too large
Load Diff
BIN
ui/plugins/ui/jasmine/jasmine_favicon.png
Normal file
BIN
ui/plugins/ui/jasmine/jasmine_favicon.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.5 KiB |
412
ui/plugins/ui/jasmineSpec.js
Normal file
412
ui/plugins/ui/jasmineSpec.js
Normal file
@ -0,0 +1,412 @@
|
|||||||
|
"use strict"
|
||||||
|
|
||||||
|
const JASMINE_SESSION_ID = `jasmine-${String(Date.now()).slice(8)}`
|
||||||
|
|
||||||
|
beforeEach(function () {
|
||||||
|
jasmine.DEFAULT_TIMEOUT_INTERVAL = 15 * 60 * 1000 // Test timeout after 15 minutes
|
||||||
|
jasmine.addMatchers({
|
||||||
|
toBeOneOf: function () {
|
||||||
|
return {
|
||||||
|
compare: function (actual, expected) {
|
||||||
|
return {
|
||||||
|
pass: expected.includes(actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
describe('stable-diffusion-ui', function() {
|
||||||
|
beforeEach(function() {
|
||||||
|
expect(typeof SD).toBe('object')
|
||||||
|
expect(typeof SD.serverState).toBe('object')
|
||||||
|
expect(typeof SD.serverState.status).toBe('string')
|
||||||
|
})
|
||||||
|
it('should be able to reach the backend', async function() {
|
||||||
|
expect(SD.serverState.status).toBe(SD.ServerStates.unavailable)
|
||||||
|
SD.sessionId = JASMINE_SESSION_ID
|
||||||
|
await SD.init()
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
})
|
||||||
|
|
||||||
|
it('enfore the current task state', function() {
|
||||||
|
const task = new SD.Task()
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.init)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.pending)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.init)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.waiting)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.waiting)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.processing)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.processing)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.failed)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.failed)
|
||||||
|
expect(task.isPending).toBeFalse()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.processing)
|
||||||
|
}).toThrowError()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.completed)
|
||||||
|
}).toThrowError()
|
||||||
|
})
|
||||||
|
it('should be able to run tasks', async function() {
|
||||||
|
expect(typeof SD.Task.run).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield 1 + 1).toBe(4)
|
||||||
|
expect(yield 2 + 2).toBe(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toBe(12)
|
||||||
|
expect(yield 4 + 4).toBe(16)
|
||||||
|
return 8 + 8
|
||||||
|
})('start')
|
||||||
|
const callback = function({value, done}) {
|
||||||
|
return {value: 2 * value, done}
|
||||||
|
}
|
||||||
|
expect(await SD.Task.run(promiseGenerator, {callback})).toBe(32)
|
||||||
|
})
|
||||||
|
it('should be able to queue tasks', async function() {
|
||||||
|
expect(typeof SD.Task.enqueue).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield 1 + 1).toBe(4)
|
||||||
|
expect(yield 2 + 2).toBe(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toBe(12)
|
||||||
|
expect(yield 4 + 4).toBe(16)
|
||||||
|
return 8 + 8
|
||||||
|
})('start')
|
||||||
|
const callback = function({value, done}) {
|
||||||
|
return {value: 2 * value, done}
|
||||||
|
}
|
||||||
|
const gen = SD.Task.asGenerator({generator: promiseGenerator, callback})
|
||||||
|
expect(await SD.Task.enqueue(gen)).toBe(32)
|
||||||
|
})
|
||||||
|
it('should be able to chain handlers', async function() {
|
||||||
|
expect(typeof SD.Task.enqueue).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield {test: '1'}).toEqual({test: '1', foo: 'bar'})
|
||||||
|
expect(yield 2 + 2).toEqual(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toEqual(12)
|
||||||
|
expect(yield {test: 4}).toEqual({test: 8, foo: 'bar'})
|
||||||
|
return {test: 8}
|
||||||
|
})('start')
|
||||||
|
const gen1 = SD.Task.asGenerator({generator: promiseGenerator, callback: function({value, done}) {
|
||||||
|
if (typeof value === "object") {
|
||||||
|
value['foo'] = 'bar'
|
||||||
|
}
|
||||||
|
return {value, done}
|
||||||
|
}})
|
||||||
|
const gen2 = SD.Task.asGenerator({generator: gen1, callback: function({value, done}) {
|
||||||
|
if (typeof value === 'number') {
|
||||||
|
value = 2 * value
|
||||||
|
}
|
||||||
|
if (typeof value === 'object' && typeof value.test === 'number') {
|
||||||
|
value.test = 2 * value.test
|
||||||
|
}
|
||||||
|
return {value, done}
|
||||||
|
}})
|
||||||
|
expect(await SD.Task.enqueue(gen2)).toEqual({test:32, foo: 'bar'})
|
||||||
|
})
|
||||||
|
describe('ServiceContainer', function() {
|
||||||
|
it('should be able to register providers', function() {
|
||||||
|
const cont = new ServiceContainer(
|
||||||
|
function foo() {
|
||||||
|
this.bar = ''
|
||||||
|
},
|
||||||
|
function bar() {
|
||||||
|
return () => 0
|
||||||
|
},
|
||||||
|
{ name: 'zero', definition: 0 },
|
||||||
|
{ name: 'ctx', definition: () => Object.create(null), singleton: true },
|
||||||
|
{ name: 'test',
|
||||||
|
definition: (ctx, missing, one, foo) => {
|
||||||
|
expect(ctx).toEqual({ran: true})
|
||||||
|
expect(one).toBe(1)
|
||||||
|
expect(typeof foo).toBe('object')
|
||||||
|
expect(foo.bar).toBeDefined()
|
||||||
|
expect(typeof missing).toBe('undefined')
|
||||||
|
return {foo: 'bar'}
|
||||||
|
}, dependencies: ['ctx', 'missing', 'one', 'foo']
|
||||||
|
}
|
||||||
|
)
|
||||||
|
const fooObj = cont.get('foo')
|
||||||
|
expect(typeof fooObj).toBe('object')
|
||||||
|
fooObj.ran = true
|
||||||
|
|
||||||
|
const ctx = cont.get('ctx')
|
||||||
|
expect(ctx).toEqual({})
|
||||||
|
ctx.ran = true
|
||||||
|
|
||||||
|
const bar = cont.get('bar')
|
||||||
|
expect(typeof bar).toBe('function')
|
||||||
|
expect(bar()).toBe(0)
|
||||||
|
|
||||||
|
cont.register({name: 'one', definition: 1})
|
||||||
|
const test = cont.get('test')
|
||||||
|
expect(typeof test).toBe('object')
|
||||||
|
expect(test.foo).toBe('bar')
|
||||||
|
})
|
||||||
|
})
|
||||||
|
it('should be able to stream data in chunks', async function() {
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
const nbr_steps = 15
|
||||||
|
let res = await fetch('/render', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"negative_prompt": "",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"seed": Math.floor(Math.random() * 10000000),
|
||||||
|
|
||||||
|
"sampler": "plms",
|
||||||
|
"use_stable_diffusion_model": "sd-v1-4",
|
||||||
|
"num_inference_steps": nbr_steps,
|
||||||
|
"guidance_scale": 7.5,
|
||||||
|
|
||||||
|
"numOutputsParallel": 1,
|
||||||
|
"stream_image_progress": true,
|
||||||
|
"show_only_filtered_image": true,
|
||||||
|
"output_format": "jpeg",
|
||||||
|
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
expect(res.ok).toBeTruthy()
|
||||||
|
const renderRequest = await res.json()
|
||||||
|
expect(typeof renderRequest.stream).toBe('string')
|
||||||
|
expect(renderRequest.task).toBeDefined()
|
||||||
|
|
||||||
|
// Wait for server status to update.
|
||||||
|
await SD.waitUntil(() => {
|
||||||
|
console.log('Waiting for %s to be received...', renderRequest.task)
|
||||||
|
return (!SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)])
|
||||||
|
}, 250, 10 * 60 * 1000)
|
||||||
|
// Wait for task to start on server.
|
||||||
|
await SD.waitUntil(() => {
|
||||||
|
console.log('Waiting for %s to start...', renderRequest.task)
|
||||||
|
return !SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)] !== 'pending'
|
||||||
|
}, 250)
|
||||||
|
|
||||||
|
const reader = new SD.ChunkedStreamReader(renderRequest.stream)
|
||||||
|
const parseToString = reader.parse
|
||||||
|
reader.parse = function(value) {
|
||||||
|
value = parseToString.call(this, value)
|
||||||
|
if (!value || value.length <= 0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return reader.readStreamAsJSON(value.join(''))
|
||||||
|
}
|
||||||
|
reader.onNext = function({done, value}) {
|
||||||
|
console.log(value)
|
||||||
|
if (typeof value === 'object' && 'status' in value) {
|
||||||
|
done = true
|
||||||
|
}
|
||||||
|
return {done, value}
|
||||||
|
}
|
||||||
|
let lastUpdate = undefined
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
//for await (const stepUpdate of reader) {
|
||||||
|
for await (const stepUpdate of reader.open()) {
|
||||||
|
console.log('ChunkedStreamReader received ', stepUpdate)
|
||||||
|
lastUpdate = stepUpdate
|
||||||
|
if (complete) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(1)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.total_steps).toBe(nbr_steps)
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for(let i=1; i <= 5; ++i) {
|
||||||
|
res = await fetch(renderRequest.stream)
|
||||||
|
expect(res.ok).toBeTruthy()
|
||||||
|
const cachedResponse = await res.json()
|
||||||
|
console.log('Cache test %s received %o', i, cachedResponse)
|
||||||
|
expect(lastUpdate).toEqual(cachedResponse)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('should be able to make renders', function() {
|
||||||
|
beforeEach(function() {
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
})
|
||||||
|
it('basic inline request', async function() {
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
const result = await SD.render({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"show_only_filtered_image": false,
|
||||||
|
//"use_face_correction": 'GFPGANv1.3',
|
||||||
|
"use_upscale": "RealESRGAN_x4plus",
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
}, function(event) {
|
||||||
|
console.log(this, event)
|
||||||
|
if ('update' in event) {
|
||||||
|
const stepUpdate = event.update
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(2)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
console.log(result)
|
||||||
|
expect(result.status).toBe('succeeded')
|
||||||
|
expect(result.output).toHaveSize(2)
|
||||||
|
})
|
||||||
|
it('post and reader request', async function() {
|
||||||
|
const renderTask = new SD.RenderTask({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"seed": SD.MAX_SEED_VALUE,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
})
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.init)
|
||||||
|
|
||||||
|
const timeout = -1
|
||||||
|
const renderRequest = await renderTask.post(timeout)
|
||||||
|
expect(typeof renderRequest.stream).toBe('string')
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.waiting)
|
||||||
|
expect(renderTask.streamUrl).toBe(renderRequest.stream)
|
||||||
|
|
||||||
|
await renderTask.waitUntil({state: SD.TaskStatus.processing, callback: () => console.log('Waiting for render task to start...') })
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.processing)
|
||||||
|
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
//for await (const stepUpdate of renderTask.reader) {
|
||||||
|
for await (const stepUpdate of renderTask.reader.open()) {
|
||||||
|
console.log(stepUpdate)
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(1)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.completed)
|
||||||
|
expect(renderTask.result.status).toBe('succeeded')
|
||||||
|
expect(renderTask.result.output).toHaveSize(1)
|
||||||
|
})
|
||||||
|
it('queued request', async function() {
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
const renderTask = new SD.RenderTask({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"show_only_filtered_image": false,
|
||||||
|
//"use_face_correction": 'GFPGANv1.3',
|
||||||
|
"use_upscale": "RealESRGAN_x4plus",
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
})
|
||||||
|
await renderTask.enqueue(function(event) {
|
||||||
|
console.log(this, event)
|
||||||
|
if ('update' in event) {
|
||||||
|
const stepUpdate = event.update
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(2)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
console.log(renderTask.result)
|
||||||
|
expect(renderTask.result.status).toBe('succeeded')
|
||||||
|
expect(renderTask.result.output).toHaveSize(2)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
describe('# Special cases', function() {
|
||||||
|
it('should throw an exception on set for invalid sessionId', function() {
|
||||||
|
expect(function() {
|
||||||
|
SD.sessionId = undefined
|
||||||
|
}).toThrowError("Can't set sessionId to undefined.")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
const loadCompleted = window.onload
|
||||||
|
let loadEvent = undefined
|
||||||
|
window.onload = function(evt) {
|
||||||
|
loadEvent = evt
|
||||||
|
}
|
||||||
|
if (!PLUGINS.SELFTEST) {
|
||||||
|
PLUGINS.SELFTEST = {}
|
||||||
|
}
|
||||||
|
loadUIPlugins().then(function() {
|
||||||
|
console.log('loadCompleted', loadEvent)
|
||||||
|
describe('@Plugins', function() {
|
||||||
|
it('exposes hooks to overide', function() {
|
||||||
|
expect(typeof PLUGINS.IMAGE_INFO_BUTTONS).toBe('object')
|
||||||
|
expect(typeof PLUGINS.TASK_CREATE).toBe('object')
|
||||||
|
})
|
||||||
|
describe('supports selftests', function() { // Hook to allow plugins to define tests.
|
||||||
|
const pluginsTests = Object.keys(PLUGINS.SELFTEST).filter((key) => PLUGINS.SELFTEST.hasOwnProperty(key))
|
||||||
|
if (!pluginsTests || pluginsTests.length <= 0) {
|
||||||
|
it('but nothing loaded...', function() {
|
||||||
|
expect(true).toBeTruthy()
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for (const pTest of pluginsTests) {
|
||||||
|
describe(pTest, function() {
|
||||||
|
const testFn = PLUGINS.SELFTEST[pTest]
|
||||||
|
return Promise.resolve(testFn.call(jasmine, pTest))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
loadCompleted.call(window, loadEvent)
|
||||||
|
})
|
@ -1,11 +1,21 @@
|
|||||||
(function() {
|
(function() {
|
||||||
document.querySelector('#tab-container').insertAdjacentHTML('beforeend', `
|
// Register selftests when loaded by jasmine.
|
||||||
|
if (typeof PLUGINS?.SELFTEST === 'object') {
|
||||||
|
PLUGINS.SELFTEST["release-notes"] = function() {
|
||||||
|
it('should be able to fetch CHANGES.md', async function() {
|
||||||
|
let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/main/CHANGES.md`)
|
||||||
|
expect(releaseNotes.status).toBe(200)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
document.querySelector('#tab-container')?.insertAdjacentHTML('beforeend', `
|
||||||
<span id="tab-news" class="tab">
|
<span id="tab-news" class="tab">
|
||||||
<span><i class="fa fa-bolt icon"></i> What's new?</span>
|
<span><i class="fa fa-bolt icon"></i> What's new?</span>
|
||||||
</span>
|
</span>
|
||||||
`)
|
`)
|
||||||
|
|
||||||
document.querySelector('#tab-content-wrapper').insertAdjacentHTML('beforeend', `
|
document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
|
||||||
<div id="tab-content-news" class="tab-content">
|
<div id="tab-content-news" class="tab-content">
|
||||||
<div id="news" class="tab-content-inner">
|
<div id="news" class="tab-content-inner">
|
||||||
Loading..
|
Loading..
|
||||||
@ -13,6 +23,16 @@
|
|||||||
</div>
|
</div>
|
||||||
`)
|
`)
|
||||||
|
|
||||||
|
const tabNews = document.querySelector('#tab-news')
|
||||||
|
if (tabNews) {
|
||||||
|
linkTabContents(tabNews)
|
||||||
|
}
|
||||||
|
const news = document.querySelector('#news')
|
||||||
|
if (!news) {
|
||||||
|
// news tab not found, dont exec plugin code.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
document.querySelector('body').insertAdjacentHTML('beforeend', `
|
document.querySelector('body').insertAdjacentHTML('beforeend', `
|
||||||
<style>
|
<style>
|
||||||
#tab-content-news .tab-content-inner {
|
#tab-content-news .tab-content-inner {
|
||||||
@ -23,25 +43,22 @@
|
|||||||
</style>
|
</style>
|
||||||
`)
|
`)
|
||||||
|
|
||||||
linkTabContents(document.querySelector('#tab-news'))
|
loadScript('/media/js/marked.min.js').then(async function() {
|
||||||
|
|
||||||
let markedScript = document.createElement('script')
|
|
||||||
markedScript.src = '/media/js/marked.min.js'
|
|
||||||
|
|
||||||
markedScript.onload = async function() {
|
|
||||||
let appConfig = await fetch('/get/app_config')
|
let appConfig = await fetch('/get/app_config')
|
||||||
|
if (!appConfig.ok) {
|
||||||
|
console.error('[release-notes] Failed to get app_config.')
|
||||||
|
return
|
||||||
|
}
|
||||||
appConfig = await appConfig.json()
|
appConfig = await appConfig.json()
|
||||||
|
|
||||||
let updateBranch = appConfig.update_branch || 'main'
|
const updateBranch = appConfig.update_branch || 'main'
|
||||||
|
|
||||||
let news = document.querySelector('#news')
|
|
||||||
let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/${updateBranch}/CHANGES.md`)
|
let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/${updateBranch}/CHANGES.md`)
|
||||||
if (releaseNotes.status != 200) {
|
if (!releaseNotes.ok) {
|
||||||
|
console.error('[release-notes] Failed to get CHANGES.md.')
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
releaseNotes = await releaseNotes.text()
|
releaseNotes = await releaseNotes.text()
|
||||||
news.innerHTML = marked.parse(releaseNotes)
|
news.innerHTML = marked.parse(releaseNotes)
|
||||||
}
|
})
|
||||||
|
|
||||||
document.querySelector('body').appendChild(markedScript)
|
|
||||||
})()
|
})()
|
25
ui/plugins/ui/selftest.plugin.js
Normal file
25
ui/plugins/ui/selftest.plugin.js
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
/* SD-UI Selftest Plugin.js
|
||||||
|
*/
|
||||||
|
(function() { "use strict"
|
||||||
|
const ID_PREFIX = "selftest-plugin"
|
||||||
|
|
||||||
|
const links = document.getElementById("community-links")
|
||||||
|
if (!links) {
|
||||||
|
console.error('%s the ID "community-links" cannot be found.', ID_PREFIX)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add link to Jasmine SpecRunner
|
||||||
|
const pluginLink = document.createElement('li')
|
||||||
|
const options = {
|
||||||
|
'stopSpecOnExpectationFailure': "true",
|
||||||
|
'stopOnSpecFailure': 'false',
|
||||||
|
'random': 'false',
|
||||||
|
'hideDisabled': 'false'
|
||||||
|
}
|
||||||
|
const optStr = Object.entries(options).map(([key, val]) => `${key}=${val}`).join('&')
|
||||||
|
pluginLink.innerHTML = `<a id="${ID_PREFIX}-starttest" href="${location.protocol}/plugins/core/SpecRunner.html?${optStr}" target="_blank"><i class="fa-solid fa-vial-circle-check"></i> Start SelfTest</a>`
|
||||||
|
links.appendChild(pluginLink)
|
||||||
|
|
||||||
|
console.log('%s loaded!', ID_PREFIX)
|
||||||
|
})()
|
@ -1,111 +0,0 @@
|
|||||||
import json
|
|
||||||
|
|
||||||
class Request:
|
|
||||||
session_id: str = "session"
|
|
||||||
prompt: str = ""
|
|
||||||
negative_prompt: str = ""
|
|
||||||
init_image: str = None # base64
|
|
||||||
mask: str = None # base64
|
|
||||||
num_outputs: int = 1
|
|
||||||
num_inference_steps: int = 50
|
|
||||||
guidance_scale: float = 7.5
|
|
||||||
width: int = 512
|
|
||||||
height: int = 512
|
|
||||||
seed: int = 42
|
|
||||||
prompt_strength: float = 0.8
|
|
||||||
sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
|
||||||
# allow_nsfw: bool = False
|
|
||||||
precision: str = "autocast" # or "full"
|
|
||||||
save_to_disk_path: str = None
|
|
||||||
turbo: bool = True
|
|
||||||
use_full_precision: bool = False
|
|
||||||
use_face_correction: str = None # or "GFPGANv1.3"
|
|
||||||
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
|
|
||||||
use_stable_diffusion_model: str = "sd-v1-4"
|
|
||||||
use_vae_model: str = None
|
|
||||||
show_only_filtered_image: bool = False
|
|
||||||
output_format: str = "jpeg" # or "png"
|
|
||||||
output_quality: int = 75
|
|
||||||
|
|
||||||
stream_progress_updates: bool = False
|
|
||||||
stream_image_progress: bool = False
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
return {
|
|
||||||
"session_id": self.session_id,
|
|
||||||
"prompt": self.prompt,
|
|
||||||
"negative_prompt": self.negative_prompt,
|
|
||||||
"num_outputs": self.num_outputs,
|
|
||||||
"num_inference_steps": self.num_inference_steps,
|
|
||||||
"guidance_scale": self.guidance_scale,
|
|
||||||
"width": self.width,
|
|
||||||
"height": self.height,
|
|
||||||
"seed": self.seed,
|
|
||||||
"prompt_strength": self.prompt_strength,
|
|
||||||
"sampler": self.sampler,
|
|
||||||
"use_face_correction": self.use_face_correction,
|
|
||||||
"use_upscale": self.use_upscale,
|
|
||||||
"use_stable_diffusion_model": self.use_stable_diffusion_model,
|
|
||||||
"use_vae_model": self.use_vae_model,
|
|
||||||
"output_format": self.output_format,
|
|
||||||
"output_quality": self.output_quality,
|
|
||||||
}
|
|
||||||
|
|
||||||
def __str__(self):
|
|
||||||
return f'''
|
|
||||||
session_id: {self.session_id}
|
|
||||||
prompt: {self.prompt}
|
|
||||||
negative_prompt: {self.negative_prompt}
|
|
||||||
seed: {self.seed}
|
|
||||||
num_inference_steps: {self.num_inference_steps}
|
|
||||||
sampler: {self.sampler}
|
|
||||||
guidance_scale: {self.guidance_scale}
|
|
||||||
w: {self.width}
|
|
||||||
h: {self.height}
|
|
||||||
precision: {self.precision}
|
|
||||||
save_to_disk_path: {self.save_to_disk_path}
|
|
||||||
turbo: {self.turbo}
|
|
||||||
use_full_precision: {self.use_full_precision}
|
|
||||||
use_face_correction: {self.use_face_correction}
|
|
||||||
use_upscale: {self.use_upscale}
|
|
||||||
use_stable_diffusion_model: {self.use_stable_diffusion_model}
|
|
||||||
use_vae_model: {self.use_vae_model}
|
|
||||||
show_only_filtered_image: {self.show_only_filtered_image}
|
|
||||||
output_format: {self.output_format}
|
|
||||||
output_quality: {self.output_quality}
|
|
||||||
|
|
||||||
stream_progress_updates: {self.stream_progress_updates}
|
|
||||||
stream_image_progress: {self.stream_image_progress}'''
|
|
||||||
|
|
||||||
class Image:
|
|
||||||
data: str # base64
|
|
||||||
seed: int
|
|
||||||
is_nsfw: bool
|
|
||||||
path_abs: str = None
|
|
||||||
|
|
||||||
def __init__(self, data, seed):
|
|
||||||
self.data = data
|
|
||||||
self.seed = seed
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
return {
|
|
||||||
"data": self.data,
|
|
||||||
"seed": self.seed,
|
|
||||||
"path_abs": self.path_abs,
|
|
||||||
}
|
|
||||||
|
|
||||||
class Response:
|
|
||||||
request: Request
|
|
||||||
images: list
|
|
||||||
|
|
||||||
def json(self):
|
|
||||||
res = {
|
|
||||||
"status": 'succeeded',
|
|
||||||
"request": self.request.json(),
|
|
||||||
"output": [],
|
|
||||||
}
|
|
||||||
|
|
||||||
for image in self.images:
|
|
||||||
res["output"].append(image.json())
|
|
||||||
|
|
||||||
return res
|
|
@ -1,162 +0,0 @@
|
|||||||
diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
|
|
||||||
index 79058bc..a473411 100644
|
|
||||||
--- a/optimizedSD/ddpm.py
|
|
||||||
+++ b/optimizedSD/ddpm.py
|
|
||||||
@@ -564,12 +564,12 @@ class UNet(DDPM):
|
|
||||||
unconditional_guidance_scale=unconditional_guidance_scale,
|
|
||||||
callback=callback, img_callback=img_callback)
|
|
||||||
|
|
||||||
+ yield from samples
|
|
||||||
+
|
|
||||||
if(self.turbo):
|
|
||||||
self.model1.to("cpu")
|
|
||||||
self.model2.to("cpu")
|
|
||||||
|
|
||||||
- return samples
|
|
||||||
-
|
|
||||||
@torch.no_grad()
|
|
||||||
def plms_sampling(self, cond,b, img,
|
|
||||||
ddim_use_original_steps=False,
|
|
||||||
@@ -608,10 +608,10 @@ class UNet(DDPM):
|
|
||||||
old_eps.append(e_t)
|
|
||||||
if len(old_eps) >= 4:
|
|
||||||
old_eps.pop(0)
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(pred_x0, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(pred_x0, i)
|
|
||||||
|
|
||||||
- return img
|
|
||||||
+ yield from img_callback(img, len(iterator)-1)
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
|
|
||||||
@@ -740,13 +740,13 @@ class UNet(DDPM):
|
|
||||||
unconditional_guidance_scale=unconditional_guidance_scale,
|
|
||||||
unconditional_conditioning=unconditional_conditioning)
|
|
||||||
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x_dec, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x_dec, i)
|
|
||||||
|
|
||||||
if mask is not None:
|
|
||||||
- return x0 * mask + (1. - mask) * x_dec
|
|
||||||
+ x_dec = x0 * mask + (1. - mask) * x_dec
|
|
||||||
|
|
||||||
- return x_dec
|
|
||||||
+ yield from img_callback(x_dec, len(iterator)-1)
|
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
@@ -820,12 +820,12 @@ class UNet(DDPM):
|
|
||||||
|
|
||||||
|
|
||||||
d = to_d(x, sigma_hat, denoised)
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
dt = sigmas[i + 1] - sigma_hat
|
|
||||||
# Euler method
|
|
||||||
x = x + d * dt
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, img_callback=None):
|
|
||||||
@@ -852,14 +852,14 @@ class UNet(DDPM):
|
|
||||||
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
|
||||||
|
|
||||||
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
d = to_d(x, sigmas[i], denoised)
|
|
||||||
# Euler method
|
|
||||||
dt = sigma_down - sigmas[i]
|
|
||||||
x = x + d * dt
|
|
||||||
x = x + torch.randn_like(x) * sigma_up
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -892,8 +892,8 @@ class UNet(DDPM):
|
|
||||||
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
|
||||||
|
|
||||||
d = to_d(x, sigma_hat, denoised)
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
dt = sigmas[i + 1] - sigma_hat
|
|
||||||
if sigmas[i + 1] == 0:
|
|
||||||
# Euler method
|
|
||||||
@@ -913,7 +913,7 @@ class UNet(DDPM):
|
|
||||||
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
|
|
||||||
d_prime = (d + d_2) / 2
|
|
||||||
x = x + d_prime * dt
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
@@ -944,8 +944,8 @@ class UNet(DDPM):
|
|
||||||
e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
|
|
||||||
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
|
||||||
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
|
|
||||||
d = to_d(x, sigma_hat, denoised)
|
|
||||||
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
|
|
||||||
@@ -966,7 +966,7 @@ class UNet(DDPM):
|
|
||||||
|
|
||||||
d_2 = to_d(x_2, sigma_mid, denoised_2)
|
|
||||||
x = x + d_2 * dt_2
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
@@ -994,8 +994,8 @@ class UNet(DDPM):
|
|
||||||
|
|
||||||
|
|
||||||
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
d = to_d(x, sigmas[i], denoised)
|
|
||||||
# Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
|
|
||||||
sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
|
|
||||||
@@ -1016,7 +1016,7 @@ class UNet(DDPM):
|
|
||||||
d_2 = to_d(x_2, sigma_mid, denoised_2)
|
|
||||||
x = x + d_2 * dt_2
|
|
||||||
x = x + torch.randn_like(x) * sigma_up
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
||||||
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
@@ -1042,8 +1042,8 @@ class UNet(DDPM):
|
|
||||||
e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
|
|
||||||
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
|
||||||
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(x, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(x, i)
|
|
||||||
|
|
||||||
d = to_d(x, sigmas[i], denoised)
|
|
||||||
ds.append(d)
|
|
||||||
@@ -1054,4 +1054,4 @@ class UNet(DDPM):
|
|
||||||
cur_order = min(i + 1, order)
|
|
||||||
coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
|
|
||||||
x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
|
||||||
- return x
|
|
||||||
+ yield from img_callback(x, len(sigmas)-1)
|
|
@ -1,84 +0,0 @@
|
|||||||
diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py
|
|
||||||
index 27ead0e..6215939 100644
|
|
||||||
--- a/ldm/models/diffusion/ddim.py
|
|
||||||
+++ b/ldm/models/diffusion/ddim.py
|
|
||||||
@@ -100,7 +100,7 @@ class DDIMSampler(object):
|
|
||||||
size = (batch_size, C, H, W)
|
|
||||||
print(f'Data shape for DDIM sampling is {size}, eta {eta}')
|
|
||||||
|
|
||||||
- samples, intermediates = self.ddim_sampling(conditioning, size,
|
|
||||||
+ samples = self.ddim_sampling(conditioning, size,
|
|
||||||
callback=callback,
|
|
||||||
img_callback=img_callback,
|
|
||||||
quantize_denoised=quantize_x0,
|
|
||||||
@@ -117,7 +117,8 @@ class DDIMSampler(object):
|
|
||||||
dynamic_threshold=dynamic_threshold,
|
|
||||||
ucg_schedule=ucg_schedule
|
|
||||||
)
|
|
||||||
- return samples, intermediates
|
|
||||||
+ # return samples, intermediates
|
|
||||||
+ yield from samples
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def ddim_sampling(self, cond, shape,
|
|
||||||
@@ -168,14 +169,15 @@ class DDIMSampler(object):
|
|
||||||
unconditional_conditioning=unconditional_conditioning,
|
|
||||||
dynamic_threshold=dynamic_threshold)
|
|
||||||
img, pred_x0 = outs
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(pred_x0, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(pred_x0, i)
|
|
||||||
|
|
||||||
if index % log_every_t == 0 or index == total_steps - 1:
|
|
||||||
intermediates['x_inter'].append(img)
|
|
||||||
intermediates['pred_x0'].append(pred_x0)
|
|
||||||
|
|
||||||
- return img, intermediates
|
|
||||||
+ # return img, intermediates
|
|
||||||
+ yield from img_callback(pred_x0, len(iterator)-1)
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
|
|
||||||
diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py
|
|
||||||
index 7002a36..0951f39 100644
|
|
||||||
--- a/ldm/models/diffusion/plms.py
|
|
||||||
+++ b/ldm/models/diffusion/plms.py
|
|
||||||
@@ -96,7 +96,7 @@ class PLMSSampler(object):
|
|
||||||
size = (batch_size, C, H, W)
|
|
||||||
print(f'Data shape for PLMS sampling is {size}')
|
|
||||||
|
|
||||||
- samples, intermediates = self.plms_sampling(conditioning, size,
|
|
||||||
+ samples = self.plms_sampling(conditioning, size,
|
|
||||||
callback=callback,
|
|
||||||
img_callback=img_callback,
|
|
||||||
quantize_denoised=quantize_x0,
|
|
||||||
@@ -112,7 +112,8 @@ class PLMSSampler(object):
|
|
||||||
unconditional_conditioning=unconditional_conditioning,
|
|
||||||
dynamic_threshold=dynamic_threshold,
|
|
||||||
)
|
|
||||||
- return samples, intermediates
|
|
||||||
+ #return samples, intermediates
|
|
||||||
+ yield from samples
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def plms_sampling(self, cond, shape,
|
|
||||||
@@ -165,14 +166,15 @@ class PLMSSampler(object):
|
|
||||||
old_eps.append(e_t)
|
|
||||||
if len(old_eps) >= 4:
|
|
||||||
old_eps.pop(0)
|
|
||||||
- if callback: callback(i)
|
|
||||||
- if img_callback: img_callback(pred_x0, i)
|
|
||||||
+ if callback: yield from callback(i)
|
|
||||||
+ if img_callback: yield from img_callback(pred_x0, i)
|
|
||||||
|
|
||||||
if index % log_every_t == 0 or index == total_steps - 1:
|
|
||||||
intermediates['x_inter'].append(img)
|
|
||||||
intermediates['pred_x0'].append(pred_x0)
|
|
||||||
|
|
||||||
- return img, intermediates
|
|
||||||
+ # return img, intermediates
|
|
||||||
+ yield from img_callback(pred_x0, len(iterator)-1)
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
|
|
File diff suppressed because it is too large
Load Diff
492
ui/server.py
492
ui/server.py
@ -1,492 +0,0 @@
|
|||||||
"""server.py: FastAPI SD-UI Web Host.
|
|
||||||
Notes:
|
|
||||||
async endpoints always run on the main thread. Without they run on the thread pool.
|
|
||||||
"""
|
|
||||||
import json
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
import socket
|
|
||||||
import picklescan.scanner
|
|
||||||
import rich
|
|
||||||
|
|
||||||
SD_DIR = os.getcwd()
|
|
||||||
print('started in ', SD_DIR)
|
|
||||||
|
|
||||||
SD_UI_DIR = os.getenv('SD_UI_PATH', None)
|
|
||||||
sys.path.append(os.path.dirname(SD_UI_DIR))
|
|
||||||
|
|
||||||
CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
|
|
||||||
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))
|
|
||||||
|
|
||||||
USER_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'plugins', 'ui'))
|
|
||||||
CORE_UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, 'plugins', 'ui'))
|
|
||||||
UI_PLUGINS_SOURCES = ((CORE_UI_PLUGINS_DIR, 'core'), (USER_UI_PLUGINS_DIR, 'user'))
|
|
||||||
|
|
||||||
STABLE_DIFFUSION_MODEL_EXTENSIONS = ['.ckpt', '.safetensors']
|
|
||||||
VAE_MODEL_EXTENSIONS = ['.vae.pt', '.ckpt']
|
|
||||||
|
|
||||||
OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
|
|
||||||
TASK_TTL = 15 * 60 # Discard last session's task timeout
|
|
||||||
APP_CONFIG_DEFAULTS = {
|
|
||||||
# auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
|
|
||||||
'render_devices': 'auto', # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
|
|
||||||
'update_branch': 'main',
|
|
||||||
'ui': {
|
|
||||||
'open_browser_on_start': True,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
APP_CONFIG_DEFAULT_MODELS = [
|
|
||||||
# needed to support the legacy installations
|
|
||||||
'custom-model', # Check if user has a custom model, use it first.
|
|
||||||
'sd-v1-4', # Default fallback.
|
|
||||||
]
|
|
||||||
|
|
||||||
from fastapi import FastAPI, HTTPException
|
|
||||||
from fastapi.staticfiles import StaticFiles
|
|
||||||
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
|
|
||||||
from pydantic import BaseModel
|
|
||||||
import logging
|
|
||||||
#import queue, threading, time
|
|
||||||
from typing import Any, Generator, Hashable, List, Optional, Union
|
|
||||||
|
|
||||||
from sd_internal import Request, Response, task_manager
|
|
||||||
|
|
||||||
app = FastAPI()
|
|
||||||
|
|
||||||
modifiers_cache = None
|
|
||||||
outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)
|
|
||||||
|
|
||||||
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
|
|
||||||
|
|
||||||
# don't show access log entries for URLs that start with the given prefix
|
|
||||||
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/image', '/modifier-thumbnails']
|
|
||||||
|
|
||||||
NOCACHE_HEADERS={"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
|
|
||||||
|
|
||||||
class NoCacheStaticFiles(StaticFiles):
|
|
||||||
def is_not_modified(self, response_headers, request_headers) -> bool:
|
|
||||||
if 'content-type' in response_headers and ('javascript' in response_headers['content-type'] or 'css' in response_headers['content-type']):
|
|
||||||
response_headers.update(NOCACHE_HEADERS)
|
|
||||||
return False
|
|
||||||
|
|
||||||
return super().is_not_modified(response_headers, request_headers)
|
|
||||||
|
|
||||||
app.mount('/media', NoCacheStaticFiles(directory=os.path.join(SD_UI_DIR, 'media')), name="media")
|
|
||||||
|
|
||||||
for plugins_dir, dir_prefix in UI_PLUGINS_SOURCES:
|
|
||||||
app.mount(f'/plugins/{dir_prefix}', NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}")
|
|
||||||
|
|
||||||
def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
|
||||||
try:
|
|
||||||
config_json_path = os.path.join(CONFIG_DIR, 'config.json')
|
|
||||||
if not os.path.exists(config_json_path):
|
|
||||||
return default_val
|
|
||||||
with open(config_json_path, 'r', encoding='utf-8') as f:
|
|
||||||
config = json.load(f)
|
|
||||||
if 'net' not in config:
|
|
||||||
config['net'] = {}
|
|
||||||
if os.getenv('SD_UI_BIND_PORT') is not None:
|
|
||||||
config['net']['listen_port'] = int(os.getenv('SD_UI_BIND_PORT'))
|
|
||||||
if os.getenv('SD_UI_BIND_IP') is not None:
|
|
||||||
config['net']['listen_to_network'] = ( os.getenv('SD_UI_BIND_IP') == '0.0.0.0' )
|
|
||||||
return config
|
|
||||||
except Exception as e:
|
|
||||||
print(str(e))
|
|
||||||
print(traceback.format_exc())
|
|
||||||
return default_val
|
|
||||||
|
|
||||||
def setConfig(config):
|
|
||||||
print( json.dumps(config) )
|
|
||||||
try: # config.json
|
|
||||||
config_json_path = os.path.join(CONFIG_DIR, 'config.json')
|
|
||||||
with open(config_json_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(config, f)
|
|
||||||
except:
|
|
||||||
print(traceback.format_exc())
|
|
||||||
|
|
||||||
try: # config.bat
|
|
||||||
config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
|
|
||||||
config_bat = []
|
|
||||||
|
|
||||||
if 'update_branch' in config:
|
|
||||||
config_bat.append(f"@set update_branch={config['update_branch']}")
|
|
||||||
|
|
||||||
config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
|
|
||||||
bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
|
|
||||||
config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
|
|
||||||
|
|
||||||
config_bat.append(f"@set test_sd2={'Y' if config.get('test_sd2', False) else 'N'}")
|
|
||||||
|
|
||||||
if len(config_bat) > 0:
|
|
||||||
with open(config_bat_path, 'w', encoding='utf-8') as f:
|
|
||||||
f.write('\r\n'.join(config_bat))
|
|
||||||
except:
|
|
||||||
print(traceback.format_exc())
|
|
||||||
|
|
||||||
try: # config.sh
|
|
||||||
config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')
|
|
||||||
config_sh = ['#!/bin/bash']
|
|
||||||
|
|
||||||
if 'update_branch' in config:
|
|
||||||
config_sh.append(f"export update_branch={config['update_branch']}")
|
|
||||||
|
|
||||||
config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
|
|
||||||
bind_ip = '0.0.0.0' if config['net']['listen_to_network'] else '127.0.0.1'
|
|
||||||
config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
|
|
||||||
|
|
||||||
config_sh.append(f"export test_sd2=\"{'Y' if config.get('test_sd2', False) else 'N'}\"")
|
|
||||||
|
|
||||||
if len(config_sh) > 1:
|
|
||||||
with open(config_sh_path, 'w', encoding='utf-8') as f:
|
|
||||||
f.write('\n'.join(config_sh))
|
|
||||||
except:
|
|
||||||
print(traceback.format_exc())
|
|
||||||
|
|
||||||
def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
|
|
||||||
config = getConfig()
|
|
||||||
|
|
||||||
model_dirs = [os.path.join(MODELS_DIR, model_dir), SD_DIR]
|
|
||||||
if not model_name: # When None try user configured model.
|
|
||||||
# config = getConfig()
|
|
||||||
if 'model' in config and model_type in config['model']:
|
|
||||||
model_name = config['model'][model_type]
|
|
||||||
if model_name:
|
|
||||||
is_sd2 = config.get('test_sd2', False)
|
|
||||||
if model_name.startswith('sd2_') and not is_sd2: # temp hack, until SD2 is unified with 1.4
|
|
||||||
print('ERROR: Cannot use SD 2.0 models with SD 1.0 code. Using the sd-v1-4 model instead!')
|
|
||||||
model_name = 'sd-v1-4'
|
|
||||||
|
|
||||||
# Check models directory
|
|
||||||
models_dir_path = os.path.join(MODELS_DIR, model_dir, model_name)
|
|
||||||
for model_extension in model_extensions:
|
|
||||||
if os.path.exists(models_dir_path + model_extension):
|
|
||||||
return models_dir_path
|
|
||||||
if os.path.exists(model_name + model_extension):
|
|
||||||
# Direct Path to file
|
|
||||||
model_name = os.path.abspath(model_name)
|
|
||||||
return model_name
|
|
||||||
# Default locations
|
|
||||||
if model_name in default_models:
|
|
||||||
default_model_path = os.path.join(SD_DIR, model_name)
|
|
||||||
for model_extension in model_extensions:
|
|
||||||
if os.path.exists(default_model_path + model_extension):
|
|
||||||
return default_model_path
|
|
||||||
# Can't find requested model, check the default paths.
|
|
||||||
for default_model in default_models:
|
|
||||||
for model_dir in model_dirs:
|
|
||||||
default_model_path = os.path.join(model_dir, default_model)
|
|
||||||
for model_extension in model_extensions:
|
|
||||||
if os.path.exists(default_model_path + model_extension):
|
|
||||||
if model_name is not None:
|
|
||||||
print(f'Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}')
|
|
||||||
return default_model_path
|
|
||||||
raise Exception('No valid models found.')
|
|
||||||
|
|
||||||
def resolve_ckpt_to_use(model_name:str=None):
|
|
||||||
return resolve_model_to_use(model_name, model_type='stable-diffusion', model_dir='stable-diffusion', model_extensions=STABLE_DIFFUSION_MODEL_EXTENSIONS, default_models=APP_CONFIG_DEFAULT_MODELS)
|
|
||||||
|
|
||||||
def resolve_vae_to_use(model_name:str=None):
|
|
||||||
try:
|
|
||||||
return resolve_model_to_use(model_name, model_type='vae', model_dir='vae', model_extensions=VAE_MODEL_EXTENSIONS, default_models=[])
|
|
||||||
except:
|
|
||||||
return None
|
|
||||||
|
|
||||||
class SetAppConfigRequest(BaseModel):
|
|
||||||
update_branch: str = None
|
|
||||||
render_devices: Union[List[str], List[int], str, int] = None
|
|
||||||
model_vae: str = None
|
|
||||||
ui_open_browser_on_start: bool = None
|
|
||||||
listen_to_network: bool = None
|
|
||||||
listen_port: int = None
|
|
||||||
test_sd2: bool = None
|
|
||||||
|
|
||||||
@app.post('/app_config')
async def setAppConfig(req : SetAppConfigRequest):
    """Merge the non-None fields of a SetAppConfigRequest into the
    persisted app config, then apply any render-device change."""
    config = getConfig()

    if req.update_branch is not None:
        config['update_branch'] = req.update_branch
    if req.render_devices is not None:
        update_render_devices_in_config(config, req.render_devices)
    if req.ui_open_browser_on_start is not None:
        config.setdefault('ui', {})['open_browser_on_start'] = req.ui_open_browser_on_start
    if req.listen_to_network is not None:
        config.setdefault('net', {})['listen_to_network'] = bool(req.listen_to_network)
    if req.listen_port is not None:
        config.setdefault('net', {})['listen_port'] = int(req.listen_port)
    if req.test_sd2 is not None:
        config['test_sd2'] = req.test_sd2

    try:
        setConfig(config)

        # restart/stop render threads only when the device set changed
        if req.render_devices:
            update_render_threads()

        return JSONResponse({'status': 'OK'}, headers=NOCACHE_HEADERS)
    except Exception as e:
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
def is_malicious_model(file_path):
    """Pickle-scan a model file and report whether it looks unsafe.

    Returns True when picklescan reports any issues or infected files.
    NOTE(review): scan *failures* are treated as safe (fail-open) —
    confirm this is intentional before relying on this as a security gate.
    """
    try:
        scan_result = picklescan.scanner.scan_file_path(file_path)
        stats = (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
        flagged = scan_result.issues_count > 0 or scan_result.infected_files > 0
        if flagged:
            rich.print(":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]" % stats)
        else:
            rich.print("Scan %s: [green]%d scanned, %d issue, %d infected.[/green]" % stats)
        return flagged
    except Exception as e:
        print('error while scanning', file_path, 'error:', e)
        return False
|
|
||||||
|
|
||||||
# model_path -> mtime of the last pickle-scanned version of that file
known_models = {}

def getModels():
    """Build the model catalogue served to the UI.

    Scans the stable-diffusion and VAE model directories, pickle-scanning
    every file that is new or modified since the previous call (tracked in
    the module-level known_models mtime cache). On a positive scan the
    offending file name is reported under models['scan-error'].
    """
    models = {
        'active': {
            'stable-diffusion': 'sd-v1-4',
            'vae': '',
        },
        'options': {
            'stable-diffusion': ['sd-v1-4'],
            'vae': [],
        },
    }

    def listModels(models_dirname, model_type, model_extensions):
        # collect model files of one type into models['options'][model_type]
        models_dir = os.path.join(MODELS_DIR, models_dirname)
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)

        for entry in os.listdir(models_dir):
            for extension in model_extensions:
                if not entry.endswith(extension):
                    continue

                model_path = os.path.join(models_dir, entry)
                mtime = os.path.getmtime(model_path)
                # only rescan files that are new or have changed on disk
                if known_models.get(model_path, -1) != mtime:
                    if is_malicious_model(model_path):
                        models['scan-error'] = entry
                        return
                    known_models[model_path] = mtime

                models['options'][model_type].append(entry[:-len(extension)])

        # de-duplicate and present the options alphabetically
        models['options'][model_type] = sorted(set(models['options'][model_type]))

    # custom models
    listModels(models_dirname='stable-diffusion', model_type='stable-diffusion', model_extensions=STABLE_DIFFUSION_MODEL_EXTENSIONS)
    listModels(models_dirname='vae', model_type='vae', model_extensions=VAE_MODEL_EXTENSIONS)

    # legacy: a single 'custom-model.ckpt' dropped next to the SD install
    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
    if os.path.exists(custom_weight_path):
        models['options']['stable-diffusion'].append('custom-model')

    return models
|
|
||||||
|
|
||||||
def getUIPlugins():
    """List the URL paths of every installed UI plugin (*.plugin.js)."""
    return [
        f'/plugins/{dir_prefix}/{file}'
        for plugins_dir, dir_prefix in UI_PLUGINS_SOURCES
        for file in os.listdir(plugins_dir)
        if file.endswith('.plugin.js')
    ]
|
|
||||||
|
|
||||||
def getIPConfig():
    """Return the host's IP addresses, with its hostname appended as a
    reachable address too."""
    hostname, _aliases, addresses = socket.gethostbyname_ex(socket.gethostname())
    addresses.append(hostname)
    return addresses
|
|
||||||
|
|
||||||
@app.get('/get/{key:path}')
def read_web_data(key:str=None):
    """Serve assorted read-only data blobs to the UI, selected by `key`."""
    if not key: # /get without parameters, stable-diffusion easter egg.
        raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot

    if key == 'app_config':
        config = getConfig(default_val=None)
        if config is None:
            config = APP_CONFIG_DEFAULTS
        return JSONResponse(config, headers=NOCACHE_HEADERS)

    if key == 'system_info':
        config = getConfig()
        system_info = {
            'devices': task_manager.get_devices(),
            'hosts': getIPConfig(),
        }
        system_info['devices']['config'] = config.get('render_devices', "auto")
        return JSONResponse(system_info, headers=NOCACHE_HEADERS)

    if key == 'models':
        return JSONResponse(getModels(), headers=NOCACHE_HEADERS)

    if key == 'modifiers':
        return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)

    if key == 'output_dir':
        return JSONResponse({ 'output_dir': outpath }, headers=NOCACHE_HEADERS)

    if key == 'ui_plugins':
        return JSONResponse(getUIPlugins(), headers=NOCACHE_HEADERS)

    raise HTTPException(status_code=404, detail=f'Request for unknown {key}') # HTTP404 Not Found
|
|
||||||
|
|
||||||
@app.get('/ping') # Get server and optionally session status.
def ping(session_id:str=None):
    """Report server health and devices; with a session_id, also report
    that session's task state."""
    # Check that render threads are alive.
    if task_manager.is_alive() <= 0:
        if task_manager.current_state_error:
            raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
        raise HTTPException(status_code=500, detail='Render thread is dead.')
    # StopAsyncIteration means a deliberate stop, not a server failure.
    if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration):
        raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))

    # Alive
    response = {'status': str(task_manager.current_state)}
    if session_id:
        task = task_manager.get_cached_task(session_id, update_ttl=True)
        if task:
            response['task'] = id(task)
            # precedence: running > stopped > error > buffer > completed > pending
            if task.lock.locked():
                state = 'running'
            elif isinstance(task.error, StopAsyncIteration):
                state = 'stopped'
            elif task.error:
                state = 'error'
            elif not task.buffer_queue.empty():
                state = 'buffer'
            elif task.response:
                state = 'completed'
            else:
                state = 'pending'
            response['session'] = state
    response['devices'] = task_manager.get_devices()
    return JSONResponse(response, headers=NOCACHE_HEADERS)
|
|
||||||
|
|
||||||
def save_model_to_config(ckpt_model_name, vae_model_name):
    """Persist the selected checkpoint and VAE model names in the config.

    An empty or None VAE selection removes the 'vae' entry entirely.
    """
    config = getConfig()
    model_config = config.setdefault('model', {})
    model_config['stable-diffusion'] = ckpt_model_name
    if vae_model_name is None or vae_model_name == "":
        model_config.pop('vae', None)
    else:
        model_config['vae'] = vae_model_name
    setConfig(config)
|
|
||||||
|
|
||||||
def update_render_devices_in_config(config, render_devices):
    """Validate a render-device selection and store it in the config dict.

    Accepts 'cpu', 'auto', a 'cuda:N' string (optionally comma-separated,
    which is normalised to a list), or a list/tuple of 'cuda:N' strings.

    Raises:
        HTTPException(400) for any other value. Previously a list crashed
        with AttributeError on `.startswith`, even though the /app_config
        API (SetAppConfigRequest.render_devices) declares lists as valid.
    """
    if isinstance(render_devices, (list, tuple)):
        devices = [str(d) for d in render_devices]
        if not devices or not all(d.startswith('cuda:') for d in devices):
            raise HTTPException(status_code=400, detail=f'Invalid render device requested: {render_devices}')
        config['render_devices'] = devices
        return

    if render_devices not in ('cpu', 'auto') and not (isinstance(render_devices, str) and render_devices.startswith('cuda:')):
        raise HTTPException(status_code=400, detail=f'Invalid render device requested: {render_devices}')

    if render_devices.startswith('cuda:'):
        # 'cuda:0,cuda:1' -> ['cuda:0', 'cuda:1']
        render_devices = render_devices.split(',')

    config['render_devices'] = render_devices
|
|
||||||
|
|
||||||
@app.post('/render')
def render(req : task_manager.ImageRequest):
    """Queue a render task, resolving the requested model names to paths
    and persisting the selection first."""
    try:
        save_model_to_config(req.use_stable_diffusion_model, req.use_vae_model)
        req.use_stable_diffusion_model = resolve_ckpt_to_use(req.use_stable_diffusion_model)
        req.use_vae_model = resolve_vae_to_use(req.use_vae_model)

        new_task = task_manager.render(req)
        task_id = id(new_task)
        response = {
            'status': str(task_manager.current_state),
            'queue': len(task_manager.tasks_queue),
            'stream': f'/image/stream/{req.session_id}/{task_id}',
            'task': task_id,
        }
        return JSONResponse(response, headers=NOCACHE_HEADERS)
    except ChildProcessError as e: # Render thread is dead
        raise HTTPException(status_code=500, detail=f'Rendering thread has died.') # HTTP500 Internal Server Error
    except ConnectionRefusedError as e: # Unstarted task pending, deny queueing more than one.
        raise HTTPException(status_code=503, detail=f'Session {req.session_id} has an already pending task.') # HTTP503 Service Unavailable
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.get('/image/stream/{session_id:str}/{task_id:int}')
def stream(session_id:str, task_id:int):
    """Stream live render output for a session's task.

    #TODO Move to WebSockets ??
    """
    task = task_manager.get_cached_task(session_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=410, detail='No request received.') # HTTP410 Gone
    if id(task) != task_id:
        raise HTTPException(status_code=409, detail=f'Wrong task id received. Expected:{id(task)}, Received:{task_id}') # HTTP409 Conflict

    if task.buffer_queue.empty() and not task.lock.locked():
        # nothing buffered and nothing running: either done or not started
        if task.response:
            return JSONResponse(task.response, headers=NOCACHE_HEADERS)
        raise HTTPException(status_code=425, detail='Too Early, task not started yet.') # HTTP425 Too Early

    return StreamingResponse(task.read_buffer_generator(), media_type='application/json')
|
|
||||||
|
|
||||||
@app.get('/image/stop')
def stop(session_id:str=None):
    """Stop a session's task; with no session_id, stop whatever the
    server is currently rendering."""
    if not session_id:
        if task_manager.current_state in (task_manager.ServerStates.Online, task_manager.ServerStates.Unavailable):
            raise HTTPException(status_code=409, detail='Not currently running any tasks.') # HTTP409 Conflict
        task_manager.current_state_error = StopAsyncIteration('')
        return {'OK'}

    task = task_manager.get_cached_task(session_id, update_ttl=False)
    if not task:
        raise HTTPException(status_code=404, detail=f'Session {session_id} has no active task.') # HTTP404 Not Found
    if isinstance(task.error, StopAsyncIteration):
        raise HTTPException(status_code=409, detail=f'Session {session_id} task is already stopped.') # HTTP409 Conflict
    task.error = StopAsyncIteration('')
    return {'OK'}
|
|
||||||
|
|
||||||
@app.get('/image/tmp/{session_id}/{img_id:int}')
def get_image(session_id, img_id):
    """Serve an in-progress preview image for a session's task."""
    task = task_manager.get_cached_task(session_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=410, detail=f'Session {session_id} has not submitted a task.') # HTTP410 Gone
    if not task.temp_images[img_id]:
        raise HTTPException(status_code=425, detail='Too Early, task data is not available yet.') # HTTP425 Too Early
    try:
        # re-fetch inside the try: the entry may vanish between the check above and here
        buf = task.temp_images[img_id]
        buf.seek(0)
        return StreamingResponse(buf, media_type='image/jpeg')
    except KeyError as e:
        raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.get('/')
def read_root():
    """Serve the web UI's entry page."""
    index_path = os.path.join(SD_UI_DIR, 'index.html')
    return FileResponse(index_path, headers=NOCACHE_HEADERS)
|
|
||||||
|
|
||||||
@app.on_event("shutdown")
def shutdown_event(): # Signal render thread to close on shutdown
    # Setting a SystemExit as the shared error state presumably makes the
    # render threads exit their loops — verify against task_manager.
    task_manager.current_state_error = SystemExit('Application shutting down.')
|
|
||||||
|
|
||||||
# don't log certain requests
class LogSuppressFilter(logging.Filter):
    """Drop uvicorn access-log records whose message contains any of the
    suppressed path prefixes."""
    def filter(self, record: logging.LogRecord) -> bool:
        path = record.getMessage()
        # substring match (not a true prefix match), same as the original
        # `path.find(prefix) != -1` — just written idiomatically
        return not any(prefix in path for prefix in ACCESS_LOG_SUPPRESS_PATH_PREFIXES)

logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())
|
|
||||||
|
|
||||||
# Check models and prepare cache for UI open
# (also pickle-scans any new/changed model files via is_malicious_model)
getModels()

# Start the task_manager
# default models are resolved now so render threads can load them immediately
task_manager.default_model_to_load = resolve_ckpt_to_use()
task_manager.default_vae_to_load = resolve_vae_to_use()
|
|
||||||
|
|
||||||
def update_render_threads():
    """Sync the task manager's render threads with the configured devices."""
    config = getConfig()
    requested_devices = config.get('render_devices', 'auto')
    active_devices = task_manager.get_devices()['active'].keys()

    print('requesting for render_devices', requested_devices)
    task_manager.update_render_threads(requested_devices, active_devices)

update_render_threads()
|
|
||||||
|
|
||||||
# start the browser ui
def open_browser():
    """Open the web UI in the default browser unless disabled via
    config['ui']['open_browser_on_start'] (defaults to on).

    The port comes from config['net']['listen_port'], defaulting to 9000.
    """
    config = getConfig()
    ui = config.get('ui', {})
    port = config.get('net', {}).get('listen_port', 9000)
    if ui.get('open_browser_on_start', True):
        # imported lazily: only needed when we actually open a browser
        import webbrowser
        webbrowser.open(f"http://localhost:{port}")

open_browser()
|
|
Reference in New Issue
Block a user