Merge branch 'beta' into mdiller_ui_reorganize
commit 69c7f22053
@@ -191,7 +191,9 @@ call WHERE uvicorn > .tmp

if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
+if not exist "..\models\vae" mkdir "..\models\vae"
echo. > "..\models\stable-diffusion\Put your custom ckpt files here.txt"
+echo. > "..\models\vae\Put your VAE files here.txt"

@if exist "sd-v1-4.ckpt" (
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
@@ -321,22 +323,22 @@ echo. > "..\models\stable-diffusion\Put your custom ckpt files here.txt"

-@if exist "..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt" (
-for %%I in ("..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt") do if "%%~zI" EQU "334695179" (
+@if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
+for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" EQU "334695179" (
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
) else (
-echo. & echo "The default VAE (sd-vae-ft-mse-original) file present at models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
-del "..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt"
+echo. & echo "The default VAE (sd-vae-ft-mse-original) file present at models\vae\vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
+del "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt"
)
)

-@if not exist "..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt" (
+@if not exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
@echo. & echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).." & echo.

-@call curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt
+@call curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt

-@if exist "..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt" (
-for %%I in ("..\models\stable-diffusion\vae-ft-mse-840000-ema-pruned.vae.pt") do if "%%~zI" NEQ "334695179" (
+@if exist "..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt" (
+for %%I in ("..\models\vae\vae-ft-mse-840000-ema-pruned.ckpt") do if "%%~zI" NEQ "334695179" (
echo. & echo "Error: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: %%~zI" & echo.
echo. & echo "Error downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
pause
@@ -170,7 +170,9 @@ fi

mkdir -p "../models/stable-diffusion"
+mkdir -p "../models/vae"
echo "" > "../models/stable-diffusion/Put your custom ckpt files here.txt"
+echo "" > "../models/vae/Put your VAE files here.txt"

if [ -f "sd-v1-4.ckpt" ]; then
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
@@ -300,24 +302,24 @@ if [ ! -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
fi

-if [ -f "../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt" ]; then
-model_size=`find ../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt -printf "%s"`
+if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
+model_size=`find ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt -printf "%s"`

if [ "$model_size" -eq "334695179" ]; then
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
else
-printf "\n\nThe model file present at models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt is invalid. It is only $model_size bytes in size. Re-downloading.."
-rm ../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt
+printf "\n\nThe model file present at models/vae/vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
+rm ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
fi
fi

-if [ ! -f "../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt" ]; then
+if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).."

-curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt
+curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt

-if [ -f "../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt" ]; then
-model_size=`find ../models/stable-diffusion/vae-ft-mse-840000-ema-pruned.vae.pt -printf "%s"`
+if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
+model_size=`find ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt -printf "%s"`
if [ ! "$model_size" -eq "334695179" ]; then
printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
@@ -7,7 +7,7 @@
<link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
<link rel="stylesheet" href="/media/css/fonts.css?v=1">
<link rel="stylesheet" href="/media/css/themes.css?v=1">
-<link rel="stylesheet" href="/media/css/main.css?v=4">
+<link rel="stylesheet" href="/media/css/main.css?v=6">
<link rel="stylesheet" href="/media/css/auto-save.css?v=3">
<link rel="stylesheet" href="/media/css/modifier-thumbnails.css?v=3">
<link rel="stylesheet" href="/media/css/fontawesome-all.min.css?v=1">
@@ -19,7 +19,7 @@
<div id="container">
<div id="top-nav">
<div id="logo">
-<h1>Stable Diffusion UI <small>v2.3.9 <span id="updateBranchLabel"></span></small></h1>
+<h1>Stable Diffusion UI <small>v2.3.10 <span id="updateBranchLabel"></span></small></h1>
</div>
<div id="server-status">
<div id="server-status-color">●</div>
@@ -47,7 +47,11 @@
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->

-<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">Negative Prompt <small>(optional)</small></label>
+<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
+Negative Prompt
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about Negative Prompts</span></i></a>
+<small>(optional)</small>
+</label>
<div class="collapsible-content">
<input id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)">
</div>
@@ -64,7 +68,12 @@
</div>

<br/>
-<input id="enable_mask" name="enable_mask" type="checkbox"> <label for="enable_mask">In-Painting (beta) <small>(select the area which the AI will paint into)</small></label>
+<input id="enable_mask" name="enable_mask" type="checkbox">
+<label for="enable_mask">
+In-Painting (beta)
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Inpainting" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about InPainting</span></i></a>
+<small>(select the area which the AI will paint into)</small>
+</label>
<div id="inpaintingEditor"></div>
</div>
</div>
@@ -92,12 +101,19 @@
<div id="editor-settings-entries" class="collapsible-content">
<div><table>
<tr><b class="settings-subheader">Image Settings</b></tr>
-<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
-<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
+<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="30000" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
+<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td>
<select id="stable_diffusion_model" name="stable_diffusion_model">
<!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
</select>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about custom models</span></i></a>
</td></tr>
+<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
+<select id="vae_model" name="vae_model">
+<!-- <option value="" selected>None</option> -->
+</select>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about VAEs</span></i></a>
+</td></tr>
<tr id="samplerSelection" class="pl-5"><td><label for="sampler">Sampler:</label></td><td>
<select id="sampler" name="sampler">
@@ -110,6 +126,7 @@
<option value="dpm2_a">dpm2_a</option>
<option value="lms">lms</option>
</select>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about samplers</span></i></a>
</td></tr>
<tr class="pl-5"><td><label>Image Size: </label></td><td>
<select id="width" name="width" value="512">
@@ -157,9 +174,9 @@
</select>
<label for="height"><small>(height)</small></label>
</td></tr>
-<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25"></td></tr>
-<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4"></td></tr>
-<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></td></tr></span>
+<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
+<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
+<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr></span>
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
<select id="output_format" name="output_format">
<option value="jpeg" selected>jpeg</option>
@@ -170,7 +187,7 @@

<div><ul>
<li><b class="settings-subheader">Render Settings</b></li>
-<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slightly slower image creation)</small></label></li>
+<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, and slower image creation)</small></label></li>
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox" checked> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
<li class="pl-5">
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
@@ -274,14 +291,15 @@
</div>
</body>

-<script src="media/js/parameters.js?v=1"></script>
+<script src="media/js/parameters.js?v=2"></script>
<script src="media/js/plugins.js?v=1"></script>
-<script src="media/js/utils.js?v=4"></script>
+<script src="media/js/utils.js?v=5"></script>
<script src="media/js/inpainting-editor.js?v=1"></script>
<script src="media/js/image-modifiers.js?v=4"></script>
-<script src="media/js/auto-save.js?v=3"></script>
-<script src="media/js/main.js?v=7"></script>
+<script src="media/js/auto-save.js?v=4"></script>
+<script src="media/js/main.js?v=8"></script>
<script src="media/js/themes.js?v=3"></script>
+<script src="media/js/dnd.js?v=6"></script>
<script>
async function init() {
await initSettings()
@@ -593,13 +593,38 @@ input::file-selector-button {
#server-status {
top: 66%;
}
+.popup > div {
+padding-left: 5px !important;
+padding-right: 5px !important;
+}
+.popup > div input, .popup > div select {
+max-width: 40vw;
+}
+.popup .close-button {
+padding: 0px !important;
+margin: 24px !important;
+}
+.simple-tooltip.right {
+right: initial;
+left: 0px;
+top: 50%;
+transform: translate(calc(-100% + 15%), -50%);
+}
+:hover > .simple-tooltip.right {
+transform: translate(100%, -50%);
+}
}

@media (min-width: 700px) {
#editor {
max-width: 500px;
}
}

+.help-btn {
+position: relative;
+}
+
#promptsFromFileBtn {
font-size: 9pt;
}
@@ -608,6 +633,20 @@ input::file-selector-button {
position: relative;
transform: translateY(-13%);
}
+#copy-image-settings {
+position: relative;
+cursor: pointer;
+padding: 8px;
+opacity: 1;
+transition: opacity 0.2;
+}
+.collapsible:not(.active) #copy-image-settings {
+display: none;
+}
+#copy-image-settings.hidden {
+opacity: 0;
+pointer-events: none;
+}

.section-button {
cursor: pointer;
@@ -633,7 +672,7 @@ input::file-selector-button {
.simple-tooltip {
border-radius: 3px;
font-weight: bold;
-font-size: 16px;
+font-size: 12px;
background-color: var(--background-color3);

visibility: hidden;
@@ -744,6 +783,12 @@ input::file-selector-button {
transition: 0s visibility, 0.3s opacity;
}

+@media only screen and (min-height: 1050px) {
+.popup {
+position: fixed;
+}
+}
+
.popup > div {
position: relative;
background: var(--background-color2);
@@ -806,33 +851,3 @@ input::file-selector-button {
}

-
-/* FLOATING FIXED STUFF */
-/*
-@media (min-width: 700px) {
-#editor {
-max-width: 500px;
-}
-
-#tab-content {
-flex-direction: column;
-}
-
-#editor {
-flex-direction: row;
-max-width: none;
-max-height: 500px;
-}
-
-#editor .line-separator {
-height: inherit;
-flex-basis: 8px;
-flex-grow: 0;
-flex-shrink: 0;
-margin: 0px 16px;
-}
-
-#editor-settings-entries {
-flex-direction: row;
-}
-}
-*/
@@ -13,6 +13,7 @@ const SETTINGS_IDS_LIST = [
"num_outputs_total",
"num_outputs_parallel",
"stable_diffusion_model",
+"vae_model",
"sampler",
"width",
"height",
ui/media/js/dnd.js (new file, 417 lines)
@@ -0,0 +1,417 @@
"use strict" // Opt in to a restricted variant of JavaScript

function parseBoolean(stringValue) {
    if (typeof stringValue === 'boolean') {
        return stringValue
    }
    if (typeof stringValue === 'number') {
        return stringValue !== 0
    }
    if (typeof stringValue !== 'string') {
        return false
    }
    switch(stringValue?.toLowerCase()?.trim()) {
        case "true":
        case "yes":
        case "on":
        case "1":
            return true;

        case "false":
        case "no":
        case "off":
        case "0":
        case null:
        case undefined:
            return false;
    }
    try {
        return Boolean(JSON.parse(stringValue));
    } catch {
        return Boolean(stringValue)
    }
}
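// A quick sanity check of the parsing behavior above (illustrative calls, not part of the commit):
//   parseBoolean(" YES ")  // true  - keyword match is case-insensitive and trimmed
//   parseBoolean("off")    // false
//   parseBoolean(2)        // true  - any non-zero number
//   parseBoolean(null)     // false - not a boolean, number, or string
//   parseBoolean("[1]")    // true  - falls through to JSON.parse, then Boolean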

const TASK_MAPPING = {
    prompt: { name: 'Prompt',
        setUI: (prompt) => {
            promptField.value = prompt
        },
        readUI: () => promptField.value,
        parse: (val) => val
    },
    negative_prompt: { name: 'Negative Prompt',
        setUI: (negative_prompt) => {
            negativePromptField.value = negative_prompt
        },
        readUI: () => negativePromptField.value,
        parse: (val) => val
    },
    width: { name: 'Width',
        setUI: (width) => {
            const oldVal = widthField.value
            widthField.value = width
            if (!widthField.value) {
                widthField.value = oldVal
            }
        },
        readUI: () => parseInt(widthField.value),
        parse: (val) => parseInt(val)
    },
    height: { name: 'Height',
        setUI: (height) => {
            const oldVal = heightField.value
            heightField.value = height
            if (!heightField.value) {
                heightField.value = oldVal
            }
        },
        readUI: () => parseInt(heightField.value),
        parse: (val) => parseInt(val)
    },
    seed: { name: 'Seed',
        setUI: (seed) => {
            if (!seed) {
                randomSeedField.checked = true
                seedField.disabled = true
                return
            }
            randomSeedField.checked = false
            seedField.disabled = false
            seedField.value = seed
        },
        readUI: () => (randomSeedField.checked ? Math.floor(Math.random() * 10000000) : parseInt(seedField.value)),
        parse: (val) => parseInt(val)
    },
    num_inference_steps: { name: 'Steps',
        setUI: (num_inference_steps) => {
            numInferenceStepsField.value = num_inference_steps
        },
        readUI: () => parseInt(numInferenceStepsField.value),
        parse: (val) => parseInt(val)
    },
    guidance_scale: { name: 'Guidance Scale',
        setUI: (guidance_scale) => {
            guidanceScaleField.value = guidance_scale
            updateGuidanceScaleSlider()
        },
        readUI: () => parseFloat(guidanceScaleField.value),
        parse: (val) => parseFloat(val)
    },
    prompt_strength: { name: 'Prompt Strength',
        setUI: (prompt_strength) => {
            promptStrengthField.value = prompt_strength
            updatePromptStrengthSlider()
        },
        readUI: () => parseFloat(promptStrengthField.value),
        parse: (val) => parseFloat(val)
    },

    init_image: { name: 'Initial Image',
        setUI: (init_image) => {
            initImagePreview.src = init_image
        },
        readUI: () => initImagePreview.src,
        parse: (val) => val
    },
    mask: { name: 'Mask',
        setUI: (mask) => {
            inpaintingEditor.setImg(mask)
            maskSetting.checked = Boolean(mask)
        },
        readUI: () => (maskSetting.checked ? inpaintingEditor.getImg() : undefined),
        parse: (val) => val
    },

    use_face_correction: { name: 'Use Face Correction',
        setUI: (use_face_correction) => {
            useFaceCorrectionField.checked = parseBoolean(use_face_correction)
        },
        readUI: () => useFaceCorrectionField.checked,
        parse: (val) => parseBoolean(val)
    },
    use_upscale: { name: 'Use Upscaling',
        setUI: (use_upscale) => {
            const oldVal = upscaleModelField.value
            upscaleModelField.value = use_upscale
            if (upscaleModelField.value) { // Is a valid value for the field.
                useUpscalingField.checked = true
                upscaleModelField.disabled = false
            } else { // Not a valid value, restore the old value and disable the filter.
                upscaleModelField.disabled = true
                upscaleModelField.value = oldVal
                useUpscalingField.checked = false
            }
        },
        readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
        parse: (val) => val
    },
    sampler: { name: 'Sampler',
        setUI: (sampler) => {
            samplerField.value = sampler
        },
        readUI: () => samplerField.value,
        parse: (val) => val
    },
    use_stable_diffusion_model: { name: 'Stable Diffusion model',
        setUI: (use_stable_diffusion_model) => {
            const oldVal = stableDiffusionModelField.value

            let pathIdx = use_stable_diffusion_model.lastIndexOf('/') // Linux, Mac paths
            if (pathIdx < 0) {
                pathIdx = use_stable_diffusion_model.lastIndexOf('\\') // Windows paths.
            }
            if (pathIdx >= 0) {
                use_stable_diffusion_model = use_stable_diffusion_model.slice(pathIdx + 1)
            }
            const modelExt = '.ckpt'
            if (use_stable_diffusion_model.endsWith(modelExt)) {
                use_stable_diffusion_model = use_stable_diffusion_model.slice(0, use_stable_diffusion_model.length - modelExt.length)
            }

            stableDiffusionModelField.value = use_stable_diffusion_model

            if (!stableDiffusionModelField.value) {
                stableDiffusionModelField.value = oldVal
            }
        },
        readUI: () => stableDiffusionModelField.value,
        parse: (val) => val
    },

    numOutputsParallel: { name: 'Parallel Images',
        setUI: (numOutputsParallel) => {
            numOutputsParallelField.value = numOutputsParallel
        },
        readUI: () => parseInt(numOutputsParallelField.value),
        parse: (val) => val
    },

    use_cpu: { name: 'Use CPU',
        setUI: (use_cpu) => {
            useCPUField.checked = use_cpu
        },
        readUI: () => useCPUField.checked,
        parse: (val) => val
    },
    turbo: { name: 'Turbo',
        setUI: (turbo) => {
            turboField.checked = turbo
        },
        readUI: () => turboField.checked,
        parse: (val) => Boolean(val)
    },
    use_full_precision: { name: 'Use Full Precision',
        setUI: (use_full_precision) => {
            useFullPrecisionField.checked = use_full_precision
        },
        readUI: () => useFullPrecisionField.checked,
        parse: (val) => Boolean(val)
    },

    stream_image_progress: { name: 'Stream Image Progress',
        setUI: (stream_image_progress) => {
            streamImageProgressField.checked = (parseInt(numOutputsTotalField.value) > 50 ? false : stream_image_progress)
        },
        readUI: () => streamImageProgressField.checked,
        parse: (val) => Boolean(val)
    },
    show_only_filtered_image: { name: 'Show only the corrected/upscaled image',
        setUI: (show_only_filtered_image) => {
            showOnlyFilteredImageField.checked = show_only_filtered_image
        },
        readUI: () => showOnlyFilteredImageField.checked,
        parse: (val) => Boolean(val)
    },
    output_format: { name: 'Output Format',
        setUI: (output_format) => {
            outputFormatField.value = output_format
        },
        readUI: () => outputFormatField.value,
        parse: (val) => val
    },
    save_to_disk_path: { name: 'Save to disk path',
        setUI: (save_to_disk_path) => {
            saveToDiskField.checked = Boolean(save_to_disk_path)
            diskPathField.value = save_to_disk_path
        },
        readUI: () => diskPathField.value,
        parse: (val) => val
    }
}
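// Each TASK_MAPPING entry bundles a display name with setUI/readUI/parse hooks, so the restore
// and read logic below can treat every setting uniformly. Illustrative use (not in the commit):
//   TASK_MAPPING.seed.setUI(42)                             // fills the seed field, unchecks "Random"
//   const steps = TASK_MAPPING.num_inference_steps.readUI() // parseInt of the field's value
//   const scale = TASK_MAPPING.guidance_scale.parse('7.5')  // 7.5, via parseFloat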
function restoreTaskToUI(task) {
    if ('numOutputsTotal' in task) {
        numOutputsTotalField.value = task.numOutputsTotal
    }
    if ('seed' in task) {
        randomSeedField.checked = false
        seedField.value = task.seed
    }
    if (!('reqBody' in task)) {
        return
    }
    for (const key in TASK_MAPPING) {
        if (key in task.reqBody) {
            TASK_MAPPING[key].setUI(task.reqBody[key])
        }
    }
}
function readUI() {
    const reqBody = {}
    for (const key in TASK_MAPPING) {
        reqBody[key] = TASK_MAPPING[key].readUI()
    }
    return {
        'numOutputsTotal': parseInt(numOutputsTotalField.value),
        'seed': TASK_MAPPING['seed'].readUI(),
        'reqBody': reqBody
    }
}

const TASK_TEXT_MAPPING = {
    width: 'Width',
    height: 'Height',
    seed: 'Seed',
    num_inference_steps: 'Steps',
    guidance_scale: 'Guidance Scale',
    prompt_strength: 'Prompt Strength',
    use_face_correction: 'Use Face Correction',
    use_upscale: 'Use Upscaling',
    sampler: 'Sampler',
    negative_prompt: 'Negative Prompt',
    use_stable_diffusion_model: 'Stable Diffusion model'
}
const afterPromptRe = /^\s*Width\s*:\s*\d+\s*(?:\r\n|\r|\n)+\s*Height\s*:\s*\d+\s*(\r\n|\r|\n)+Seed\s*:\s*\d+\s*$/igm
function parseTaskFromText(str) {
    const taskReqBody = {}

    // Prompt
    afterPromptRe.lastIndex = 0
    const match = afterPromptRe.exec(str)
    if (match) {
        let prompt = str.slice(0, match.index)
        str = str.slice(prompt.length)
        taskReqBody.prompt = prompt.trim()
        console.log('Prompt:', taskReqBody.prompt)
    }
    for (const key in TASK_TEXT_MAPPING) {
        const name = TASK_TEXT_MAPPING[key];
        let val = undefined

        const reName = new RegExp(`${name}\\ *:\\ *(.*)(?:\\r\\n|\\r|\\n)*`, 'igm')
        const match = reName.exec(str);
        if (match) {
            console.log(match)
            str = str.slice(0, match.index) + str.slice(match.index + match[0].length)
            val = match[1]
        }
        if (val !== undefined) {
            taskReqBody[key] = TASK_MAPPING[key].parse(val.trim())
            console.log(TASK_MAPPING[key].name + ':', taskReqBody[key])
            if (!str) {
                break
            }
        }
    }
    if (Object.keys(taskReqBody).length <= 0) {
        return undefined
    }
    const task = { reqBody: taskReqBody }
    if ('seed' in taskReqBody) {
        task.seed = taskReqBody.seed
    }
    return task
}
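// For reference, a hypothetical .txt payload that parseTaskFromText accepts: the prompt must be
// followed by Width/Height/Seed lines for afterPromptRe to match, and the remaining labels come
// from TASK_TEXT_MAPPING (the values below are examples only):
//   a photograph of an astronaut riding a horse
//   Width: 512
//   Height: 512
//   Seed: 30000
//   Steps: 25
//   Guidance Scale: 7.5
// parseTaskFromText on this text yields { seed: 30000, reqBody: { prompt: '...', width: 512, ... } }.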

async function readFile(file, i) {
    const fileContent = (await file.text()).trim()

    // JSON File.
    if (fileContent.startsWith('{') && fileContent.endsWith('}')) {
        try {
            const task = JSON.parse(fileContent)
            restoreTaskToUI(task)
        } catch (e) {
            console.warn(`file[${i}]:${file.name} - File couldn't be parsed.`, e)
        }
        return
    }

    // Normal txt file.
    const task = parseTaskFromText(fileContent)
    if (task) {
        restoreTaskToUI(task)
    } else {
        console.warn(`file[${i}]:${file.name} - File couldn't be parsed.`)
    }
}

function dropHandler(ev) {
    console.log('Content dropped...')
    let items = []

    if (ev?.dataTransfer?.items) { // Use DataTransferItemList interface
        items = Array.from(ev.dataTransfer.items)
        items = items.filter(item => item.kind === 'file')
        items = items.map(item => item.getAsFile())
    } else if (ev?.dataTransfer?.files) { // Use DataTransfer interface
        items = Array.from(ev.dataTransfer.files)
    }

    items = items.filter(item => item.name.toLowerCase().endsWith('.txt') || item.name.toLowerCase().endsWith('.json'))

    if (items.length > 0) {
        ev.preventDefault() // Prevent default behavior (Prevent file/content from being opened)
        items.forEach(readFile)
    }
}
function dragOverHandler(ev) {
    console.log('Content in drop zone')

    // Prevent default behavior (Prevent file/content from being opened)
    ev.preventDefault()

    ev.dataTransfer.dropEffect = "copy"

    let img = new Image()
    img.src = location.host + '/media/images/favicon-32x32.png'
    ev.dataTransfer.setDragImage(img, 16, 16)
}

document.addEventListener("drop", dropHandler)
document.addEventListener("dragover", dragOverHandler)

const TASK_REQ_NO_EXPORT = [
    "use_cpu",
    "turbo",
    "use_full_precision",
    "save_to_disk_path"
]

// Adds a copy icon if the browser grants permission to write to clipboard.
function checkWriteToClipboardPermission (result) {
    if (result.state == "granted" || result.state == "prompt") {
        const resetSettings = document.getElementById('reset-image-settings')
        const copyIcon = document.createElement('i')
        copyIcon.id = 'copy-image-settings'
        copyIcon.className = 'fa-solid fa-clipboard'
        copyIcon.innerHTML = `<span class="simple-tooltip right">Copy Image Settings</span>`
        copyIcon.addEventListener('click', (event) => {
            event.stopPropagation()
            const uiState = readUI()
            TASK_REQ_NO_EXPORT.forEach((key) => delete uiState.reqBody[key])
            if (uiState.reqBody.init_image && !IMAGE_REGEX.test(uiState.reqBody.init_image)) {
                delete uiState.reqBody.init_image
                delete uiState.reqBody.prompt_strength
            }
            navigator.clipboard.writeText(JSON.stringify(uiState, undefined, 4))
        })
        resetSettings.parentNode.insertBefore(copyIcon, resetSettings)
    }
}
navigator.permissions.query({ name: "clipboard-write" }).then(checkWriteToClipboardPermission, (e) => {
    if (e instanceof TypeError && typeof navigator?.clipboard?.writeText === 'function') {
        // Fix for firefox https://bugzilla.mozilla.org/show_bug.cgi?id=1560373
        checkWriteToClipboardPermission({state:"granted"})
    }
})
@@ -39,7 +39,7 @@ let useFaceCorrectionField = document.querySelector("#use_face_correction")
let useUpscalingField = document.querySelector("#use_upscale")
let upscaleModelField = document.querySelector("#upscale_model")
let stableDiffusionModelField = document.querySelector('#stable_diffusion_model')
-let vaeModelField = document.querySelector('#default_vae_model')
+let vaeModelField = document.querySelector('#vae_model')
let outputFormatField = document.querySelector('#output_format')
let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
let updateBranchLabel = document.querySelector("#updateBranchLabel")
@@ -340,10 +340,10 @@ function onDownloadImageClick(req, img) {
imgDownload.click()
}

-function modifyCurrentRequest(req, ...reqDiff) {
+function modifyCurrentRequest(...reqDiff) {
const newTaskRequest = getCurrentUserRequest()

-newTaskRequest.reqBody = Object.assign({}, req, ...reqDiff, {
+newTaskRequest.reqBody = Object.assign(newTaskRequest.reqBody, ...reqDiff, {
use_cpu: useCPUField.checked
})
newTaskRequest.seed = newTaskRequest.reqBody.seed
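// With the new signature, callers pass only the overrides; the base request body now comes from
// getCurrentUserRequest() inside modifyCurrentRequest. A hypothetical call site:
//   const retryTask = modifyCurrentRequest({ seed: 42, use_upscale: 'RealESRGAN_x4plus' })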
@@ -774,6 +774,7 @@ function getCurrentUserRequest() {
use_cpu: useCPUField.checked,
use_full_precision: useFullPrecisionField.checked,
use_stable_diffusion_model: stableDiffusionModelField.value,
+use_vae_model: vaeModelField.value,
stream_progress_updates: true,
stream_image_progress: (numOutputsTotal > 50 ? false : streamImageProgressField.checked),
show_only_filtered_image: showOnlyFilteredImageField.checked,
@@ -823,7 +824,10 @@ function makeImage() {

function createTask(task) {
let taskConfig = `Seed: ${task.seed}, Sampler: ${task.reqBody.sampler}, Inference Steps: ${task.reqBody.num_inference_steps}, Guidance Scale: ${task.reqBody.guidance_scale}, Model: ${task.reqBody.use_stable_diffusion_model}`
-if (negativePromptField.value.trim() !== '') {
+if (task.reqBody.use_vae_model.trim() !== '') {
+taskConfig += `, VAE: ${task.reqBody.use_vae_model}`
+}
+if (task.reqBody.negative_prompt.trim() !== '') {
taskConfig += `, Negative Prompt: ${task.reqBody.negative_prompt}`
}
if (task.reqBody.init_image !== undefined) {
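// With a custom VAE selected, the taskConfig label built above now reads along these lines
// (values invented for illustration):
//   Seed: 42, Sampler: plms, Inference Steps: 25, Guidance Scale: 7.5, Model: sd-v1-4, VAE: vae-ft-mse-840000-ema-pruned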
@@ -1141,12 +1145,6 @@ useBetaChannelField.addEventListener('click', async function(e) {
})
})

-vaeModelField.addEventListener('change', async function() {
-await changeAppConfig({
-'model_vae': this.value
-})
-})
-
async function getAppConfig() {
try {
let res = await fetch('/get/app_config')
@@ -1165,22 +1163,25 @@ async function getAppConfig() {

async function getModels() {
try {
-var model_setting_key = "stable_diffusion_model"
-var selectedModel = SETTINGS[model_setting_key].value
+var sd_model_setting_key = "stable_diffusion_model"
+var vae_model_setting_key = "vae_model"
+var selectedSDModel = SETTINGS[sd_model_setting_key].value
+var selectedVaeModel = SETTINGS[vae_model_setting_key].value
let res = await fetch('/get/models')
const models = await res.json()

-let activeModels = models['active']
console.log('get models response', models)

let modelOptions = models['options']
let stableDiffusionOptions = modelOptions['stable-diffusion']
let vaeOptions = modelOptions['vae']
-let activeVae = activeModels['vae']
+vaeOptions.unshift('') // add a None option

function createModelOptions(modelField, selectedModel) {
return function(modelName) {
let modelOption = document.createElement('option')
modelOption.value = modelName
-modelOption.innerText = modelName
+modelOption.innerText = modelName !== '' ? modelName : 'None'

if (modelName === selectedModel) {
modelOption.selected = true
|
||||
}
|
||||
}
|
||||
|
||||
stableDiffusionOptions.forEach(createModelOptions(stableDiffusionModelField, selectedModel))
|
||||
vaeOptions.forEach(createModelOptions(vaeModelField, activeVae))
|
||||
stableDiffusionOptions.forEach(createModelOptions(stableDiffusionModelField, selectedSDModel))
|
||||
vaeOptions.forEach(createModelOptions(vaeModelField, selectedVaeModel))
|
||||
|
||||
// TODO: set default for model here too
|
||||
SETTINGS[model_setting_key].default = stableDiffusionOptions[0]
|
||||
if (getSetting(model_setting_key) == '' || SETTINGS[model_setting_key].value == '') {
|
||||
setSetting(model_setting_key, stableDiffusionOptions[0])
|
||||
SETTINGS[sd_model_setting_key].default = stableDiffusionOptions[0]
|
||||
if (getSetting(sd_model_setting_key) == '' || SETTINGS[sd_model_setting_key].value == '') {
|
||||
setSetting(sd_model_setting_key, stableDiffusionOptions[0])
|
||||
}
|
||||
|
||||
console.log('get models response', models)
|
||||
} catch (e) {
|
||||
console.log('get models error', e)
|
||||
}
|
||||
|
@@ -51,11 +51,6 @@ var PARAMETERS = [
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
}
},
-{
-id: "default_vae_model",
-type: ParameterType.select, // Note: options generated dynamically
-label: "Default VAE",
-},
{
id: "sound_toggle",
type: ParameterType.checkbox,
@@ -30,9 +30,15 @@ function toggleCollapsible(element) {
collapsibleHeader.classList.toggle("active")
let content = getNextSibling(collapsibleHeader, '.collapsible-content')
if (!collapsibleHeader.classList.contains("active")) {
-handle.innerHTML = '➕' // plus
content.style.display = "none"
+if (handle != null) { // render results don't have a handle
+handle.innerHTML = '➕' // plus
+}
} else {
-handle.innerHTML = '➖' // minus
content.style.display = "block"
+if (handle != null) { // render results don't have a handle
+handle.innerHTML = '➖' // minus
+}
}

if (COLLAPSIBLES_INITIALIZED && COLLAPSIBLE_PANELS.includes(element)) {
@@ -340,3 +346,15 @@ function asyncDelay(timeout) {
setTimeout(resolve, timeout, true)
})
}

+function preventNonNumericalInput(e) {
+e = e || window.event;
+let charCode = (typeof e.which == "undefined") ? e.keyCode : e.which;
+let charStr = String.fromCharCode(charCode);
+let re = e.target.getAttribute('pattern') || '^[0-9]+$'
+re = new RegExp(re)
+
+if (!charStr.match(re)) {
+e.preventDefault();
+}
+}
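// preventNonNumericalInput reads an optional pattern attribute off the target field and falls
// back to ^[0-9]+$. A minimal usage sketch (assumes an existing #seed input; index.html wires it
// inline via onkeypress="preventNonNumericalInput(event)" instead):
//   document.querySelector('#seed').addEventListener('keypress', preventNonNumericalInput)
// Decimal fields override the default, e.g. pattern="^[0-9\.]+$" on the Guidance Scale input.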
@@ -18,7 +18,6 @@ class Request:
precision: str = "autocast" # or "full"
save_to_disk_path: str = None
turbo: bool = True
-use_cpu: bool = False
use_full_precision: bool = False
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"

@@ -50,7 +49,7 @@
"output_format": self.output_format,
}

-def to_string(self):
+def __str__(self):
return f'''
session_id: {self.session_id}
prompt: {self.prompt}

@@ -64,7 +63,6 @@
precision: {self.precision}
save_to_disk_path: {self.save_to_disk_path}
turbo: {self.turbo}
-use_cpu: {self.use_cpu}
use_full_precision: {self.use_full_precision}
use_face_correction: {self.use_face_correction}
use_upscale: {self.use_upscale}
@@ -45,6 +45,25 @@ from io import BytesIO
from threading import local as LocalThreadVars
thread_data = LocalThreadVars()

+def get_processor_name():
+try:
+import platform, subprocess
+if platform.system() == "Windows":
+return platform.processor()
+elif platform.system() == "Darwin":
+os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
+command ="sysctl -n machdep.cpu.brand_string"
+return subprocess.check_output(command).strip()
+elif platform.system() == "Linux":
+command = "cat /proc/cpuinfo"
+all_info = subprocess.check_output(command, shell=True).decode().strip()
+for line in all_info.split("\n"):
+if "model name" in line:
+return re.sub( ".*model name.*:", "", line,1).strip()
+except:
+print(traceback.format_exc())
+return "cpu"
+
def device_would_fail(device):
if device == 'cpu': return None
# Returns None when no issues found, otherwise returns the detected error str.
@@ -68,17 +87,17 @@ def device_select(device):
print(failure_msg)
return False

-device_name = torch.cuda.get_device_name(device)
+thread_data.device_name = torch.cuda.get_device_name(device)
+thread_data.device = device

-# otherwise these NVIDIA cards create green images
-thread_data.force_full_precision = ('nvidia' in device_name.lower() or 'geforce' in device_name.lower()) and (' 1660' in device_name or ' 1650' in device_name)
+# Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
+device_name = thread_data.device_name.lower()
+thread_data.force_full_precision = ('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)
if thread_data.force_full_precision:
-print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', device_name)
+print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', thread_data.device_name)
# Apply force_full_precision now before models are loaded.
thread_data.precision = 'full'

-thread_data.device = device
thread_data.has_valid_gpu = True
return True

def device_init(device_selection=None):
@@ -100,27 +119,31 @@ def device_init(device_selection=None):
thread_data.model_is_half = False
thread_data.model_fs_is_half = False
thread_data.device = None
+thread_data.device_name = None
thread_data.unet_bs = 1
thread_data.precision = 'autocast'
thread_data.sampler_plms = None
thread_data.sampler_ddim = None

thread_data.turbo = False
thread_data.has_valid_gpu = False
thread_data.force_full_precision = False
thread_data.reduced_memory = True

-if device_selection.lower() == 'cpu':
-print('CPU requested, skipping gpu init.')
+device_selection = device_selection.lower()
+
+if device_selection == 'cpu':
thread_data.device = 'cpu'
+thread_data.device_name = get_processor_name()
+print('Render device CPU available as', thread_data.device_name)
return
if not torch.cuda.is_available():
if device_selection == 'auto' or device_selection == 'current':
-print('WARNING: torch.cuda is not available. Using the CPU, but this will be very slow!')
+print('WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!')
thread_data.device = 'cpu'
+thread_data.device_name = get_processor_name()
return
else:
-raise EnvironmentError('torch.cuda is not available.')
+raise EnvironmentError(f'Could not find a compatible GPU for the requested device_selection: {device_selection}!')
device_count = torch.cuda.device_count()
if device_count <= 1 and device_selection == 'auto':
device_selection = 'current' # Use 'auto' only when there is more than one compatible device found.
@@ -141,10 +164,10 @@ def device_init(device_selection=None):
print(f'Setting GPU:{device} as active')
torch.cuda.device(device)
return
-if isinstance(device_selection, str):
-device_selection = device_selection.lower()
-if device_selection.startswith('gpu:'):
-device_selection = int(device_selection[4:])

+if device_selection.startswith('gpu:'):
+device_selection = int(device_selection[4:])

if device_selection != 'cuda' and device_selection != 'current' and device_selection != 'gpu':
if device_select(device_selection):
if isinstance(device_selection, int):
@@ -162,6 +185,7 @@ def device_init(device_selection=None):
return
print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!')
thread_data.device = 'cpu'
+thread_data.device_name = get_processor_name()

def is_first_cuda_device(device):
if device is None: return False
@@ -234,13 +258,15 @@ def load_model_ckpt():
_, _ = modelFS.load_state_dict(sd, strict=False)

if thread_data.vae_file is not None:
-if os.path.exists(thread_data.vae_file + '.vae.pt'):
-print(f"Loading VAE weights from: {thread_data.vae_file}.vae.pt")
-vae_ckpt = torch.load(thread_data.vae_file + '.vae.pt', map_location="cpu")
-vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
-modelFS.first_stage_model.load_state_dict(vae_dict, strict=False)
-else:
-print(f'Cannot find VAE file: {thread_data.vae_file}.vae.pt')
+for model_extension in ['.ckpt', '.vae.pt']:
+if os.path.exists(thread_data.vae_file + model_extension):
+print(f"Loading VAE weights from: {thread_data.vae_file}{model_extension}")
+vae_ckpt = torch.load(thread_data.vae_file + model_extension, map_location="cpu")
+vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
+modelFS.first_stage_model.load_state_dict(vae_dict, strict=False)
+break
+else:
+print(f'Cannot find VAE file: {thread_data.vae_file}{model_extension}')

modelFS.eval()
if thread_data.device != 'cpu':
@@ -261,7 +287,12 @@ def load_model_ckpt():
thread_data.model_is_half = False
thread_data.model_fs_is_half = False

-print('loaded', thread_data.ckpt_file, 'as', model.device, '->', modelCS.cond_stage_model.device, '->', thread_data.modelFS.device, 'using precision', thread_data.precision)
+print(f'''loaded model
+model file: {thread_data.ckpt_file}.ckpt
+model.device: {model.device}
+modelCS.device: {modelCS.cond_stage_model.device}
+modelFS.device: {thread_data.modelFS.device}
+using precision: {thread_data.precision}''')

def unload_filters():
if thread_data.model_gfpgan is not None:
@@ -341,13 +372,21 @@ def load_model_real_esrgan():
thread_data.model_real_esrgan.model.name = thread_data.real_esrgan_file
print('loaded ', thread_data.real_esrgan_file, 'to', thread_data.model_real_esrgan.device, 'precision', thread_data.precision)

+def get_session_out_path(disk_path, session_id):
+if disk_path is None: return None
+if session_id is None: return None
+
+session_out_path = os.path.join(disk_path, filename_regex.sub('_',session_id))
+os.makedirs(session_out_path, exist_ok=True)
+return session_out_path
+
def get_base_path(disk_path, session_id, prompt, img_id, ext, suffix=None):
if disk_path is None: return None
if session_id is None: return None
if ext is None: raise Exception('Missing ext')

-session_out_path = os.path.join(disk_path, session_id)
-os.makedirs(session_out_path, exist_ok=True)
+session_out_path = get_session_out_path(disk_path, session_id)

prompt_flattened = filename_regex.sub('_', prompt)[:50]
@@ -475,7 +514,7 @@ def do_mk_img(req: Request):
thread_data.vae_file = req.use_vae_model
needs_model_reload = True

-if thread_data.has_valid_gpu:
+if thread_data.device != 'cpu':
if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
(thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
thread_data.precision = 'full' if req.use_full_precision else 'autocast'
@@ -500,7 +539,7 @@ def do_mk_img(req: Request):
opt_f = 8
opt_ddim_eta = 0.0

-print(req.to_string(), '\n device', thread_data.device)
+print(req, '\n device', torch.device(thread_data.device), "as", thread_data.device_name)
print('\n\n Using precision:', thread_data.precision)

seed_everything(opt_seed)
@@ -552,8 +591,7 @@ def do_mk_img(req: Request):
print(f"target t_enc is {t_enc} steps")

if req.save_to_disk_path is not None:
-session_out_path = os.path.join(req.save_to_disk_path, req.session_id)
-os.makedirs(session_out_path, exist_ok=True)
+session_out_path = get_session_out_path(req.save_to_disk_path, req.session_id)
else:
session_out_path = None
@@ -21,6 +21,7 @@ LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.

DEVICE_START_TIMEOUT = 60 # seconds - Maximum time to wait for a render device to init.
+CPU_UNLOAD_TIMEOUT = 4 * 60 # seconds - Idle time before CPU unload resource when GPUs are present.

class SymbolClass(type): # Print nicely formatted Symbol names.
def __repr__(self): return self.__qualname__
@@ -38,6 +39,7 @@ class RenderTask(): # Task with output queue and completion lock.
def __init__(self, req: Request):
self.request: Request = req # Initial Request
self.response: Any = None # Copy of the last reponse
+self.render_device = None
self.temp_images:list = [None] * req.num_outputs * (1 if req.show_only_filtered_image else 2)
self.error: Exception = None
self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
@@ -68,7 +70,8 @@ class ImageRequest(BaseModel):
# allow_nsfw: bool = False
save_to_disk_path: str = None
turbo: bool = True
-use_cpu: bool = False
+use_cpu: bool = False ##TODO Remove after UI and plugins transition.
+render_device: str = None
use_full_precision: bool = False
use_face_correction: str = None # or "GFPGANv1.3"
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
@@ -89,7 +92,7 @@ class FilterRequest(BaseModel):
height: int = 512
save_to_disk_path: str = None
turbo: bool = True
-use_cpu: bool = False
+render_device: str = None
use_full_precision: bool = False
output_format: str = "jpeg" # or "png"
@@ -219,26 +222,24 @@ def thread_get_next_task():
queued_task.error = Exception('cuda:0 is not available with the current config. Remove GFPGANer filter to run task.')
task = queued_task
break
-if queued_task.request.use_cpu:
+if queued_task.render_device == 'cpu':
queued_task.error = Exception('Cpu cannot be used to run this task. Remove GFPGANer filter to run task.')
task = queued_task
break
-if not runtime.is_first_cuda_device(runtime.thread_data.device):
-continue # Wait for cuda:0
-if queued_task.request.use_cpu and runtime.thread_data.device != 'cpu':
-if is_alive('cpu') > 0:
-continue # CPU Tasks, Skip GPU device
+if queued_task.render_device and runtime.thread_data.device != queued_task.render_device:
+# Is asking for a specific render device.
+if is_alive(queued_task.render_device) > 0:
+continue # requested device alive, skip current one.
else:
-queued_task.error = Exception('Cpu is not enabled in render_devices.')
task = queued_task
break
-if not queued_task.request.use_cpu and runtime.thread_data.device == 'cpu':
-if is_alive() > 1: # cpu is alive, so need more than one.
-continue # GPU Tasks, don't run on CPU unless there is nothing else.
-else:
-queued_task.error = Exception('No active gpu found. Please check the error message in the command-line window at startup.')
+# Requested device is not active, return error to UI.
+queued_task.error = Exception(str(queued_task.render_device) + ' is not currently active.')
task = queued_task
break
+if not queued_task.render_device and runtime.thread_data.device == 'cpu' and is_alive() > 1:
+# not asking for any specific devices, cpu want to grab task but other render devices are alive.
+continue # Skip Tasks, don't run on CPU unless there is nothing else or user asked for it.
task = queued_task
break
if task is not None:
@@ -252,11 +253,15 @@ def thread_render(device):
from . import runtime
try:
runtime.device_init(device)
-except:
+except Exception as e:
print(traceback.format_exc())
+weak_thread_data[threading.current_thread()] = {
+'error': e
+}
return
weak_thread_data[threading.current_thread()] = {
-'device': runtime.thread_data.device
+'device': runtime.thread_data.device,
+'device_name': runtime.thread_data.device_name
}
if runtime.thread_data.device != 'cpu' or is_alive() == 1:
preload_model()
@@ -268,6 +273,12 @@ def thread_render(device):
return
task = thread_get_next_task()
if task is None:
+if runtime.thread_data.device == 'cpu' and is_alive() > 1 and hasattr(runtime.thread_data, 'lastActive') and time.time() - runtime.thread_data.lastActive > CPU_UNLOAD_TIMEOUT:
+# GPUs present and CPU is idle. Unload resources.
+runtime.unload_models()
+runtime.unload_filters()
+del runtime.thread_data.lastActive
+print('unloaded models from CPU because it was idle for too long')
time.sleep(1)
continue
if task.error is not None:
@@ -280,9 +291,12 @@ def thread_render(device):
task.response = {"status": 'failed', "detail": str(task.error)}
task.buffer_queue.put(json.dumps(task.response))
continue
-print(f'Session {task.request.session_id} starting task {id(task)}')
+print(f'Session {task.request.session_id} starting task {id(task)} on {runtime.thread_data.device_name}')
if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
try:
+if runtime.thread_data.device == 'cpu' and is_alive() > 1:
+# CPU is not the only device. Keep track of active time to unload resources later.
+runtime.thread_data.lastActive = time.time()
# Open data generator.
res = runtime.mk_img(task.request)
if current_model_path == task.request.use_stable_diffusion_model:
@@ -331,7 +345,7 @@ def thread_render(device):
        elif task.error is not None:
            print(f'Session {task.request.session_id} task {id(task)} failed!')
        else:
            print(f'Session {task.request.session_id} task {id(task)} completed.')
            print(f'Session {task.request.session_id} task {id(task)} completed by {runtime.thread_data.device_name}.')
        current_state = ServerStates.Online

def get_cached_task(session_id:str, update_ttl:bool=False):
@@ -341,6 +355,21 @@ def get_cached_task(session_id:str, update_ttl:bool=False):
        return None
    return task_cache.tryGet(session_id)

def get_devices():
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('get_devices' + ERR_LOCK_FAILED)
    try:
        device_dict = {}
        for rthread in render_threads:
            if not rthread.is_alive():
                continue
            weak_data = weak_thread_data.get(rthread)
            if not weak_data or not 'device' in weak_data or not 'device_name' in weak_data:
                continue
            device_dict.update({weak_data['device']:weak_data['device_name']})
        return device_dict
    finally:
        manager_lock.release()

def is_first_cuda_device(device):
    from . import runtime # When calling runtime from outside thread_render DO NOT USE thread specific attributes or functions.
    return runtime.is_first_cuda_device(device)
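get_devices walks the live render threads under manager_lock and builds a device-id to device-name map, skipping threads that have not finished registering; it backs the new 'devices' key in server.py's read_web_data further down. The returned shape is roughly this (names are illustrative):

    # Hypothetical return value of task_manager.get_devices()
    devices = {'cuda:0': 'NVIDIA GeForce RTX 3060', 'cpu': 'CPU'}
    for device_id, device_name in devices.items():
        print(f'{device_id}: {device_name}')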
@@ -352,8 +381,7 @@ def is_alive(name=None):
        for rthread in render_threads:
            if name is not None:
                weak_data = weak_thread_data.get(rthread)
                if weak_data is None or weak_data['device'] is None:
                    print('The thread', rthread.name, 'is registered but has no data store in the task manager.')
                if weak_data is None or not 'device' in weak_data or weak_data['device'] is None:
                    continue
                thread_name = str(weak_data['device']).lower()
                if is_first_cuda_device(name):
@@ -380,6 +408,8 @@ def start_render_thread(device='auto'):
    manager_lock.release()
    timeout = DEVICE_START_TIMEOUT
    while not rthread.is_alive() or not rthread in weak_thread_data or not 'device' in weak_thread_data[rthread]:
        if rthread in weak_thread_data and 'error' in weak_thread_data[rthread]:
            return False
        if timeout <= 0:
            return False
        timeout -= 1
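start_render_thread now polls until the spawned thread has either registered its device in weak_thread_data or parked an 'error' entry there, giving up after DEVICE_START_TIMEOUT ticks. The wait loop, distilled under the assumption of one-second ticks (the helper names are illustrative):

    import time

    def wait_for_registration(thread_alive, get_state, timeout_s=60):
        """Poll until the worker registers a device, reports an error, or times out."""
        timeout = timeout_s
        while True:
            state = get_state() or {}
            if 'error' in state:
                return False  # thread came up, but device init failed
            if 'device' in state and thread_alive():
                return True   # fully registered
            if timeout <= 0:
                return False  # never came up
            timeout -= 1
            time.sleep(1)

    # A worker that registers immediately:
    print(wait_for_registration(lambda: True, lambda: {'device': 'cuda:0'}, timeout_s=5))  # True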
@@ -416,7 +446,6 @@ def render(req : ImageRequest):
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale: str = req.use_upscale
@@ -433,6 +462,8 @@ def render(req : ImageRequest):
    r.stream_image_progress = False

    new_task = RenderTask(r)
    new_task.render_device = req.render_device

    if task_cache.put(r.session_id, new_task, TASK_TTL):
        # Use twice the normal timeout for adding user requests.
        # Tries to force task_cache.put to fail before tasks_queue.put would.
166 ui/server.py
@@ -30,9 +30,6 @@ APP_CONFIG_DEFAULT_MODELS = [
    'custom-model', # Check if user has a custom model, use it first.
    'sd-v1-4', # Default fallback.
]
APP_CONFIG_DEFAULT_VAE = [
    'vae-ft-mse-840000-ema-pruned', # Default fallback.
]

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
@@ -107,11 +104,24 @@ def setConfig(config):
        config_bat = [
            f"@set update_branch={config['update_branch']}"
        ]
        if len(gpu_devices) > 0 and not has_first_cuda_device:
            config_bat.append('::Set the devices visible inside SD-UI here')
            config_bat.append(f"::@set CUDA_VISIBLE_DEVICES={','.join(gpu_devices)}") # Needs better detection for edge cases, add as a comment for now.
            print('Add the line "@set CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.bat')

        if os.getenv('CUDA_VISIBLE_DEVICES') is None:
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                config_bat.append('::Set the devices visible inside SD-UI here')
                config_bat.append(f"::@set CUDA_VISIBLE_DEVICES={','.join(gpu_devices)}") # Needs better detection for edge cases, add as a comment for now.
                print('Add the line "@set CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.bat')
        else:
            config_bat.append(f"@set CUDA_VISIBLE_DEVICES={os.getenv('CUDA_VISIBLE_DEVICES')}")
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                print('GPU:0 seems to be missing! Validate that CUDA_VISIBLE_DEVICES is set properly.')
        config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')

        if os.getenv('SD_UI_BIND_PORT') is not None:
            config_bat.append(f"@set SD_UI_BIND_PORT={os.getenv('SD_UI_BIND_PORT')}")
        if os.getenv('SD_UI_BIND_IP') is not None:
            config_bat.append(f"@set SD_UI_BIND_IP={os.getenv('SD_UI_BIND_IP')}")

        with open(config_bat_path, 'w', encoding='utf-8') as f:
            f.write('\r\n'.join(config_bat))
    except Exception as e:
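The rewritten block gives an already-set CUDA_VISIBLE_DEVICES environment variable precedence: when the user set it, setConfig persists it into config.bat verbatim (warning only if GPU:0 still appears missing); only when it is unset does the commented-out suggestion get written. The decision in isolation, assuming the same gpu_devices and has_first_cuda_device inputs:

    import os

    def cuda_visible_devices_lines(gpu_devices, has_first_cuda_device):
        lines = []
        if os.getenv('CUDA_VISIBLE_DEVICES') is None:
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                # Suggest a mapping as a comment, but do not force it.
                lines.append(f"::@set CUDA_VISIBLE_DEVICES={','.join(gpu_devices)}")
        else:
            # Persist the user's explicit choice.
            lines.append(f"@set CUDA_VISIBLE_DEVICES={os.getenv('CUDA_VISIBLE_DEVICES')}")
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                print('GPU:0 seems to be missing! Validate that CUDA_VISIBLE_DEVICES is set properly.')
        return lines

    print(cuda_visible_devices_lines(['1', '2'], has_first_cuda_device=False))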
@@ -122,17 +132,28 @@ def setConfig(config):
            '#!/bin/bash',
            f"export update_branch={config['update_branch']}"
        ]
        if len(gpu_devices) > 0 and not has_first_cuda_device:
            config_sh.append('#Set the devices visible inside SD-UI here')
            config_sh.append(f"#CUDA_VISIBLE_DEVICES={','.join(gpu_devices)}") # Needs better detection for edge cases, add as a comment for now.
            print('Add the line "CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.sh')
        if os.getenv('CUDA_VISIBLE_DEVICES') is None:
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                config_sh.append('#Set the devices visible inside SD-UI here')
                config_sh.append(f"#CUDA_VISIBLE_DEVICES={','.join(gpu_devices)}") # Needs better detection for edge cases, add as a comment for now.
                print('Add the line "CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.sh')
        else:
            config_sh.append(f"export CUDA_VISIBLE_DEVICES=\"{os.getenv('CUDA_VISIBLE_DEVICES')}\"")
            if len(gpu_devices) > 0 and not has_first_cuda_device:
                print('GPU:0 seems to be missing! Validate that CUDA_VISIBLE_DEVICES is set properly.')

        if os.getenv('SD_UI_BIND_PORT') is not None:
            config_sh.append(f"export SD_UI_BIND_PORT={os.getenv('SD_UI_BIND_PORT')}")
        if os.getenv('SD_UI_BIND_IP') is not None:
            config_sh.append(f"export SD_UI_BIND_IP={os.getenv('SD_UI_BIND_IP')}")

        config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')
        with open(config_sh_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(config_sh))
    except Exception as e:
        print(traceback.format_exc())

def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extension:str, default_models=[]):
def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
    model_dirs = [os.path.join(MODELS_DIR, model_dir), SD_DIR]
    if not model_name: # When None try user configured model.
        config = getConfig()
@@ -141,43 +162,38 @@ def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
    if model_name:
        # Check models directory
        models_dir_path = os.path.join(MODELS_DIR, model_dir, model_name)
        if os.path.exists(models_dir_path + model_extension):
            return models_dir_path
        if os.path.exists(model_name + model_extension):
            # Direct Path to file
            model_name = os.path.abspath(model_name)
            return model_name
        for model_extension in model_extensions:
            if os.path.exists(models_dir_path + model_extension):
                return models_dir_path
            if os.path.exists(model_name + model_extension):
                # Direct Path to file
                model_name = os.path.abspath(model_name)
                return model_name
    # Default locations
    if model_name in default_models:
        default_model_path = os.path.join(SD_DIR, model_name)
        if os.path.exists(default_model_path + model_extension):
            return default_model_path
        for model_extension in model_extensions:
            if os.path.exists(default_model_path + model_extension):
                return default_model_path
    # Can't find requested model, check the default paths.
    for default_model in default_models:
        for model_dir in model_dirs:
            default_model_path = os.path.join(model_dir, default_model)
            if os.path.exists(default_model_path + model_extension):
                if model_name is not None:
                    print(f'Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}')
                return default_model_path
            for model_extension in model_extensions:
                if os.path.exists(default_model_path + model_extension):
                    if model_name is not None:
                        print(f'Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}')
                    return default_model_path
    raise Exception('No valid models found.')

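resolve_model_to_use now takes a list of extensions instead of a single one and tries each in order at every candidate location, which is what lets a VAE ship either as .vae.pt or as .ckpt. The lookup at one location, distilled (the path and helper name are illustrative):

    import os
    from typing import Optional

    def find_model(base_path: str, model_extensions: list) -> Optional[str]:
        # Try each known extension at one candidate location, in declared order.
        for ext in model_extensions:
            if os.path.exists(base_path + ext):
                return base_path + ext
        return None

    # Prefers 'my-vae.vae.pt' over 'my-vae.ckpt' when both exist:
    print(find_model('models/vae/my-vae', ['.vae.pt', '.ckpt']))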
def resolve_ckpt_to_use(model_name:str=None):
    return resolve_model_to_use(model_name, model_type='stable-diffusion', model_dir='stable-diffusion', model_extension='.ckpt', default_models=APP_CONFIG_DEFAULT_MODELS)
    return resolve_model_to_use(model_name, model_type='stable-diffusion', model_dir='stable-diffusion', model_extensions=['.ckpt'], default_models=APP_CONFIG_DEFAULT_MODELS)

def resolve_vae_to_use(ckpt_model_path:str=None):
    if ckpt_model_path is not None:
        if os.path.exists(ckpt_model_path + '.vae.pt'):
            return ckpt_model_path

        ckpt_model_name = os.path.basename(ckpt_model_path)
        model_dirs = [os.path.join(MODELS_DIR, 'stable-diffusion'), SD_DIR]
        for model_dir in model_dirs:
            default_model_path = os.path.join(model_dir, ckpt_model_name)
            if os.path.exists(default_model_path + '.vae.pt'):
                return default_model_path

    return resolve_model_to_use(model_name=None, model_type='vae', model_dir='stable-diffusion', model_extension='.vae.pt', default_models=APP_CONFIG_DEFAULT_VAE)
def resolve_vae_to_use(model_name:str=None):
    try:
        return resolve_model_to_use(model_name, model_type='vae', model_dir='vae', model_extensions=['.vae.pt', '.ckpt'], default_models=[])
    except:
        return None

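The rewrite makes the VAE a first-class model: the old resolve_vae_to_use derived the VAE from the checkpoint path (a sibling .vae.pt next to the .ckpt), whereas the new one resolves a named VAE from models/vae and returns None, rather than a bundled default, when nothing matches. A usage sketch of the function as defined above:

    # The VAE is now selected by name, independent of the checkpoint.
    vae_path = resolve_vae_to_use('vae-ft-mse-840000-ema-pruned')
    if vae_path is None:
        # No external VAE found or configured; the checkpoint's built-in VAE is used.
        print('no external VAE selected')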
class SetAppConfigRequest(BaseModel):
    update_branch: str = None
@@ -203,10 +219,6 @@ async def setAppConfig(req : SetAppConfigRequest):
        render_devices.append('GPU:' + req.render_devices)
    if len(render_devices) > 0:
        config['render_devices'] = render_devices
    if req.model_vae:
        if 'model' not in config:
            config['model'] = {}
        config['model']['vae'] = req.model_vae
    try:
        setConfig(config)
        return JSONResponse({'status': 'OK'}, headers=NOCACHE_HEADERS)
@@ -226,21 +238,25 @@ def getModels():
        },
    }

    def listModels(models_dirname, model_type, model_extensions):
        models_dir = os.path.join(MODELS_DIR, models_dirname)
        for file in os.listdir(models_dir):
            for model_extension in model_extensions:
                if file.endswith(model_extension):
                    model_name = file[:-len(model_extension)]
                    models['options'][model_type].append(model_name)

        models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates

    # custom models
    sd_models_dir = os.path.join(MODELS_DIR, 'stable-diffusion')
    for model_type, model_extension in [('stable-diffusion', '.ckpt'), ('vae', '.vae.pt')]:
        for file in os.listdir(sd_models_dir):
            if file.endswith(model_extension):
                model_name = file[:-len(model_extension)]
                models['options'][model_type].append(model_name)
    listModels(models_dirname='stable-diffusion', model_type='stable-diffusion', model_extensions=['.ckpt'])
    listModels(models_dirname='vae', model_type='vae', model_extensions=['.vae.pt', '.ckpt'])

    # legacy
    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
    if os.path.exists(custom_weight_path):
        models['options']['stable-diffusion'].append('custom-model')

    models['active']['vae'] = os.path.basename(task_manager.default_vae_to_load)

    return models

def getUIPlugins():
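With listModels factored out, checkpoints and VAEs are scanned from their own directories (models/stable-diffusion for .ckpt, models/vae for .vae.pt or .ckpt) and de-duplicated. The structure the UI receives from the 'models' key then looks roughly like this (entries are illustrative):

    models = {
        'active': {
            'stable-diffusion': 'sd-v1-4',
            'vae': 'vae-ft-mse-840000-ema-pruned',
        },
        'options': {
            'stable-diffusion': ['sd-v1-4', 'custom-model'],
            'vae': ['vae-ft-mse-840000-ema-pruned'],
        },
    }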
@@ -261,6 +277,8 @@ def read_web_data(key:str=None):
        if config is None:
            raise HTTPException(status_code=500, detail="Config file is missing or unreadable")
        return JSONResponse(config, headers=NOCACHE_HEADERS)
    elif key == 'devices':
        return JSONResponse(task_manager.get_devices(), headers=NOCACHE_HEADERS)
    elif key == 'models':
        return JSONResponse(getModels(), headers=NOCACHE_HEADERS)
    elif key == 'modifiers': return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)
@@ -295,23 +313,32 @@ def ping(session_id:str=None):
        response['session'] = 'pending'
    return JSONResponse(response, headers=NOCACHE_HEADERS)

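The new 'devices' key exposes task_manager.get_devices() to the browser so the UI can show which devices are rendering. A client-side sketch, assuming the key is served under a /get/<key> route like the other keys and the server listens on the default port 9000:

    import json
    from urllib.request import urlopen

    # Assumption: local server on the default port; route path inferred from the other keys.
    with urlopen('http://localhost:9000/get/devices') as resp:
        devices = json.loads(resp.read())
    print(devices)  # e.g. {'cuda:0': 'NVIDIA GeForce RTX 3060'}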
def save_model_to_config(model_name):
def save_model_to_config(ckpt_model_name, vae_model_name):
    config = getConfig()
    if 'model' not in config:
        config['model'] = {}

    config['model']['stable-diffusion'] = model_name
    config['model']['stable-diffusion'] = ckpt_model_name
    config['model']['vae'] = vae_model_name

    if vae_model_name is None or vae_model_name == "":
        del config['model']['vae']

    setConfig(config)

@app.post('/render')
def render(req : task_manager.ImageRequest):
    if req.use_cpu and task_manager.is_alive('cpu') <= 0: raise HTTPException(status_code=403, detail=f'CPU rendering is not enabled in config.json or the thread has died...') # HTTP403 Forbidden
    if req.use_cpu: # TODO Remove after transition.
        print('WARNING Replace {use_cpu: true} by {render_device: "cpu"}')
        req.render_device = 'cpu'
        del req.use_cpu
    if req.render_device and task_manager.is_alive(req.render_device) <= 0: raise HTTPException(status_code=403, detail=f'{req.render_device} rendering is not enabled in config.json or the thread has died...') # HTTP403 Forbidden
    if req.use_face_correction and task_manager.is_alive(0) <= 0: #TODO Remove when GFPGANer is fixed upstream.
        raise HTTPException(status_code=412, detail=f'GFPGANer only works GPU:0, use CUDA_VISIBLE_DEVICES if GFPGANer is needed on a specific GPU.') # HTTP412 Precondition Failed
    try:
        save_model_to_config(req.use_stable_diffusion_model)
        save_model_to_config(req.use_stable_diffusion_model, req.use_vae_model)
        req.use_stable_diffusion_model = resolve_ckpt_to_use(req.use_stable_diffusion_model)
        req.use_vae_model = resolve_vae_to_use(ckpt_model_path=req.use_stable_diffusion_model)
        req.use_vae_model = resolve_vae_to_use(req.use_vae_model)
        new_task = task_manager.render(req)
        response = {
            'status': str(task_manager.current_state),
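The endpoint still accepts the legacy {use_cpu: true} flag but rewrites it on the spot to the new {render_device: 'cpu'} form, printing a warning so clients migrate. In terms of payloads, the normalization amounts to this (field subset illustrative):

    legacy_request = {'prompt': 'a photo of an astronaut', 'use_cpu': True}
    migrated_request = {'prompt': 'a photo of an astronaut', 'render_device': 'cpu'}

    def migrate(req: dict) -> dict:
        if req.pop('use_cpu', False):
            print('WARNING Replace {use_cpu: true} by {render_device: "cpu"}')
            req['render_device'] = 'cpu'
        return req

    assert migrate(dict(legacy_request)) == migrated_request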
@@ -390,44 +417,41 @@ config = getConfig()

# Start the task_manager
task_manager.default_model_to_load = resolve_ckpt_to_use()
task_manager.default_vae_to_load = resolve_vae_to_use(ckpt_model_path=task_manager.default_model_to_load)
task_manager.default_vae_to_load = resolve_vae_to_use()
if 'render_devices' in config: # Start a new thread for each device.
    if isinstance(config['render_devices'], str):
        config['render_devices'] = config['render_devices'].split(',')
    if not isinstance(config['render_devices'], list):
        raise Exception('Invalid render_devices value in config.')
    for device in config['render_devices']:
        if task_manager.is_alive(device) >= 1:
            print(device, 'already registered.')
            continue
        if not task_manager.start_render_thread(device):
            print(device, 'failed to start.')
    if task_manager.is_alive() <= 0: # No running devices, probably invalid user config.
        print('WARNING: No active render devices after loading config. Validate "render_devices" in config.json')
        print('Loading default render devices to replace invalid render_devices field from config', config['render_devices'])

display_warning = False
if task_manager.is_alive() <= 0: # Either no defaults or no devices after loading config.
    # Select best GPU device using free memory, if more than one device.
    if task_manager.start_render_thread('auto'): # Detect best device for renders
        if task_manager.is_alive(0) <= 0: # has no cuda:0
            if task_manager.is_alive('cpu') >= 1: # auto used CPU.
                pass # Maybe add warning here about CPU mode...
            elif task_manager.start_render_thread('cuda'): # Another cuda device is better and cuda:0 is missing, try to start it...
                display_warning = True # And warn user to update settings...
            else:
                print('Failed to start GPU:0...')
        # if cuda:0 is missing, another cuda device is better. try to start it...
        if task_manager.is_alive(0) <= 0 and task_manager.is_alive('cpu') <= 0 and not task_manager.start_render_thread('cuda'):
            print('Failed to start GPU:0...')
    else:
        print('Failed to start gpu device.')
    if task_manager.is_alive('cpu') <= 0:
        # Allow CPU to be used for renders
        if not task_manager.start_render_thread('cpu'):
            print('Failed to start CPU render device...')
    if task_manager.is_alive('cpu') <= 0 and not task_manager.start_render_thread('cpu'): # Allow CPU to be used for renders
        print('Failed to start CPU render device...')

if display_warning or task_manager.is_alive(0) <= 0:
is_using_a_gpu = (task_manager.is_alive() > task_manager.is_alive('cpu'))
if is_using_a_gpu and task_manager.is_alive(0) <= 0:
    print('WARNING: GFPGANer only works on GPU:0, use CUDA_VISIBLE_DEVICES if GFPGANer is needed on a specific GPU.')
    print('Using CUDA_VISIBLE_DEVICES will remap the selected devices starting at GPU:0 fixing GFPGANer')
    print('Add the line "@set CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.bat')
    print('Add the line "CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.sh')

del display_warning
print('active devices', task_manager.get_devices())

# start the browser ui
import webbrowser; webbrowser.open('http://localhost:9000')
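The startup block now reduces to a clear fallback chain: honor config['render_devices'] when present, otherwise let 'auto' pick the best GPU by free memory, try another CUDA device if cuda:0 would not start, and finally fall back to the CPU worker; the GFPGAN warning fires only when a GPU is in use but GPU:0 is not among the live devices. The chain, distilled with stub helpers (start and is_alive are illustrative stand-ins for task_manager.start_render_thread and task_manager.is_alive):

    def pick_devices(start, is_alive):
        if is_alive() <= 0:                  # nothing came up from config
            if start('auto'):                # best GPU by free memory, or the CPU
                if is_alive(0) <= 0 and is_alive('cpu') <= 0 and not start('cuda'):
                    print('Failed to start GPU:0...')
            else:
                print('Failed to start gpu device.')
            if is_alive('cpu') <= 0 and not start('cpu'):
                print('Failed to start CPU render device...')

    # Smoke test: no devices alive, 'auto' lands on the CPU, nothing else starts.
    started = []
    pick_devices(start=lambda d: started.append(d) or True,
                 is_alive=lambda name=None: 1 if (name == 'cpu' and 'auto' in started) else 0)
    print(started)  # ['auto']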