mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2024-12-24 16:08:55 +01:00)

Commit 4f6287c163: Merge branch 'beta' into mod-thumbnails
@@ -15,7 +15,7 @@
 @call git reset --hard
 @call git pull
-@call git checkout d154155d4c0b43e13ec1f00eb72b7ff9d522fcf9
+@call git checkout f6cfebffa752ee11a7b07497b8529d5971de916c

 @call git apply ..\ui\sd_internal\ddim_callback.patch
 @call git apply ..\ui\sd_internal\env_yaml.patch
@@ -33,7 +33,7 @@
 )

 @cd stable-diffusion
-@call git checkout d154155d4c0b43e13ec1f00eb72b7ff9d522fcf9
+@call git checkout f6cfebffa752ee11a7b07497b8529d5971de916c

 @call git apply ..\ui\sd_internal\ddim_callback.patch
 @call git apply ..\ui\sd_internal\env_yaml.patch
@@ -16,7 +16,7 @@ if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/insta
 git reset --hard
 git pull
-git checkout d154155d4c0b43e13ec1f00eb72b7ff9d522fcf9
+git checkout f6cfebffa752ee11a7b07497b8529d5971de916c

 git apply ../ui/sd_internal/ddim_callback.patch
 git apply ../ui/sd_internal/env_yaml.patch
@@ -34,7 +34,7 @@ else
 fi

 cd stable-diffusion
-git checkout d154155d4c0b43e13ec1f00eb72b7ff9d522fcf9
+git checkout f6cfebffa752ee11a7b07497b8529d5971de916c

 git apply ../ui/sd_internal/ddim_callback.patch
 git apply ../ui/sd_internal/env_yaml.patch
524	ui/index.html
@@ -1,6 +1,8 @@
 <!DOCTYPE html>
 <html>
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
+<link rel="icon" type="image/png" href="/media/favicon-16x16.png" sizes="16x16">
+<link rel="icon" type="image/png" href="/media/favicon-32x32.png" sizes="32x32">
 <style>
 body {
     font-family: Arial, Helvetica, sans-serif;
@@ -19,7 +21,8 @@
 }
 #prompt {
     width: 100%;
-    height: 50pt;
+    height: 65pt;
+    box-sizing: border-box;
 }
 @media screen and (max-width: 600px) {
     #prompt {
@@ -27,7 +30,7 @@
     }
 }
 .image_preview_container {
-    display: none;
+    /* display: none; */
     margin-top: 10pt;
 }
 .image_clear_btn {
@@ -45,14 +48,14 @@
     font-family: Verdana;
     font-size: 8pt;
 }
-#editor-settings-entries {
+.settings-box ul {
     font-size: 9pt;
     margin-bottom: 5px;
     padding-left: 10px;
     list-style-type: none;
 }
-#editor-settings-entries li {
-    padding-bottom: 3pt;
+.settings-box li {
+    padding-bottom: 4pt;
 }
 .editor-slider {
     transform: translateY(30%);
@@ -60,6 +63,9 @@
 #outputMsg {
     font-size: small;
 }
+#progressBar {
+    font-size: small;
+}
 #footer {
     font-size: small;
     padding-left: 10pt;
@@ -102,23 +108,26 @@
 }

 #container {
-    width: 75%;
+    width: 90%;
     margin-left: auto;
     margin-right: auto;
 }
-@media screen and (max-width: 1400px) {
+@media screen and (max-width: 1800px) {
     #container {
         width: 100%;
     }
 }
-#meta small {
+#logo small {
     font-size: 11pt;
 }
 #editor {
     padding: 5px;
 }
 #editor label {
-    font-weight: bold;
+    font-weight: normal;
 }
+.settings-box label small {
+    color: rgb(153, 153, 153);
+}
 #preview {
     padding: 5px;
@@ -169,6 +178,9 @@
 .col-50 {
     flex: 50%;
 }
+.col-fixed-10 {
+    flex: 0 0 400pt;
+}
 .col-free {
     flex: 1;
 }
@@ -220,16 +232,19 @@
     display: none;
 }
 #server-status {
     display: inline;
     float: right;
+    transform: translateY(-5pt);
 }
 #server-status-color {
-    width: 8pt;
-    height: 8pt;
-    border-radius: 4pt;
-    background-color: rgb(128, 87, 0);
+    /* width: 8pt;
+    height: 8pt;
+    border-radius: 4pt; */
+    font-size: 14pt;
+    color: rgb(128, 87, 0);
     /* background-color: rgb(197, 1, 1); */
     float: left;
-    transform: translateY(15%);
+    /* transform: translateY(15%); */
     display: inline;
 }
 #server-status-msg {
     color: rgb(128, 87, 0);
@@ -244,6 +259,7 @@
     height: 23px;
+    transform: translateY(25%);
 }

 .modifier-card {
     box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
     transition: 0.1s;
@@ -459,18 +475,156 @@
     width: 6em;
     margin-bottom: 0.5em;
 }

+#inpaintingEditor {
+    width: 300pt;
+    height: 300pt;
+    margin-top: 5pt;
+}
+.drawing-board-canvas-wrapper {
+    background-size: 100% 100%;
+}
+#inpaintingEditor canvas {
+    opacity: 0.6;
+}
+#enable_mask {
+    margin-top: 8pt;
+}
+
+#top-nav {
+    padding-top: 3pt;
+    padding-bottom: 15pt;
+}
+#top-nav .icon {
+    padding-right: 4pt;
+    font-size: 14pt;
+    transform: translateY(1pt);
+}
+#logo {
+    display: inline;
+}
+#logo h1 {
+    display: inline;
+}
+#top-nav-items {
+    list-style-type: none;
+    display: inline;
+    float: right;
+}
+#top-nav-items > li {
+    float: left;
+    display: inline;
+    padding-left: 20pt;
+    cursor: default;
+}
+#initial-text {
+    padding-top: 15pt;
+    padding-left: 4pt;
+}
+.settings-subheader {
+    font-size: 10pt;
+    font-weight: bold;
+}
+.pl-5 {
+    padding-left: 5pt;
+}
+#system-settings {
+    width: 360pt;
+    transform: translateX(-100%) translateX(70pt);
+
+    padding-top: 10pt;
+    padding-bottom: 10pt;
+}
+#system-settings ul {
+    margin: 0;
+    padding: 0;
+}
+#system-settings li {
+    padding-left: 5pt;
+}
+#community-links {
+    list-style-type: none;
+    margin: 0;
+    padding: 12pt;
+    padding-bottom: 0pt;
+    transform: translateX(-15%);
+}
+#community-links li {
+    padding-bottom: 12pt;
+    display: block;
+    font-size: 10pt;
+}
+#community-links li .fa-fw {
+    padding-right: 2pt;
+}
+#community-links li a {
+    color: white;
+    text-decoration: none;
+}
+.dropdown {
+    overflow: hidden;
+}
+.dropdown-content {
+    display: none;
+    position: absolute;
+    z-index: 2;
+
+    background: rgb(18, 18, 19);
+    border: 2px solid rgb(37, 38, 41);
+    border-radius: 7px;
+    padding: 5px;
+    margin-bottom: 15px;
+    box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
+}
+.dropdown:hover .dropdown-content {
+    display: block;
+}
 </style>
 <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.2.0/css/all.min.css">
+<link rel="stylesheet" href="/media/drawingboard.min.css">
+<script src="/media/jquery-3.6.1.min.js"></script>
+<script src="/media/drawingboard.min.js"></script>
 </html>
 <body>
 <div id="container">
-    <div class="flex-container">
-        <div id="editor" class="col-50">
-            <div id="meta">
-                <div id="server-status">
-                    <div id="server-status-color"> </div>
-                    <span id="server-status-msg">Stable Diffusion is starting..</span>
+    <div id="top-nav">
+        <div id="logo">
+            <h1>Stable Diffusion UI <small>v2.16 <span id="updateBranchLabel"></span></small></h1>
+        </div>
+        <ul id="top-nav-items">
+            <li class="dropdown">
+                <span><i class="fa fa-comments icon"></i> Help & Community</span>
+                <ul id="community-links" class="dropdown-content">
+                    <li><a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/Troubleshooting.md" target="_blank"><i class="fa-solid fa-circle-question fa-fw"></i> Usual problems and solutions</a></li>
+                    <li><a href="https://discord.com/invite/u9yhsFmEkB" target="_blank"><i class="fa-brands fa-discord fa-fw"></i> Discord user community</a></li>
+                    <li><a href="https://github.com/cmdr2/stable-diffusion-ui" target="_blank"><i class="fa-brands fa-github fa-fw"></i> Source code on GitHub</a></li>
+                </ul>
+            </li>
+            <li class="dropdown">
+                <span><i class="fa fa-gear icon"></i> Settings</span>
+                <div id="system-settings" class="panel-box settings-box dropdown-content">
+                    <ul id="system-settings-entries">
+                        <li><b class="settings-subheader">System Settings</b></li>
+                        <br/>
+                        <li><input id="save_to_disk" name="save_to_disk" type="checkbox"> <label for="save_to_disk">Automatically save to <input id="diskPath" name="diskPath" size="40" disabled></label></li>
+                        <li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
+                        <li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode <small>(generates images faster, but uses an additional 1 GB of GPU memory)</small></label></li>
+                        <li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU <small>(warning: this will be *very* slow)</small></label></li>
+                        <li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision <small>(for GPU-only. warning: this will consume more VRAM)</small></label></li>
+                        <!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
+                        <br/>
+                        <li><input id="use_beta_channel" name="use_beta_channel" type="checkbox"> <label for="use_beta_channel">🔥Beta channel. Get the latest features immediately (but could be less stable). Please restart the program after changing this.</label></li>
+                    </ul>
+                </div>
-                <h1>Stable Diffusion UI <small>v2.1 <span id="updateBranchLabel"></span></small></h1>
+            </li>
+        </ul>
+    </div>
+
+    <div class="flex-container">
+        <div id="editor" class="col-fixed-10">
+            <div id="server-status">
+                <div id="server-status-color">●</div>
+                <span id="server-status-msg">Stable Diffusion is starting..</span>
+            </div>
             <div id="editor-inputs">
                 <div id="editor-inputs-prompt" class="row">
@@ -479,10 +633,15 @@
         </div>

         <div id="editor-inputs-init-image" class="row">
-            <label for="init_image"><b>Initial Image:</b> (optional) </label> <input id="init_image" name="init_image" type="file" /> </button><br/>
+            <label for="init_image"><b>Initial Image:</b> (optional) </label> <input id="init_image" name="init_image" type="file" /><br/>

             <div id="init_image_preview_container" class="image_preview_container">
                 <img id="init_image_preview" src="" width="100" height="100" />
-                <button id="init_image_clear" class="image_clear_btn">X</button>
+                <button class="init_image_clear image_clear_btn">X</button>
             </div>
+
+            <br/>
+            <input id="enable_mask" name="enable_mask" type="checkbox"> <label for="enable_mask">In-Painting (select the area which the AI will paint into)</label>
+            <div id="inpaintingEditor"></div>
         </div>

@@ -497,22 +656,25 @@

         <div class="line-separator"> </div>

-        <div id="editor-settings" class="panel-box">
-            <h4 class="collapsible">Advanced Settings</h4>
+        <div id="editor-settings" class="panel-box settings-box">
+            <h4 class="collapsible">Image Settings</h4>
             <ul id="editor-settings-entries" class="collapsible-content">
-                <li><input id="use_face_correction" name="use_face_correction" type="checkbox" checked> <label for="use_face_correction">Fix incorrect faces and eyes (uses GFPGAN)</label></li>
-                <li>
-                    <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale the image to 4x resolution using </label>
-                    <select id="upscale_model" name="upscale_model">
-                        <option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
-                        <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
+                <li><b class="settings-subheader">Image Settings</b></li>
+                <li class="pl-5"><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
+                <li class="pl-5"><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> (images at once)</li>
+                <li id="samplerSelection" class="pl-5"><label for="sampler">Sampler:</label>
+                    <select id="sampler" name="sampler">
+                        <option value="plms" selected>plms</option>
+                        <option value="ddim">ddim</option>
+                        <option value="heun">heun</option>
+                        <option value="euler">euler</option>
+                        <option value="euler_a">euler_a</option>
+                        <option value="dpm2">dpm2</option>
+                        <option value="dpm2_a">dpm2_a</option>
+                        <option value="lms">lms</option>
                     </select>
                 </li>
-                <li><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
-                <br/>
-                <li><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
-                <li><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="4"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="4"> (images at once)</li>
-                <li><label for="width">Width:</label>
+                <li class="pl-5"><label>Image Size: </label>
                     <select id="width" name="width" value="512">
                         <option value="128">128 (*)</option>
                         <option value="192">192</option>
@@ -533,9 +695,7 @@
                         <option value="1536">1536</option>
                         <option value="1792">1792</option>
                         <option value="2048">2048</option>
-                    </select>
-                </li>
-                <li><label for="height">Height:</label>
+                    </select> <label for="width"><small>(width)</small></label>
                     <select id="height" name="height" value="512">
                         <option value="128">128 (*)</option>
                         <option value="192">192</option>
@@ -557,19 +717,27 @@
                         <option value="1792">1792</option>
                         <option value="2048">2048</option>
                     </select>
+                    <label for="height"><small>(height)</small></label>
                 </li>
-                <li><label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" size="4" value="50"></li>
-                <li><label for="guidance_scale_slider">Guidance Scale:</label> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="200"> <input id="guidance_scale" name="guidance_scale" size="4"></li>
-                <li><span id="prompt_strength_container"><label for="prompt_strength_slider">Prompt Strength:</label> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></span></li>
-                <li> </li>
-                <li><input id="save_to_disk" name="save_to_disk" type="checkbox"> <label for="save_to_disk">Automatically save to <input id="diskPath" name="diskPath" size="40" disabled></label></li>
-                <li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
-                <li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode (generates images faster, but uses an additional 1 GB of GPU memory)</label></li>
-                <li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU (warning: this will be *very* slow)</label></li>
-                <li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision (for GPU-only. warning: this will consume more VRAM)</label></li>
-                <!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
+                <li class="pl-5"><label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" size="4" value="50"></li>
+                <li class="pl-5"><label for="guidance_scale_slider">Guidance Scale:</label> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4"></li>
+                <li class="pl-5"><span id="prompt_strength_container"><label for="prompt_strength_slider">Prompt Strength:</label> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></span></li>

                 <br/>
-                <li><input id="use_beta_channel" name="use_beta_channel" type="checkbox"> <label for="use_beta_channel">🔥Beta channel. Get the latest features immediately (but could be less stable). Please restart the program after changing this.</label></li>

+                <li><b class="settings-subheader">Render Settings</b></li>
+                <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview of the image <small>(consumes more VRAM, slightly slower image generation)</small></label></li>
+                <li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox" checked> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
+                <li class="pl-5">
+                    <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale the image to 4x resolution using </label>
+                    <select id="upscale_model" name="upscale_model">
+                        <option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
+                        <option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
+                    </select>
+                </li>
+                <li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
                 <br/>
+                <li><small>The system-related settings have been moved to the top-right corner.</small></li>
             </ul>
         </div>

@@ -589,10 +757,15 @@
             </div>
         </div>

-        <div id="preview" class="col-50">
-            <div id="preview-prompt">Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section and selecting the desired modifiers.<br/><br/>Click "Advanced Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)</div>
+        <div id="preview" class="col-free">
+            <div id="preview-prompt">
+                <div id="initial-text">
+                    Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section and selecting the desired modifiers.<br/><br/>Click "Advanced Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
+                </div>
+            </div>

             <div id="outputMsg"></div>
+            <div id="progressBar"></div>
             <div id="current-images" class="img-preview">
             </div>
         </div>
@@ -624,11 +797,14 @@ const MODIFIERS_PANEL_OPEN_KEY = "modifiersPanelOpen"
 const USE_FACE_CORRECTION_KEY = "useFaceCorrection"
 const USE_UPSCALING_KEY = "useUpscaling"
 const SHOW_ONLY_FILTERED_IMAGE_KEY = "showOnlyFilteredImage"
+const STREAM_IMAGE_PROGRESS_KEY = "streamImageProgress"
 const HEALTH_PING_INTERVAL = 5 // seconds
+const MAX_INIT_IMAGE_DIMENSION = 768
+
 const IMAGE_REGEX = new RegExp('data:image/[A-Za-z]+;base64')

 let sessionId = new Date().getTime()

 let promptField = document.querySelector('#prompt')
 let numOutputsTotalField = document.querySelector('#num_outputs_total')
 let numOutputsParallelField = document.querySelector('#num_outputs_parallel')
@@ -641,8 +817,8 @@ let widthField = document.querySelector('#width')
 let heightField = document.querySelector('#height')
 let initImageSelector = document.querySelector("#init_image")
 let initImagePreview = document.querySelector("#init_image_preview")
-let maskImageSelector = document.querySelector("#mask")
-let maskImagePreview = document.querySelector("#mask_preview")
+// let maskImageSelector = document.querySelector("#mask")
+// let maskImagePreview = document.querySelector("#mask_preview")
 let turboField = document.querySelector('#turbo')
 let useCPUField = document.querySelector('#use_cpu')
 let useFullPrecisionField = document.querySelector('#use_full_precision')
@@ -652,23 +828,27 @@ let diskPathField = document.querySelector('#diskPath')
 let useBetaChannelField = document.querySelector("#use_beta_channel")
 let promptStrengthSlider = document.querySelector('#prompt_strength_slider')
 let promptStrengthField = document.querySelector('#prompt_strength')
+let samplerField = document.querySelector('#sampler')
+let samplerSelectionContainer = document.querySelector("#samplerSelection")
 let useFaceCorrectionField = document.querySelector("#use_face_correction")
 let useUpscalingField = document.querySelector("#use_upscale")
 let upscaleModelField = document.querySelector("#upscale_model")
 let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
 let updateBranchLabel = document.querySelector("#updateBranchLabel")
+let streamImageProgressField = document.querySelector("#stream_image_progress")

 let makeImageBtn = document.querySelector('#makeImage')
 let stopImageBtn = document.querySelector('#stopImage')

 let imagesContainer = document.querySelector('#current-images')
 let initImagePreviewContainer = document.querySelector('#init_image_preview_container')
-let initImageClearBtn = document.querySelector('#init_image_clear')
+let initImageClearBtn = document.querySelector('.init_image_clear')
 let promptStrengthContainer = document.querySelector('#prompt_strength_container')

-// let maskSetting = document.querySelector('#mask_setting')
+// let maskSetting = document.querySelector('#editor-inputs-mask_setting')
 // let maskImagePreviewContainer = document.querySelector('#mask_preview_container')
 // let maskImageClearBtn = document.querySelector('#mask_clear')
+let maskSetting = document.querySelector('#enable_mask')

 let editorModifierEntries = document.querySelector('#editor-modifiers-entries')
 let editorModifierTagsList = document.querySelector('#editor-inputs-tags-list')
@@ -685,6 +865,7 @@ let previewPrompt = document.querySelector('#preview-prompt')
 let showConfigToggle = document.querySelector('#configToggleBtn')
 // let configBox = document.querySelector('#config')
 let outputMsg = document.querySelector('#outputMsg')
+let progressBar = document.querySelector("#progressBar")

 let soundToggle = document.querySelector('#sound_toggle')

@@ -693,12 +874,36 @@ let serverStatusMsg = document.querySelector('#server-status-msg')

 let advancedPanelHandle = document.querySelector("#editor-settings .collapsible")
 let modifiersPanelHandle = document.querySelector("#editor-modifiers .collapsible")
+let inpaintingEditorContainer = document.querySelector('#inpaintingEditor')
+let inpaintingEditor = new DrawingBoard.Board('inpaintingEditor', {
+    color: "#ffffff",
+    background: false,
+    size: 30,
+    webStorage: false,
+    controls: [{'DrawingMode': {'filler': false}}, 'Size', 'Navigation']
+})
+let inpaintingEditorCanvasBackground = document.querySelector('.drawing-board-canvas-wrapper')
+// let inpaintingEditorControls = document.querySelector('.drawing-board-controls')
+
+// let inpaintingEditorMetaControl = document.createElement('div')
+// inpaintingEditorMetaControl.className = 'drawing-board-control'
+// let initImageClearBtnToolbar = document.createElement('button')
+// initImageClearBtnToolbar.className = 'init_image_clear'
+// initImageClearBtnToolbar.innerHTML = 'Remove Image'
+// inpaintingEditorMetaControl.appendChild(initImageClearBtnToolbar)
+// inpaintingEditorControls.appendChild(inpaintingEditorMetaControl)
+
+let maskResetButton = document.querySelector('.drawing-board-control-navigation-reset')
+maskResetButton.innerHTML = 'Clear'
+maskResetButton.style.fontWeight = 'normal'
+maskResetButton.style.fontSize = '10pt'
+
 let serverStatus = 'offline'
 let activeTags = []
 let modifiers = []
 let lastPromptUsed = ''
+let taskStopped = true
+let batchesDone = 0

 const modifierThumbnailPath = 'static/modifier-thumbnails';
 const activeCardClass = 'modifier-card-active';
@@ -777,6 +982,10 @@ function isModifiersPanelOpenEnabled() {
     return getLocalStorageBoolItem(MODIFIERS_PANEL_OPEN_KEY, false)
 }

+function isStreamImageProgressEnabled() {
+    return getLocalStorageBoolItem(STREAM_IMAGE_PROGRESS_KEY, false)
+}
+
 function setStatus(statusType, msg, msgType) {
     if (statusType !== 'server') {
         return;
@@ -784,12 +993,12 @@ function setStatus(statusType, msg, msgType) {

     if (msgType == 'error') {
         // msg = '<span style="color: red">' + msg + '<span>'
-        serverStatusColor.style.backgroundColor = 'red'
+        serverStatusColor.style.color = 'red'
         serverStatusMsg.style.color = 'red'
         serverStatusMsg.innerText = 'Stable Diffusion has stopped'
     } else if (msgType == 'success') {
         // msg = '<span style="color: green">' + msg + '<span>'
-        serverStatusColor.style.backgroundColor = 'green'
+        serverStatusColor.style.color = 'green'
         serverStatusMsg.style.color = 'green'
         serverStatusMsg.innerText = 'Stable Diffusion is ready'
         serverStatus = 'online'
@@ -836,14 +1045,37 @@ async function healthCheck() {
     }
 }

+function makeImageElement(width, height) {
+    let imgItem = document.createElement('div')
+    imgItem.className = 'imgItem'
+
+    let img = document.createElement('img')
+    img.width = parseInt(width)
+    img.height = parseInt(height)
+
+    imgItem.appendChild(img)
+    imagesContainer.appendChild(imgItem)
+
+    return imgItem
+}
+
 // makes a single image. don't call this directly, use makeImage() instead
-async function doMakeImage(reqBody) {
+async function doMakeImage(reqBody, batchCount) {
+    if (taskStopped) {
+        return
+    }
+
     let res = ''
     let seed = reqBody['seed']
+    let numOutputs = parseInt(reqBody['num_outputs'])
+
+    let images = []
+
+    function makeImageContainers(numImages) {
+        for (let i = images.length; i < numImages; i++) {
+            images.push(makeImageElement(reqBody.width, reqBody.height))
+        }
+    }

     try {
         res = await fetch('/image', {
@@ -854,15 +1086,82 @@ async function doMakeImage(reqBody) {
             body: JSON.stringify(reqBody)
         })

+        let reader = res.body.getReader()
+        let textDecoder = new TextDecoder()
+        let finalJSON = ''
+        let prevTime = -1
+        while (true) {
+            try {
+                let t = new Date().getTime()
+
+                const {value, done} = await reader.read()
+                if (done) {
+                    break
+                }
+
+                let timeTaken = (prevTime === -1 ? -1 : t - prevTime)
+
+                let jsonStr = textDecoder.decode(value)
+
+                try {
+                    let stepUpdate = JSON.parse(jsonStr)
+
+                    if (stepUpdate.step === undefined) {
+                        finalJSON += jsonStr
+                    } else {
+                        let batchSize = stepUpdate.total_steps
+                        let overallStepCount = stepUpdate.step + batchesDone * batchSize
+                        let totalSteps = batchCount * batchSize
+                        let percent = 100 * (overallStepCount / totalSteps)
+                        percent = (percent > 100 ? 100 : percent)
+                        percent = percent.toFixed(0)
+
+                        stepsRemaining = totalSteps - overallStepCount
+                        stepsRemaining = (stepsRemaining < 0 ? 0 : stepsRemaining)
+                        timeRemaining = (timeTaken === -1 ? '' : stepsRemaining * timeTaken) // ms
+
+                        outputMsg.innerHTML = `Batch ${batchesDone+1} of ${batchCount}`
+                        progressBar.innerHTML = `Generating image(s): ${percent}%`
+
+                        if (timeTaken !== -1) {
+                            progressBar.innerHTML += `<br>Time remaining (approx): ${millisecondsToStr(timeRemaining)}`
+                        }
+                        progressBar.style.display = 'block'
+
+                        if (stepUpdate.output !== undefined) {
+                            makeImageContainers(numOutputs)
+
+                            for (idx in stepUpdate.output) {
+                                let imgItem = images[idx]
+                                let img = imgItem.firstChild
+                                let tmpImageData = stepUpdate.output[idx]
+                                img.src = tmpImageData['path'] + '?t=' + new Date().getTime()
+                            }
+                        }
+                    }
+                } catch (e) {
+                    finalJSON += jsonStr
+                }
+
+                prevTime = t
+            } catch (e) {
+                logError('Stable Diffusion had an error. Please check the logs in the command-line window.', res)
+                res = undefined
+                throw e
+            }
+        }
+
         if (res.status != 200) {
             if (serverStatus === 'online') {
-                logError('Stable Diffusion had an error: ' + await res.text() + '. This happens sometimes. Maybe modify the prompt or seed a little bit?', res)
+                logError('Stable Diffusion had an error: ' + await res.text(), res)
             } else {
-                logError("Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed.", res)
+                logError("Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed. Please check the error message in the command-line window.", res)
             }
             res = undefined
+            progressBar.style.display = 'none'
         } else {
-            res = await res.json()
+            res = JSON.parse(finalJSON)
+            progressBar.style.display = 'none'

             if (res.status !== 'succeeded') {
                 let msg = ''
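As a worked example of the progress arithmetic in the reader loop above (values illustrative, not from the commit): with batchCount = 2 batches of total_steps = 50 each, step 25 of the second batch (batchesDone = 1) works out to 75%.

    step, batches_done, batch_count, batch_size = 25, 1, 2, 50
    percent = 100 * (step + batches_done * batch_size) / (batch_count * batch_size)
    print(f"{percent:.0f}%")  # prints: 75%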
@@ -886,7 +1185,10 @@ async function doMakeImage(reqBody) {
         }
     } catch (e) {
         console.log('request error', e)
+        logError('Stable Diffusion had an error. Please check the logs in the command-line window. <br/><br/>' + e + '<br/><pre>' + e.stack + '</pre>', res)
         setStatus('request', 'error', 'error')
+        progressBar.style.display = 'none'
         res = undefined
     }

     if (!res) {
@@ -895,6 +1197,8 @@ async function doMakeImage(reqBody) {

     lastPromptUsed = reqBody['prompt']

+    makeImageContainers(res.output.length)
+
     for (let idx in res.output) {
         let imgBody = ''
         let seed = 0
@@ -909,12 +1213,9 @@ async function doMakeImage(reqBody) {
             continue
         }

-        let imgItem = document.createElement('div')
-        imgItem.className = 'imgItem'
+        let imgItem = images[idx]
+        let img = imgItem.firstChild

-        let img = document.createElement('img')
-        img.width = parseInt(reqBody.width)
-        img.height = parseInt(reqBody.height)
         img.src = imgBody

         let imgItemInfo = document.createElement('span')
@@ -932,19 +1233,19 @@ async function doMakeImage(reqBody) {
         imgSaveBtn.className = 'imgSaveBtn'
         imgSaveBtn.innerText = 'Download'

-        imgItem.appendChild(img)
         imgItem.appendChild(imgItemInfo)
         imgItemInfo.appendChild(imgSeedLabel)
         imgItemInfo.appendChild(imgUseBtn)
         imgItemInfo.appendChild(imgSaveBtn)
-        imagesContainer.appendChild(imgItem)

         imgUseBtn.addEventListener('click', function() {
             initImageSelector.value = null
             initImagePreview.src = imgBody

             initImagePreviewContainer.style.display = 'block'
+            inpaintingEditorContainer.style.display = 'none'
             promptStrengthContainer.style.display = 'block'
+            maskSetting.checked = false

             // maskSetting.style.display = 'block'

@@ -995,7 +1296,7 @@ async function makeImage() {

     let validation = validateInput()
     if (validation['isValid']) {
-        outputMsg.innerHTML = 'Fetching..'
+        outputMsg.innerHTML = 'Starting..'
     } else {
         if (validation['error']) {
             logError(validation['error'])
@@ -1008,10 +1309,12 @@ async function makeImage() {
     setStatus('request', 'fetching..')

-    makeImageBtn.innerHTML = 'Processing..'
-    makeImageBtn.disabled = true
+    makeImageBtn.style.display = 'none'
+    stopImageBtn.style.display = 'block'

+    taskStopped = false
+    batchesDone = 0

     let seed = (randomSeedField.checked ? Math.floor(Math.random() * 10000000) : parseInt(seedField.value))
     let numOutputsTotal = parseInt(numOutputsTotalField.value)
@@ -1019,6 +1322,8 @@ async function makeImage() {
     let batchCount = Math.ceil(numOutputsTotal / numOutputsParallel)
     let batchSize = numOutputsParallel

+    let streamImageProgress = (numOutputsTotal > 50 ? false : streamImageProgressField.checked)
+
     let prompt = promptField.value
     if (activeTags.length > 0) {
         let promptTags = activeTags.map(x => x.name).join(", ");
@@ -1028,6 +1333,7 @@ async function makeImage() {
     previewPrompt.innerText = prompt

     let reqBody = {
+        session_id: sessionId,
         prompt: prompt,
         num_outputs: batchSize,
         num_inference_steps: numInferenceStepsField.value,
@@ -1037,7 +1343,10 @@ async function makeImage() {
         // allow_nsfw: allowNSFWField.checked,
         turbo: turboField.checked,
         use_cpu: useCPUField.checked,
-        use_full_precision: useFullPrecisionField.checked
+        use_full_precision: useFullPrecisionField.checked,
+        stream_progress_updates: true,
+        stream_image_progress: streamImageProgress,
+        show_only_filtered_image: showOnlyFilteredImageField.checked
     }

     if (IMAGE_REGEX.test(initImagePreview.src)) {
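Since reqBody now asks for stream_progress_updates, the /image endpoint answers with a sequence of JSON chunks rather than one JSON body. A minimal Python sketch of a client for this protocol follows; the host and port are assumptions, and the chunk shapes (step/total_steps objects followed by a final status/output object) are inferred from the reader loop above.

    import json
    import requests

    def stream_image(req_body, url='http://localhost:9000/image'):  # hypothetical host/port
        final_json = ''
        with requests.post(url, json=req_body, stream=True) as res:
            for chunk in res.iter_content(chunk_size=None):  # one chunk per server write
                text = chunk.decode()
                try:
                    update = json.loads(text)
                except ValueError:
                    final_json += text   # partial final payload, keep buffering
                    continue
                if 'step' in update:
                    print(f"step {update['step']} of {update['total_steps']}")
                else:
                    final_json += text   # the final response, parsed below
        return json.loads(final_json)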
@@ -1047,6 +1356,13 @@ async function makeImage() {
         // if (IMAGE_REGEX.test(maskImagePreview.src)) {
         //     reqBody['mask'] = maskImagePreview.src
         // }
+        if (maskSetting.checked) {
+            reqBody['mask'] = inpaintingEditor.getImg()
+        }
+
+        reqBody['sampler'] = 'ddim'
+    } else {
+        reqBody['sampler'] = samplerField.value
     }

     if (saveToDiskField.checked && diskPathField.value.trim() !== '') {
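reqBody['mask'] is a base64 data URL produced by DrawingBoard's getImg(). A hedged sketch of how a server could decode it; the helper below is illustrative and not part of this commit.

    import base64, io
    from PIL import Image

    def decode_data_url(data_url):
        # strip the "data:image/png;base64," header, then decode the payload
        _, b64_payload = data_url.split(',', 1)
        return Image.open(io.BytesIO(base64.b64decode(b64_payload)))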
@@ -1061,10 +1377,6 @@ async function makeImage() {
         reqBody['use_upscale'] = upscaleModelField.value
     }

-    if (showOnlyFilteredImageField.checked && (useUpscalingField.checked || useFaceCorrectionField.checked)) {
-        reqBody['show_only_filtered_image'] = showOnlyFilteredImageField.checked
-    }
-
     let time = new Date().getTime()
     imagesContainer.innerHTML = ''

@@ -1073,7 +1385,8 @@ async function makeImage() {
     for (let i = 0; i < batchCount; i++) {
         reqBody['seed'] = seed + (i * batchSize)

-        let success = await doMakeImage(reqBody)
+        let success = await doMakeImage(reqBody, batchCount)
+        batchesDone++

         if (success) {
             outputMsg.innerText = 'Processed batch ' + (i+1) + '/' + batchCount
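The seed arithmetic above advances the base seed by one whole batch per iteration, so (assuming the backend assigns one seed offset per image within a batch) no two images share a seed. A small sketch:

    def batch_seeds(base_seed, batch_count, batch_size):
        # mirrors reqBody['seed'] = seed + (i * batchSize) in the loop above
        return [base_seed + i * batch_size for i in range(batch_count)]

    print(batch_seeds(30000, 3, 4))  # [30000, 30004, 30008]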
@@ -1177,6 +1490,9 @@ useFullPrecisionField.checked = isUseFullPrecisionEnabled()
 turboField.addEventListener('click', handleBoolSettingChange(USE_TURBO_MODE_KEY))
 turboField.checked = isUseTurboModeEnabled()

+streamImageProgressField.addEventListener('click', handleBoolSettingChange(STREAM_IMAGE_PROGRESS_KEY))
+streamImageProgressField.checked = isStreamImageProgressEnabled()
+
 diskPathField.addEventListener('change', handleStringSettingChange(DISK_PATH_KEY))

 saveToDiskField.addEventListener('click', function(e) {
@@ -1213,8 +1529,8 @@ function updateGuidanceScale() {
 function updateGuidanceScaleSlider() {
     if (guidanceScaleField.value < 0) {
         guidanceScaleField.value = 0
-    } else if (guidanceScaleField.value > 20) {
-        guidanceScaleField.value = 20
+    } else if (guidanceScaleField.value > 50) {
+        guidanceScaleField.value = 50
     }

     guidanceScaleSlider.value = guidanceScaleField.value * 10
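The slider stores the guidance scale multiplied by 10, which is why the clamp moves from 20 to 50 while the slider's max in the HTML above moves from 200 to 500:

    def slider_to_guidance(slider_value):
        return slider_value / 10  # slider 75 -> guidance 7.5, slider 500 -> 50.0

    assert slider_to_guidance(75) == 7.5
    assert slider_to_guidance(500) == 50.0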
@@ -1300,6 +1616,7 @@ checkRandomSeed()
 function showInitImagePreview() {
     if (initImageSelector.files.length === 0) {
         initImagePreviewContainer.style.display = 'none'
+        // inpaintingEditorContainer.style.display = 'none'
         promptStrengthContainer.style.display = 'none'
         // maskSetting.style.display = 'none'
         return
@@ -1312,9 +1629,10 @@ function showInitImagePreview() {
         // console.log(file.name, reader.result)
         initImagePreview.src = reader.result
         initImagePreviewContainer.style.display = 'block'
+        inpaintingEditorContainer.style.display = 'none'
         promptStrengthContainer.style.display = 'block'

         // maskSetting.style.display = 'block'
+        samplerSelectionContainer.style.display = 'none'
+        // maskSetting.checked = false
     })

     if (file) {
@@ -1324,24 +1642,37 @@ function showInitImagePreview() {
 initImageSelector.addEventListener('change', showInitImagePreview)
 showInitImagePreview()

+initImagePreview.addEventListener('load', function() {
+    inpaintingEditorCanvasBackground.style.backgroundImage = "url('" + this.src + "')"
+    // maskSetting.style.display = 'block'
+    // inpaintingEditorContainer.style.display = 'block'
+})
+
 initImageClearBtn.addEventListener('click', function() {
     initImageSelector.value = null
     // maskImageSelector.value = null

     initImagePreview.src = ''
     // maskImagePreview.src = ''
+    maskSetting.checked = false

     initImagePreviewContainer.style.display = 'none'
+    // inpaintingEditorContainer.style.display = 'none'
     // maskImagePreviewContainer.style.display = 'none'

     // maskSetting.style.display = 'none'

     promptStrengthContainer.style.display = 'none'
+    samplerSelectionContainer.style.display = 'block'
 })

+maskSetting.addEventListener('click', function() {
+    inpaintingEditorContainer.style.display = (this.checked ? 'block' : 'none')
+})
+
 // function showMaskImagePreview() {
 //     if (maskImageSelector.files.length === 0) {
-//         maskImagePreviewContainer.style.display = 'none'
+//         // maskImagePreviewContainer.style.display = 'none'
 //         return
 //     }
@@ -1349,8 +1680,8 @@ initImageClearBtn.addEventListener('click', function() {
 //     let file = maskImageSelector.files[0]

 //     reader.addEventListener('load', function() {
-//         maskImagePreview.src = reader.result
-//         maskImagePreviewContainer.style.display = 'block'
+//         // maskImagePreview.src = reader.result
+//         // maskImagePreviewContainer.style.display = 'block'
 //     })

 //     if (file) {
@@ -1363,8 +1694,32 @@ initImageClearBtn.addEventListener('click', function() {
 // maskImageClearBtn.addEventListener('click', function() {
 //     maskImageSelector.value = null
 //     maskImagePreview.src = ''
-//     maskImagePreviewContainer.style.display = 'none'
+//     // maskImagePreviewContainer.style.display = 'none'
 // })

+// https://stackoverflow.com/a/8212878
+function millisecondsToStr(milliseconds) {
+    function numberEnding (number) {
+        return (number > 1) ? 's' : '';
+    }
+
+    var temp = Math.floor(milliseconds / 1000);
+    var hours = Math.floor((temp %= 86400) / 3600);
+    var s = ''
+    if (hours) {
+        s += hours + ' hour' + numberEnding(hours) + ' ';
+    }
+    var minutes = Math.floor((temp %= 3600) / 60);
+    if (minutes) {
+        s += minutes + ' minute' + numberEnding(minutes) + ' ';
+    }
+    var seconds = temp % 60;
+    if (!hours && minutes < 4 && seconds) {
+        s += seconds + ' second' + numberEnding(seconds);
+    }
+
+    return s;
+}
 </script>
 <script>
 function createCollapsibles(node) {
@@ -1651,5 +2006,4 @@ async function init() {

 init()
 </script>
-
 </html>
5	ui/media/drawingboard.min.css (vendored, new file)
File diff suppressed because one or more lines are too long

4	ui/media/drawingboard.min.js (vendored, new file)
File diff suppressed because one or more lines are too long

BIN	ui/media/favicon-16x16.png (new file)
Binary file not shown. After: Size: 466 B

BIN	ui/media/favicon-32x32.png (new file)
Binary file not shown. After: Size: 973 B

2	ui/media/jquery-3.6.1.min.js (vendored, new file)
File diff suppressed because one or more lines are too long
@@ -1,6 +1,7 @@
 import json

 class Request:
+    session_id: str = "session"
     prompt: str = ""
     init_image: str = None # base64
     mask: str = None # base64
@@ -11,6 +12,7 @@ class Request:
     height: int = 512
     seed: int = 42
     prompt_strength: float = 0.8
+    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
     # allow_nsfw: bool = False
     precision: str = "autocast" # or "full"
     save_to_disk_path: str = None
@@ -21,8 +23,12 @@ class Request:
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
     show_only_filtered_image: bool = False

+    stream_progress_updates: bool = False
+    stream_image_progress: bool = False
+
    def json(self):
        return {
+            "session_id": self.session_id,
            "prompt": self.prompt,
            "num_outputs": self.num_outputs,
            "num_inference_steps": self.num_inference_steps,
@@ -31,15 +37,18 @@ class Request:
            "height": self.height,
            "seed": self.seed,
            "prompt_strength": self.prompt_strength,
+            "sampler": self.sampler,
            "use_face_correction": self.use_face_correction,
            "use_upscale": self.use_upscale,
        }

    def to_string(self):
        return f'''
    session_id: {self.session_id}
    prompt: {self.prompt}
    seed: {self.seed}
    num_inference_steps: {self.num_inference_steps}
+    sampler: {self.sampler}
    guidance_scale: {self.guidance_scale}
    w: {self.width}
    h: {self.height}
@@ -50,7 +59,10 @@ class Request:
    use_full_precision: {self.use_full_precision}
    use_face_correction: {self.use_face_correction}
    use_upscale: {self.use_upscale}
-    show_only_filtered_image: {self.show_only_filtered_image}'''
+    show_only_filtered_image: {self.show_only_filtered_image}
+
+    stream_progress_updates: {self.stream_progress_updates}
+    stream_image_progress: {self.stream_image_progress}'''

 class Image:
     data: str # base64
@@ -71,13 +83,11 @@ class Image:

 class Response:
     request: Request
-    session_id: str
     images: list

     def json(self):
         res = {
             "status": 'succeeded',
-            "session_id": self.session_id,
             "request": self.request.json(),
             "output": [],
         }
@@ -1,8 +1,26 @@
 diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
-index dcf7901..1f99adc 100644
+index b967b55..35ef520 100644
 --- a/optimizedSD/ddpm.py
 +++ b/optimizedSD/ddpm.py
-@@ -528,7 +528,8 @@ class UNet(DDPM):
+@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
 from ldm.modules.diffusionmodules.util import make_beta_schedule
 from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
 from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff

 def disabled_train(self):
     """Overwrite model.train with this function to make sure train/eval mode
+@@ -506,6 +506,8 @@ class UNet(DDPM):

         x_latent = noise if x0 is None else x0
         # sampling
+        if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
+            self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)

         if sampler == "plms":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
+@@ -528,39 +530,46 @@ class UNet(DDPM):
         elif sampler == "ddim":
             samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning,
@@ -10,9 +28,69 @@ index dcf7901..1f99adc 100644
+                                         mask = mask,init_latent=x_T,use_original_steps=False,
+                                         callback=callback, img_callback=img_callback)

 # elif sampler == "euler":
 #     cvd = CompVisDenoiser(self.alphas_cumprod)
-@@ -687,7 +688,8 @@ class UNet(DDPM):
         elif sampler == "euler":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "euler_a":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)

         elif sampler == "dpm2":
             samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "heun":
             samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)

         elif sampler == "dpm2_a":
             samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)


         elif sampler == "lms":
             samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
+
+        yield from samples

         if(self.turbo):
             self.model1.to("cpu")
             self.model2.to("cpu")

-        return samples
-
     @torch.no_grad()
     def plms_sampling(self, cond,b, img,
                       ddim_use_original_steps=False,
@@ -599,10 +608,10 @@ class UNet(DDPM):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)

-        return img
+        yield from img_callback(img, len(iterator)-1)

     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -706,7 +715,8 @@ class UNet(DDPM):

     @torch.no_grad()
     def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
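The patch's central change is turning the samplers from functions that return the final latent into generators that yield through img_callback. A toy Python sketch of the pattern (not the project's code):

    def img_callback(x, step):
        yield {'step': step, 'image': x}

    def sampler(steps):
        x = 0
        for i in range(steps):
            x += 1                        # stand-in for one denoising step
            yield from img_callback(x, i)
        # no return value: the last yielded update carries the final image

    for update in sampler(3):
        print(update)                     # {'step': 0, 'image': 1} ...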
@@ -22,13 +100,233 @@ index dcf7901..1f99adc 100644

         timesteps = self.ddim_timesteps
         timesteps = timesteps[:t_start]
-@@ -710,6 +712,9 @@ class UNet(DDPM):
         x_dec = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
+@@ -730,10 +740,13 @@ class UNet(DDPM):
                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                    unconditional_conditioning=unconditional_conditioning)
+
+            if callback: callback(i)
+            if img_callback: img_callback(x_dec, i)

+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x_dec, i)
+
         if mask is not None:
             return x0 * mask + (1. - mask) * x_dec
-            return x0 * mask + (1. - mask) * x_dec
+            x_dec = x0 * mask + (1. - mask) * x_dec

-        return x_dec
+        yield from img_callback(x_dec, len(iterator)-1)


     @torch.no_grad()
@@ -779,13 +792,16 @@ class UNet(DDPM):


     @torch.no_grad()
-    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
         cvd = CompVisDenoiser(ac)
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -807,13 +823,18 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             # Euler method
             x = x + d * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)

     @torch.no_grad()
-    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
+    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with Euler method steps."""
         extra_args = {} if extra_args is None else extra_args

@@ -822,6 +843,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):

@@ -837,17 +860,22 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Euler method
             dt = sigma_down - sigmas[i]
             x = x + d * dt
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)



     @torch.no_grad()
-    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                      img_callback=None):
         """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args

@@ -855,6 +883,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
+

         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
@@ -876,6 +906,9 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             if sigmas[i + 1] == 0:
                 # Euler method
@@ -895,11 +928,13 @@ class UNet(DDPM):
                 d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
                 d_prime = (d + d_2) / 2
                 x = x + d_prime * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)


     @torch.no_grad()
-    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args

@@ -907,6 +942,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -924,7 +961,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

-
+            if img_callback: yield from img_callback(x, i)

             d = to_d(x, sigma_hat, denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
@@ -945,11 +982,13 @@ class UNet(DDPM):

             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)


     @torch.no_grad()
-    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
+    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with DPM-Solver inspired second-order steps."""
         extra_args = {} if extra_args is None else extra_args

@@ -957,6 +996,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):

@@ -973,6 +1014,9 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
             sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
@@ -993,11 +1037,13 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)


     @torch.no_grad()
-    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
+    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
+                     img_callback=None):
         extra_args = {} if extra_args is None else extra_args
         s_in = x.new_ones([x.shape[0]])

@@ -1005,6 +1051,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
+
         ds = []
|
||||
for i in trange(len(sigmas) - 1, disable=disable):
|
||||
|
||||
@@ -1017,6 +1065,7 @@ class UNet(DDPM):
|
||||
e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
|
||||
denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
||||
|
||||
+ if img_callback: yield from img_callback(x, i)
|
||||
|
||||
d = to_d(x, sigmas[i], denoised)
|
||||
ds.append(d)
|
||||
@@ -1027,4 +1076,5 @@ class UNet(DDPM):
|
||||
cur_order = min(i + 1, order)
|
||||
coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
|
||||
x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
||||
- return x
|
||||
+
|
||||
+ yield from img_callback(x, len(sigmas)-1)
|
||||
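Taken together, these hunks turn every k-diffusion sampler in the patch into a generator: each `yield from img_callback(x, i)` suspends the sampling loop so the caller receives intermediate latents as they are produced, and the unconditional trailing `yield from img_callback(x, len(sigmas)-1)` delivers the finished sample the same way, so callers are expected to always pass an img_callback. A minimal standalone sketch of the pattern (illustrative names only, not the repo's code):

    def toy_sampler(x, steps, img_callback=None):
        for i in range(steps):
            x = 0.5 * x                       # stand-in for one denoising step
            if img_callback: yield from img_callback(x, i)
        yield from img_callback(x, steps)     # the final sample also flows through the callback

    def progress_callback(x, i):
        yield {"step": i, "latents": x}       # whatever the callback yields reaches the outer caller

    final = None
    for event in toy_sampler(8.0, 4, progress_callback):
        final = event                         # the last event holds the fully denoised value
    print(final)                              # {'step': 4, 'latents': 0.5}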
diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
index abc3098..7a32ffe 100644
--- a/optimizedSD/openaimodelSplit.py
+++ b/optimizedSD/openaimodelSplit.py
@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
normalization,
timestep_embedding,
)
-from splitAttention import SpatialTransformer
+from .splitAttention import SpatialTransformer

class AttentionPool2d(nn.Module):
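The one-character change above makes the import package-relative: `splitAttention` is then resolved from inside the `optimizedSD` package instead of from `sys.path`, which is what breaks when `openaimodelSplit` is imported as a module rather than run as a script. Roughly (hypothetical layout, for illustration):

    # optimizedSD/
    #     splitAttention.py
    #     openaimodelSplit.py
    #
    # import optimizedSD.openaimodelSplit
    #   "from splitAttention import ..."  -> ModuleNotFoundError: splitAttention is not top-level
    #   "from .splitAttention import ..." -> resolved relative to the optimizedSD package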
ui/sd_internal/runtime.py
@ -1,9 +1,10 @@
import json
import os, re
import traceback
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from PIL import Image, ImageOps
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange
@ -32,10 +33,11 @@ filename_regex = re.compile('[^a-zA-Z0-9]')
from . import Request, Response, Image as ResponseImage
import base64
from io import BytesIO
#from colorama import Fore

# local
session_id = str(uuid.uuid4())[-8:]
stop_processing = False
temp_images = {}

ckpt_file = None
gfpgan_file = None
@ -184,23 +186,47 @@ def load_model_real_esrgan(real_esrgan_to_use):
print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision)

def mk_img(req: Request):
global modelFS, device
try:
yield from do_mk_img(req)
except Exception as e:
print(traceback.format_exc())

gc()

if device != "cpu":
modelFS.to("cpu")
modelCS.to("cpu")

model.model1.to("cpu")
model.model2.to("cpu")

gc()

yield json.dumps({
"status": 'failed',
"detail": str(e)
})

def do_mk_img(req: Request):
global model, modelCS, modelFS, device
global model_gfpgan, model_real_esrgan
global stop_processing

stop_processing = False

res = Response()
res.session_id = session_id
res.request = req
res.images = []

temp_images.clear()

model.turbo = req.turbo
if req.use_cpu:
if device != 'cpu':
device = 'cpu'

if model_is_half:
del model, modelCS, modelFS
load_model_ckpt(ckpt_file, device)

load_model_gfpgan(gfpgan_file)
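The `mk_img`/`do_mk_img` split above is the generator equivalent of a try/except wrapper: because exceptions inside a generator only surface when it is iterated, the wrapper must itself be a generator that does the iterating (`yield from`) inside its own `try`, and on failure it can still yield a final JSON error event to the client. A runnable sketch of just that mechanism (stand-in names, not the repo's code):

    import json, traceback

    def do_work():                       # stand-in for do_mk_img
        yield json.dumps({"step": 0})
        raise RuntimeError("boom")       # error raised mid-stream

    def work():                          # stand-in for mk_img
        try:
            yield from do_work()         # exceptions propagate here, at iteration time
        except Exception as e:
            traceback.print_exc()
            yield json.dumps({"status": "failed", "detail": str(e)})

    print(list(work()))                  # ['{"step": 0}', '{"status": "failed", "detail": "boom"}']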
@ -215,7 +241,8 @@ def mk_img(req: Request):
(req.init_image is None and model_fs_is_half) or \
(req.init_image is not None and not model_fs_is_half and not force_full_precision):

load_model_ckpt(ckpt_file, device, model.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))
del model, modelCS, modelFS
load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))

if prev_device != device:
load_model_gfpgan(gfpgan_file)
@ -248,6 +275,7 @@ def mk_img(req: Request):
opt_use_upscale = req.use_upscale
opt_show_only_filtered = req.show_only_filtered_image
opt_format = 'png'
opt_sampler_name = req.sampler

print(req.to_string(), '\n device', device)

@ -265,6 +293,8 @@ def mk_img(req: Request):
else:
precision_scope = nullcontext

mask = None

if req.init_image is None:
handler = _txt2img

@ -284,18 +314,22 @@ def mk_img(req: Request):
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image))  # move to latent space

if device != "cpu":
mem = torch.cuda.memory_allocated() / 1e6
modelFS.to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
if req.mask is not None:
mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device)
mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
mask = repeat(mask, '1 ... -> b ...', b=batch_size)

if device != "cpu" and precision == "autocast":
mask = mask.half()

move_fs_to_cpu()

assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
t_enc = int(opt_strength * opt_ddim_steps)
print(f"target t_enc is {t_enc} steps")

if opt_save_to_disk_path is not None:
session_out_path = os.path.join(opt_save_to_disk_path, session_id)
session_out_path = os.path.join(opt_save_to_disk_path, req.session_id)
os.makedirs(session_out_path, exist_ok=True)
else:
session_out_path = None
@ -326,29 +360,60 @@ def mk_img(req: Request):
else:
c = modelCS.get_learned_conditioning(prompts)

modelFS.to(device)

partial_x_samples = None
def img_callback(x_samples, i):
nonlocal partial_x_samples

partial_x_samples = x_samples

if req.stream_progress_updates:
n_steps = opt_ddim_steps if req.init_image is None else t_enc
progress = {"step": i, "total_steps": n_steps}

if req.stream_image_progress and i % 5 == 0:
partial_images = []

for i in range(batch_size):
x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
x_sample = x_sample.astype(np.uint8)
img = Image.fromarray(x_sample)
buf = BytesIO()
img.save(buf, format='JPEG')
buf.seek(0)

del img, x_sample, x_samples_ddim
# don't delete x_samples, it is used in the code that called this callback

temp_images[str(req.session_id) + '/' + str(i)] = buf
partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})

progress['output'] = partial_images

yield json.dumps(progress)

if stop_processing:
raise UserInitiatedStop("User requested that we stop processing")
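For reference, one streamed progress event produced by this callback has roughly the following shape (values illustrative; `output` is attached only on every fifth step, and only when `stream_image_progress` is enabled):

    progress = {
        "step": 10,
        "total_steps": 25,
        "output": [{"path": "/image/tmp/session/0"}],   # temp JPEG previews, one per batch image
    }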
# run the handler
try:
if handler == _txt2img:
x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback)
x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name)
else:
x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback)
x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask)

yield from x_samples

x_samples = partial_x_samples
except UserInitiatedStop:
if partial_x_samples is None:
continue

x_samples = partial_x_samples

modelFS.to(device)

print("saving images")
for i in range(batch_size):

@ -358,6 +423,14 @@ def mk_img(req: Request):
x_sample = x_sample.astype(np.uint8)
img = Image.fromarray(x_sample)

has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
(opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN'))

return_orig_img = not has_filters or not opt_show_only_filtered

if stop_processing:
return_orig_img = True

if opt_save_to_disk_path is not None:
prompt_flattened = filename_regex.sub('_', prompts[0])
prompt_flattened = prompt_flattened[:50]
@ -368,12 +441,12 @@ def mk_img(req: Request):
img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}")
meta_out_path = os.path.join(session_out_path, f"{file_path}.txt")

if not opt_show_only_filtered:
if return_orig_img:
save_image(img, img_out_path)

save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale)
save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name)

if not opt_show_only_filtered:
if return_orig_img:
img_data = img_to_base64_str(img)
res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
res.images.append(res_image_orig)
@ -381,8 +454,10 @@ def mk_img(req: Request):
if opt_save_to_disk_path is not None:
res_image_orig.path_abs = img_out_path

if (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
(opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN')):
del img

if has_filters and not stop_processing:
print('Applying filters..')

gc()
filters_applied = []
@ -410,18 +485,19 @@ def mk_img(req: Request):
save_image(filtered_image, filtered_img_out_path)
res_image_filtered.path_abs = filtered_img_out_path

del filtered_image

seeds += str(opt_seed) + ","
opt_seed += 1

if device != "cpu":
mem = torch.cuda.memory_allocated() / 1e6
modelFS.to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)
del x_samples
move_fs_to_cpu()
gc()
del x_samples, x_samples_ddim, x_sample
print("memory_final = ", torch.cuda.memory_allocated() / 1e6)

return res
print('Task completed')

yield json.dumps(res.json())

def save_image(img, img_out_path):
try:
@ -429,8 +505,8 @@ def save_image(img, img_out_path):
except:
print('could not save the file', traceback.format_exc())

def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale):
metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}"
def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name):
metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}"

try:
with open(meta_out_path, 'w') as f:
@ -438,7 +514,7 @@ def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps
except:
print('could not save the file', traceback.format_exc())

def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback):
def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name):
shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]

if device != "cpu":
@ -458,12 +534,13 @@ def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code,
eta=opt_ddim_eta,
x_T=start_code,
img_callback=img_callback,
sampler = 'plms',
mask=mask,
sampler = sampler_name,
)

return samples_ddim
yield from samples_ddim

def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback):
def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
# encode (scaled latent)
z_enc = model.stochastic_encode(
init_latent,
@ -472,6 +549,8 @@ def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, o
opt_ddim_eta,
opt_ddim_steps,
)
x_T = None if mask is None else init_latent

# decode it
samples_ddim = model.sample(
t_enc,
@ -480,10 +559,19 @@ def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, o
unconditional_guidance_scale=opt_scale,
unconditional_conditioning=uc,
img_callback=img_callback,
mask=mask,
x_T=x_T,
sampler = 'ddim'
)

return samples_ddim
yield from samples_ddim

def move_fs_to_cpu():
if device != "cpu":
mem = torch.cuda.memory_allocated() / 1e6
modelFS.to("cpu")
while torch.cuda.memory_allocated() / 1e6 >= mem:
time.sleep(1)

def gc():
if device == 'cpu':
@ -525,6 +613,31 @@ def load_img(img_str, w0, h0):
image = torch.from_numpy(image)
return 2.*image - 1.

def load_mask(mask_str, h0, w0, newH, newW, invert=False):
image = base64_str_to_img(mask_str).convert("RGB")
w, h = image.size
print(f"loaded input mask of size ({w}, {h})")

if invert:
print("inverted")
image = ImageOps.invert(image)
# where_0, where_1 = np.where(image == 0), np.where(image == 255)
# image[where_0], image[where_1] = 255, 0

if h0 is not None and w0 is not None:
h, w = h0, w0

w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64

print(f"New mask size ({w}, {h})")
image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS)
image = np.array(image)

image = image.astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return image
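A condensed, standalone sketch of what this mask pipeline does (illustrative only; the repo's `load_mask` additionally takes base64 input via `base64_str_to_img`, and the caller then repeats the single mask channel across the 4 latent channels):

    import numpy as np, torch
    from PIL import Image, ImageOps

    def prep_mask(pil_mask, latent_h, latent_w, invert=False):
        mask = pil_mask.convert("RGB")
        if invert:
            mask = ImageOps.invert(mask)   # flip which region is kept vs. repainted
        mask = mask.resize((latent_w, latent_h), resample=Image.Resampling.LANCZOS)
        arr = np.array(mask).astype(np.float32) / 255.0
        return torch.from_numpy(arr[None].transpose(0, 3, 1, 2))  # 1 x 3 x H x W

    m = prep_mask(Image.new("RGB", (512, 512), "white"), 64, 64, invert=True)
    print(m.shape)  # torch.Size([1, 3, 64, 64])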
# https://stackoverflow.com/a/61114178
def img_to_base64_str(img):
buffered = BytesIO()
46
ui/server.py
@ -18,7 +18,7 @@ OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse
from starlette.responses import FileResponse, StreamingResponse
from pydantic import BaseModel
import logging

@ -34,6 +34,7 @@ outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
session_id: str = "session"
prompt: str = ""
init_image: str = None # base64
mask: str = None # base64
@ -44,6 +45,7 @@ class ImageRequest(BaseModel):
height: int = 512
seed: int = 42
prompt_strength: float = 0.8
sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
# allow_nsfw: bool = False
save_to_disk_path: str = None
turbo: bool = True
@ -53,9 +55,14 @@ class ImageRequest(BaseModel):
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
show_only_filtered_image: bool = False

stream_progress_updates: bool = False
stream_image_progress: bool = False

class SetAppConfigRequest(BaseModel):
update_branch: str = "main"

app.mount('/media', StaticFiles(directory=os.path.join(SD_UI_DIR, 'media/')), name="media")

@app.get('/')
def read_root():
headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
@ -90,6 +97,7 @@ def image(req : ImageRequest):
from sd_internal import runtime

r = Request()
r.session_id = req.session_id
r.prompt = req.prompt
r.init_image = req.init_image
r.mask = req.mask
@ -100,6 +108,7 @@ def image(req : ImageRequest):
r.height = req.height
r.seed = req.seed
r.prompt_strength = req.prompt_strength
r.sampler = req.sampler
# r.allow_nsfw = req.allow_nsfw
r.turbo = req.turbo
r.use_cpu = req.use_cpu
@ -109,10 +118,24 @@ def image(req : ImageRequest):
r.use_face_correction = req.use_face_correction
r.show_only_filtered_image = req.show_only_filtered_image

try:
res: Response = runtime.mk_img(r)
r.stream_progress_updates = True # the underlying implementation only supports streaming
r.stream_image_progress = req.stream_image_progress

return res.json()
try:
if not req.stream_progress_updates:
r.stream_image_progress = False

res = runtime.mk_img(r)

if req.stream_progress_updates:
return StreamingResponse(res, media_type='application/json')
else: # compatibility mode: buffer the streaming responses, and return the last one
last_result = None

for result in res:
last_result = result

return json.loads(last_result)
except Exception as e:
print(traceback.format_exc())
return HTTPException(status_code=500, detail=str(e))
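On the wire, the streaming branch sends the generator's JSON events back-to-back in a single HTTP response body. A hedged client-side sketch for consuming it (not from the repo; the URL and default port are assumptions about a local install):

    import json, requests

    def stream_events(payload, url='http://localhost:9000/image'):
        decoder, buf = json.JSONDecoder(), ''
        with requests.post(url, json=payload, stream=True) as resp:
            for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
                buf += chunk
                while buf:
                    try:
                        obj, end = decoder.raw_decode(buf)   # peel one complete JSON object
                    except ValueError:
                        break                                # incomplete; wait for more bytes
                    buf = buf[end:].lstrip()
                    yield obj

    for event in stream_events({'prompt': 'a photo of a cat', 'stream_progress_updates': True}):
        print(event.get('step'), event.get('total_steps'))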
@ -131,6 +154,13 @@ def stop():
print(traceback.format_exc())
return HTTPException(status_code=500, detail=str(e))

@app.get('/image/tmp/{session_id}/{img_id}')
def get_image(session_id, img_id):
from sd_internal import runtime
buf = runtime.temp_images[session_id + '/' + img_id]
buf.seek(0)
return StreamingResponse(buf, media_type='image/jpeg')
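The `path` entries in streamed progress events point at this endpoint, so a client can fetch the in-memory JPEG preview directly (hypothetical values; default local port assumed):

    import requests
    jpeg_bytes = requests.get('http://localhost:9000/image/tmp/session/0').content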
@app.post('/app_config')
async def setAppConfig(req : SetAppConfigRequest):
try:
@ -176,14 +206,6 @@ def getAppConfig():
print(traceback.format_exc())
return HTTPException(status_code=500, detail=str(e))

@app.get('/media/ding.mp3')
def read_ding():
return FileResponse(os.path.join(SD_UI_DIR, 'media/ding.mp3'))

@app.get('/media/kofi.png')
def read_modifiers():
return FileResponse(os.path.join(SD_UI_DIR, 'media/kofi.png'))

@app.get('/modifiers.json')
def read_modifiers():
return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'))