Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-01-15 19:00:28 +01:00)

Merge pull request #356 from cmdr2/beta (commit 3de5f10d52)

Task Manager (support multiple tabs and user agents), Themes, Auto-save settings, Prompt Matrix (one prompt per line), Load prompts from a file, UI theme tweaks
@ -4,10 +4,11 @@
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<link rel="icon" type="image/png" href="/media/favicon-16x16.png" sizes="16x16">
|
||||
<link rel="icon" type="image/png" href="/media/favicon-32x32.png" sizes="32x32">
|
||||
<link rel="stylesheet" href="/media/main.css?v=22">
|
||||
<link rel="stylesheet" href="/media/main.css?v=25">
|
||||
<link rel="stylesheet" href="/media/modifier-thumbnails.css?v=1">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.2.0/css/all.min.css">
|
||||
<link rel="stylesheet" href="/media/drawingboard.min.css">
|
||||
<link href="https://fonts.googleapis.com/css2?family=Work+Sans:wght@400;600;700;800&display=swap" rel="stylesheet">
|
||||
<script src="/media/jquery-3.6.1.min.js"></script>
|
||||
<script src="/media/drawingboard.min.js"></script>
|
||||
</head>
|
||||
@ -15,7 +16,7 @@
|
||||
<div id="container">
|
||||
<div id="top-nav">
|
||||
<div id="logo">
|
||||
<h1>Stable Diffusion UI <small>v2.21 <span id="updateBranchLabel"></span></small></h1>
|
||||
<h1>Stable Diffusion UI <small>v2.28 <span id="updateBranchLabel"></span></small></h1>
|
||||
</div>
|
||||
<ul id="top-nav-items">
|
||||
<li class="dropdown">
|
||||
@ -33,11 +34,19 @@
|
||||
<ul id="system-settings-entries">
|
||||
<li><b class="settings-subheader">System Settings</b></li>
|
||||
<br/>
|
||||
<li><label for="theme">Theme: </label><select id="theme" name="theme"><option value="theme-default">Default</option></select></li>
|
||||
<li><input id="save_to_disk" name="save_to_disk" type="checkbox"> <label for="save_to_disk">Automatically save to <input id="diskPath" name="diskPath" size="40" disabled></label></li>
|
||||
<li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
|
||||
<li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode <small>(generates images faster, but uses an additional 1 GB of GPU memory)</small></label></li>
|
||||
<li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU <small>(warning: this will be *very* slow)</small></label></li>
|
||||
<li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision <small>(for GPU-only. warning: this will consume more VRAM)</small></label></li>
|
||||
<li>
|
||||
<input id="auto_save_settings" name="auto_save_settings" type="checkbox">
|
||||
<label for="auto_save_settings">Automatically save settings <small>(settings restored on browser load)</small></label>
|
||||
<br/>
|
||||
<button id="configureSettingsSaveBtn">Configure</button>
|
||||
<button id="restoreDefaultSettingsBtn">Restore Defaults</button>
|
||||
</li>
|
||||
<!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
|
||||
<br/>
|
||||
<li><input id="use_beta_channel" name="use_beta_channel" type="checkbox"> <label for="use_beta_channel">🔥Beta channel. Get the latest features immediately (but could be less stable). Please restart the program after changing this.</label></li>
|
||||
@ -55,18 +64,25 @@
|
||||
</div>
|
||||
<div id="editor-inputs">
|
||||
<div id="editor-inputs-prompt" class="row">
|
||||
<label for="prompt">Prompt</label>
|
||||
<label for="prompt"><b>Enter Prompt</b></label> <small>or</small> <button id="promptsFromFileBtn">Load from a file</button>
|
||||
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
|
||||
<small>(or)</small> <button id="promptsFromFileBtn">Load prompts from a file</button> <small>(one prompt per line)</small>
|
||||
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->
|
||||
|
||||
<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">Negative Prompt <small>(optional)</small></label>
|
||||
<div class="collapsible-content">
|
||||
<input id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="editor-inputs-init-image" class="row">
|
||||
<label for="init_image"><b>Initial Image (img2img):</b> (optional) </label> <input id="init_image" name="init_image" type="file" /><br/>
|
||||
<label for="init_image">Initial Image (img2img) <small>(optional)</small> </label> <input id="init_image" name="init_image" type="file" /><br/>
|
||||
|
||||
<div id="init_image_preview_container" class="image_preview_container">
|
||||
<img id="init_image_preview" src="" width="100" height="100" />
|
||||
<button class="init_image_clear image_clear_btn">X</button>
|
||||
<div id="init_image_wrapper">
|
||||
<img id="init_image_preview" src="" />
|
||||
<span id="init_image_size_box"></span>
|
||||
<button class="init_image_clear image_clear_btn">X</button>
|
||||
</div>
|
||||
|
||||
<br/>
|
||||
<input id="enable_mask" name="enable_mask" type="checkbox"> <label for="enable_mask">In-Painting (beta) <small>(select the area which the AI will paint into)</small></label>
|
||||
@ -88,15 +104,16 @@
|
||||
<div id="editor-settings" class="panel-box settings-box">
|
||||
<h4 class="collapsible">Image Settings</h4>
|
||||
<ul id="editor-settings-entries" class="collapsible-content">
|
||||
<li><b class="settings-subheader">Image Settings</b></li>
|
||||
<li class="pl-5"><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
|
||||
<li class="pl-5"><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> (images at once)</li>
|
||||
<li class="pl-5"><label for="stable_diffusion_model">Model:</label>
|
||||
<li><table>
|
||||
<tr><b class="settings-subheader">Image Settings</b></tr>
|
||||
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random</label></td></tr>
|
||||
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
|
||||
<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td>
|
||||
<select id="stable_diffusion_model" name="stable_diffusion_model">
|
||||
<!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
|
||||
</select>
|
||||
</li>
|
||||
<li id="samplerSelection" class="pl-5"><label for="sampler">Sampler:</label>
|
||||
</td></tr>
|
||||
<tr id="samplerSelection" class="pl-5"><td><label for="sampler">Sampler:</label></td><td>
|
||||
<select id="sampler" name="sampler">
|
||||
<option value="plms">plms</option>
|
||||
<option value="ddim">ddim</option>
|
||||
@ -107,8 +124,8 @@
|
||||
<option value="dpm2_a">dpm2_a</option>
|
||||
<option value="lms">lms</option>
|
||||
</select>
|
||||
</li>
|
||||
<li class="pl-5"><label>Image Size: </label>
|
||||
</td></tr>
|
||||
<tr class="pl-5"><td><label>Image Size: </label></td><td>
|
||||
<select id="width" name="width" value="512">
|
||||
<option value="128">128 (*)</option>
|
||||
<option value="192">192</option>
|
||||
@ -153,29 +170,25 @@
|
||||
<option value="2048">2048</option>
|
||||
</select>
|
||||
<label for="height"><small>(height)</small></label>
|
||||
</li>
|
||||
<li class="pl-5"><label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25"></li>
|
||||
<li class="pl-5"><label for="guidance_scale_slider">Guidance Scale:</label> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4"></li>
|
||||
<li class="pl-5"><span id="prompt_strength_container"><label for="prompt_strength_slider">Prompt Strength:</label> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></span></li>
|
||||
<li class="pl-5"><label for="output_format">Output format:</label>
|
||||
</td></tr>
|
||||
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25"></td></tr>
|
||||
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4"></td></tr>
|
||||
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4"><br/></td></tr></span>
|
||||
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
|
||||
<select id="output_format" name="output_format">
|
||||
<option value="jpeg" selected>jpeg</option>
|
||||
<option value="png">png</option>
|
||||
</select>
|
||||
</li>
|
||||
|
||||
<br/>
|
||||
|
||||
<li><b class="settings-subheader">Prompt Settings</b></li>
|
||||
<li class="pl-5"><label for="negative_prompt">Negative Prompt:</label> <input id="negative_prompt" name="negative_prompt" size="55"></li>
|
||||
</td></tr>
|
||||
</li></table>
|
||||
|
||||
<br/>
|
||||
|
||||
<li><b class="settings-subheader">Render Settings</b></li>
|
||||
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview of the image <small>(uses more VRAM, slightly slower image creation)</small></label></li>
|
||||
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slightly slower image creation)</small></label></li>
|
||||
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox" checked> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
|
||||
<li class="pl-5">
|
||||
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale the image to 4x resolution using </label>
|
||||
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
|
||||
<select id="upscale_model" name="upscale_model">
|
||||
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
|
||||
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
|
||||
@ -212,6 +225,17 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="save-settings-config" style="display:none">
|
||||
<div>
|
||||
<span id="save-settings-config-close-btn">X</span>
|
||||
<h1>Save Settings Configuration</h1>
|
||||
<p>Select which settings should be saved and reloaded when restarting the browser</p>
|
||||
<table id="save-settings-config-table">
|
||||
<tr><th>Setting</th><th></th><th>Default value</th></tr>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="line-separator"> </div>
|
||||
|
||||
<div id="footer" class="panel-box">
|
||||
@ -226,13 +250,15 @@
|
||||
</div>
|
||||
</body>
|
||||
|
||||
<script src="media/main.js?v=32"></script>
|
||||
<script src="media/auto-save.js?v=2"></script>
|
||||
<script src="media/main.js?v=37"></script>
|
||||
<script>
|
||||
async function init() {
|
||||
await loadModifiers()
|
||||
await getDiskPath()
|
||||
await getAppConfig()
|
||||
await getModels()
|
||||
await initSettings()
|
||||
|
||||
setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
|
||||
healthCheck()
|
||||
|
ui/media/auto-save.js (new file, 158 lines)
@ -0,0 +1,158 @@
// Saving settings
let saveSettingsCheckbox = document.getElementById("auto_save_settings")
let saveSettingsConfigTable = document.getElementById("save-settings-config-table")
let saveSettingsConfigOverlay = document.getElementById("save-settings-config")

const SETTINGS_KEY = "user_settings"
var SETTINGS_SHOULD_SAVE_MAP = {} // key=id. dict initialized in initSettings
var SETTINGS_VALUES = {} // key=id. dict initialized in initSettings
var SETTINGS_DEFAULTS = {} // key=id. dict initialized in initSettings
var SETTINGS_TO_SAVE = [] // list of elements initialized by initSettings
var SETTINGS_IDS_LIST = [
    "seed",
    "random_seed",
    "num_outputs_total",
    "num_outputs_parallel",
    "stable_diffusion_model",
    "sampler",
    "width",
    "height",
    "num_inference_steps",
    "guidance_scale_slider",
    "prompt_strength_slider",
    "output_format",
    "negative_prompt",
    "stream_image_progress",
    "use_face_correction",
    "use_upscale",
    "show_only_filtered_image",
    "upscale_model",
    "preview-image",
    "modifier-card-size-slider",
    "theme"
]

async function initSettings() {
    SETTINGS_IDS_LIST.forEach(id => SETTINGS_TO_SAVE.push(document.getElementById(id)))
    SETTINGS_TO_SAVE.forEach(element => {
        SETTINGS_SHOULD_SAVE_MAP[element.id] = true
        SETTINGS_DEFAULTS[element.id] = getSetting(element)
        SETTINGS_VALUES[element.id] = getSetting(element)
        element.addEventListener("input", settingChangeHandler)
        element.addEventListener("change", settingChangeHandler)
    })
    loadSettings()
    fillSaveSettingsConfigTable()
}

function getSetting(element) {
    if (element.type == "checkbox") {
        return element.checked
    }
    return element.value
}
function setSetting(element, value) {
    if (getSetting(element) == value) {
        return // no setting necessary
    }
    if (element.type == "checkbox") {
        element.checked = value
    }
    else {
        element.value = value
    }
    element.dispatchEvent(new Event("input"))
    element.dispatchEvent(new Event("change"))
}

function saveSettings() {
    localStorage.setItem(SETTINGS_KEY, JSON.stringify({
        values: SETTINGS_VALUES,
        should_save: SETTINGS_SHOULD_SAVE_MAP
    }))
}


var CURRENTLY_LOADING_SETTINGS = false
function loadSettings() {
    if (!saveSettingsCheckbox.checked) {
        return
    }
    var saved_settings = JSON.parse(localStorage.getItem(SETTINGS_KEY))
    if (saved_settings) {
        var values = saved_settings.values
        var should_save = saved_settings.should_save
        CURRENTLY_LOADING_SETTINGS = true
        SETTINGS_TO_SAVE.forEach(element => {
            if (element.id in values) {
                SETTINGS_SHOULD_SAVE_MAP[element.id] = should_save[element.id]
                SETTINGS_VALUES[element.id] = values[element.id]
                if (SETTINGS_SHOULD_SAVE_MAP[element.id]) {
                    setSetting(element, SETTINGS_VALUES[element.id])
                }
            }
        })
        CURRENTLY_LOADING_SETTINGS = false
    }
    else {
        saveSettings()
    }
}

document.querySelector('#restoreDefaultSettingsBtn').addEventListener('click', loadDefaultSettings)
function loadDefaultSettings() {
    CURRENTLY_LOADING_SETTINGS = true
    SETTINGS_TO_SAVE.forEach(element => {
        SETTINGS_VALUES[element.id] = SETTINGS_DEFAULTS[element.id]
        setSetting(element, SETTINGS_VALUES[element.id])
    })
    CURRENTLY_LOADING_SETTINGS = false
    saveSettings()
}

function settingChangeHandler(event) {
    if (!CURRENTLY_LOADING_SETTINGS) {
        var element = event.target
        var value = getSetting(element)
        if (value != SETTINGS_VALUES[element.id]) {
            SETTINGS_VALUES[element.id] = value
            saveSettings()
        }
    }
}

function fillSaveSettingsConfigTable() {
    SETTINGS_TO_SAVE.forEach(element => {
        var caption = element.id
        var label = document.querySelector(`label[for='${element.id}']`)
        if (label) {
            caption = label.innerText
            var truncate_length = 25
            if (caption.length > truncate_length) {
                caption = caption.substring(0, truncate_length - 3) + "..."
            }
        }
        var default_value = SETTINGS_DEFAULTS[element.id]
        var checkbox_id = `shouldsave_${element.id}`
        var is_checked = SETTINGS_SHOULD_SAVE_MAP[element.id] ? "checked" : ""
        var newrow = `<tr><td><label for="${checkbox_id}">${caption}</label></td><td><input id="${checkbox_id}" name="${checkbox_id}" ${is_checked} type="checkbox" ></td><td><small>(${default_value})</small></td></tr>`
        saveSettingsConfigTable.insertAdjacentHTML("beforeend", newrow)
        var checkbox = document.getElementById(checkbox_id)
        checkbox.addEventListener("input", event => {
            SETTINGS_SHOULD_SAVE_MAP[element.id] = checkbox.checked
            saveSettings()
        })
    })
}

document.getElementById("save-settings-config-close-btn").addEventListener('click', () => {
    saveSettingsConfigOverlay.style.display = 'none'
})
document.getElementById("configureSettingsSaveBtn").addEventListener('click', () => {
    saveSettingsConfigOverlay.style.display = 'block'
})
saveSettingsConfigOverlay.addEventListener('click', (event) => {
    if (event.target.id == saveSettingsConfigOverlay.id) {
        saveSettingsConfigOverlay.style.display = 'none'
    }
})
@ -1,8 +1,161 @@
|
||||
:root {
|
||||
--background-color1: rgb(32, 33, 36); /* main parts of the page */
|
||||
--background-color2: rgb(44, 45, 48); /* main panels */
|
||||
--background-color3: rgb(47, 49, 53);
|
||||
--background-color4: rgb(18, 18, 19); /* settings dropdowns */
|
||||
|
||||
--accent-hue: 266;
|
||||
--accent-lightness: 36%;
|
||||
--accent-lightness-hover: 40%;
|
||||
|
||||
--text-color: #eee;
|
||||
|
||||
--input-text-color: black;
|
||||
--input-background-color: #e9e9ed;
|
||||
--input-border-color: #8f8f9d;
|
||||
|
||||
--button-text-color: var(--input-text-color);
|
||||
--button-color: #e9e9ed;
|
||||
--button-border: 1px solid #8f8f9d;
|
||||
|
||||
/* other */
|
||||
--input-border-radius: 4px;
|
||||
--input-border-size: 1px;
|
||||
--accent-color: hsl(var(--accent-hue), 100%, var(--accent-lightness));
|
||||
--accent-color-hover: hsl(var(--accent-hue), 100%, var(--accent-lightness-hover));
|
||||
--make-image-border: 2px solid hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) - 21%));
|
||||
}
|
||||
|
||||
.theme-light {
|
||||
--background-color1: white;
|
||||
--background-color2: #dddddd;
|
||||
--background-color3: #e7e9eb;
|
||||
--background-color4: #cccccc;
|
||||
|
||||
--text-color: black;
|
||||
|
||||
--input-text-color: black;
|
||||
--input-background-color: #f8f9fa;
|
||||
--input-border-color: grey;
|
||||
}
|
||||
|
||||
.theme-discord {
|
||||
--background-color1: #36393f;
|
||||
--background-color2: #2f3136;
|
||||
--background-color3: #292b2f;
|
||||
--background-color4: #202225;
|
||||
|
||||
--accent-hue: 235;
|
||||
--accent-lightness: 65%;
|
||||
--make-image-border: none;
|
||||
|
||||
--button-color: var(--accent-color);
|
||||
--button-border: none;
|
||||
|
||||
--input-text-color: #ccc;
|
||||
--input-border-size: 2px;
|
||||
--input-background-color: #202225;
|
||||
--input-border-color: var(--input-background-color);
|
||||
}
|
||||
|
||||
.theme-cool-blue {
|
||||
--main-hue: 222;
|
||||
--main-saturation: 18%;
|
||||
--value-base: 19%;
|
||||
--value-step: 3%;
|
||||
--background-color1: hsl(var(--main-hue), var(--main-saturation), var(--value-base));
|
||||
--background-color2: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (1 * var(--value-step))));
|
||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||
|
||||
--accent-hue: 212;
|
||||
--make-image-border: none;
|
||||
|
||||
--button-color: var(--accent-color);
|
||||
--button-border: none;
|
||||
|
||||
--input-border-size: 1px;
|
||||
--input-background-color: var(--background-color3);
|
||||
--input-text-color: #ccc;
|
||||
--input-border-color: var(--background-color4);
|
||||
}
|
||||
|
||||
|
||||
.theme-blurple {
|
||||
--main-hue: 235;
|
||||
--main-saturation: 18%;
|
||||
--value-base: 16%;
|
||||
--value-step: 3%;
|
||||
--background-color1: hsl(var(--main-hue), var(--main-saturation), var(--value-base));
|
||||
--background-color2: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (1 * var(--value-step))));
|
||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||
|
||||
--make-image-border: none;
|
||||
|
||||
--button-color: var(--accent-color);
|
||||
--button-border: none;
|
||||
|
||||
--input-border-size: 1px;
|
||||
--input-background-color: var(--background-color3);
|
||||
--input-text-color: #ccc;
|
||||
--input-border-color: var(--background-color4);
|
||||
}
|
||||
|
||||
.theme-super-dark {
|
||||
--main-hue: 222;
|
||||
--main-saturation: 18%;
|
||||
--value-base: 5%;
|
||||
--value-step: 5%;
|
||||
--background-color1: hsl(var(--main-hue), var(--main-saturation), var(--value-base));
|
||||
--background-color2: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (1 * var(--value-step))));
|
||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (2 * var(--value-step))));
|
||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3 * var(--value-step))));
|
||||
|
||||
--make-image-border: none;
|
||||
|
||||
--button-color: var(--accent-color);
|
||||
--button-border: none;
|
||||
|
||||
--input-border-size: 0px;
|
||||
--input-background-color: var(--background-color3);
|
||||
--input-text-color: #ccc;
|
||||
--input-border-color: var(--background-color4);
|
||||
}
|
||||
|
||||
.theme-wild {
|
||||
--main-hue: 128;
|
||||
--main-saturation: 18%;
|
||||
--value-base: 20%;
|
||||
--value-step: 5%;
|
||||
--background-color1: hsl(var(--main-hue), var(--main-saturation), var(--value-base));
|
||||
--background-color2: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (1 * var(--value-step))));
|
||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||
|
||||
--accent-hue: 212;
|
||||
--make-image-border: none;
|
||||
|
||||
--button-color: var(--accent-color);
|
||||
--button-border: none;
|
||||
|
||||
--input-border-size: 1px;
|
||||
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||
--input-text-color: red;
|
||||
--input-border-color: green;
|
||||
}
|
||||
|
||||
|
||||
* {
|
||||
font-family: Work Sans, Verdana, Geneva, sans-serif;
|
||||
box-sizing: border-box;
|
||||
transition: background 0.5s, color 0.5s, background-color 0.5s;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: Arial, Helvetica, sans-serif;
|
||||
font-size: 11pt;
|
||||
background-color: rgb(32, 33, 36);
|
||||
color: #eee;
|
||||
background-color: var(--background-color1);
|
||||
color: var(--text-color);
|
||||
}
|
||||
a {
|
||||
color: rgb(0, 102, 204);
|
||||
@ -16,12 +169,8 @@ label {
|
||||
#prompt {
|
||||
width: 100%;
|
||||
height: 65pt;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
@media screen and (max-width: 600px) {
|
||||
#prompt {
|
||||
width: 95%;
|
||||
}
|
||||
font-size: 13px;
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
.image_preview_container {
|
||||
/* display: none; */
|
||||
@ -29,7 +178,7 @@ label {
|
||||
}
|
||||
.image_clear_btn {
|
||||
position: absolute;
|
||||
transform: translateX(-50%) translateY(-35%);
|
||||
transform: translate(30%, -30%);
|
||||
background: black;
|
||||
color: white;
|
||||
border: 2pt solid #ccc;
|
||||
@ -41,6 +190,8 @@ label {
|
||||
height: 16pt;
|
||||
font-family: Verdana;
|
||||
font-size: 8pt;
|
||||
top: 0px;
|
||||
right: 0px;
|
||||
}
|
||||
.settings-box ul {
|
||||
font-size: 9pt;
|
||||
@ -71,7 +222,7 @@ label {
|
||||
}
|
||||
.imgSeedLabel {
|
||||
font-size: 0.8em;
|
||||
background-color: rgb(44, 45, 48);
|
||||
background-color: var(--background-color2);
|
||||
border-radius: 3px;
|
||||
padding: 5px;
|
||||
}
|
||||
@ -101,7 +252,7 @@ label {
|
||||
margin-bottom: 7px;
|
||||
}
|
||||
#container {
|
||||
width: 90%;
|
||||
width: 95%;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
}
|
||||
@ -121,6 +272,7 @@ label {
|
||||
}
|
||||
.settings-box label small {
|
||||
color: rgb(153, 153, 153);
|
||||
margin-right: 10px;
|
||||
}
|
||||
#preview {
|
||||
padding: 5px;
|
||||
@ -143,14 +295,14 @@ label {
|
||||
}
|
||||
#makeImage {
|
||||
flex: 0 0 70px;
|
||||
background: rgb(80, 0, 185);
|
||||
border: 2px solid rgb(40, 0, 78);
|
||||
background: var(--accent-color);
|
||||
border: var(--make-image-border);
|
||||
color: rgb(255, 221, 255);
|
||||
width: 100%;
|
||||
height: 30pt;
|
||||
}
|
||||
#makeImage:hover {
|
||||
background: rgb(93, 0, 214);
|
||||
background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
|
||||
}
|
||||
#stopImage {
|
||||
flex: 0 0 70px;
|
||||
@ -167,12 +319,13 @@ label {
|
||||
}
|
||||
.flex-container {
|
||||
display: flex;
|
||||
width: 100%;
|
||||
}
|
||||
.col-50 {
|
||||
flex: 50%;
|
||||
}
|
||||
.col-fixed-10 {
|
||||
flex: 0 0 380pt;
|
||||
flex: 0 0 350pt;
|
||||
}
|
||||
.col-free {
|
||||
flex: 1;
|
||||
@ -194,8 +347,8 @@ label {
|
||||
padding-right: 5px;
|
||||
}
|
||||
.panel-box {
|
||||
background: rgb(44, 45, 48);
|
||||
border: 1px solid rgb(47, 49, 53);
|
||||
background: var(--background-color2);
|
||||
border: 1px solid var(--background-color3);
|
||||
border-radius: 7px;
|
||||
padding: 5px;
|
||||
margin-bottom: 15px;
|
||||
@ -234,18 +387,18 @@ img {
|
||||
height: 8pt;
|
||||
border-radius: 4pt; */
|
||||
font-size: 14pt;
|
||||
color: rgb(128, 87, 0);
|
||||
color: rgb(200, 139, 0);
|
||||
/* background-color: rgb(197, 1, 1); */
|
||||
/* transform: translateY(15%); */
|
||||
display: inline;
|
||||
}
|
||||
#server-status-msg {
|
||||
color: rgb(128, 87, 0);
|
||||
color: rgb(200, 139, 0);
|
||||
padding-left: 2pt;
|
||||
font-size: 10pt;
|
||||
}
|
||||
.preview-prompt {
|
||||
font-size: 16pt;
|
||||
font-size: 13pt;
|
||||
margin-bottom: 10pt;
|
||||
}
|
||||
#coffeeButton {
|
||||
@ -261,6 +414,9 @@ img {
|
||||
.drawing-board-canvas-wrapper {
|
||||
background-size: 100% 100%;
|
||||
}
|
||||
.drawing-board-controls {
|
||||
min-width: 273px;
|
||||
}
|
||||
.drawing-board-control > button {
|
||||
background-color: #eee;
|
||||
border-radius: 3pt;
|
||||
@ -343,7 +499,7 @@ img {
|
||||
padding-right: 2pt;
|
||||
}
|
||||
#community-links li a {
|
||||
color: white;
|
||||
color: var(--text-color);
|
||||
text-decoration: none;
|
||||
}
|
||||
.dropdown {
|
||||
@ -354,8 +510,8 @@ img {
|
||||
position: absolute;
|
||||
z-index: 2;
|
||||
|
||||
background: rgb(18, 18, 19);
|
||||
border: 2px solid rgb(37, 38, 41);
|
||||
background: var(--background-color4);
|
||||
border: 2px solid var(--background-color2);
|
||||
border-radius: 7px;
|
||||
padding: 5px;
|
||||
margin-bottom: 15px;
|
||||
@ -366,7 +522,7 @@ img {
|
||||
}
|
||||
|
||||
.imageTaskContainer {
|
||||
border: 1px solid #333;
|
||||
border: 1px solid var(--background-color2);
|
||||
margin-bottom: 10pt;
|
||||
padding: 5pt;
|
||||
border-radius: 5pt;
|
||||
@ -375,7 +531,7 @@ img {
|
||||
.taskStatusLabel {
|
||||
float: left;
|
||||
font-size: 8pt;
|
||||
background:rgb(44, 45, 48);
|
||||
background:var(--background-color2);
|
||||
border: 1px solid rgb(61, 62, 66);
|
||||
padding: 2pt 4pt;
|
||||
border-radius: 2pt;
|
||||
@ -384,7 +540,12 @@ img {
|
||||
.activeTaskLabel {
|
||||
background:rgb(0, 90, 30);
|
||||
border: 1px solid rgb(0, 75, 19);
|
||||
color:rgb(204, 255, 217)
|
||||
color:rgb(222, 253, 230)
|
||||
}
|
||||
.waitingTaskLabel {
|
||||
background:rgb(128, 89, 0);
|
||||
border: 1px solid rgb(107, 75, 0);
|
||||
color:rgb(255, 242, 211)
|
||||
}
|
||||
.secondaryButton {
|
||||
background: rgb(132, 8, 0);
|
||||
@ -413,4 +574,218 @@ img {
|
||||
}
|
||||
#prompt_from_file {
|
||||
display: none;
|
||||
}
|
||||
#init_image_preview {
|
||||
max-width: 150px;
|
||||
max-height: 150px;
|
||||
object-fit: contain;
|
||||
border-radius: 6px;
|
||||
transition: all 1s ease-in-out;
|
||||
}
|
||||
|
||||
#init_image_preview:hover {
|
||||
max-width: 500px;
|
||||
max-height: 1000px;
|
||||
transition: all 1s 0.5s ease-in-out;
|
||||
}
|
||||
|
||||
#init_image_wrapper {
|
||||
position: relative;
|
||||
width: fit-content;
|
||||
}
|
||||
|
||||
#init_image_size_box {
|
||||
position: absolute;
|
||||
right: 0px;
|
||||
bottom: 3px;
|
||||
padding: 3px;
|
||||
background: black;
|
||||
color: white;
|
||||
text-shadow: 0px 0px 4px black;
|
||||
opacity: 60%;
|
||||
font-size: 12px;
|
||||
border-radius: 6px 0px;
|
||||
}
|
||||
|
||||
#editor-settings-entries table td {
|
||||
padding: 0px;
|
||||
line-height: 28px;
|
||||
}
|
||||
|
||||
#editor-settings-entries table td:first-child {
|
||||
float: right;
|
||||
padding-right: 4px;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
#negative_prompt {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
button,
|
||||
input[type="file"],
|
||||
input[type="checkbox"],
|
||||
select,
|
||||
option {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
input,
|
||||
select,
|
||||
textarea {
|
||||
border-radius: var(--input-border-radius);
|
||||
padding: 4px;
|
||||
accent-color: var(--accent-color);
|
||||
background: var(--input-background-color);
|
||||
border: var(--input-border-size) solid var(--input-border-color);
|
||||
color: var(--input-text-color);
|
||||
font-size: 9pt;
|
||||
}
|
||||
|
||||
input:hover {
|
||||
accent-color: var(--accent-color-hover);
|
||||
}
|
||||
|
||||
input {
|
||||
padding: 4px 6px;
|
||||
}
|
||||
|
||||
input:focus,
|
||||
select:focus,
|
||||
textarea:focus {
|
||||
outline: 2px solid var(--accent-color);
|
||||
}
|
||||
|
||||
input[disabled],
|
||||
select[disabled],
|
||||
textarea[disabled] {
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
input[type="file"] {
|
||||
width: 100%;
|
||||
padding: 2px;
|
||||
}
|
||||
|
||||
button,
|
||||
input::file-selector-button {
|
||||
padding: 2px 4px;
|
||||
border-radius: 4px;
|
||||
background: var(--button-color);
|
||||
color: var(--button-text-color);
|
||||
border: var(--button-border);
|
||||
}
|
||||
|
||||
input::file-selector-button {
|
||||
padding: 0px 4px;
|
||||
height: 19px;
|
||||
}
|
||||
|
||||
/* input[type="range"] {
|
||||
-webkit-appearance: none;
|
||||
appearance: none;
|
||||
margin: 0px;
|
||||
padding: 0px;
|
||||
cursor: pointer;
|
||||
line-height: 29px;
|
||||
}
|
||||
|
||||
input[type="range"]::-webkit-slider-thumb,
|
||||
input[type="range"]::-moz-range-thumb {
|
||||
-webkit-appearance: none;
|
||||
appearance: none;
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
border-radius: 12px;
|
||||
} */
|
||||
|
||||
@media screen and (max-width: 700px) {
|
||||
body {
|
||||
margin: 0px;
|
||||
}
|
||||
#container {
|
||||
margin: 0px;
|
||||
padding: 10px
|
||||
}
|
||||
.flex-container {
|
||||
flex-direction: column;
|
||||
}
|
||||
#preview {
|
||||
margin: 0px;
|
||||
padding: 0px;
|
||||
}
|
||||
#preview .collapsible-content {
|
||||
padding: 0px;
|
||||
}
|
||||
#preview .collapsible-content {
|
||||
padding: 0px;
|
||||
}
|
||||
.imgItem {
|
||||
margin-right: 0px;
|
||||
}
|
||||
.imgItem img {
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
object-fit: contain;
|
||||
}
|
||||
.dropdown-content {
|
||||
width: auto !important;
|
||||
transform: none !important;
|
||||
left: 0px;
|
||||
right: 0px;
|
||||
}
|
||||
}
|
||||
|
||||
/* Auto-Settings Styling */
|
||||
#auto_save_settings:not(:checked) ~ button {
|
||||
display: none;
|
||||
}
|
||||
|
||||
#save-settings-config {
|
||||
position: fixed;
|
||||
background: rgba(32, 33, 36, 50%);
|
||||
top: 0px;
|
||||
left: 0px;
|
||||
width: 100vw;
|
||||
height: 100vh;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
#save-settings-config > div {
|
||||
background: var(--background-color2);
|
||||
max-width: 600px;
|
||||
margin: auto;
|
||||
margin-top: 100px;
|
||||
border-radius: 6px;
|
||||
padding: 30px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#save-settings-config-table {
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
#save-settings-config-table td:first-child,
|
||||
#save-settings-config-table th:first-child {
|
||||
float: right;
|
||||
}
|
||||
|
||||
#save-settings-config-table td:last-child,
|
||||
#save-settings-config-table th:last-child {
|
||||
float: left;
|
||||
}
|
||||
|
||||
#save-settings-config-table td small {
|
||||
color: rgb(153, 153, 153);
|
||||
}
|
||||
|
||||
#save-settings-config-close-btn {
|
||||
float: right;
|
||||
cursor: pointer;
|
||||
padding: 10px;
|
||||
transform: translate(50%, -50%) scaleX(130%);
|
||||
}
|
||||
|
||||
#promptsFromFileBtn {
|
||||
font-size: 9pt;
|
||||
}
ui/media/main.js (963 lines changed; diff suppressed because it is too large)
@ -197,6 +197,34 @@ def load_model_real_esrgan(real_esrgan_to_use):
|
||||
|
||||
print('loaded ', real_esrgan_to_use, 'to', device, 'precision', precision)
|
||||
|
||||
def get_base_path(disk_path, session_id, prompt, img_id, ext, suffix=None):
|
||||
if disk_path is None: return None
|
||||
if session_id is None: return None
|
||||
if ext is None: raise Exception('Missing ext')
|
||||
|
||||
session_out_path = os.path.join(disk_path, session_id)
|
||||
os.makedirs(session_out_path, exist_ok=True)
|
||||
|
||||
prompt_flattened = filename_regex.sub('_', prompt)[:50]
|
||||
|
||||
if suffix is not None:
|
||||
return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}_{suffix}.{ext}")
|
||||
return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}.{ext}")
|
||||
|
||||
def apply_filters(filter_name, image_data):
|
||||
print(f'Applying filter {filter_name}...')
|
||||
gc()
|
||||
|
||||
if filter_name == 'gfpgan':
|
||||
_, _, output = model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
|
||||
image_data = output[:,:,::-1]
|
||||
|
||||
if filter_name == 'real_esrgan':
|
||||
output, _ = model_real_esrgan.enhance(image_data[:,:,::-1])
|
||||
image_data = output[:,:,::-1]
|
||||
|
||||
return image_data
|
||||
|
||||
def mk_img(req: Request):
|
||||
try:
|
||||
yield from do_mk_img(req)
|
||||
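The new get_base_path() helper above centralises how saved images and metadata files are named. As a quick illustration (not part of the commit), the sketch below mirrors its composition logic; the regex is an assumed stand-in for the module-level filename_regex, and os.makedirs() is omitted so the sketch has no side effects.

import os
import re

filename_regex = re.compile('[^a-zA-Z0-9]')  # assumed stand-in for runtime's filename_regex

def sketch_base_path(disk_path, session_id, prompt, img_id, ext, suffix=None):
    # same composition as get_base_path(), without creating the session folder
    session_out_path = os.path.join(disk_path, session_id)
    prompt_flattened = filename_regex.sub('_', prompt)[:50]
    if suffix is not None:
        return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}_{suffix}.{ext}")
    return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}.{ext}")

print(sketch_base_path("outputs", "session-1", "a photo of a cat", "a1b2c3d4", "jpeg"))
# outputs/session-1/a_photo_of_a_cat_a1b2c3d4.jpeg
print(sketch_base_path("outputs", "session-1", "a photo of a cat", "a1b2c3d4", "jpeg", "GFPGANv1.3"))
# outputs/session-1/a_photo_of_a_cat_a1b2c3d4_GFPGANv1.3.jpeg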
@ -283,23 +311,13 @@ def do_mk_img(req: Request):
|
||||
|
||||
opt_prompt = req.prompt
|
||||
opt_seed = req.seed
|
||||
opt_n_samples = req.num_outputs
|
||||
opt_n_iter = 1
|
||||
opt_scale = req.guidance_scale
|
||||
opt_C = 4
|
||||
opt_H = req.height
|
||||
opt_W = req.width
|
||||
opt_f = 8
|
||||
opt_ddim_steps = req.num_inference_steps
|
||||
opt_ddim_eta = 0.0
|
||||
opt_strength = req.prompt_strength
|
||||
opt_save_to_disk_path = req.save_to_disk_path
|
||||
opt_init_img = req.init_image
|
||||
opt_use_face_correction = req.use_face_correction
|
||||
opt_use_upscale = req.use_upscale
|
||||
opt_show_only_filtered = req.show_only_filtered_image
|
||||
opt_format = req.output_format
|
||||
opt_sampler_name = req.sampler
|
||||
img_id = base64.b64encode(int(time.time()).to_bytes(8, 'big')).decode() # Generate unique ID based on time.
|
||||
img_id = img_id.translate({43:None, 47:None, 61:None})[-8:] # Remove + / = and keep last 8 chars.
|
||||
|
||||
print(req.to_string(), '\n device', device)
|
||||
|
||||
@ -307,7 +325,7 @@ def do_mk_img(req: Request):
|
||||
|
||||
seed_everything(opt_seed)
|
||||
|
||||
batch_size = opt_n_samples
|
||||
batch_size = req.num_outputs
|
||||
prompt = opt_prompt
|
||||
assert prompt is not None
|
||||
data = [batch_size * [prompt]]
|
||||
@ -327,7 +345,7 @@ def do_mk_img(req: Request):
|
||||
else:
|
||||
handler = _img2img
|
||||
|
||||
init_image = load_img(req.init_image, opt_W, opt_H)
|
||||
init_image = load_img(req.init_image, req.width, req.height)
|
||||
init_image = init_image.to(device)
|
||||
|
||||
if device != "cpu" and precision == "autocast":
|
||||
@ -339,7 +357,7 @@ def do_mk_img(req: Request):
|
||||
init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image)) # move to latent space
|
||||
|
||||
if req.mask is not None:
|
||||
mask = load_mask(req.mask, opt_W, opt_H, init_latent.shape[2], init_latent.shape[3], True).to(device)
|
||||
mask = load_mask(req.mask, req.width, req.height, init_latent.shape[2], init_latent.shape[3], True).to(device)
|
||||
mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
|
||||
mask = repeat(mask, '1 ... -> b ...', b=batch_size)
|
||||
|
||||
@ -348,12 +366,12 @@ def do_mk_img(req: Request):
|
||||
|
||||
move_fs_to_cpu()
|
||||
|
||||
assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
|
||||
t_enc = int(opt_strength * opt_ddim_steps)
|
||||
assert 0. <= req.prompt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
|
||||
t_enc = int(req.prompt_strength * req.num_inference_steps)
|
||||
print(f"target t_enc is {t_enc} steps")
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
session_out_path = os.path.join(opt_save_to_disk_path, req.session_id)
|
||||
if req.save_to_disk_path is not None:
|
||||
session_out_path = os.path.join(req.save_to_disk_path, req.session_id)
|
||||
os.makedirs(session_out_path, exist_ok=True)
|
||||
else:
|
||||
session_out_path = None
|
||||
@ -366,7 +384,7 @@ def do_mk_img(req: Request):
|
||||
with precision_scope("cuda"):
|
||||
modelCS.to(device)
|
||||
uc = None
|
||||
if opt_scale != 1.0:
|
||||
if req.guidance_scale != 1.0:
|
||||
uc = modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
|
||||
if isinstance(prompts, tuple):
|
||||
prompts = list(prompts)
|
||||
@ -387,14 +405,18 @@ def do_mk_img(req: Request):
|
||||
modelFS.to(device)
|
||||
|
||||
partial_x_samples = None
|
||||
last_callback_time = -1
|
||||
def img_callback(x_samples, i):
|
||||
nonlocal partial_x_samples
|
||||
nonlocal partial_x_samples, last_callback_time
|
||||
|
||||
partial_x_samples = x_samples
|
||||
|
||||
if req.stream_progress_updates:
|
||||
n_steps = opt_ddim_steps if req.init_image is None else t_enc
|
||||
progress = {"step": i, "total_steps": n_steps}
|
||||
n_steps = req.num_inference_steps if req.init_image is None else t_enc
|
||||
step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
|
||||
last_callback_time = time.time()
|
||||
|
||||
progress = {"step": i, "total_steps": n_steps, "step_time": step_time}
|
||||
|
||||
if req.stream_image_progress and i % 5 == 0:
|
||||
partial_images = []
|
||||
@ -425,9 +447,9 @@ def do_mk_img(req: Request):
|
||||
# run the handler
|
||||
try:
|
||||
if handler == _txt2img:
|
||||
x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, opt_sampler_name)
|
||||
x_samples = _txt2img(req.width, req.height, req.num_outputs, req.num_inference_steps, req.guidance_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, req.sampler)
|
||||
else:
|
||||
x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask)
|
||||
x_samples = _img2img(init_latent, t_enc, batch_size, req.guidance_scale, c, uc, req.num_inference_steps, opt_ddim_eta, opt_seed, img_callback, mask)
|
||||
|
||||
yield from x_samples
|
||||
|
||||
@ -447,69 +469,49 @@ def do_mk_img(req: Request):
|
||||
x_sample = x_sample.astype(np.uint8)
|
||||
img = Image.fromarray(x_sample)
|
||||
|
||||
has_filters = (opt_use_face_correction is not None and opt_use_face_correction.startswith('GFPGAN')) or \
|
||||
(opt_use_upscale is not None and opt_use_upscale.startswith('RealESRGAN'))
|
||||
has_filters = (req.use_face_correction is not None and req.use_face_correction.startswith('GFPGAN')) or \
|
||||
(req.use_upscale is not None and req.use_upscale.startswith('RealESRGAN'))
|
||||
|
||||
return_orig_img = not has_filters or not opt_show_only_filtered
|
||||
return_orig_img = not has_filters or not req.show_only_filtered_image
|
||||
|
||||
if stop_processing:
|
||||
return_orig_img = True
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
prompt_flattened = filename_regex.sub('_', prompts[0])
|
||||
prompt_flattened = prompt_flattened[:50]
|
||||
|
||||
img_id = str(uuid.uuid4())[-8:]
|
||||
|
||||
file_path = f"{prompt_flattened}_{img_id}"
|
||||
img_out_path = os.path.join(session_out_path, f"{file_path}.{opt_format}")
|
||||
meta_out_path = os.path.join(session_out_path, f"{file_path}.txt")
|
||||
|
||||
if req.save_to_disk_path is not None:
|
||||
if return_orig_img:
|
||||
img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format)
|
||||
save_image(img, img_out_path)
|
||||
|
||||
save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file)
|
||||
meta_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, 'txt')
|
||||
save_metadata(meta_out_path, req, prompts[0], opt_seed)
|
||||
|
||||
if return_orig_img:
|
||||
img_data = img_to_base64_str(img, opt_format)
|
||||
img_data = img_to_base64_str(img, req.output_format)
|
||||
res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
|
||||
res.images.append(res_image_orig)
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
if req.save_to_disk_path is not None:
|
||||
res_image_orig.path_abs = img_out_path
|
||||
|
||||
del img
|
||||
|
||||
if has_filters and not stop_processing:
|
||||
print('Applying filters..')
|
||||
|
||||
gc()
|
||||
filters_applied = []
|
||||
|
||||
if opt_use_face_correction:
|
||||
_, _, output = model_gfpgan.enhance(x_sample[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
|
||||
x_sample = output[:,:,::-1]
|
||||
filters_applied.append(opt_use_face_correction)
|
||||
|
||||
if opt_use_upscale:
|
||||
output, _ = model_real_esrgan.enhance(x_sample[:,:,::-1])
|
||||
x_sample = output[:,:,::-1]
|
||||
filters_applied.append(opt_use_upscale)
|
||||
|
||||
filtered_image = Image.fromarray(x_sample)
|
||||
|
||||
filtered_img_data = img_to_base64_str(filtered_image, opt_format)
|
||||
res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed)
|
||||
res.images.append(res_image_filtered)
|
||||
|
||||
filters_applied = "_".join(filters_applied)
|
||||
|
||||
if opt_save_to_disk_path is not None:
|
||||
filtered_img_out_path = os.path.join(session_out_path, f"{file_path}_{filters_applied}.{opt_format}")
|
||||
save_image(filtered_image, filtered_img_out_path)
|
||||
res_image_filtered.path_abs = filtered_img_out_path
|
||||
|
||||
del filtered_image
|
||||
if req.use_face_correction:
|
||||
x_sample = apply_filters('gfpgan', x_sample)
|
||||
filters_applied.append(req.use_face_correction)
|
||||
if req.use_upscale:
|
||||
x_sample = apply_filters('real_esrgan', x_sample)
|
||||
filters_applied.append(req.use_upscale)
|
||||
if (len(filters_applied) > 0):
|
||||
filtered_image = Image.fromarray(x_sample)
|
||||
filtered_img_data = img_to_base64_str(filtered_image, req.output_format)
|
||||
response_image = ResponseImage(data=filtered_img_data, seed=req.seed)
|
||||
res.images.append(response_image)
|
||||
if req.save_to_disk_path is not None:
|
||||
filtered_img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format, "_".join(filters_applied))
|
||||
save_image(filtered_image, filtered_img_out_path)
|
||||
response_image.path_abs = filtered_img_out_path
|
||||
del filtered_image
|
||||
|
||||
seeds += str(opt_seed) + ","
|
||||
opt_seed += 1
|
||||
@ -529,11 +531,22 @@ def save_image(img, img_out_path):
|
||||
except:
|
||||
print('could not save the file', traceback.format_exc())
|
||||
|
||||
def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file):
|
||||
metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}"
|
||||
|
||||
def save_metadata(meta_out_path, req, prompt, opt_seed):
|
||||
metadata = f"""{prompt}
|
||||
Width: {req.width}
|
||||
Height: {req.height}
|
||||
Seed: {opt_seed}
|
||||
Steps: {req.num_inference_steps}
|
||||
Guidance Scale: {req.guidance_scale}
|
||||
Prompt Strength: {req.prompt_strength}
|
||||
Use Face Correction: {req.use_face_correction}
|
||||
Use Upscaling: {req.use_upscale}
|
||||
Sampler: {req.sampler}
|
||||
Negative Prompt: {req.negative_prompt}
|
||||
Stable Diffusion Model: {req.use_stable_diffusion_model + '.ckpt'}
|
||||
"""
|
||||
try:
|
||||
with open(meta_out_path, 'w') as f:
|
||||
with open(meta_out_path, 'w', encoding='utf-8') as f:
|
||||
f.write(metadata)
|
||||
except:
|
||||
print('could not save the file', traceback.format_exc())
|
||||
|
ui/sd_internal/task_manager.py (new file, 299 lines)
@ -0,0 +1,299 @@
|
||||
import json
|
||||
import traceback
|
||||
|
||||
TASK_TTL = 15 * 60 # Discard last session's task timeout
|
||||
|
||||
import queue, threading, time
|
||||
from typing import Any, Generator, Hashable, Optional, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
from sd_internal import Request, Response
|
||||
|
||||
class SymbolClass(type): # Print nicely formatted Symbol names.
|
||||
def __repr__(self): return self.__qualname__
|
||||
def __str__(self): return self.__name__
|
||||
class Symbol(metaclass=SymbolClass): pass
|
||||
|
||||
class ServerStates:
|
||||
class Init(Symbol): pass
|
||||
class LoadingModel(Symbol): pass
|
||||
class Online(Symbol): pass
|
||||
class Rendering(Symbol): pass
|
||||
class Unavailable(Symbol): pass
|
||||
|
||||
class RenderTask(): # Task with output queue and completion lock.
|
||||
def __init__(self, req: Request):
|
||||
self.request: Request = req # Initial Request
|
||||
self.response: Any = None # Copy of the last response
|
||||
self.temp_images:[] = [None] * req.num_outputs * (1 if req.show_only_filtered_image else 2)
|
||||
self.error: Exception = None
|
||||
self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
|
||||
self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
|
||||
async def read_buffer_generator(self):
|
||||
try:
|
||||
while not self.buffer_queue.empty():
|
||||
res = self.buffer_queue.get(block=False)
|
||||
self.buffer_queue.task_done()
|
||||
yield res
|
||||
except queue.Empty as e: yield
|
||||
|
||||
# defaults from https://huggingface.co/blog/stable_diffusion
|
||||
class ImageRequest(BaseModel):
|
||||
session_id: str = "session"
|
||||
prompt: str = ""
|
||||
negative_prompt: str = ""
|
||||
init_image: str = None # base64
|
||||
mask: str = None # base64
|
||||
num_outputs: int = 1
|
||||
num_inference_steps: int = 50
|
||||
guidance_scale: float = 7.5
|
||||
width: int = 512
|
||||
height: int = 512
|
||||
seed: int = 42
|
||||
prompt_strength: float = 0.8
|
||||
sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
||||
# allow_nsfw: bool = False
|
||||
save_to_disk_path: str = None
|
||||
turbo: bool = True
|
||||
use_cpu: bool = False
|
||||
use_full_precision: bool = False
|
||||
use_face_correction: str = None # or "GFPGANv1.3"
|
||||
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
|
||||
use_stable_diffusion_model: str = "sd-v1-4"
|
||||
show_only_filtered_image: bool = False
|
||||
output_format: str = "jpeg" # or "png"
|
||||
|
||||
stream_progress_updates: bool = False
|
||||
stream_image_progress: bool = False
|
||||
|
||||
# Temporary cache to allow to query tasks results for a short time after they are completed.
|
||||
class TaskCache():
|
||||
def __init__(self):
|
||||
self._base = dict()
|
||||
self._lock: threading.Lock = threading.RLock()
|
||||
def _get_ttl_time(self, ttl: int) -> int:
|
||||
return int(time.time()) + ttl
|
||||
def _is_expired(self, timestamp: int) -> bool:
|
||||
return int(time.time()) >= timestamp
|
||||
def clean(self) -> None:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.clean failed to acquire lock within timeout.')
|
||||
try:
|
||||
# Create a list of expired keys to delete
|
||||
to_delete = []
|
||||
for key in self._base:
|
||||
ttl, _ = self._base[key]
|
||||
if self._is_expired(ttl):
|
||||
to_delete.append(key)
|
||||
# Remove Items
|
||||
for key in to_delete:
|
||||
del self._base[key]
|
||||
print(f'Session {key} expired. Data removed.')
|
||||
finally:
|
||||
self._lock.release()
|
||||
def clear(self) -> None:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.clear failed to acquire lock within timeout.')
|
||||
try: self._base.clear()
|
||||
finally: self._lock.release()
|
||||
def delete(self, key: Hashable) -> bool:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.delete failed to acquire lock within timeout.')
|
||||
try:
|
||||
if key not in self._base:
|
||||
return False
|
||||
del self._base[key]
|
||||
return True
|
||||
finally:
|
||||
self._lock.release()
|
||||
def keep(self, key: Hashable, ttl: int) -> bool:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.keep failed to acquire lock within timeout.')
|
||||
try:
|
||||
if key in self._base:
|
||||
_, value = self._base.get(key)
|
||||
self._base[key] = (self._get_ttl_time(ttl), value)
|
||||
return True
|
||||
return False
|
||||
finally:
|
||||
self._lock.release()
|
||||
def put(self, key: Hashable, value: Any, ttl: int) -> bool:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.put failed to acquire lock within timeout.')
|
||||
try:
|
||||
self._base[key] = (
|
||||
self._get_ttl_time(ttl), value
|
||||
)
|
||||
except Exception as e:
|
||||
print(str(e))
|
||||
print(traceback.format_exc())
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
finally:
|
||||
self._lock.release()
|
||||
def tryGet(self, key: Hashable) -> Any:
|
||||
if not self._lock.acquire(blocking=True, timeout=10): raise Exception('TaskCache.tryGet failed to acquire lock within timeout.')
|
||||
try:
|
||||
ttl, value = self._base.get(key, (None, None))
|
||||
if ttl is not None and self._is_expired(ttl):
|
||||
print(f'Session {key} expired. Discarding data.')
|
||||
self.delete(key)
|
||||
return None
|
||||
return value
|
||||
finally:
|
||||
self._lock.release()
|
||||
|
||||
current_state = ServerStates.Init
|
||||
current_state_error:Exception = None
|
||||
current_model_path = None
|
||||
tasks_queue = queue.Queue()
|
||||
task_cache = TaskCache()
|
||||
default_model_to_load = None
|
||||
|
||||
def preload_model(file_path=None):
|
||||
global current_state, current_state_error, current_model_path
|
||||
if file_path == None:
|
||||
file_path = default_model_to_load
|
||||
if file_path == current_model_path:
|
||||
return
|
||||
current_state = ServerStates.LoadingModel
|
||||
try:
|
||||
from . import runtime
|
||||
runtime.load_model_ckpt(ckpt_to_use=file_path)
|
||||
current_model_path = file_path
|
||||
current_state_error = None
|
||||
current_state = ServerStates.Online
|
||||
except Exception as e:
|
||||
current_model_path = None
|
||||
current_state_error = e
|
||||
current_state = ServerStates.Unavailable
|
||||
print(traceback.format_exc())
|
||||
|
||||
def thread_render():
    global current_state, current_state_error, current_model_path
    from . import runtime
    current_state = ServerStates.Online
    preload_model()
    while True:
        task_cache.clean()
        if isinstance(current_state_error, SystemExit):
            current_state = ServerStates.Unavailable
            return
        task = None
        try:
            task = tasks_queue.get(timeout=1)
        except queue.Empty as e:
            if isinstance(current_state_error, SystemExit):
                current_state = ServerStates.Unavailable
                return
            else: continue
        #if current_model_path != task.request.use_stable_diffusion_model:
        #    preload_model(task.request.use_stable_diffusion_model)
        if current_state_error:
            task.error = current_state_error
            continue
        print(f'Session {task.request.session_id} starting task {id(task)}')
        try:
            task.lock.acquire(blocking=False)
            res = runtime.mk_img(task.request)
            if current_model_path == task.request.use_stable_diffusion_model:
                current_state = ServerStates.Rendering
            else:
                current_state = ServerStates.LoadingModel
        except Exception as e:
            task.error = e
            task.lock.release()
            tasks_queue.task_done()
            print(traceback.format_exc())
            continue
        dataQueue = None
        if task.request.stream_progress_updates:
            dataQueue = task.buffer_queue
        for result in res:
            if current_state == ServerStates.LoadingModel:
                current_state = ServerStates.Rendering
                current_model_path = task.request.use_stable_diffusion_model
            if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
                runtime.stop_processing = True
                if isinstance(current_state_error, StopAsyncIteration):
                    task.error = current_state_error
                    current_state_error = None
                    print(f'Session {task.request.session_id} sent cancel signal for task {id(task)}')
            if dataQueue:
                dataQueue.put(result)
            if isinstance(result, str):
                result = json.loads(result)
                task.response = result
                if 'output' in result:
                    for out_obj in result['output']:
                        if 'path' in out_obj:
                            img_id = out_obj['path'][out_obj['path'].rindex('/') + 1:]
                            task.temp_images[int(img_id)] = runtime.temp_images[out_obj['path'][11:]]
                        elif 'data' in out_obj:
                            task.temp_images[result['output'].index(out_obj)] = out_obj['data']
                task_cache.keep(task.request.session_id, TASK_TTL)
        # Task completed
        task.lock.release()
        tasks_queue.task_done()
        task_cache.keep(task.request.session_id, TASK_TTL)
        if isinstance(task.error, StopAsyncIteration):
            print(f'Session {task.request.session_id} task {id(task)} cancelled!')
        elif task.error is not None:
            print(f'Session {task.request.session_id} task {id(task)} failed!')
        else:
            print(f'Session {task.request.session_id} task {id(task)} completed.')
        current_state = ServerStates.Online

render_thread = threading.Thread(target=thread_render)

def start_render_thread():
    # Start Rendering Thread
    render_thread.daemon = True
    render_thread.start()

def shutdown_event(): # Signal render thread to close on shutdown
    global current_state_error
    current_state_error = SystemExit('Application shutting down.')

def render(req : ImageRequest):
    if not render_thread.is_alive(): # Render thread is dead
        raise ChildProcessError('Rendering thread has died.')
    # Alive, check if task in cache
    task = task_cache.tryGet(req.session_id)
    if task and not task.response and not task.error and not task.lock.locked():
        # Unstarted task pending, deny queueing more than one.
        raise ConnectionRefusedError(f'Session {req.session_id} has an already pending task.')
    #
    from . import runtime
    r = Request()
    r.session_id = req.session_id
    r.prompt = req.prompt
    r.negative_prompt = req.negative_prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale: str = req.use_upscale
    r.use_face_correction = req.use_face_correction
    r.use_stable_diffusion_model = req.use_stable_diffusion_model
    r.show_only_filtered_image = req.show_only_filtered_image
    r.output_format = req.output_format

    r.stream_progress_updates = True # the underlying implementation only supports streaming
    r.stream_image_progress = req.stream_image_progress

    if not req.stream_progress_updates:
        r.stream_image_progress = False

    new_task = RenderTask(r)
    if task_cache.put(r.session_id, new_task, TASK_TTL):
        tasks_queue.put(new_task, block=True, timeout=30)
        return new_task
    raise RuntimeError('Failed to add task to cache.')
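
For orientation, here is a minimal sketch of how a caller inside the backend might drive this task manager directly. It assumes start_render_thread() has already been called by the server; the session id, prompt, and sleep-based polling are illustrative placeholders, not part of this commit.

import time
from sd_internal import task_manager

req = task_manager.ImageRequest(session_id='demo-session', prompt='a watercolor fox')   # placeholder values
task = task_manager.render(req)   # raises ConnectionRefusedError if this session already has a pending task
while not task.response and not task.error:
    time.sleep(1)                 # the render thread fills task.buffer_queue / task.response as it runs
print(task.error or task.response)
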
294
ui/server.py
@ -14,90 +14,32 @@ CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))

OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
TASK_TTL = 15 * 60 # Discard last session's task timeout

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse, StreamingResponse
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
import logging
import queue, threading, time
from typing import Any, Generator, Hashable, Optional, Union

from sd_internal import Request, Response
from sd_internal import Request, Response, task_manager

app = FastAPI()

model_loaded = False
model_is_loading = False

modifiers_cache = None
outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)

# don't show access log entries for URLs that start with the given prefix
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/modifier-thumbnails']
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/image', '/modifier-thumbnails']

NOCACHE_HEADERS={"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
app.mount('/media', StaticFiles(directory=os.path.join(SD_UI_DIR, 'media/')), name="media")

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    use_stable_diffusion_model: str = "sd-v1-4"
    show_only_filtered_image: bool = False
    output_format: str = "jpeg" # or "png"

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

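To make the request schema concrete, an equivalent request body built from the defaults above could look like the following sketch; the prompt and sampler values are placeholders chosen from the options listed in the comments, not values taken from this commit.

example_request = {
    'session_id': 'session',
    'prompt': 'a painting of a lighthouse at dawn',   # placeholder prompt
    'negative_prompt': '',
    'num_outputs': 1,
    'num_inference_steps': 50,
    'guidance_scale': 7.5,
    'width': 512,
    'height': 512,
    'seed': 42,
    'prompt_strength': 0.8,
    'sampler': 'plms',                                 # one of the samplers listed above
    'turbo': True,
    'use_cpu': False,
    'use_full_precision': False,
    'use_stable_diffusion_model': 'sd-v1-4',
    'output_format': 'jpeg',
    'stream_progress_updates': True,
    'stream_image_progress': False,
}
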
class SetAppConfigRequest(BaseModel):
    update_branch: str = "main"

@app.get('/')
def read_root():
    headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'), headers=headers)

@app.get('/ping')
async def ping():
    global model_loaded, model_is_loading

    try:
        if model_loaded:
            return {'OK'}

        if model_is_loading:
            return {'ERROR'}

        model_is_loading = True

        from sd_internal import runtime

        runtime.load_model_ckpt(ckpt_to_use=get_initial_model_to_load())

        model_loaded = True
        model_is_loading = False

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

# needs to support the legacy installations
def get_initial_model_to_load():
    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
@ -114,7 +56,6 @@ def get_initial_model_to_load():
        ckpt_to_use = model_path
    else:
        print('Could not find the configured custom model at:', model_path + '.ckpt', '. Using the default one:', ckpt_to_use + '.ckpt')

    return ckpt_to_use

def resolve_model_to_use(model_name):
@ -126,92 +67,110 @@ def resolve_model_to_use(model_name):
        model_path = legacy_model_path
    else:
        model_path = os.path.join(MODELS_DIR, 'stable-diffusion', model_name)

    return model_path

@app.on_event("shutdown")
def shutdown_event(): # Signal render thread to close on shutdown
    task_manager.current_state_error = SystemExit('Application shutting down.')

@app.get('/')
def read_root():
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'), headers=NOCACHE_HEADERS)

@app.get('/ping') # Get server and optionally session status.
def ping(session_id:str=None):
    if not task_manager.render_thread.is_alive(): # Render thread is dead.
        if task_manager.current_state_error: raise HTTPException(status_code=500, detail=str(current_state_error))
        raise HTTPException(status_code=500, detail='Render thread is dead.')
    if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration): raise HTTPException(status_code=500, detail=str(current_state_error))
    # Alive
    response = {'status': str(task_manager.current_state)}
    if session_id:
        task = task_manager.task_cache.tryGet(session_id)
        if task:
            response['task'] = id(task)
            if task.lock.locked():
                response['session'] = 'running'
            elif isinstance(task.error, StopAsyncIteration):
                response['session'] = 'stopped'
            elif task.error:
                response['session'] = 'error'
            elif not task.buffer_queue.empty():
                response['session'] = 'buffer'
            elif task.response:
                response['session'] = 'completed'
            else:
                response['session'] = 'pending'
    return JSONResponse(response, headers=NOCACHE_HEADERS)

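A quick way to exercise the richer /ping response is a small polling client. The host, port, and session id below are assumptions (the UI opens localhost:9000 elsewhere in this file), and the snippet relies on the third-party requests package; it is an illustration, not part of the commit.

import requests

info = requests.get('http://localhost:9000/ping', params={'session_id': 'demo-session'}).json()
print(info['status'])        # overall server state
print(info.get('session'))   # 'pending', 'running', 'buffer', 'completed', 'stopped' or 'error' when a task exists
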
def save_model_to_config(model_name):
    config = getConfig()
    if 'model' not in config:
        config['model'] = {}

    config['model']['stable-diffusion'] = model_name

    setConfig(config)

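For reference, the model entry that save_model_to_config() persists into scripts/config.json has roughly this shape; other keys (such as the update branch written by setAppConfig) may also be present, so treat this as a sketch rather than the full file.

config = {
    'model': {
        'stable-diffusion': 'sd-v1-4'   # last model selected in the UI
    }
}
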
@app.post('/image')
def image(req : ImageRequest):
    from sd_internal import runtime

    r = Request()
    r.session_id = req.session_id
    r.prompt = req.prompt
    r.negative_prompt = req.negative_prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale: str = req.use_upscale
    r.use_face_correction = req.use_face_correction
    r.show_only_filtered_image = req.show_only_filtered_image
    r.output_format = req.output_format

    r.stream_progress_updates = True # the underlying implementation only supports streaming
    r.stream_image_progress = req.stream_image_progress

    r.use_stable_diffusion_model = resolve_model_to_use(req.use_stable_diffusion_model)

    save_model_to_config(req.use_stable_diffusion_model)

@app.post('/render')
def render(req : task_manager.ImageRequest):
    try:
    if not req.stream_progress_updates:
        r.stream_image_progress = False

    res = runtime.mk_img(r)

    if req.stream_progress_updates:
        return StreamingResponse(res, media_type='application/json')
    else: # compatibility mode: buffer the streaming responses, and return the last one
        last_result = None

        for result in res:
            last_result = result

        return json.loads(last_result)
        save_model_to_config(req.use_stable_diffusion_model)
        req.use_stable_diffusion_model = resolve_model_to_use(req.use_stable_diffusion_model)
        new_task = task_manager.render(req)
        response = {
            'status': str(task_manager.current_state),
            'queue': task_manager.tasks_queue.qsize(),
            'stream': f'/image/stream/{req.session_id}/{id(new_task)}',
            'task': id(new_task)
        }
        return JSONResponse(response, headers=NOCACHE_HEADERS)
    except ChildProcessError as e: # Render thread is dead
        raise HTTPException(status_code=500, detail=f'Rendering thread has died.') # HTTP500 Internal Server Error
    except ConnectionRefusedError as e: # Unstarted task pending, deny queueing more than one.
        raise HTTPException(status_code=503, detail=f'Session {req.session_id} has an already pending task.') # HTTP503 Service Unavailable
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))
        raise HTTPException(status_code=500, detail=str(e))

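End to end, a client of the new API posts to /render and then follows the stream URL returned in the response; a real client would also retry on HTTP 425 until the task has actually started. Host, port, and prompt below are illustrative, and the snippet assumes the requests package; it is not part of this commit.

import requests

payload = {'session_id': 'demo-session', 'prompt': 'a watercolor fox', 'stream_progress_updates': True}
info = requests.post('http://localhost:9000/render', json=payload).json()
print('task', info['task'], 'queue size', info['queue'])

# The response points at a per-task stream; each chunk is a JSON fragment produced by the render thread.
with requests.get('http://localhost:9000' + info['stream'], stream=True) as resp:
    for chunk in resp.iter_content(chunk_size=None):
        print('progress chunk:', chunk[:80])
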
@app.get('/image/stream/{session_id:str}/{task_id:int}')
def stream(session_id:str, task_id:int):
    #TODO Move to WebSockets ??
    task = task_manager.task_cache.tryGet(session_id)
    if not task: raise HTTPException(status_code=410, detail='No request received.') # HTTP410 Gone
    if (id(task) != task_id): raise HTTPException(status_code=409, detail=f'Wrong task id received. Expected:{id(task)}, Received:{task_id}') # HTTP409 Conflict
    if task.buffer_queue.empty() and not task.lock.locked():
        if task.response:
            #print(f'Session {session_id} sending cached response')
            return JSONResponse(task.response, headers=NOCACHE_HEADERS)
        raise HTTPException(status_code=425, detail='Too Early, task not started yet.') # HTTP425 Too Early
    #print(f'Session {session_id} opened live render stream {id(task.buffer_queue)}')
    return StreamingResponse(task.read_buffer_generator(), media_type='application/json')

@app.get('/image/stop')
def stop():
    try:
        if model_is_loading:
            return {'ERROR'}

        from sd_internal import runtime
        runtime.stop_processing = True

def stop(session_id:str=None):
    if not session_id:
        if task_manager.current_state == task_manager.ServerStates.Online or task_manager.current_state == task_manager.ServerStates.Unavailable:
            raise HTTPException(status_code=409, detail='Not currently running any tasks.') # HTTP409 Conflict
        task_manager.current_state_error = StopAsyncIteration('')
        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))
    task = task_manager.task_cache.tryGet(session_id)
    if not task: raise HTTPException(status_code=404, detail=f'Session {session_id} has no active task.') # HTTP404 Not Found
    if isinstance(task.error, StopAsyncIteration): raise HTTPException(status_code=409, detail=f'Session {session_id} task is already stopped.') # HTTP409 Conflict
    task.error = StopAsyncIteration('')
    return {'OK'}

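Cancellation works the same way over HTTP. The sketch below asks the server to stop one session's in-flight task; the requests package and localhost:9000 are assumed, and the session id is a placeholder.

import requests

requests.get('http://localhost:9000/image/stop', params={'session_id': 'demo-session'})
# Omitting session_id signals a global StopAsyncIteration to whichever task is currently rendering.
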
@app.get('/image/tmp/{session_id}/{img_id}')
@app.get('/image/tmp/{session_id}/{img_id:int}')
def get_image(session_id, img_id):
    from sd_internal import runtime
    buf = runtime.temp_images[session_id + '/' + img_id]
    buf.seek(0)
    return StreamingResponse(buf, media_type='image/jpeg')
    task = task_manager.task_cache.tryGet(session_id)
    if not task: raise HTTPException(status_code=410, detail=f'Session {session_id} has not submitted a task.') # HTTP410 Gone
    if not task.temp_images[img_id]: raise HTTPException(status_code=425, detail='Too Early, task data is not available yet.') # HTTP425 Too Early
    try:
        img_data = task.temp_images[img_id]
        if isinstance(img_data, str):
            return img_data
        img_data.seek(0)
        return StreamingResponse(img_data, media_type='image/jpeg')
    except KeyError as e:
        raise HTTPException(status_code=500, detail=str(e))

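While a task is rendering, intermediate previews can be fetched from the temp-image route shown above. The session id, output index, and output filename here are placeholders, and the requests package is assumed; this is an illustration only.

import requests

resp = requests.get('http://localhost:9000/image/tmp/demo-session/0')
if resp.ok:
    with open('preview.jpeg', 'wb') as f:
        f.write(resp.content)   # the route streams image/jpeg data held in task.temp_images
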
@app.post('/app_config')
async def setAppConfig(req : SetAppConfigRequest):
@ -228,56 +187,41 @@ async def setAppConfig(req : SetAppConfigRequest):
        config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
        config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')

        with open(config_json_path, 'w') as f:
        with open(config_json_path, 'w', encoding='utf-8') as f:
            f.write(config_json_str)

        with open(config_bat_path, 'w') as f:
        with open(config_bat_path, 'w', encoding='utf-8') as f:
            f.write(config_bat_str)

        with open(config_sh_path, 'w') as f:
        with open(config_sh_path, 'w', encoding='utf-8') as f:
            f.write(config_sh_str)

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/app_config')
def getAppConfig():
def getConfig(default_val={}):
    try:
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')

        if not os.path.exists(config_json_path):
            return HTTPException(status_code=500, detail="No config file")

        with open(config_json_path, 'r') as f:
            return default_val
        with open(config_json_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

def getConfig():
    try:
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')

        if not os.path.exists(config_json_path):
            return {}

        with open(config_json_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        return {}
        return default_val

def setConfig(config):
    try:
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')

        with open(config_json_path, 'w') as f:
        with open(config_json_path, 'w', encoding='utf-8') as f:
            return json.dump(config, f)
    except:
        print(str(e))
        print(traceback.format_exc())

@app.get('/models')
def getModels():
    models = {
        'active': {
@ -307,14 +251,21 @@ def getModels():

    return models

@app.get('/modifiers.json')
def read_modifiers():
    headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
    return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=headers)

@app.get('/output_dir')
def read_home_dir():
    return {outpath}
@app.get('/get/{key:path}')
def read_web_data(key:str=None):
    if not key: # /get without parameters, stable-diffusion easter egg.
        raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot
    elif key == 'app_config':
        config = getConfig(default_val=None)
        if config is None:
            raise HTTPException(status_code=500, detail="Config file is missing or unreadable")
        return JSONResponse(config, headers=NOCACHE_HEADERS)
    elif key == 'models':
        return JSONResponse(getModels(), headers=NOCACHE_HEADERS)
    elif key == 'modifiers': return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)
    elif key == 'output_dir': return JSONResponse({ 'output_dir': outpath }, headers=NOCACHE_HEADERS)
    else:
        raise HTTPException(status_code=404, detail=f'Request for unknown {key}') # HTTP404 Not Found

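The consolidated /get/{key} route replaces the older single-purpose endpoints above. A few example reads, assuming the requests package and a local server on port 9000 (illustrative, not part of the commit):

import requests

cfg     = requests.get('http://localhost:9000/get/app_config').json()
models  = requests.get('http://localhost:9000/get/models').json()
out_dir = requests.get('http://localhost:9000/get/output_dir').json()['output_dir']
mods    = requests.get('http://localhost:9000/get/modifiers').json()   # modifiers.json served as a file
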
# don't log certain requests
class LogSuppressFilter(logging.Filter):
@ -323,10 +274,11 @@ class LogSuppressFilter(logging.Filter):
        for prefix in ACCESS_LOG_SUPPRESS_PATH_PREFIXES:
            if path.find(prefix) != -1:
                return False

        return True

logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())

task_manager.default_model_to_load = get_initial_model_to_load()
task_manager.start_render_thread()

# start the browser ui
import webbrowser; webbrowser.open('http://localhost:9000')