Merge branch 'beta' into Mouse-wheel-behavior-fixes

cmdr2 2022-11-30 16:22:45 +05:30 committed by GitHub
commit 7c50b8bf94
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 616 additions and 387 deletions

27
3rd-PARTY-LICENSES Normal file
View File

@ -0,0 +1,27 @@
jquery-confirm
==============
https://craftpip.github.io/jquery-confirm/
jquery-confirm is licensed under the MIT license:
The MIT License (MIT)
Copyright (c) 2019 Boniface Pereira
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -19,8 +19,15 @@
- Configuration to prevent the browser from opening on startup
- Lots of minor bug fixes
- A `What's New?` tab in the UI
- Ask for confirmation before clearing the results pane or stopping a render task. The dialog can be skipped by holding down the shift key while clicking on the button.
- Show the network addresses of the server in the system settings dialog
### Detailed changelog
* 2.4.17 - 30 Nov 2022 - Scroll to generated image. Thanks @patriceac
* 2.4.17 - 30 Nov 2022 - Show the network addresses of the server in the system settings dialog. Thanks @JeLuf
* 2.4.17 - 30 Nov 2022 - Fix a bug where GFPGAN wouldn't work properly when multiple GPUs tried to run it at the same time. Thanks @madrang
* 2.4.17 - 30 Nov 2022 - Confirm before stopping or clearing all the tasks. Thanks @JeLuf
* 2.4.16 - 29 Nov 2022 - Bug fixes for SD 2.0 - remove the need for patching, and default to the SD 1.4 model if an SD 2.0 model is selected while running in SD 1.4 mode.
* 2.4.15 - 25 Nov 2022 - Experimental support for SD 2.0. Uses lots of memory, not optimized, probably GPU-only.
* 2.4.14 - 22 Nov 2022 - Change the backend to a custom fork of Stable Diffusion
* 2.4.13 - 21 Nov 2022 - Change the modifier weight via mouse wheel, drag to reorder selected modifiers, and some more modifier-related fixes. Thanks @patriceac

View File

@ -42,13 +42,9 @@ if NOT DEFINED test_sd2 set test_sd2=N
if "%test_sd2%" == "N" (
@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
@call git apply --whitespace=warn ..\ui\sd_internal\ddim_callback.patch
)
if "%test_sd2%" == "Y" (
@call git -c advice.detachedHead=false checkout 6e2f82187f8ecc4ea59ac37dc239cfcc78038f6d
@call git apply ..\ui\sd_internal\ddim_callback_sd2.patch
@call git -c advice.detachedHead=false checkout 5d647c5459f4cd790672512222bc41903c01bb71
)
@cd ..
@ -66,8 +62,6 @@ if NOT DEFINED test_sd2 set test_sd2=N
@cd stable-diffusion
@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
@call git apply --whitespace=warn ..\ui\sd_internal\ddim_callback.patch
@cd ..
)

8
scripts/on_sd_start.sh Normal file → Executable file
View File

@ -37,12 +37,8 @@ if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/insta
if [ "$test_sd2" == "N" ]; then
git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
git apply --whitespace=warn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
elif [ "$test_sd2" == "Y" ]; then
git -c advice.detachedHead=false checkout 992f111312afa9ec1a01beaa9733cb9728f5acd3
git apply --whitespace=warn ../ui/sd_internal/ddim_callback_sd2.patch || fail "sd2 ddim patch failed"
git -c advice.detachedHead=false checkout 5d647c5459f4cd790672512222bc41903c01bb71
fi
cd ..
@ -58,8 +54,6 @@ else
cd stable-diffusion
git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a
git apply --whitespace=warn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
cd ..
fi

View File

@ -3,6 +3,7 @@
<head>
<title>Stable Diffusion UI</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="theme-color" content="#673AB6">
<link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
<link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
<link rel="stylesheet" href="/media/css/fonts.css">
@ -12,7 +13,10 @@
<link rel="stylesheet" href="/media/css/modifier-thumbnails.css">
<link rel="stylesheet" href="/media/css/fontawesome-all.min.css">
<link rel="stylesheet" href="/media/css/drawingboard.min.css">
<link rel="stylesheet" href="/media/css//jquery-confirm.min.css">
<link rel="manifest" href="/media/manifest.webmanifest">
<script src="/media/js/jquery-3.6.1.min.js"></script>
<script src="/media/js/jquery-confirm.min.js"></script>
<script src="/media/js/drawingboard.min.js"></script>
<script src="/media/js/marked.min.js"></script>
</head>
@ -22,7 +26,7 @@
<div id="logo">
<h1>
Stable Diffusion UI
<small>v2.4.15 <span id="updateBranchLabel"></span></small>
<small>v2.4.17 <span id="updateBranchLabel"></span></small>
</h1>
</div>
<div id="server-status">
@ -250,8 +254,17 @@
<br/><br/>
<div>
<h3><i class="fa fa-microchip icon"></i> System Info</h3>
<div id="system-info"></div>
<div id="system-info">
<table>
<tr><td><label>Processor:</label></td><td id="system-info-cpu" class="value"></td></tr>
<tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
<tr><td></td><td>&nbsp;</td></tr>
<tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
<tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
</table>
</div>
</div>
</div>
</div>
<div id="tab-content-about" class="tab-content">
@ -348,7 +361,7 @@ async function init() {
await getAppConfig()
await loadModifiers()
await loadUIPlugins()
await getDevices()
await getSystemInfo()
setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
healthCheck()

9
ui/media/css/jquery-confirm.min.css vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -210,7 +210,7 @@ code {
}
.collapsible-content {
display: block;
padding-left: 15px;
padding-left: 10px;
}
.collapsible-content h5 {
padding: 5pt 0pt;
@ -658,11 +658,15 @@ input::file-selector-button {
opacity: 1;
}
/* MOBILE SUPPORT */
@media screen and (max-width: 700px) {
/* Small screens */
@media screen and (max-width: 1265px) {
#top-nav {
flex-direction: column;
}
}
/* MOBILE SUPPORT */
@media screen and (max-width: 700px) {
body {
margin: 0px;
}
@ -712,7 +716,7 @@ input::file-selector-button {
padding-right: 0px;
}
#server-status {
display: none;
top: 75%;
}
.popup > div {
padding-left: 5px !important;
@ -730,6 +734,15 @@ input::file-selector-button {
}
}
@media screen and (max-width: 500px) {
#server-status #server-status-msg {
display: none;
}
#server-status:hover #server-status-msg {
display: inline;
}
}
@media (min-width: 700px) {
/* #editor {
max-width: 480px;
@ -997,8 +1010,17 @@ button:hover {
button:active {
transition-duration: 0.1s;
background-color: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 24%));
position: relative;
top: 1px;
left: 1px;
}
button#save-system-settings-btn {
padding: 4pt 8pt;
}
#ip-info a {
color:var(--text-color)
}
#ip-info div {
line-height: 200%;
}

View File

@ -30,6 +30,9 @@
--primary-button-border: none;
--input-switch-padding: 1px;
--input-height: 18px;
/* Main theme color, hex color fallback. */
--theme-color-fallback: #673AB6;
}
.theme-light {
@ -44,6 +47,8 @@
--input-text-color: black;
--input-background-color: #f8f9fa;
--input-border-color: grey;
--theme-color-fallback: #aaaaaa;
}
.theme-discord {
@ -58,6 +63,8 @@
--input-border-size: 2px;
--input-background-color: #202225;
--input-border-color: var(--input-background-color);
--theme-color-fallback: #202225;
}
.theme-cool-blue {
@ -71,8 +78,10 @@
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
--input-background-color: var(--background-color3);
--accent-hue: 212;
--theme-color-fallback: #0056b8;
}
@ -87,6 +96,8 @@
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
--input-background-color: var(--background-color3);
--theme-color-fallback: #5300b8;
}
.theme-super-dark {
@ -101,6 +112,8 @@
--input-background-color: var(--background-color3);
--input-border-size: 0px;
--theme-color-fallback: #000000;
}
.theme-wild {
@ -117,8 +130,8 @@
--input-border-size: 1px;
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
--input-text-color: red;
--input-border-color: green;
--input-text-color: #FF0000;
--input-border-color: #005E05;
}
.theme-gnomie {
@ -136,6 +149,8 @@
--input-background-color: #2a2a2a;
--input-border-size: 0px;
--input-border-color: var(--input-background-color);
--theme-color-fallback: #2168bf;
}
.theme-gnomie .panel-box {

View File

@ -35,6 +35,7 @@ const SETTINGS_IDS_LIST = [
"sound_toggle",
"turbo",
"use_full_precision",
"confirm_dangerous_actions",
"auto_save_settings"
]
@ -55,6 +56,9 @@ async function initSettings() {
if (!element) {
console.error(`Missing settings element ${id}`)
}
if (id in SETTINGS) { // don't create it again
return
}
SETTINGS[id] = {
key: id,
element: element,

View File

@ -192,9 +192,9 @@ const TASK_MAPPING = {
parse: (val) => val
},
numOutputsParallel: { name: 'Parallel Images',
setUI: (numOutputsParallel) => {
numOutputsParallelField.value = numOutputsParallel
num_outputs: { name: 'Parallel Images',
setUI: (num_outputs) => {
numOutputsParallelField.value = num_outputs
},
readUI: () => parseInt(numOutputsParallelField.value),
parse: (val) => val
@ -328,6 +328,7 @@ function getModelPath(filename, extensions)
filename = filename.slice(0, filename.length - ext.length)
}
})
return filename
}
const TASK_TEXT_MAPPING = {

View File

@ -90,9 +90,7 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
if (activeTags.map(x => trimModifiers(x.name)).includes(trimModifiers(modifierName))) {
// remove modifier from active array
activeTags = activeTags.filter(x => trimModifiers(x.name) != trimModifiers(modifierName))
modifierCard.classList.remove(activeCardClass)
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
toggleCardState(modifierCard, false)
} else {
// add modifier to active array
activeTags.push({
@ -101,10 +99,7 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
'originElement': modifierCard,
'previews': modifierPreviews
})
modifierCard.classList.add(activeCardClass)
modifierCard.querySelector('.modifier-card-image-overlay').innerText = '-'
toggleCardState(modifierCard, true)
}
refreshTagsList()
@ -226,8 +221,7 @@ function refreshTagsList() {
let idx = activeTags.indexOf(tag)
if (idx !== -1 && activeTags[idx].originElement !== undefined) {
activeTags[idx].originElement.classList.remove(activeCardClass)
activeTags[idx].originElement.querySelector('.modifier-card-image-overlay').innerText = '+'
toggleCardState(activeTags[idx].originElement, false)
activeTags.splice(idx, 1)
refreshTagsList()
@ -240,6 +234,16 @@ function refreshTagsList() {
editorModifierTagsList.appendChild(brk)
}
function toggleCardState(card, makeActive) {
if (makeActive) {
card.classList.add(activeCardClass)
card.querySelector('.modifier-card-image-overlay').innerText = '-'
} else {
card.classList.remove(activeCardClass)
card.querySelector('.modifier-card-image-overlay').innerText = '+'
}
}
function changePreviewImages(val) {
const previewImages = document.querySelectorAll('.modifier-card-image-container img')

10
ui/media/js/jquery-confirm.min.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -138,6 +138,33 @@ function isServerAvailable() {
}
}
// shiftOrConfirm(e, prompt, fn)
// e : MouseEvent
// prompt : Text to be shown as prompt. Should be a question to which "yes" is a good answer.
// fn : function to be called if the user confirms the dialog or has the shift key pressed
//
// If the user had the shift key pressed while clicking, the function fn will be executed.
// If the setting "confirm_dangerous_actions" in the system settings is disabled, the function
// fn will be executed.
// Otherwise, a confirmation dialog is shown. If the user confirms, the function fn will also
// be executed.
function shiftOrConfirm(e, prompt, fn) {
e.stopPropagation()
if (e.shiftKey || !confirmDangerousActionsField.checked) {
fn(e)
} else {
$.confirm({ theme: 'supervan',
title: prompt,
content: 'Tip: To skip this dialog, use shift-click or disable the setting "Confirm dangerous actions" in the system settings.',
buttons: {
yes: () => { fn(e) },
cancel: () => {}
}
});
}
}
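// Usage sketch (illustrative, not part of this commit): any dangerous click handler
// can be routed through shiftOrConfirm; shift-click, or disabling the
// "Confirm dangerous actions" setting, then skips the dialog. The button and the
// callback here are hypothetical parameters, not names from this codebase.
function attachDangerousClickHandler(button, question, onConfirm) {
    button.addEventListener('click', (e) => { shiftOrConfirm(e, question, onConfirm) })
}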
function logMsg(msg, level, outputMsg) {
if (outputMsg.hasChildNodes()) {
outputMsg.appendChild(document.createElement('br'))
@ -169,34 +196,6 @@ function playSound() {
})
}
}
function setSystemInfo(devices) {
let cpu = devices.all.cpu.name
let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
let activeGPUs = Object.keys(devices.active)
function ID_TO_TEXT(d) {
let info = devices.all[d]
if ("mem_free" in info && "mem_total" in info) {
return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
} else {
return `${info.name} <small>(${d}) (no memory info)</small>`
}
}
allGPUs = allGPUs.map(ID_TO_TEXT)
activeGPUs = activeGPUs.map(ID_TO_TEXT)
let systemInfo = `
<table>
<tr><td><label>Processor:</label></td><td class="value">${cpu}</td></tr>
<tr><td><label>Compatible Graphics Cards (all):</label></td><td class="value">${allGPUs.join('</br>')}</td></tr>
<tr><td></td><td>&nbsp;</td></tr>
<tr><td><label>Used for rendering 🔥:</label></td><td class="value">${activeGPUs.join('</br>')}</td></tr>
</table>`
let systemInfoEl = document.querySelector('#system-info')
systemInfoEl.innerHTML = systemInfo
}
async function healthCheck() {
try {
@ -231,7 +230,7 @@ async function healthCheck() {
break
}
if (serverState.devices) {
setSystemInfo(serverState.devices)
setDeviceInfo(serverState.devices)
}
serverState.time = Date.now()
} catch (e) {
@ -887,8 +886,7 @@ function createTask(task) {
task['progressBar'] = taskEntry.querySelector('.progress-bar')
task['stopTask'] = taskEntry.querySelector('.stopTask')
task['stopTask'].addEventListener('click', async function(e) {
e.stopPropagation()
task['stopTask'].addEventListener('click', (e) => { shiftOrConfirm(e, "Are you sure? Should this task be stopped?", async function(e) {
if (task['isProcessing']) {
task.isProcessing = false
task.progressBar.classList.remove("active")
@ -903,9 +901,9 @@ function createTask(task) {
taskQueue.splice(idx, 1)
}
taskEntry.remove()
removeTask(taskEntry)
}
})
})})
task['useSettings'] = taskEntry.querySelector('.useSettings')
task['useSettings'].addEventListener('click', function(e) {
@ -934,10 +932,10 @@ function getPrompts() {
prompts = prompts.filter(prompt => prompt !== '')
if (activeTags.length > 0) {
const promptTags = activeTags.map(x => x.name).join(", ")
prompts = prompts.map((prompt) => `${prompt}, ${promptTags}`)
const promptTags = activeTags.map(x => x.name).join(", ")
prompts = prompts.map((prompt) => `${prompt}, ${promptTags}`)
}
let promptsToMake = applySetOperator(prompts)
promptsToMake = applyPermuteOperator(promptsToMake)
@ -1047,21 +1045,25 @@ async function stopAllTasks() {
}
}
clearAllPreviewsBtn.addEventListener('click', async function() {
function removeTask(taskToRemove) {
taskToRemove.remove()
if (document.querySelector('.imageTaskContainer') === null) {
previewTools.style.display = 'none'
initialText.style.display = 'block'
}
}
clearAllPreviewsBtn.addEventListener('click', (e) => { shiftOrConfirm(e, "Are you sure? Remove all results and tasks from the results pane?", async function() {
await stopAllTasks()
let taskEntries = document.querySelectorAll('.imageTaskContainer')
taskEntries.forEach(task => {
task.remove()
})
taskEntries.forEach(removeTask)
})})
previewTools.style.display = 'none'
initialText.style.display = 'block'
})
stopImageBtn.addEventListener('click', async function() {
stopImageBtn.addEventListener('click', (e) => { shiftOrConfirm(e, "Are you sure? Do you want to stop all the tasks?", async function(e) {
await stopAllTasks()
})
})})
widthField.addEventListener('change', onDimensionChange)
heightField.addEventListener('change', onDimensionChange)

View File

@ -5,9 +5,9 @@
*/
var ParameterType = {
checkbox: "checkbox",
select: "select",
select_multiple: "select_multiple",
custom: "custom",
select: "select",
select_multiple: "select_multiple",
custom: "custom",
};
/**
@ -23,174 +23,182 @@
/** @type {Array.<Parameter>} */
var PARAMETERS = [
{
id: "theme",
type: ParameterType.select,
label: "Theme",
default: "theme-default",
note: "customize the look and feel of the ui",
options: [ // Note: options expanded dynamically
{
value: "theme-default",
label: "Default"
}
],
icon: "fa-palette"
},
{
id: "save_to_disk",
type: ParameterType.checkbox,
label: "Auto-Save Images",
note: "automatically saves images to the specified location",
icon: "fa-download",
default: false,
},
{
id: "diskPath",
type: ParameterType.custom,
label: "Save Location",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
}
},
{
id: "sound_toggle",
type: ParameterType.checkbox,
label: "Enable Sound",
note: "plays a sound on task completion",
icon: "fa-volume-low",
default: true,
},
{
id: "ui_open_browser_on_start",
type: ParameterType.checkbox,
label: "Open browser on startup",
note: "starts the default browser on startup",
icon: "fa-window-restore",
default: true,
},
{
id: "turbo",
type: ParameterType.checkbox,
label: "Turbo Mode",
note: "generates images faster, but uses an additional 1 GB of GPU memory",
icon: "fa-forward",
default: true,
},
{
id: "use_cpu",
type: ParameterType.checkbox,
label: "Use CPU (not GPU)",
note: "warning: this will be *very* slow",
icon: "fa-microchip",
default: false,
},
{
id: "auto_pick_gpus",
type: ParameterType.checkbox,
label: "Automatically pick the GPUs (experimental)",
default: false,
},
{
id: "use_gpus",
type: ParameterType.select_multiple,
label: "GPUs to use (experimental)",
note: "to process in parallel",
default: false,
},
{
id: "use_full_precision",
type: ParameterType.checkbox,
label: "Use Full Precision",
note: "for GPU-only. warning: this will consume more VRAM",
icon: "fa-crosshairs",
default: false,
},
{
id: "auto_save_settings",
type: ParameterType.checkbox,
label: "Auto-Save Settings",
note: "restores settings on browser load",
icon: "fa-gear",
default: true,
},
{
id: "listen_to_network",
type: ParameterType.checkbox,
label: "Make Stable Diffusion available on your network",
note: "Other devices on your network can access this web page",
icon: "fa-network-wired",
default: true,
},
{
id: "listen_port",
type: ParameterType.custom,
label: "Network port",
note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
icon: "fa-anchor",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
}
},
{
id: "test_sd2",
type: ParameterType.checkbox,
label: "Test SD 2.0",
note: "Experimental! High memory usage! GPU-only! Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
{
id: "use_beta_channel",
type: ParameterType.checkbox,
label: "Beta channel",
note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
{
id: "theme",
type: ParameterType.select,
label: "Theme",
default: "theme-default",
note: "customize the look and feel of the ui",
options: [ // Note: options expanded dynamically
{
value: "theme-default",
label: "Default"
}
],
icon: "fa-palette"
},
{
id: "save_to_disk",
type: ParameterType.checkbox,
label: "Auto-Save Images",
note: "automatically saves images to the specified location",
icon: "fa-download",
default: false,
},
{
id: "diskPath",
type: ParameterType.custom,
label: "Save Location",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
}
},
{
id: "sound_toggle",
type: ParameterType.checkbox,
label: "Enable Sound",
note: "plays a sound on task completion",
icon: "fa-volume-low",
default: true,
},
{
id: "ui_open_browser_on_start",
type: ParameterType.checkbox,
label: "Open browser on startup",
note: "starts the default browser on startup",
icon: "fa-window-restore",
default: true,
},
{
id: "turbo",
type: ParameterType.checkbox,
label: "Turbo Mode",
note: "generates images faster, but uses an additional 1 GB of GPU memory",
icon: "fa-forward",
default: true,
},
{
id: "use_cpu",
type: ParameterType.checkbox,
label: "Use CPU (not GPU)",
note: "warning: this will be *very* slow",
icon: "fa-microchip",
default: false,
},
{
id: "auto_pick_gpus",
type: ParameterType.checkbox,
label: "Automatically pick the GPUs (experimental)",
default: false,
},
{
id: "use_gpus",
type: ParameterType.select_multiple,
label: "GPUs to use (experimental)",
note: "to process in parallel",
default: false,
},
{
id: "use_full_precision",
type: ParameterType.checkbox,
label: "Use Full Precision",
note: "for GPU-only. warning: this will consume more VRAM",
icon: "fa-crosshairs",
default: false,
},
{
id: "auto_save_settings",
type: ParameterType.checkbox,
label: "Auto-Save Settings",
note: "restores settings on browser load",
icon: "fa-gear",
default: true,
},
{
id: "confirm_dangerous_actions",
type: ParameterType.checkbox,
label: "Confirm dangerous actions",
note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
icon: "fa-check-double",
default: true,
},
{
id: "listen_to_network",
type: ParameterType.checkbox,
label: "Make Stable Diffusion available on your network",
note: "Other devices on your network can access this web page",
icon: "fa-network-wired",
default: true,
},
{
id: "listen_port",
type: ParameterType.custom,
label: "Network port",
note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
icon: "fa-anchor",
render: (parameter) => {
return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
}
},
{
id: "test_sd2",
type: ParameterType.checkbox,
label: "Test SD 2.0",
note: "Experimental! High memory usage! GPU-only! Not the final version! Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
{
id: "use_beta_channel",
type: ParameterType.checkbox,
label: "Beta channel",
note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
icon: "fa-fire",
default: false,
},
];
function getParameterSettingsEntry(id) {
let parameter = PARAMETERS.filter(p => p.id === id)
if (parameter.length === 0) {
return
}
return parameter[0].settingsEntry
let parameter = PARAMETERS.filter(p => p.id === id)
if (parameter.length === 0) {
return
}
return parameter[0].settingsEntry
}
function getParameterElement(parameter) {
switch (parameter.type) {
case ParameterType.checkbox:
var is_checked = parameter.default ? " checked" : "";
return `<input id="${parameter.id}" name="${parameter.id}"${is_checked} type="checkbox">`
case ParameterType.select:
case ParameterType.select_multiple:
var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
case ParameterType.custom:
return parameter.render(parameter)
default:
console.error(`Invalid type for parameter ${parameter.id}`);
return "ERROR: Invalid Type"
}
switch (parameter.type) {
case ParameterType.checkbox:
var is_checked = parameter.default ? " checked" : "";
return `<input id="${parameter.id}" name="${parameter.id}"${is_checked} type="checkbox">`
case ParameterType.select:
case ParameterType.select_multiple:
var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
case ParameterType.custom:
return parameter.render(parameter)
default:
console.error(`Invalid type for parameter ${parameter.id}`);
return "ERROR: Invalid Type"
}
}
let parametersTable = document.querySelector("#system-settings .parameters-table")
/* fill in the system settings popup table */
function initParameters() {
PARAMETERS.forEach(parameter => {
var element = getParameterElement(parameter)
var note = parameter.note ? `<small>${parameter.note}</small>` : "";
var icon = parameter.icon ? `<i class="fa ${parameter.icon}"></i>` : "";
var newrow = document.createElement('div')
newrow.innerHTML = `
<div>${icon}</div>
<div><label for="${parameter.id}">${parameter.label}</label>${note}</div>
<div>${element}</div>`
parametersTable.appendChild(newrow)
parameter.settingsEntry = newrow
})
PARAMETERS.forEach(parameter => {
var element = getParameterElement(parameter)
var note = parameter.note ? `<small>${parameter.note}</small>` : "";
var icon = parameter.icon ? `<i class="fa ${parameter.icon}"></i>` : "";
var newrow = document.createElement('div')
newrow.innerHTML = `
<div>${icon}</div>
<div><label for="${parameter.id}">${parameter.label}</label>${note}</div>
<div>${element}</div>`
parametersTable.appendChild(newrow)
parameter.settingsEntry = newrow
})
}
initParameters()
@ -207,9 +215,11 @@ let listenPortField = document.querySelector("#listen_port")
let testSD2Field = document.querySelector("#test_sd2")
let useBetaChannelField = document.querySelector("#use_beta_channel")
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
let saveSettingsBtn = document.querySelector('#save-system-settings-btn')
async function changeAppConfig(configDelta) {
try {
let res = await fetch('/app_config', {
@ -242,12 +252,15 @@ async function getAppConfig() {
if ('test_sd2' in config) {
testSD2Field.checked = config['test_sd2']
}
if (config.net && config.net.listen_to_network === false) {
listenToNetworkField.checked = false
}
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
let testSD2SettingEntry = getParameterSettingsEntry('test_sd2')
testSD2SettingEntry.style.display = (config.update_branch === 'beta' ? '' : 'none')
if (config.net && config.net.listen_to_network === false) {
listenToNetworkField.checked = false
}
if (config.net && config.net.listen_port !== undefined) {
listenPortField.value = config.net.listen_port
}
console.log('get config status response', config)
} catch (e) {
@ -275,7 +288,6 @@ function getCurrentRenderDeviceSelection() {
useCPUField.addEventListener('click', function() {
let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
console.log("hello", this.checked);
if (this.checked) {
gpuSettingEntry.style.display = 'none'
autoPickGPUSettingEntry.style.display = 'none'
@ -325,14 +337,45 @@ async function getDiskPath() {
}
}
async function getDevices() {
function setDeviceInfo(devices) {
let cpu = devices.all.cpu.name
let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
let activeGPUs = Object.keys(devices.active)
function ID_TO_TEXT(d) {
let info = devices.all[d]
if ("mem_free" in info && "mem_total" in info) {
return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
} else {
return `${info.name} <small>(${d}) (no memory info)</small>`
}
}
allGPUs = allGPUs.map(ID_TO_TEXT)
activeGPUs = activeGPUs.map(ID_TO_TEXT)
let systemInfoEl = document.querySelector('#system-info')
systemInfoEl.querySelector('#system-info-cpu').innerText = cpu
systemInfoEl.querySelector('#system-info-gpus-all').innerHTML = allGPUs.join('</br>')
systemInfoEl.querySelector('#system-info-rendering-devices').innerHTML = activeGPUs.join('</br>')
}
function setHostInfo(hosts) {
let port = listenPortField.value
hosts = hosts.map(addr => `http://${addr}:${port}/`).map(url => `<div><a href="${url}">${url}</a></div>`)
document.querySelector('#system-info-server-hosts').innerHTML = hosts.join('')
}
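// Example trace (illustrative, not part of this commit): with listenPortField.value
// set to '9000' and hosts = ['localhost', '192.168.1.5'], the markup written into
// #system-info-server-hosts would be:
//   <div><a href="http://localhost:9000/">http://localhost:9000/</a></div>
//   <div><a href="http://192.168.1.5:9000/">http://192.168.1.5:9000/</a></div>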
async function getSystemInfo() {
try {
let res = await fetch('/get/devices')
let res = await fetch('/get/system_info')
if (res.status === 200) {
res = await res.json()
let devices = res['devices']
let hosts = res['hosts']
let allDeviceIds = Object.keys(res['all']).filter(d => d !== 'cpu')
let activeDeviceIds = Object.keys(res['active']).filter(d => d !== 'cpu')
let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')
if (activeDeviceIds.length === 0) {
useCPUField.checked = true
@ -350,11 +393,11 @@ async function getDevices() {
useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
}
autoPickGPUsField.checked = (res['config'] === 'auto')
autoPickGPUsField.checked = (devices['config'] === 'auto')
useGPUsField.innerHTML = ''
allDeviceIds.forEach(device => {
let deviceName = res['all'][device]['name']
let deviceName = devices['all'][device]['name']
let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
})
@ -365,6 +408,9 @@ async function getDevices() {
} else {
$('#use_gpus').val(activeDeviceIds)
}
setDeviceInfo(devices)
setHostInfo(hosts)
}
} catch (e) {
console.log('error fetching devices', e)
@ -372,23 +418,23 @@ async function getDevices() {
}
saveSettingsBtn.addEventListener('click', function() {
let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')
if (listenPortField.value == '') {
alert('The network port field must not be empty.')
} else if (listenPortField.value<1 || listenPortField.value>65535) {
alert('The network port must be a number from 1 to 65535')
} else {
changeAppConfig({
'render_devices': getCurrentRenderDeviceSelection(),
'update_branch': updateBranch,
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
'listen_to_network': listenToNetworkField.checked,
'listen_port': listenPortField.value,
'test_sd2': testSD2Field.checked
})
}
if (listenPortField.value == '') {
alert('The network port field must not be empty.')
} else if (listenPortField.value<1 || listenPortField.value>65535) {
alert('The network port must be a number from 1 to 65535')
} else {
changeAppConfig({
'render_devices': getCurrentRenderDeviceSelection(),
'update_branch': updateBranch,
'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
'listen_to_network': listenToNetworkField.checked,
'listen_port': listenPortField.value,
'test_sd2': testSD2Field.checked
})
}
saveSettingsBtn.classList.add('active')
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
saveSettingsBtn.classList.add('active')
asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
})

View File

@ -60,6 +60,7 @@ function themeFieldChanged() {
body.style = "";
var theme = THEMES.find(t => t.key == theme_key);
let borderColor = undefined
if (theme) {
// refresh variables in case they are back-referencing
Array.from(DEFAULT_THEME.rule.style)
@ -67,7 +68,14 @@ function themeFieldChanged() {
.forEach(cssVariable => {
body.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
});
borderColor = theme.rule.style.getPropertyValue('--input-border-color').trim()
if (!borderColor.startsWith('#')) {
borderColor = theme.rule.style.getPropertyValue('--theme-color-fallback')
}
} else {
borderColor = DEFAULT_THEME.rule.style.getPropertyValue('--theme-color-fallback')
}
document.querySelector('meta[name="theme-color"]').setAttribute("content", borderColor)
}
themeField.addEventListener('change', themeFieldChanged);

View File

@ -1,17 +1,17 @@
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
function getNextSibling(elem, selector) {
// Get the next sibling element
var sibling = elem.nextElementSibling
// Get the next sibling element
var sibling = elem.nextElementSibling
// If there's no selector, return the first sibling
if (!selector) return sibling
// If there's no selector, return the first sibling
if (!selector) return sibling
// If the sibling matches our selector, use it
// If not, jump to the next sibling and continue the loop
while (sibling) {
if (sibling.matches(selector)) return sibling
sibling = sibling.nextElementSibling
}
// If the sibling matches our selector, use it
// If not, jump to the next sibling and continue the loop
while (sibling) {
if (sibling.matches(selector)) return sibling
sibling = sibling.nextElementSibling
}
}

View File

@ -0,0 +1,8 @@
{
"name": "Stable Diffusion UI",
"display": "standalone",
"display_override": [
"window-controls-overlay"
],
"theme_color": "#000000"
}

View File

@ -0,0 +1,42 @@
(function () {
"use strict"
var styleSheet = document.createElement("style");
styleSheet.textContent = `
.auto-scroll {
float: right;
}
`;
document.head.appendChild(styleSheet);
const autoScrollControl = document.createElement('div');
autoScrollControl.innerHTML = `<input id="auto_scroll" name="auto_scroll" type="checkbox">
<label for="auto_scroll">Scroll to generated image</label>`
autoScrollControl.className = "auto-scroll"
clearAllPreviewsBtn.parentNode.insertBefore(autoScrollControl, clearAllPreviewsBtn.nextSibling)
prettifyInputs(document);
let autoScroll = document.querySelector("#auto_scroll")
SETTINGS_IDS_LIST.push("auto_scroll")
initSettings()
// observe for changes in the preview pane
var observer = new MutationObserver(function (mutations) {
mutations.forEach(function (mutation) {
if (mutation.target.className == 'img-batch') {
Autoscroll(mutation.target)
}
})
})
observer.observe(document.getElementById('preview'), {
childList: true,
subtree: true
})
function Autoscroll(target) {
if (autoScroll.checked && target !== null) {
target.parentElement.parentElement.parentElement.scrollIntoView();
}
}
})()

View File

@ -7,6 +7,7 @@ Notes:
import json
import os, re
import traceback
import queue
import torch
import numpy as np
from gc import collect as gc_collect
@ -27,6 +28,8 @@ from gfpgan import GFPGANer
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from threading import Lock
import uuid
logging.set_verbosity_error()
@ -34,7 +37,7 @@ logging.set_verbosity_error()
# consts
config_yaml = "optimizedSD/v1-inference.yaml"
filename_regex = re.compile('[^a-zA-Z0-9]')
force_gfpgan_to_cuda0 = True # workaround: gfpgan currently works only on cuda:0
gfpgan_temp_device_lock = Lock() # workaround: gfpgan currently can only start on one device at a time.
# api stuff
from sd_internal import device_manager
@ -308,12 +311,6 @@ def move_to_cpu(model):
def load_model_gfpgan():
if thread_data.gfpgan_file is None: raise ValueError(f'Thread gfpgan_file is undefined.')
# hack for a bug in facexlib: https://github.com/xinntao/facexlib/pull/19/files
from facexlib.detection import retinaface
retinaface.device = torch.device(thread_data.device)
print('forced retinaface.device to', thread_data.device)
model_path = thread_data.gfpgan_file + ".pth"
thread_data.model_gfpgan = GFPGANer(device=torch.device(thread_data.device), model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
print('loaded', thread_data.gfpgan_file, 'to', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
@ -369,15 +366,23 @@ def apply_filters(filter_name, image_data, model_path=None):
image_data.to(thread_data.device)
if filter_name == 'gfpgan':
if model_path is not None and model_path != thread_data.gfpgan_file:
thread_data.gfpgan_file = model_path
load_model_gfpgan()
elif not thread_data.model_gfpgan:
load_model_gfpgan()
if thread_data.model_gfpgan is None: raise Exception('Model "gfpgan" not loaded.')
print('enhance with', thread_data.gfpgan_file, 'on', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
_, _, output = thread_data.model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
image_data = output[:,:,::-1]
# This lock is only ever used here. No need to use timeout for the request. Should never deadlock.
with gfpgan_temp_device_lock: # Wait for any other devices to complete before starting.
# hack for a bug in facexlib: https://github.com/xinntao/facexlib/pull/19/files
from facexlib.detection import retinaface
retinaface.device = torch.device(thread_data.device)
print('forced retinaface.device to', thread_data.device)
if model_path is not None and model_path != thread_data.gfpgan_file:
thread_data.gfpgan_file = model_path
load_model_gfpgan()
elif not thread_data.model_gfpgan:
load_model_gfpgan()
if thread_data.model_gfpgan is None: raise Exception('Model "gfpgan" not loaded.')
print('enhance with', thread_data.gfpgan_file, 'on', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
_, _, output = thread_data.model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
image_data = output[:,:,::-1]
if filter_name == 'real_esrgan':
if model_path is not None and model_path != thread_data.real_esrgan_file:
@ -392,9 +397,34 @@ def apply_filters(filter_name, image_data, model_path=None):
return image_data
def mk_img(req: Request):
def is_model_reload_necessary(req: Request):
# custom model support:
# the req.use_stable_diffusion_model needs to be a valid path
# to the ckpt file (without the extension).
if not os.path.exists(req.use_stable_diffusion_model + '.ckpt'): raise FileNotFoundError(f'Cannot find {req.use_stable_diffusion_model}.ckpt')
needs_model_reload = False
if not thread_data.model or thread_data.ckpt_file != req.use_stable_diffusion_model or thread_data.vae_file != req.use_vae_model:
thread_data.ckpt_file = req.use_stable_diffusion_model
thread_data.vae_file = req.use_vae_model
needs_model_reload = True
if thread_data.device != 'cpu':
if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
(thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
thread_data.precision = 'full' if req.use_full_precision else 'autocast'
needs_model_reload = True
return needs_model_reload
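# Descriptive note (not part of this commit): besides a changed checkpoint or VAE file,
# a reload is also requested when the precision implied by the request (full vs.
# autocast/half) no longer matches how the currently loaded model was set up.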
def reload_model():
unload_models()
unload_filters()
load_model_ckpt()
def mk_img(req: Request, data_queue: queue.Queue, task_temp_images: list, step_callback):
try:
yield from do_mk_img(req)
return do_mk_img(req, data_queue, task_temp_images, step_callback)
except Exception as e:
print(traceback.format_exc())
@ -405,12 +435,13 @@ def mk_img(req: Request):
thread_data.model.model2.to("cpu")
gc() # Release from memory.
yield json.dumps({
data_queue.put(json.dumps({
"status": 'failed',
"detail": str(e)
})
}))
raise e
def update_temp_img(req, x_samples):
def update_temp_img(req, x_samples, task_temp_images: list):
partial_images = []
for i in range(req.num_outputs):
if thread_data.test_sd2:
@ -421,19 +452,18 @@ def update_temp_img(req, x_samples):
x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
x_sample = x_sample.astype(np.uint8)
img = Image.fromarray(x_sample)
buf = BytesIO()
img.save(buf, format='JPEG')
buf.seek(0)
buf = img_to_buffer(img, output_format='JPEG')
del img, x_sample, x_sample_ddim
# don't delete x_samples, it is used in the code that called this callback
thread_data.temp_images[str(req.session_id) + '/' + str(i)] = buf
task_temp_images[i] = buf
partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})
return partial_images
# Build and return the appropriate generator for do_mk_img
def get_image_progress_generator(req, extra_props=None):
def get_image_progress_generator(req, data_queue: queue.Queue, task_temp_images: list, step_callback, extra_props=None):
if not req.stream_progress_updates:
def empty_callback(x_samples, i): return x_samples
return empty_callback
@ -452,15 +482,17 @@ def get_image_progress_generator(req, extra_props=None):
progress.update(extra_props)
if req.stream_image_progress and i % 5 == 0:
progress['output'] = update_temp_img(req, x_samples)
progress['output'] = update_temp_img(req, x_samples, task_temp_images)
yield json.dumps(progress)
data_queue.put(json.dumps(progress))
step_callback()
if thread_data.stop_processing:
raise UserInitiatedStop("User requested that we stop processing")
return img_callback
def do_mk_img(req: Request):
def do_mk_img(req: Request, data_queue: queue.Queue, task_temp_images: list, step_callback):
thread_data.stop_processing = False
res = Response()
@ -469,28 +501,6 @@ def do_mk_img(req: Request):
thread_data.temp_images.clear()
# custom model support:
# the req.use_stable_diffusion_model needs to be a valid path
# to the ckpt file (without the extension).
if not os.path.exists(req.use_stable_diffusion_model + '.ckpt'): raise FileNotFoundError(f'Cannot find {req.use_stable_diffusion_model}.ckpt')
needs_model_reload = False
if not thread_data.model or thread_data.ckpt_file != req.use_stable_diffusion_model or thread_data.vae_file != req.use_vae_model:
thread_data.ckpt_file = req.use_stable_diffusion_model
thread_data.vae_file = req.use_vae_model
needs_model_reload = True
if thread_data.device != 'cpu':
if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
(thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
thread_data.precision = 'full' if req.use_full_precision else 'autocast'
needs_model_reload = True
if needs_model_reload:
unload_models()
unload_filters()
load_model_ckpt()
if thread_data.turbo != req.turbo and not thread_data.test_sd2:
thread_data.turbo = req.turbo
thread_data.model.turbo = req.turbo
@ -606,7 +616,7 @@ def do_mk_img(req: Request):
thread_data.modelFS.to(thread_data.device)
n_steps = req.num_inference_steps if req.init_image is None else t_enc
img_callback = get_image_progress_generator(req, {"total_steps": n_steps})
img_callback = get_image_progress_generator(req, data_queue, task_temp_images, step_callback, {"total_steps": n_steps})
# run the handler
try:
@ -615,13 +625,6 @@ def do_mk_img(req: Request):
x_samples = _txt2img(req.width, req.height, req.num_outputs, req.num_inference_steps, req.guidance_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, req.sampler)
else:
x_samples = _img2img(init_latent, t_enc, batch_size, req.guidance_scale, c, uc, req.num_inference_steps, opt_ddim_eta, opt_seed, img_callback, mask, opt_C, req.height, req.width, opt_f)
if req.stream_progress_updates:
yield from x_samples
if hasattr(thread_data, 'partial_x_samples'):
if thread_data.partial_x_samples is not None:
x_samples = thread_data.partial_x_samples
del thread_data.partial_x_samples
except UserInitiatedStop:
if not hasattr(thread_data, 'partial_x_samples'):
continue
@ -666,9 +669,11 @@ def do_mk_img(req: Request):
save_metadata(meta_out_path, req, prompts[0], opt_seed)
if return_orig_img:
img_str = img_to_base64_str(img, req.output_format)
img_buffer = img_to_buffer(img, req.output_format)
img_str = buffer_to_base64_str(img_buffer, req.output_format)
res_image_orig = ResponseImage(data=img_str, seed=opt_seed)
res.images.append(res_image_orig)
task_temp_images[i] = img_buffer
if req.save_to_disk_path is not None:
res_image_orig.path_abs = img_out_path
@ -684,9 +689,11 @@ def do_mk_img(req: Request):
filters_applied.append(req.use_upscale)
if (len(filters_applied) > 0):
filtered_image = Image.fromarray(img_data[i])
filtered_img_data = img_to_base64_str(filtered_image, req.output_format)
filtered_buffer = img_to_buffer(filtered_image, req.output_format)
filtered_img_data = buffer_to_base64_str(filtered_buffer, req.output_format)
response_image = ResponseImage(data=filtered_img_data, seed=opt_seed)
res.images.append(response_image)
task_temp_images[i] = filtered_buffer
if req.save_to_disk_path is not None:
filtered_img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format, "_".join(filters_applied))
save_image(filtered_image, filtered_img_out_path)
@ -705,7 +712,10 @@ def do_mk_img(req: Request):
print(f'memory_final = {round(torch.cuda.memory_allocated(thread_data.device) / 1e6, 2)}Mb')
print('Task completed')
yield json.dumps(res.json())
res = res.json()
data_queue.put(json.dumps(res))
return res
def save_image(img, img_out_path):
try:
@ -771,7 +781,7 @@ def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code,
sampler.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)
samples_ddim = sampler.sample(
samples_ddim, intermediates = sampler.sample(
S=opt_ddim_steps,
conditioning=c,
batch_size=opt_n_samples,
@ -804,7 +814,7 @@ def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code,
mask=mask,
sampler = sampler_name,
)
yield from samples_ddim
return samples_ddim
def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask, opt_C=1, opt_H=1, opt_W=1, opt_f=1):
# encode (scaled latent)
@ -842,7 +852,7 @@ def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, o
x_T=x_T,
sampler = 'ddim'
)
yield from samples_ddim
return samples_ddim
def gc():
gc_collect()
@ -910,8 +920,16 @@ def load_mask(mask_str, h0, w0, newH, newW, invert=False):
# https://stackoverflow.com/a/61114178
def img_to_base64_str(img, output_format="PNG"):
buffered = img_to_buffer(img, output_format)
return buffer_to_base64_str(buffered, output_format)
def img_to_buffer(img, output_format="PNG"):
buffered = BytesIO()
img.save(buffered, format=output_format)
buffered.seek(0)
return buffered
def buffer_to_base64_str(buffered, output_format="PNG"):
buffered.seek(0)
img_byte = buffered.getvalue()
mime_type = "image/png" if output_format.lower() == "png" else "image/jpeg"

View File

@ -283,45 +283,26 @@ def thread_render(device):
print(f'Session {task.request.session_id} starting task {id(task)} on {runtime.thread_data.device_name}')
if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
try:
if runtime.thread_data.device == 'cpu' and is_alive() > 1:
# CPU is not the only device. Keep track of active time to unload resources later.
runtime.thread_data.lastActive = time.time()
# Open data generator.
res = runtime.mk_img(task.request)
if current_model_path == task.request.use_stable_diffusion_model:
current_state = ServerStates.Rendering
else:
if runtime.is_model_reload_necessary(task.request):
current_state = ServerStates.LoadingModel
# Start reading from generator.
dataQueue = None
if task.request.stream_progress_updates:
dataQueue = task.buffer_queue
for result in res:
if current_state == ServerStates.LoadingModel:
current_state = ServerStates.Rendering
current_model_path = task.request.use_stable_diffusion_model
current_vae_path = task.request.use_vae_model
runtime.reload_model()
current_model_path = task.request.use_stable_diffusion_model
current_vae_path = task.request.use_vae_model
def step_callback():
global current_state_error
if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
runtime.thread_data.stop_processing = True
if isinstance(current_state_error, StopAsyncIteration):
task.error = current_state_error
current_state_error = None
print(f'Session {task.request.session_id} sent cancel signal for task {id(task)}')
if dataQueue:
dataQueue.put(result)
if isinstance(result, str):
result = json.loads(result)
task.response = result
if 'output' in result:
for out_obj in result['output']:
if 'path' in out_obj:
img_id = out_obj['path'][out_obj['path'].rindex('/') + 1:]
task.temp_images[int(img_id)] = runtime.thread_data.temp_images[out_obj['path'][11:]]
elif 'data' in out_obj:
buf = runtime.base64_str_to_buffer(out_obj['data'])
task.temp_images[result['output'].index(out_obj)] = buf
# Before looping back to the generator, mark cache as still alive.
task_cache.keep(task.request.session_id, TASK_TTL)
task_cache.keep(task.request.session_id, TASK_TTL)
current_state = ServerStates.Rendering
task.response = runtime.mk_img(task.request, task.buffer_queue, task.temp_images, step_callback)
except Exception as e:
task.error = e
print(traceback.format_exc())

View File

@ -7,6 +7,7 @@ import traceback
import sys
import os
import socket
import picklescan.scanner
import rich
@ -144,12 +145,19 @@ def setConfig(config):
print(traceback.format_exc())
def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
config = getConfig()
model_dirs = [os.path.join(MODELS_DIR, model_dir), SD_DIR]
if not model_name: # When None try user configured model.
config = getConfig()
# config = getConfig()
if 'model' in config and model_type in config['model']:
model_name = config['model'][model_type]
if model_name:
is_sd2 = config.get('test_sd2', False)
if model_name.startswith('sd2_') and not is_sd2: # temp hack, until SD2 is unified with 1.4
print('ERROR: Cannot use SD 2.0 models with SD 1.0 code. Using the sd-v1-4 model instead!')
model_name = 'sd-v1-4'
# Check models directory
models_dir_path = os.path.join(MODELS_DIR, model_dir, model_name)
for model_extension in model_extensions:
@ -237,9 +245,9 @@ def is_malicious_model(file_path):
return False
except Exception as e:
print('error while scanning', file_path, 'error:', e)
return False
known_models = {}
def getModels():
models = {
'active': {
@ -262,9 +270,14 @@ def getModels():
if not file.endswith(model_extension):
continue
if is_malicious_model(os.path.join(models_dir, file)):
models['scan-error'] = file
return
model_path = os.path.join(models_dir, file)
mtime = os.path.getmtime(model_path)
mod_time = known_models[model_path] if model_path in known_models else -1
if mod_time != mtime:
if is_malicious_model(model_path):
models['scan-error'] = file
return
known_models[model_path] = mtime
model_name = file[:-len(model_extension)]
models['options'][model_type].append(model_name)
@ -293,6 +306,11 @@ def getUIPlugins():
return plugins
def getIPConfig():
ips = socket.gethostbyname_ex(socket.getfqdn())
ips[2].append(ips[0])
return ips[2]
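# Descriptive note (not part of this commit): socket.gethostbyname_ex() returns a
# (hostname, aliaslist, ipaddrlist) tuple, so ips[2] is the machine's list of IP
# addresses and ips[0] is its hostname; appending the hostname lets the UI also show
# a http://<hostname>:<port>/ address alongside the numeric ones.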
@app.get('/get/{key:path}')
def read_web_data(key:str=None):
if not key: # /get without parameters, stable-diffusion easter egg.
@ -302,11 +320,14 @@ def read_web_data(key:str=None):
if config is None:
config = APP_CONFIG_DEFAULTS
return JSONResponse(config, headers=NOCACHE_HEADERS)
elif key == 'devices':
elif key == 'system_info':
config = getConfig()
devices = task_manager.get_devices()
devices['config'] = config.get('render_devices', "auto")
return JSONResponse(devices, headers=NOCACHE_HEADERS)
system_info = {
'devices': task_manager.get_devices(),
'hosts': getIPConfig(),
}
system_info['devices']['config'] = config.get('render_devices', "auto")
return JSONResponse(system_info, headers=NOCACHE_HEADERS)
elif key == 'models':
return JSONResponse(getModels(), headers=NOCACHE_HEADERS)
elif key == 'modifiers': return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)
@ -442,6 +463,9 @@ class LogSuppressFilter(logging.Filter):
return True
logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())
# Check models and prepare cache for UI open
getModels()
# Start the task_manager
task_manager.default_model_to_load = resolve_ckpt_to_use()
task_manager.default_vae_to_load = resolve_vae_to_use()