Keep v2 files in the repo, for the updater

cmdr2 2022-09-02 13:58:36 +05:30
parent cc14ac0bac
commit 472b8d0e51
11 changed files with 1656 additions and 0 deletions


@@ -0,0 +1 @@
installer\Scripts\activate.bat

scripts/on_env_start.bat (new file, 30 lines)

@@ -0,0 +1,30 @@
@echo. & echo "Stable Diffusion UI" & echo.

@cd ..

@>nul grep -c "sd_ui_git_cloned" scripts\install_status.txt
@if "%ERRORLEVEL%" EQU "0" (
    @echo "Stable Diffusion UI's git repository was already installed. Updating.."

    @cd sd-ui-files

    @call git reset --hard
    @call git pull

    @cd ..
) else (
    @echo. & echo "Downloading Stable Diffusion UI.." & echo.

    @call git clone https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files && (
        @echo sd_ui_git_cloned >> scripts\install_status.txt
    ) || (
        @echo "Error downloading Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
        pause
        @exit /b
    )
)

@xcopy sd-ui-files\ui ui /s /i
@xcopy sd-ui-files\scripts scripts /s /i

@call scripts\on_sd_start.bat

scripts/on_sd_start.bat (new file, 82 lines)

@@ -0,0 +1,82 @@
@>nul grep -c "sd_git_cloned" scripts\install_status.txt
@if "%ERRORLEVEL%" EQU "0" (
@echo "Stable Diffusion's git repository was already installed. Updating.."
@cd stable-diffusion
@call git reset --hard
@call git pull
@cd ..
) else (
@echo. & echo "Downloading Stable Diffusion.." & echo.
@call git clone https://github.com/basujindal/stable-diffusion.git && (
@echo sd_git_cloned >> scripts\install_status.txt
) || (
@echo "Error downloading Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
pause
@exit /b
)
)
@cd stable-diffusion
@>nul grep -c "conda_sd_env_created" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" EQU "0" (
@echo "Packages necessary for Stable Diffusion were already installed"
) else (
@echo. & echo "Downloading packages necessary for Stable Diffusion.." & echo. & echo "***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** .." & echo.
@rmdir /s /q .\env
@call conda env create --prefix env -f environment.yaml && (
@echo conda_sd_env_created >> ..\scripts\install_status.txt
) || (
echo "Error installing the packages necessary for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
pause
exit /b
)
)
@call conda activate .\env
@>nul grep -c "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
@if "%ERRORLEVEL%" EQU "0" (
echo "Packages necessary for Stable Diffusion UI were already installed"
) else (
@echo. & echo "Downloading packages necessary for Stable Diffusion UI.." & echo.
@call conda install -c conda-forge -y --prefix env uvicorn fastapi && (
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
) || (
echo "Error installing the packages necessary for Stable Diffusion UI. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
pause
exit /b
)
)
@if exist "sd-v1-4.ckpt" (
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
) else (
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
@call curl https://www.googleapis.com/storage/v1/b/aai-blog-files/o/sd-v1-4.ckpt?alt=media > sd-v1-4.ckpt
@if not exist "sd-v1-4.ckpt" (
echo "Error downloading the data files (weights) for Stable Diffusion. Please try re-running this installer. If it doesn't work, please copy the messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB or file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues"
pause
exit /b
)
)
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
@echo sd_install_complete >> ..\scripts\install_status.txt
@echo. & echo "Stable Diffusion is ready!" & echo.
@set SD_UI_PATH=%cd%\..\ui
@uvicorn server:app --app-dir "%SD_UI_PATH%" --port 9000 --host 0.0.0.0
@pause
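
The two scripts above treat scripts\install_status.txt as a simple idempotency ledger: each completed step appends a marker line, and the grep -c calls check for that marker on the next run so updates skip finished steps. The same pattern in outline (a sketch, not part of the diff):

# Sketch of the marker-file pattern the installer scripts use.
import os

STATUS_FILE = os.path.join('scripts', 'install_status.txt')

def step_done(marker):
    # mirrors: >nul grep -c "<marker>" scripts\install_status.txt
    if not os.path.exists(STATUS_FILE):
        return False
    with open(STATUS_FILE) as f:
        return any(marker in line for line in f)

def mark_done(marker):
    # mirrors: echo <marker> >> scripts\install_status.txt
    with open(STATUS_FILE, 'a') as f:
        f.write(marker + '\n')

if not step_done('sd_git_cloned'):
    pass  # clone the repository here, then:
    mark_done('sd_git_cloned')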


@@ -0,0 +1,6 @@
@call conda --version
@call git --version
cd %CONDA_PREFIX%\..\scripts
on_env_start.bat


@@ -0,0 +1,2 @@
Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' -Name LongPathsEnabled -Type DWord -Value 1
pause
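
To confirm the registry tweak above took effect, a quick read-back of the value (a sketch, not part of the diff; assumes Windows and Python's standard library):

# Sketch: confirm LongPathsEnabled was set to 1 (Windows only).
import winreg

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                    r'SYSTEM\CurrentControlSet\Control\FileSystem') as key:
    value, value_type = winreg.QueryValueEx(key, 'LongPathsEnabled')

print('LongPathsEnabled =', value)  # 1 allows paths longer than 260 characters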

ui/index.html (new file, 921 lines)

@@ -0,0 +1,921 @@
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
body {
font-family: Arial, Helvetica, sans-serif;
font-size: 11pt;
background-color: rgb(32, 33, 36);
color: #eee;
}
a {
color: rgb(0, 102, 204);
}
a:visited {
color: rgb(0, 102, 204);
}
label {
font-size: 10pt;
}
#prompt {
width: 100%;
height: 50pt;
}
@media screen and (max-width: 600px) {
#prompt {
width: 95%;
}
}
.image_preview_container {
display: none;
margin-top: 10pt;
}
.image_clear_btn {
position: absolute;
transform: translateX(-50%) translateY(-35%);
background: black;
color: white;
border: 2pt solid #ccc;
padding: 0;
cursor: pointer;
outline: inherit;
border-radius: 8pt;
width: 16pt;
height: 16pt;
font-family: Verdana;
font-size: 8pt;
}
#editor-settings-entries {
font-size: 9pt;
margin-bottom: 5px;
padding-left: 10px;
list-style-type: none;
}
#editor-settings-entries li {
padding-bottom: 3pt;
}
#guidance_scale {
transform: translateY(30%);
}
#outputMsg {
font-size: small;
}
#footer {
font-size: small;
padding-left: 10pt;
background: none;
}
#footer-legal {
font-size: 8pt;
}
.imgSeedLabel {
position: absolute;
transform: translateX(-100%);
margin-top: 5pt;
margin-left: -5pt;
font-size: 10pt;
background-color: #333;
opacity: 0.8;
color: #ddd;
border-radius: 3pt;
padding: 1pt 3pt;
}
.imgUseBtn {
position: absolute;
transform: translateX(-100%);
margin-top: 30pt;
margin-left: -5pt;
}
.imgSaveBtn {
position: absolute;
transform: translateX(-100%);
margin-top: 55pt;
margin-left: -5pt;
}
.imgItem {
display: inline;
padding-right: 10px;
}
.imgItemInfo {
opacity: 0.5;
}
#container {
width: 75%;
margin-left: auto;
margin-right: auto;
}
@media screen and (max-width: 1400px) {
#container {
width: 100%;
}
}
#meta small {
font-size: 11pt;
}
#editor {
padding: 5px;
}
#editor label {
font-weight: bold;
}
#preview {
padding: 5px;
}
#editor-inputs {
margin-bottom: 20px;
}
#editor-inputs-prompt {
flex: 1;
}
#editor-inputs .row {
padding-bottom: 10px;
}
#makeImage {
border-radius: 6px;
}
#editor-modifiers h5 {
padding: 5pt 0;
margin: 0;
}
#makeImage {
flex: 0 0 70px;
background: rgb(80, 0, 185);
border: 2px solid rgb(40, 0, 78);
color: rgb(255, 221, 255);
width: 100%;
height: 30pt;
}
#makeImage:hover {
background: rgb(93, 0, 214);
}
.flex-container {
display: flex;
}
.col-50 {
flex: 50%;
}
.col-free {
flex: 1;
}
.collapsible {
cursor: pointer;
}
.collapsible-content {
display: none;
padding-left: 15px;
}
.collapsible-content h5 {
padding: 5pt 0pt;
margin: 0;
font-size: 10pt;
}
.collapsible-handle {
color: white;
padding-right: 5px;
}
.panel-box {
background: rgb(44, 45, 48);
border: 1px solid rgb(47, 49, 53);
border-radius: 7px;
padding: 5px;
margin-bottom: 15px;
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.panel-box h4 {
margin: 0;
padding: 2px 0;
}
.prompt-modifier-tag {
border: 1px solid rgb(10, 0, 24);
border-radius: 4px;
padding: 0pt 3pt;
margin-right: 2pt;
cursor: pointer;
display: inline;
background: rgb(163, 163, 163);
color: black;
line-height: 25pt;
float: left;
font-size: 9pt;
}
.prompt-modifier-tag:hover {
background: black;
color: white;
}
#editor-modifiers-entries .prompt-modifier-tag {
background: #110f0f;
color: rgb(212, 212, 212);
margin-bottom: 4pt;
font-size: 10pt;
}
#editor-modifiers-entries .prompt-modifier-tag:hover {
background: rgb(163, 163, 163);
color: black;
}
#editor-modifiers .editor-modifiers-leaf {
padding-top: 10pt;
padding-bottom: 10pt;
}
#preview {
margin-left: 20pt;
}
img {
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
}
.line-separator {
background: rgb(56, 56, 56);
height: 1pt;
margin: 15pt 0;
}
#editor-inputs-tags-container {
margin-top: 5pt;
display: none;
}
#server-status {
float: right;
}
#server-status-color {
width: 8pt;
height: 8pt;
border-radius: 4pt;
background-color: rgb(128, 87, 0);
/* background-color: rgb(197, 1, 1); */
float: left;
transform: translateY(15%);
}
#server-status-msg {
color: rgb(128, 87, 0);
padding-left: 2pt;
font-size: 10pt;
}
#preview-prompt {
font-size: 16pt;
margin-bottom: 10pt;
}
</style>
</head>
<body>
<div id="container">
<div class="flex-container">
<div id="editor" class="col-50">
<div id="meta">
<div id="server-status">
<div id="server-status-color">&nbsp;</div>
<span id="server-status-msg">Stable Diffusion is starting..</span>
</div>
<h1>Stable Diffusion UI <small>v2 (beta)</small></h1>
</div>
<div id="editor-inputs">
<div id="editor-inputs-prompt" class="row">
<label for="prompt">Prompt</label>
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
</div>
<div id="editor-inputs-init-image" class="row">
<label for="init_image"><b>Initial Image:</b> (optional) </label> <input id="init_image" name="init_image" type="file" /> </button><br/>
<div id="init_image_preview_container" class="image_preview_container">
<img id="init_image_preview" src="" width="100" height="100" />
<button id="init_image_clear" class="image_clear_btn">X</button>
</div>
</div>
<div id="editor-inputs-tags-container" class="row">
<label>Tags: <small>(click a tag to remove it)</small></label>
<div id="editor-inputs-tags-list">
</div>
</div>
<button id="makeImage">Make Image</button>
</div>
<div class="line-separator">&nbsp;</div>
<div id="editor-settings" class="panel-box">
<h4 class="collapsible">Advanced Settings</h4>
<ul id="editor-settings-entries" class="collapsible-content">
<li><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
<li><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="4"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="4"> (images at once)</li>
<li><label for="width">Width:</label> <select id="width" name="width" value="512"><option value="128">128</option><option value="256">256</option><option value="512" selected>512</option><option value="768">768</option><option value="1024">1024</option></select></li>
<li><label for="height">Height:</label> <select id="height" name="height" value="512"><option value="128">128</option><option value="256">256</option><option value="512" selected>512</option><option value="768">768</option></select></li>
<li><label for="num_inference_steps">Number of inference steps:</label> <input id="num_inference_steps" name="num_inference_steps" size="4" value="50"></li>
<li><label for="guidance_scale">Guidance Scale:</label> <input id="guidance_scale" name="guidance_scale" value="75" type="range" min="10" max="200"> <span id="guidance_scale_value"></span></li>
<li><span id="prompt_strength_container"><label for="prompt_strength">Prompt Strength:</label> <input id="prompt_strength" name="prompt_strength" value="8" type="range" min="0" max="10"> <span id="prompt_strength_value"></span><br/></span></li>
<li>&nbsp;</li>
<li><input id="save_to_disk" name="save_to_disk" type="checkbox"> <label for="save_to_disk">Automatically save to disk <span id="diskPath"></span></label></li>
<li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
<li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode (generates images faster, but uses an additional 1 GB of GPU memory)</label></li>
<li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU (warning: this will be *very* slow)</label></li>
<li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision (for GPU-only. warning: this will consume more VRAM)</label></li>
<!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
</ul>
</div>
<div id="editor-modifiers" class="panel-box">
<h4 class="collapsible">Image Modifiers (art styles, tags etc)</h4>
<div id="editor-modifiers-entries" class="collapsible-content">
</div>
</div>
</div>
<div id="preview" class="col-50">
<div id="preview-prompt">Type a prompt and press the "Make Image" button.<br/><br/>You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section and selecting the desired modifiers.<br/><br/>Click "Advanced Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)</div>
<div id="outputMsg"></div>
<div id="current-images" class="img-preview">
</div>
</div>
</div>
<div class="line-separator">&nbsp;</div>
<div id="footer" class="panel-box">
<p>Please feel free to <a href="https://github.com/cmdr2/stable-diffusion-ui/issues" target="_blank">file an issue</a> if you run into any problems or have suggestions while using this interface.</p>
<div id="footer-legal">
<p><b>Disclaimer:</b> The authors of this project are not responsible for any content generated using this interface.</p>
<p>The license of this software forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information that would be meant for harm, <br/>spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read <a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/LICENSE" target="_blank">the license</a>.</p>
<p>By using this software, you consent to the terms and conditions of the license.</p>
</div>
</div>
</div>
<script>
const SOUND_ENABLED_KEY = "soundEnabled"
const HEALTH_PING_INTERVAL = 5 // seconds
let promptField = document.querySelector('#prompt')
let numOutputsTotalField = document.querySelector('#num_outputs_total')
let numOutputsParallelField = document.querySelector('#num_outputs_parallel')
let numInferenceStepsField = document.querySelector('#num_inference_steps')
let guidanceScaleField = document.querySelector('#guidance_scale')
let guidanceScaleValueLabel = document.querySelector('#guidance_scale_value')
let randomSeedField = document.querySelector("#random_seed")
let seedField = document.querySelector('#seed')
let widthField = document.querySelector('#width')
let heightField = document.querySelector('#height')
let initImageSelector = document.querySelector("#init_image")
let initImagePreview = document.querySelector("#init_image_preview")
// let maskImageSelector = document.querySelector("#mask")
// let maskImagePreview = document.querySelector("#mask_preview")
let turboField = document.querySelector('#turbo')
let useCPUField = document.querySelector('#use_cpu')
let useFullPrecisionField = document.querySelector('#use_full_precision')
let saveToDiskField = document.querySelector('#save_to_disk')
// let allowNSFWField = document.querySelector("#allow_nsfw")
let promptStrengthField = document.querySelector('#prompt_strength')
let promptStrengthValueLabel = document.querySelector('#prompt_strength_value')
let makeImageBtn = document.querySelector('#makeImage')
let imagesContainer = document.querySelector('#current-images')
let initImagePreviewContainer = document.querySelector('#init_image_preview_container')
let initImageClearBtn = document.querySelector('#init_image_clear')
let promptStrengthContainer = document.querySelector('#prompt_strength_container')
// let maskSetting = document.querySelector('#mask_setting')
// let maskImagePreviewContainer = document.querySelector('#mask_preview_container')
// let maskImageClearBtn = document.querySelector('#mask_clear')
let editorModifierEntries = document.querySelector('#editor-modifiers-entries')
let editorModifierTagsList = document.querySelector('#editor-inputs-tags-list')
let editorTagsContainer = document.querySelector('#editor-inputs-tags-container')
let previewPrompt = document.querySelector('#preview-prompt')
let showConfigToggle = document.querySelector('#configToggleBtn')
// let configBox = document.querySelector('#config')
let outputMsg = document.querySelector('#outputMsg')
let soundToggle = document.querySelector('#sound_toggle')
let serverStatusColor = document.querySelector('#server-status-color')
let serverStatusMsg = document.querySelector('#server-status-msg')
let serverStatus = 'offline'
let activeTags = []
function isSoundEnabled() {
if (localStorage.getItem(SOUND_ENABLED_KEY) === 'false') {
return false
}
return true
}
function setStatus(statusType, msg, msgType) {
if (statusType !== 'server') {
return;
}
if (msgType == 'error') {
// msg = '<span style="color: red">' + msg + '<span>'
serverStatusColor.style.backgroundColor = 'red'
serverStatusMsg.style.color = 'red'
serverStatusMsg.innerHTML = 'Stable Diffusion has stopped'
} else if (msgType == 'success') {
// msg = '<span style="color: green">' + msg + '<span>'
serverStatusColor.style.backgroundColor = 'green'
serverStatusMsg.style.color = 'green'
serverStatusMsg.innerHTML = 'Stable Diffusion is ready'
serverStatus = 'online'
}
}
function logError(msg, res) {
outputMsg.innerHTML = '<span style="color: red">Error: ' + msg + '</span>'
console.log('request error', res)
setStatus('request', 'error', 'error')
}
function playSound() {
const audio = new Audio('/media/ding.mp3')
audio.volume = 0.2
audio.play()
}
async function healthCheck() {
try {
let res = await fetch('/ping')
res = await res.json()
if (res[0] == 'OK') {
setStatus('server', 'online', 'success')
} else {
setStatus('server', 'offline', 'error')
}
} catch (e) {
setStatus('server', 'offline', 'error')
}
}
// makes a single image. don't call this directly, use makeImage() instead
async function doMakeImage(reqBody) {
let res = ''
let seed = reqBody['seed']
try {
res = await fetch('/image', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(reqBody)
})
if (res.status != 200) {
if (serverStatus === 'online') {
logError('Stable Diffusion had an error: ' + await res.text() + '. This happens sometimes. Maybe modify the prompt or seed a little bit?', res)
} else {
logError("Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed.", res)
}
res = undefined
} else {
res = await res.json()
if (res.status !== 'succeeded') {
let msg = ''
if (res.detail !== undefined) {
msg = res.detail
} else {
msg = res
}
logError(msg, res)
res = undefined
}
}
} catch (e) {
console.log('request error', e)
setStatus('request', 'error', 'error')
}
if (!res) {
return false
}
for (let idx in res.output) {
let imgBody = ''
try {
let imgData = res.output[idx]
imgBody = imgData.data
} catch (e) {
console.log(imgBody)
setStatus('request', 'invalid image', 'error')
continue
}
let imgItem = document.createElement('div')
imgItem.className = 'imgItem'
let img = document.createElement('img')
img.width = parseInt(reqBody.width)
img.height = parseInt(reqBody.height)
img.src = imgBody
let imgItemInfo = document.createElement('span')
imgItemInfo.className = 'imgItemInfo'
let imgSeedLabel = document.createElement('span')
imgSeedLabel.className = 'imgSeedLabel'
imgSeedLabel.innerHTML = 'Seed: ' + seed
let imgUseBtn = document.createElement('button')
imgUseBtn.className = 'imgUseBtn'
imgUseBtn.innerHTML = 'Use as Input'
let imgSaveBtn = document.createElement('button')
imgSaveBtn.className = 'imgSaveBtn'
imgSaveBtn.innerHTML = 'Download'
imgItem.appendChild(img)
imgItem.appendChild(imgItemInfo)
imgItemInfo.appendChild(imgSeedLabel)
imgItemInfo.appendChild(imgUseBtn)
imgItemInfo.appendChild(imgSaveBtn)
imagesContainer.appendChild(imgItem)
imgUseBtn.addEventListener('click', function() {
initImageSelector.value = null
initImagePreview.src = imgBody
initImagePreviewContainer.style.display = 'block'
promptStrengthContainer.style.display = 'block'
// maskSetting.style.display = 'block'
randomSeedField.checked = false
seedField.value = seed
seedField.disabled = false
})
imgSaveBtn.addEventListener('click', function() {
let imgDownload = document.createElement('a')
imgDownload.download = generateUUID() + '.png'
imgDownload.href = imgBody
imgDownload.click()
})
imgItem.addEventListener('mouseenter', function() {
imgItemInfo.style.opacity = 1
})
imgItem.addEventListener('mouseleave', function() {
imgItemInfo.style.opacity = 0.5
})
}
return true
}
async function makeImage() {
if (serverStatus !== 'online') {
logError('The server is still starting up..')
return
}
setStatus('request', 'fetching..')
makeImageBtn.innerHTML = 'Processing..'
makeImageBtn.disabled = true
outputMsg.innerHTML = 'Fetching..'
const imageRegex = new RegExp('data:image/[A-Za-z]+;base64')
let seed = (randomSeedField.checked ? Math.floor(Math.random() * 10000) : parseInt(seedField.value))
let numOutputsTotal = parseInt(numOutputsTotalField.value)
let numOutputsParallel = parseInt(numOutputsParallelField.value)
let batchCount = Math.ceil(numOutputsTotal / numOutputsParallel)
let batchSize = numOutputsParallel
let prompt = promptField.value
if (activeTags.length > 0) {
let promptTags = activeTags.join(", ")
prompt += ", " + promptTags
}
previewPrompt.innerHTML = prompt
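// NOTE: the sliders store 10x values, so guidance_scale 75 becomes 7.5 and prompt_strength 8 becomes 0.8 below (matching the server-side defaults)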
let reqBody = {
prompt: prompt,
num_outputs: batchSize,
num_inference_steps: numInferenceStepsField.value,
guidance_scale: parseInt(guidanceScaleField.value) / 10,
width: widthField.value,
height: heightField.value,
// allow_nsfw: allowNSFWField.checked,
save_to_disk: saveToDiskField.checked,
turbo: turboField.checked,
use_cpu: useCPUField.checked,
use_full_precision: useFullPrecisionField.checked
}
if (imageRegex.test(initImagePreview.src)) {
reqBody['init_image'] = initImagePreview.src
reqBody['prompt_strength'] = parseInt(promptStrengthField.value) / 10
// if (imageRegex.test(maskImagePreview.src)) {
// reqBody['mask'] = maskImagePreview.src
// }
}
let time = new Date().getTime()
imagesContainer.innerHTML = ''
let successCount = 0
for (let i = 0; i < batchCount; i++) {
reqBody['seed'] = seed + i
let success = await doMakeImage(reqBody)
if (success) {
outputMsg.innerHTML = 'Processed batch ' + (i+1) + '/' + batchCount
successCount++
}
}
makeImageBtn.innerHTML = 'Make Image'
makeImageBtn.disabled = false
if (isSoundEnabled()) {
playSound()
}
time = new Date().getTime() - time
time /= 1000
if (successCount === batchCount) {
outputMsg.innerHTML = 'Processed ' + numOutputsTotal + ' images in ' + time + ' seconds'
setStatus('request', 'done', 'success')
}
if (randomSeedField.checked) {
seedField.value = seed
}
}
function generateUUID() { // Public Domain/MIT
var d = new Date().getTime();//Timestamp
var d2 = ((typeof performance !== 'undefined') && performance.now && (performance.now()*1000)) || 0;//Time in microseconds since page-load or 0 if unsupported
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
var r = Math.random() * 16;//random number between 0 and 16
if(d > 0){//Use timestamp until depleted
r = (d + r)%16 | 0;
d = Math.floor(d/16);
} else {//Use microseconds since page-load if supported
r = (d2 + r)%16 | 0;
d2 = Math.floor(d2/16);
}
return (c === 'x' ? r : (r & 0x3 | 0x8)).toString(16);
});
}
function handleAudioEnabledChange(e) {
localStorage.setItem(SOUND_ENABLED_KEY, e.target.checked.toString())
}
soundToggle.addEventListener('click', handleAudioEnabledChange)
soundToggle.checked = isSoundEnabled();
makeImageBtn.addEventListener('click', makeImage)
// configBox.style.display = 'none'
// showConfigToggle.addEventListener('click', function() {
// configBox.style.display = (configBox.style.display === 'none' ? 'block' : 'none')
// showConfigToggle.innerHTML = (configBox.style.display === 'none' ? 'show' : 'hide')
// return false
// })
function updateGuidanceScale() {
guidanceScaleValueLabel.innerHTML = guidanceScaleField.value / 10
}
guidanceScaleField.addEventListener('input', updateGuidanceScale)
updateGuidanceScale()
function updatePromptStrength() {
promptStrengthValueLabel.innerHTML = promptStrengthField.value / 10
}
promptStrengthField.addEventListener('input', updatePromptStrength)
updatePromptStrength()
function checkRandomSeed() {
if (randomSeedField.checked) {
seedField.disabled = true
seedField.value = "random"
} else {
seedField.disabled = false
}
}
randomSeedField.addEventListener('input', checkRandomSeed)
checkRandomSeed()
function showInitImagePreview() {
if (initImageSelector.files.length === 0) {
initImagePreviewContainer.style.display = 'none'
promptStrengthContainer.style.display = 'none'
// maskSetting.style.display = 'none'
return
}
let reader = new FileReader()
let file = initImageSelector.files[0]
reader.addEventListener('load', function() {
// console.log(file.name, reader.result)
initImagePreview.src = reader.result
initImagePreviewContainer.style.display = 'block'
promptStrengthContainer.style.display = 'block'
// maskSetting.style.display = 'block'
})
if (file) {
reader.readAsDataURL(file)
}
}
initImageSelector.addEventListener('change', showInitImagePreview)
showInitImagePreview()
initImageClearBtn.addEventListener('click', function() {
initImageSelector.value = null
// maskImageSelector.value = null
initImagePreview.src = ''
// maskImagePreview.src = ''
initImagePreviewContainer.style.display = 'none'
// maskImagePreviewContainer.style.display = 'none'
// maskSetting.style.display = 'none'
promptStrengthContainer.style.display = 'none'
})
// function showMaskImagePreview() {
// if (maskImageSelector.files.length === 0) {
// maskImagePreviewContainer.style.display = 'none'
// return
// }
// let reader = new FileReader()
// let file = maskImageSelector.files[0]
// reader.addEventListener('load', function() {
// maskImagePreview.src = reader.result
// maskImagePreviewContainer.style.display = 'block'
// })
// if (file) {
// reader.readAsDataURL(file)
// }
// }
// maskImageSelector.addEventListener('change', showMaskImagePreview)
// showMaskImagePreview()
// maskImageClearBtn.addEventListener('click', function() {
// maskImageSelector.value = null
// maskImagePreview.src = ''
// maskImagePreviewContainer.style.display = 'none'
// })
</script>
<script>
function createCollapsibles(node) {
if (!node) {
node = document
}
let collapsibles = node.querySelectorAll(".collapsible")
collapsibles.forEach(function(c) {
let handle = document.createElement('span')
handle.className = 'collapsible-handle'
handle.innerHTML = '&#x2795;'
c.insertBefore(handle, c.firstChild)
c.addEventListener('click', function() {
this.classList.toggle("active")
let content = this.nextElementSibling
if (content.style.display === "block") {
content.style.display = "none"
handle.innerHTML = '&#x2795;' // plus
} else {
content.style.display = "block"
handle.innerHTML = '&#x2796;' // minus
}
})
})
}
createCollapsibles()
function refreshTagsList() {
editorModifierTagsList.innerHTML = ''
if (activeTags.length == 0) {
editorTagsContainer.style.display = 'none'
return
} else {
editorTagsContainer.style.display = 'block'
}
activeTags.forEach(function(tag) {
let el = document.createElement('div')
el.className = 'prompt-modifier-tag'
el.innerHTML = tag
editorModifierTagsList.appendChild(el)
el.addEventListener('click', function() {
let idx = activeTags.indexOf(tag)
if (idx !== -1) {
activeTags.splice(idx, 1)
refreshTagsList()
}
})
})
let brk = document.createElement('br')
brk.style.clear = 'both'
editorModifierTagsList.appendChild(brk)
}
async function getDiskPath() {
try {
let res = await fetch('/output_dir')
if (res.status === 200) {
res = await res.json()
res = res[0]
document.querySelector('#diskPath').innerHTML = '(to ' + res + ')'
}
} catch (e) {
console.log('error fetching output dir path', e)
}
}
async function loadModifiers() {
try {
let res = await fetch('/modifiers.json')
if (res.status === 200) {
res = await res.json()
res.forEach(function(m) {
let title = m[0]
let modifiers = m[1]
let titleEl = document.createElement('h5')
titleEl.className = 'collapsible'
titleEl.innerHTML = title
let modifiersEl = document.createElement('div')
modifiersEl.classList.add('collapsible-content', 'editor-modifiers-leaf')
modifiers.forEach(function(modifier) {
let tagEl = document.createElement('div')
tagEl.className = 'prompt-modifier-tag'
tagEl.innerHTML = modifier
modifiersEl.appendChild(tagEl)
tagEl.addEventListener('click', function() {
if (activeTags.includes(modifier)) {
return
}
activeTags.push(modifier)
refreshTagsList()
})
})
let brk = document.createElement('br')
brk.style.clear = 'both'
modifiersEl.appendChild(brk)
let e = document.createElement('div')
e.appendChild(titleEl)
e.appendChild(modifiersEl)
editorModifierEntries.appendChild(e)
})
createCollapsibles(editorModifierEntries)
}
} catch (e) {
console.log('error fetching modifiers', e)
}
}
async function init() {
await loadModifiers()
await getDiskPath()
setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
healthCheck()
}
init()
</script>
</body>
</html>

ui/media/ding.mp3 (new binary file, not shown)

ui/modifiers.json (new file, 92 lines)

@@ -0,0 +1,92 @@
[
["Drawing Style", [
"Sketch",
"Doodle",
"Children's Drawing",
"Line Art",
"Dot Art",
"Crosshatch",
"Detailed and Intricate",
"Cel Shading"
]],
["Visual Style", [
"2D",
"Cartoon",
"8-bit",
"16-bit",
"Graphic Novel",
"Visual Novel",
"Street Art",
"Fantasy",
"Realistic",
"Photo",
"Hard Edge Painting",
"Mural",
"Mosaic",
"Hydrodipped",
"Modern Art",
"Concept Art",
"Digital Art",
"CGI",
"Anaglyph",
"Comic Book",
"Lithography"
]],
["Pen", [
"Graphite",
"Colored Pencil",
"Ink",
"Chalk",
"Pastel Art",
"Oil Paint"
]],
["Carving and Etching", [
"Etching",
"Wood-Carving",
"Papercutting",
"Paper-Mache",
"Paper Model",
"Linocut",
"Pyrography"
]],
["Camera", [
"HD",
"Color Grading",
"Film Grain",
"White Balance",
"Golden Hour",
"Glamor Shot",
"War Photography",
"Lens Flare",
"Polaroid",
"Vintage"
]],
["Color", [
"Colorful",
"Electric Colors",
"Warm Color Palette",
"Infrared",
"Beautiful Lighting"
]],
["Emotions", [
"Happy",
"Excited",
"Sad",
"Lonely",
"Angry",
"Good",
"Evil"
]],
["Style of an artist or community", [
"by Andy Warhol",
"Artstation",
"by Asaf Hanuka",
"by Aubrey Beardsley",
"by H.R. Giger",
"by Hayao Mizaki",
"by Salvador Dali",
"by Tivadar Csontváry Kosztka",
"by Lisa Frank",
"by Pablo Piccaso"
]]
]
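
For illustration (a sketch, not part of the diff): each entry above is a [category, modifiers] pair, and the UI appends whichever modifiers the user clicks to the prompt as comma-separated tags, mirroring the makeImage() logic in index.html.

# Sketch: how modifiers.json feeds into the prompt (run from the repo root).
import json

with open('ui/modifiers.json') as f:
    categories = json.load(f)  # list of [title, [modifier, ...]] pairs

for title, tags in categories:
    print(title, '->', len(tags), 'modifiers')

# index.html does: prompt += ", " + activeTags.join(", ")
prompt = 'a photograph of an astronaut riding a horse'
active_tags = ['Realistic', 'HD', 'by Salvador Dali']
prompt += ', ' + ', '.join(active_tags)
print(prompt)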


@@ -0,0 +1,62 @@
import json

class Request:
    prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    # allow_nsfw: bool = False
    precision: str = "autocast" # or "full"
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False

    def to_string(self):
        return f'''
    prompt: {self.prompt}
    seed: {self.seed}
    num_inference_steps: {self.num_inference_steps}
    guidance_scale: {self.guidance_scale}
    w: {self.width}
    h: {self.height}
    precision: {self.precision}
    save_to_disk_path: {self.save_to_disk_path}
    turbo: {self.turbo}
    use_cpu: {self.use_cpu}
    use_full_precision: {self.use_full_precision}'''

class Image:
    data: str # base64
    seed: int
    is_nsfw: bool

    def __init__(self, data, seed):
        self.data = data
        self.seed = seed

    def json(self):
        return {
            "data": self.data,
            "seed": self.seed,
        }

class Response:
    images: list

    def json(self):
        res = {
            "status": 'succeeded',
            "output": [],
        }

        for image in self.images:
            res["output"].append(image.json())

        return res
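
For illustration (a sketch, not part of the diff): the runtime fills a Response with Image objects, and the server sends Response.json() back to the browser.

# Sketch: assembling and serializing a Response using the classes above.
res = Response()
res.images = [Image(data='data:image/png;base64,...', seed=42)]

print(res.json())
# {'status': 'succeeded', 'output': [{'data': 'data:image/png;base64,...', 'seed': 42}]}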

ui/sd_internal/runtime.py (new file, 342 lines)

@@ -0,0 +1,342 @@
import os, re
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import nullcontext

from ldm.util import instantiate_from_config
from optimizedSD.optimUtils import split_weighted_subprompts

from transformers import logging
logging.set_verbosity_error()

# consts
config_yaml = "optimizedSD/v1-inference.yaml"

# api stuff
from . import Request, Response, Image as ResponseImage
import base64
from io import BytesIO

# local
ckpt = None
model = None
modelCS = None
modelFS = None
model_is_half = False
model_fs_is_half = False
device = None
unet_bs = 1
precision = 'autocast'
sampler_plms = None
sampler_ddim = None

# api
def load_model(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast', half_model_fs=False):
    global ckpt, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half

    ckpt = ckpt_to_use
    device = device_to_use
    precision = precision_to_use
    unet_bs = unet_bs_to_use

    sd = load_model_from_config(f"{ckpt}")
    li, lo = [], []
    for key, value in sd.items():
        sp = key.split(".")
        if (sp[0]) == "model":
            if "input_blocks" in sp:
                li.append(key)
            elif "middle_block" in sp:
                li.append(key)
            elif "time_embed" in sp:
                li.append(key)
            else:
                lo.append(key)
    for key in li:
        sd["model1." + key[6:]] = sd.pop(key)
    for key in lo:
        sd["model2." + key[6:]] = sd.pop(key)

    config = OmegaConf.load(f"{config_yaml}")

    model = instantiate_from_config(config.modelUNet)
    _, _ = model.load_state_dict(sd, strict=False)
    model.eval()
    model.cdevice = device
    model.unet_bs = unet_bs
    model.turbo = turbo

    modelCS = instantiate_from_config(config.modelCondStage)
    _, _ = modelCS.load_state_dict(sd, strict=False)
    modelCS.eval()
    modelCS.cond_stage_model.device = device

    modelFS = instantiate_from_config(config.modelFirstStage)
    _, _ = modelFS.load_state_dict(sd, strict=False)
    modelFS.eval()
    del sd

    if device != "cpu" and precision == "autocast":
        model.half()
        modelCS.half()
        model_is_half = True
    else:
        model_is_half = False

    if half_model_fs:
        modelFS.half()
        model_fs_is_half = True
    else:
        model_fs_is_half = False

def mk_img(req: Request):
    global modelFS, device

    res = Response()
    res.images = []

    model.turbo = req.turbo

    if req.use_cpu:
        device = 'cpu'

        if model_is_half:
            print('reloading model for cpu')
            load_model(ckpt, device)
    else:
        device = 'cuda'

        if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
                (precision == 'full' and not req.use_full_precision) or \
                (req.init_image is None and model_fs_is_half) or \
                (req.init_image is not None and not model_fs_is_half):

            print('reloading model for cuda')
            load_model(ckpt, device, model.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))

    model.cdevice = device
    modelCS.cond_stage_model.device = device

    opt_prompt = req.prompt
    opt_seed = req.seed
    opt_n_samples = req.num_outputs
    opt_n_iter = 1
    opt_scale = req.guidance_scale
    opt_C = 4
    opt_H = req.height
    opt_W = req.width
    opt_f = 8
    opt_ddim_steps = req.num_inference_steps
    opt_ddim_eta = 0.0
    opt_strength = req.prompt_strength
    opt_save_to_disk_path = req.save_to_disk_path
    opt_init_img = req.init_image
    opt_format = 'png'

    print(req.to_string(), 'device', device)

    seed_everything(opt_seed)
    batch_size = opt_n_samples

    prompt = opt_prompt
    assert prompt is not None
    data = [batch_size * [prompt]]

    if precision == "autocast" and device != "cpu":
        precision_scope = autocast
    else:
        precision_scope = nullcontext

    if req.init_image is None:
        handler = _txt2img

        init_latent = None
        t_enc = None
    else:
        handler = _img2img

        init_image = load_img(req.init_image)
        init_image = init_image.to(device)

        if device != "cpu" and precision == "autocast":
            init_image = init_image.half()

        modelFS.to(device)

        init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
        init_latent = modelFS.get_first_stage_encoding(modelFS.encode_first_stage(init_image))  # move to latent space

        if device != "cpu":
            mem = torch.cuda.memory_allocated() / 1e6
            modelFS.to("cpu")
            while torch.cuda.memory_allocated() / 1e6 >= mem:
                time.sleep(1)

        assert 0. <= opt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_enc = int(opt_strength * opt_ddim_steps)
        print(f"target t_enc is {t_enc} steps")

    seeds = ""
    with torch.no_grad():
        for n in trange(opt_n_iter, desc="Sampling"):
            for prompts in tqdm(data, desc="data"):
                if opt_save_to_disk_path is not None:
                    sample_path = os.path.join(opt_save_to_disk_path, "_".join(re.split(":| ", prompts[0])))[:150]
                    os.makedirs(sample_path, exist_ok=True)
                    base_count = len(os.listdir(sample_path))

                with precision_scope("cuda"):
                    modelCS.to(device)
                    uc = None
                    if opt_scale != 1.0:
                        uc = modelCS.get_learned_conditioning(batch_size * [""])
                    if isinstance(prompts, tuple):
                        prompts = list(prompts)

                    subprompts, weights = split_weighted_subprompts(prompts[0])
                    if len(subprompts) > 1:
                        c = torch.zeros_like(uc)
                        totalWeight = sum(weights)
                        # normalize each "sub prompt" and add it
                        for i in range(len(subprompts)):
                            weight = weights[i]
                            # if not skip_normalize:
                            weight = weight / totalWeight
                            c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
                    else:
                        c = modelCS.get_learned_conditioning(prompts)

                    # run the handler
                    if handler == _txt2img:
                        x_samples = _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed)
                    else:
                        x_samples = _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed)

                    modelFS.to(device)

                    print("saving images")
                    for i in range(batch_size):
                        x_samples_ddim = modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
                        x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")

                        img = Image.fromarray(x_sample.astype(np.uint8))
                        img_data = img_to_base64_str(img)

                        res.images.append(ResponseImage(data=img_data, seed=opt_seed))

                        if opt_save_to_disk_path is not None:
                            img.save(
                                os.path.join(sample_path, "seed_" + str(opt_seed) + "_" + f"{base_count:05}.{opt_format}")
                            )
                            base_count += 1

                        seeds += str(opt_seed) + ","
                        opt_seed += 1

                    if device != "cpu":
                        mem = torch.cuda.memory_allocated() / 1e6
                        modelFS.to("cpu")
                        while torch.cuda.memory_allocated() / 1e6 >= mem:
                            time.sleep(1)
                    del x_samples

                    print("memory_final = ", torch.cuda.memory_allocated() / 1e6)

    return res

def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed):
    shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]

    if device != "cpu":
        mem = torch.cuda.memory_allocated() / 1e6
        modelCS.to("cpu")
        while torch.cuda.memory_allocated() / 1e6 >= mem:
            time.sleep(1)

    samples_ddim = model.sample(
        S=opt_ddim_steps,
        conditioning=c,
        seed=opt_seed,
        shape=shape,
        verbose=False,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        eta=opt_ddim_eta,
        x_T=start_code,
        sampler='plms',
    )

    return samples_ddim

def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed):
    # encode (scaled latent)
    z_enc = model.stochastic_encode(
        init_latent,
        torch.tensor([t_enc] * batch_size).to(device),
        opt_seed,
        opt_ddim_eta,
        opt_ddim_steps,
    )

    # decode it
    samples_ddim = model.sample(
        t_enc,
        c,
        z_enc,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        sampler='ddim'
    )

    return samples_ddim

# internal
def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

def load_model_from_config(ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    return sd

# utils
def load_img(img_str):
    image = base64_str_to_img(img_str).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from base64")
    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
    image = image.resize((w, h), resample=Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.*image - 1.

# https://stackoverflow.com/a/61114178
def img_to_base64_str(img):
    buffered = BytesIO()
    img.save(buffered, format="PNG")
    buffered.seek(0)
    img_byte = buffered.getvalue()
    img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()
    return img_str

def base64_str_to_img(img_str):
    img_str = img_str[len("data:image/png;base64,"):]
    data = base64.b64decode(img_str)
    buffered = BytesIO(data)
    img = Image.open(buffered)
    return img
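
For illustration (a sketch, not part of the diff): the two helpers above round-trip a PIL image through the data-URL format the API uses.

# Round-trip sketch for img_to_base64_str / base64_str_to_img (assumes Pillow).
from PIL import Image as PILImage

img = PILImage.new('RGB', (64, 64), color=(255, 0, 0))
s = img_to_base64_str(img)
assert s.startswith('data:image/png;base64,')
assert base64_str_to_img(s).size == (64, 64)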

ui/server.py (new file, 118 lines)

@@ -0,0 +1,118 @@
import traceback

import sys
import os

SCRIPT_DIR = os.getcwd()
print('started in ', SCRIPT_DIR)

SD_UI_DIR = os.getenv('SD_UI_PATH', None)
sys.path.append(os.path.dirname(SD_UI_DIR))

OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder

from fastapi import FastAPI, HTTPException
from starlette.responses import FileResponse
from pydantic import BaseModel

from sd_internal import Request, Response

app = FastAPI()

model_loaded = False
model_is_loading = False

modifiers_cache = None

outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
    prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    # allow_nsfw: bool = False
    save_to_disk: bool = False
    turbo: bool = True
    use_cpu: bool = False
    use_full_precision: bool = False

@app.get('/')
def read_root():
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'))

@app.get('/ping')
async def ping():
    global model_loaded, model_is_loading

    try:
        if model_loaded:
            return {'OK'}

        if model_is_loading:
            return {'ERROR'}

        model_is_loading = True

        from sd_internal import runtime
        runtime.load_model(ckpt_to_use="sd-v1-4.ckpt")

        model_loaded = True
        model_is_loading = False

        return {'OK'}
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.post('/image')
async def image(req: ImageRequest):
    from sd_internal import runtime

    r = Request()
    r.prompt = req.prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_cpu = req.use_cpu
    r.use_full_precision = req.use_full_precision

    if req.save_to_disk:
        r.save_to_disk_path = outpath

    try:
        res: Response = runtime.mk_img(r)
        return res.json()
    except Exception as e:
        print(traceback.format_exc())
        return HTTPException(status_code=500, detail=str(e))

@app.get('/media/ding.mp3')
def read_ding():
    return FileResponse(os.path.join(SD_UI_DIR, 'media/ding.mp3'))

@app.get('/modifiers.json')
def read_modifiers():
    return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'))

@app.get('/output_dir')
def read_home_dir():
    return {outpath}

# start the browser ui
import webbrowser; webbrowser.open('http://localhost:9000')
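
For reference, a minimal client sketch for the API above (not part of the diff; assumes the third-party requests package and a server already running on localhost:9000): poll /ping until the model is loaded, then POST an ImageRequest to /image and decode the returned base64 images.

# Client sketch for the API above (assumes `pip install requests`).
import base64, time
import requests

# /ping lazily loads the model on first call; it returns ["OK"] once ready.
while requests.get('http://localhost:9000/ping').json() != ['OK']:
    time.sleep(5)

req = {'prompt': 'a photograph of an astronaut riding a horse', 'seed': 42}
res = requests.post('http://localhost:9000/image', json=req).json()

# Response.json() yields {"status": "succeeded", "output": [{"data": ..., "seed": ...}]}
for img in res['output']:
    png = base64.b64decode(img['data'].split(',', 1)[1])  # strip the data: URL prefix
    with open(f"seed_{img['seed']}.png", 'wb') as f:
        f.write(png)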