UI-side changes for selecting multiple GPUs and keeping the "Use CPU" option synchronized with the backend. This change isn't ready to ship yet; it still needs Python-side changes to support the req.render_device config.
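For context: with these changes the UI sends render_device as either 'cpu' or a comma-separated list of CUDA device indices (e.g. '0,1'), built by getCurrentRenderDeviceSelection() in main.js below. A rough sketch of the kind of Python-side parsing that is still missing (the helper name parse_render_device is hypothetical, not part of this commit):

    # Hypothetical sketch of the still-missing backend handling -- not part of this commit.
    def parse_render_device(render_device: str) -> list:
        """Split the UI's render_device value into a list of device ids."""
        if render_device == 'cpu':
            return ['cpu']
        # the multi-GPU <select> joins its selected values with commas, e.g. '0,1'
        return [int(d) for d in render_device.split(',') if d.strip()]

    # e.g. parse_render_device('0,1') -> [0, 1], parse_render_device('cpu') -> ['cpu']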

cmdr2 2022-11-09 19:17:44 +05:30
parent 087c10d52d
commit 058ce6fe82
6 changed files with 109 additions and 15 deletions

ui/index.html

@@ -289,13 +289,13 @@
 </div>
 </body>
-<script src="media/js/parameters.js?v=3"></script>
+<script src="media/js/parameters.js?v=4"></script>
 <script src="media/js/plugins.js?v=1"></script>
 <script src="media/js/utils.js?v=6"></script>
 <script src="media/js/inpainting-editor.js?v=1"></script>
 <script src="media/js/image-modifiers.js?v=6"></script>
-<script src="media/js/auto-save.js?v=6"></script>
-<script src="media/js/main.js?v=9"></script>
+<script src="media/js/auto-save.js?v=7"></script>
+<script src="media/js/main.js?v=10"></script>
 <script src="media/js/themes.js?v=4"></script>
 <script src="media/js/dnd.js?v=8"></script>
 <script>
@@ -306,6 +306,7 @@ async function init() {
     await getAppConfig()
     await loadModifiers()
     await loadUIPlugins()
+    await getDevices()
 
     setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
     healthCheck()

ui/media/js/auto-save.js

@@ -34,7 +34,6 @@ const SETTINGS_IDS_LIST = [
     "diskPath",
     "sound_toggle",
     "turbo",
-    "use_cpu",
     "use_full_precision",
     "auto_save_settings"
 ]

ui/media/js/main.js

@@ -1,6 +1,7 @@
 "use strict" // Opt in to a restricted variant of JavaScript
 const HEALTH_PING_INTERVAL = 5 // seconds
 const MAX_INIT_IMAGE_DIMENSION = 768
+const MIN_GPUS_TO_SHOW_SELECTION = 1
 
 const IMAGE_REGEX = new RegExp('data:image/[A-Za-z]+;base64')
@@ -26,6 +27,7 @@ let maskImageSelector = document.querySelector("#mask")
 let maskImagePreview = document.querySelector("#mask_preview")
 let turboField = document.querySelector('#turbo')
 let useCPUField = document.querySelector('#use_cpu')
+let useGPUsField = document.querySelector('#use_gpus')
 let useFullPrecisionField = document.querySelector('#use_full_precision')
 let saveToDiskField = document.querySelector('#save_to_disk')
 let diskPathField = document.querySelector('#diskPath')
@@ -771,7 +773,7 @@ function getCurrentUserRequest() {
         height: heightField.value,
         // allow_nsfw: allowNSFWField.checked,
         turbo: turboField.checked,
-        use_cpu: useCPUField.checked,
+        render_device: getCurrentRenderDeviceSelection(),
         use_full_precision: useFullPrecisionField.checked,
         use_stable_diffusion_model: stableDiffusionModelField.value,
         use_vae_model: vaeModelField.value,
@@ -807,6 +809,14 @@ function getCurrentUserRequest() {
     return newTask
 }
 
+function getCurrentRenderDeviceSelection() {
+    if (useCPUField.checked) {
+        return 'cpu'
+    }
+    return $(useGPUsField).val().join(',')
+}
+
 function makeImage() {
     if (!isServerAvailable()) {
         alert('The server is not available.')
@@ -1113,6 +1123,15 @@ promptStrengthSlider.addEventListener('input', updatePromptStrength)
 promptStrengthField.addEventListener('input', updatePromptStrengthSlider)
 updatePromptStrength()
 
+useCPUField.addEventListener('click', function() {
+    let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
+    if (this.checked) {
+        gpuSettingEntry.style.display = 'none'
+    } else if ($(useGPUsField).val().length >= MIN_GPUS_TO_SHOW_SELECTION) {
+        gpuSettingEntry.style.display = ''
+    }
+})
+
 async function changeAppConfig(configDelta) {
     // if (!isServerAvailable()) {
     //     // logError('The server is still starting up..')
@@ -1314,6 +1333,43 @@ async function getDiskPath() {
     }
 }
 
+async function getDevices() {
+    try {
+        let res = await fetch('/get/devices')
+        if (res.status === 200) {
+            res = await res.json()
+            let allDeviceIds = Object.keys(res['all']).filter(d => d !== 'cpu')
+            let activeDeviceIds = Object.keys(res['active']).filter(d => d !== 'cpu')
+            if (activeDeviceIds.length === 0) {
+                useCPUField.checked = true
+            }
+            if (allDeviceIds.length < MIN_GPUS_TO_SHOW_SELECTION) {
+                let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
+                gpuSettingEntry.style.display = 'none'
+                if (allDeviceIds.length === 0) {
+                    useCPUField.checked = true
+                    useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
+                }
+            }
+            useGPUsField.innerHTML = ''
+            allDeviceIds.forEach(device => {
+                let deviceName = res['all'][device]
+                let selected = (activeDeviceIds.includes(device) ? 'selected' : '')
+                let deviceOption = `<option value="${device}" ${selected}>${deviceName}</option>`
+                useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
+            })
+        }
+    } catch (e) {
+        console.log('error fetching devices', e)
+    }
+}
+
 /* setup popup handlers */
 document.querySelectorAll('.popup').forEach(popup => {

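For reference, the new getDevices() call above consumes GET /get/devices, whose payload is built by task_manager.get_devices() further down in this commit: two maps, 'all' (every compatible device plus 'cpu') and 'active' (devices that currently have a render thread), keyed by device id and mapping to a display name. A small sketch for inspecting it locally (assumes the server is running on port 9000 and exposes the /get/devices route the UI fetches; the requests package and the example device name are assumptions):

    # Sketch only -- inspect the payload that getDevices() consumes.
    import requests

    devices = requests.get('http://localhost:9000/get/devices').json()
    # Expected shape (CUDA indices arrive as string keys in JSON; names are illustrative):
    # {
    #     "all":    {"0": "NVIDIA GeForce RTX 3060", "cpu": "<processor name>"},
    #     "active": {"0": "NVIDIA GeForce RTX 3060"}
    # }
    print('compatible devices:', list(devices['all'].keys()))
    print('active devices:', list(devices['active'].keys()))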
ui/media/js/parameters.js

@@ -8,6 +8,7 @@
 var ParameterType = {
     checkbox: "checkbox",
     select: "select",
+    select_multiple: "select_multiple",
     custom: "custom",
 };
@@ -72,6 +73,13 @@ var PARAMETERS = [
         note: "warning: this will be *very* slow",
         default: false,
     },
+    {
+        id: "use_gpus",
+        type: ParameterType.select_multiple,
+        label: "GPUs to use",
+        note: "select multiple GPUs to process in parallel",
+        default: false,
+    },
     {
         id: "use_full_precision",
         type: ParameterType.checkbox,
@@ -95,6 +103,13 @@ var PARAMETERS = [
     },
 ];
 
+function getParameterSettingsEntry(id) {
+    let parameter = PARAMETERS.filter(p => p.id === id)
+    if (parameter.length === 0) {
+        return
+    }
+    return parameter[0].settingsEntry
+}
+
 function getParameterElement(parameter) {
     switch (parameter.type) {
@@ -102,8 +117,10 @@ function getParameterElement(parameter) {
         var is_checked = parameter.default ? " checked" : "";
         return `<input id="${parameter.id}" name="${parameter.id}"${is_checked} type="checkbox">`
     case ParameterType.select:
+    case ParameterType.select_multiple:
         var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
-        return `<select id="${parameter.id}" name="${parameter.id}">${options}</select>`
+        var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
+        return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
     case ParameterType.custom:
         return parameter.render(parameter)
     default:
@@ -118,10 +135,12 @@ function initParameters() {
     PARAMETERS.forEach(parameter => {
         var element = getParameterElement(parameter)
         var note = parameter.note ? `<small>${parameter.note}</small>` : "";
-        var newrow = `<tr>
-        <td><label for="${parameter.id}">${parameter.label}</label></td>
-        <td><div>${element}${note}<div></td></tr>`
-        parametersTable.insertAdjacentHTML("beforeend", newrow)
+        var newrow = document.createElement('tr')
+        newrow.innerHTML = `
+            <td><label for="${parameter.id}">${parameter.label}</label></td>
+            <td><div>${element}${note}<div></td>`
+        parametersTable.appendChild(newrow)
+        parameter.settingsEntry = newrow
     })
 }

ui/sd_internal/task_manager.py

@@ -9,11 +9,12 @@ import traceback
 TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
 
+import torch
 import queue, threading, time, weakref
 from typing import Any, Generator, Hashable, Optional, Union
 from pydantic import BaseModel
-from sd_internal import Request, Response
+from sd_internal import Request, Response, runtime
 
 THREAD_NAME_PREFIX = 'Runtime-Render/'
 ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
@@ -356,20 +357,36 @@ def get_cached_task(session_id:str, update_ttl:bool=False):
     return task_cache.tryGet(session_id)
 
 def get_devices():
+    devices = {
+        'all': {},
+        'active': {},
+    }
+
+    # list the compatible devices
+    gpu_count = torch.cuda.device_count()
+    for device in range(gpu_count):
+        if runtime.device_would_fail(device):
+            continue
+        devices['all'].update({device: torch.cuda.get_device_name(device)})
+    devices['all'].update({'cpu': runtime.get_processor_name()})
+
+    # list the activated devices
     if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('get_devices' + ERR_LOCK_FAILED)
     try:
-        device_dict = {}
         for rthread in render_threads:
             if not rthread.is_alive():
                 continue
             weak_data = weak_thread_data.get(rthread)
             if not weak_data or not 'device' in weak_data or not 'device_name' in weak_data:
                 continue
-            device_dict.update({weak_data['device']:weak_data['device_name']})
-        return device_dict
+            devices['active'].update({weak_data['device']: weak_data['device_name']})
     finally:
         manager_lock.release()
+    return devices
 
 def is_first_cuda_device(device):
     from . import runtime # When calling runtime from outside thread_render DO NOT USE thread specific attributes or functions.
     return runtime.is_first_cuda_device(device)

ui/server.py

@@ -336,6 +336,8 @@ def render(req : task_manager.ImageRequest):
         print('WARNING Replace {use_cpu: true} by {render_device: "cpu"}')
         req.render_device = 'cpu'
         del req.use_cpu
+    if req.render_device != 'cpu':
+        req.render_device = int(req.render_device)
     if req.render_device and task_manager.is_alive(req.render_device) <= 0: raise HTTPException(status_code=403, detail=f'{req.render_device} rendering is not enabled in config.json or the thread has died...') # HTTP403 Forbidden
     if req.use_face_correction and task_manager.is_alive(0) <= 0: #TODO Remove when GFPGANer is fixed upstream.
         raise HTTPException(status_code=412, detail=f'GFPGANer only works GPU:0, use CUDA_VISIBLE_DEVICES if GFPGANer is needed on a specific GPU.') # HTTP412 Precondition Failed
@@ -437,7 +439,7 @@ if 'render_devices' in config: # Start a new thread for each device.
         print('WARNING: No active render devices after loading config. Validate "render_devices" in config.json')
         print('Loading default render devices to replace invalid render_devices field from config', config['render_devices'])
 
-if task_manager.is_alive() <= 0: # Either no defauls or no devices after loading config.
+if task_manager.is_alive() <= 0: # Either no defaults or no devices after loading config.
     # Select best GPU device using free memory, if more than one device.
     if task_manager.start_render_thread('auto'): # Detect best device for renders
         # if cuda:0 is missing, another cuda device is better. try to start it...
@@ -455,7 +457,7 @@ if is_using_a_gpu and task_manager.is_alive(0) <= 0:
     print('Add the line "@set CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.bat')
     print('Add the line "CUDA_VISIBLE_DEVICES=N" where N is the GPUs to use to config.sh')
 
-print('active devices', task_manager.get_devices())
+# print('active devices', task_manager.get_devices())
 
 # start the browser ui
 import webbrowser; webbrowser.open('http://localhost:9000')
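
Note: the int(req.render_device) coercion added in server.py above assumes a single GPU index and would raise ValueError once the UI starts sending a comma-separated selection such as '0,1'. A hedged sketch of the kind of validation the follow-up Python work might add, reusing task_manager.get_devices() (the function name validate_render_devices is hypothetical, not part of this commit):

    # Hypothetical follow-up sketch -- not part of this commit.
    from fastapi import HTTPException

    def validate_render_devices(render_device: str, task_manager):
        """Reject requested device ids that are not in task_manager.get_devices()['all']."""
        if not render_device or render_device == 'cpu':
            return
        known = task_manager.get_devices()['all']  # {cuda_index: name, ..., 'cpu': name}
        for d in render_device.split(','):
            if int(d) not in known:
                raise HTTPException(status_code=403, detail=f'render device {d} is not available')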