Merge pull request #478 from madrang/beta

Changed update_render_threads to use SetAppConfigRequest.
commit 6530e45178 by cmdr2, 2022-11-15 11:04:52 +05:30 (committed by GitHub)
4 changed files with 40 additions and 34 deletions
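
Summary of the change: the UI no longer sends a `render_device` with every render request; it posts the device selection through `SetAppConfigRequest`, and the server validates it, saves it to config, and refreshes the render threads. As a rough illustration only, a client could now switch devices roughly like this (the `/app_config` path, the port, and the helper name are assumptions, not taken from this diff):

```python
# Hypothetical client sketch: persist the device selection via the app-config
# endpoint instead of attaching render_device to each /render request.
# The '/app_config' path and port 9000 are assumptions, not shown in this diff.
import requests

def set_render_devices(render_devices, base_url='http://localhost:9000'):
    resp = requests.post(f'{base_url}/app_config',
                         json={'render_devices': render_devices})
    resp.raise_for_status()
    return resp.json()

set_render_devices('cuda:0,cuda:1')  # comma-joined GPUs, as the UI builds them
set_render_devices('cpu')            # CPU only
set_render_devices('auto')           # let the server pick
```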

File 1 of 4:

@@ -803,7 +803,6 @@ function getCurrentUserRequest() {
         height: heightField.value,
         // allow_nsfw: allowNSFWField.checked,
         turbo: turboField.checked,
-        render_device: getCurrentRenderDeviceSelection(),
         use_full_precision: useFullPrecisionField.checked,
         use_stable_diffusion_model: stableDiffusionModelField.value,
         use_vae_model: vaeModelField.value,
@@ -839,19 +838,6 @@ function getCurrentUserRequest() {
     return newTask
 }
 
-function getCurrentRenderDeviceSelection() {
-    let selectedGPUs = $('#use_gpus').val()
-
-    if (useCPUField.checked && !autoPickGPUsField.checked) {
-        return 'cpu'
-    }
-    if (autoPickGPUsField.checked || selectedGPUs.length == 0) {
-        return 'auto'
-    }
-
-    return selectedGPUs.join(',')
-}
-
 function makeImage() {
     if (!isServerAvailable()) {
         alert('The server is not available.')
@@ -1165,6 +1151,19 @@ promptStrengthSlider.addEventListener('input', updatePromptStrength)
 promptStrengthField.addEventListener('input', updatePromptStrengthSlider)
 updatePromptStrength()
 
+function getCurrentRenderDeviceSelection() {
+    let selectedGPUs = $('#use_gpus').val()
+
+    if (useCPUField.checked && !autoPickGPUsField.checked) {
+        return 'cpu'
+    }
+    if (autoPickGPUsField.checked || selectedGPUs.length == 0) {
+        return 'auto'
+    }
+
+    return selectedGPUs.join(',')
+}
+
 useCPUField.addEventListener('click', function() {
     let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
     let autoPickGPUSettingEntry = getParameterSettingsEntry('auto_pick_gpus')
@@ -1184,11 +1183,19 @@ useCPUField.addEventListener('click', function() {
         }
         gpuSettingEntry.style.display = (autoPickGPUsField.checked ? 'none' : '')
     }
+
+    changeAppConfig({
+        'render_devices': getCurrentRenderDeviceSelection()
+    })
 })
 
 useGPUsField.addEventListener('click', function() {
     let selectedGPUs = $('#use_gpus').val()
     autoPickGPUsField.checked = (selectedGPUs.length === 0)
+
+    changeAppConfig({
+        'render_devices': getCurrentRenderDeviceSelection()
+    })
 })
 
 autoPickGPUsField.addEventListener('click', function() {
@@ -1198,6 +1205,10 @@ autoPickGPUsField.addEventListener('click', function() {
     let gpuSettingEntry = getParameterSettingsEntry('use_gpus')
     gpuSettingEntry.style.display = (this.checked ? 'none' : '')
+
+    changeAppConfig({
+        'render_devices': getCurrentRenderDeviceSelection()
+    })
 })
 
 async function changeAppConfig(configDelta) {

File 2 of 4:

@@ -13,10 +13,12 @@ def get_device_delta(render_devices, active_devices):
     active_devices: ['cpu', 'cuda:N'...]
     '''
-    if render_devices is not None:
-        if render_devices in ('cpu', 'auto'):
+    if render_devices in ('cpu', 'auto'):
+        render_devices = [render_devices]
+    elif render_devices is not None:
+        if isinstance(render_devices, str):
             render_devices = [render_devices]
-        elif isinstance(render_devices, list) and len(render_devices) > 0:
+        if isinstance(render_devices, list) and len(render_devices) > 0:
             render_devices = list(filter(lambda x: x.startswith('cuda:'), render_devices))
             if len(render_devices) == 0:
                 raise Exception('Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}')
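
The practical effect of the reordered branches above is that `get_device_delta` now also accepts a bare device string such as `'cuda:0'`, which the old code only handled as part of a list. A minimal standalone sketch of just that normalization step, using a hypothetical helper name and assuming the behaviour shown in the diff:

```python
# Standalone sketch of the normalization above (hypothetical helper, not the
# project's function); it mirrors the accepted shapes after this change.
def normalize_render_devices(render_devices):
    if render_devices in ('cpu', 'auto'):
        return [render_devices]
    if render_devices is None:
        return None
    if isinstance(render_devices, str):   # newly handled: bare 'cuda:N' string
        render_devices = [render_devices]
    devices = [d for d in render_devices if d.startswith('cuda:')]
    if not devices:
        raise ValueError('render_devices must be "cpu", "auto", or "cuda:N" entries')
    return devices

assert normalize_render_devices('auto') == ['auto']
assert normalize_render_devices('cuda:1') == ['cuda:1']            # now valid
assert normalize_render_devices(['cuda:0', 'cuda:1']) == ['cuda:0', 'cuda:1']
```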

File 3 of 4:

@@ -40,7 +40,7 @@ class RenderTask(): # Task with output queue and completion lock.
     def __init__(self, req: Request):
         self.request: Request = req # Initial Request
         self.response: Any = None # Copy of the last reponse
-        self.render_device = None
+        self.render_device = None # Select the task affinity. (Not used to change active devices).
         self.temp_images:list = [None] * req.num_outputs * (1 if req.show_only_filtered_image else 2)
         self.error: Exception = None
         self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
@@ -72,7 +72,7 @@ class ImageRequest(BaseModel):
     save_to_disk_path: str = None
     turbo: bool = True
     use_cpu: bool = False ##TODO Remove after UI and plugins transition.
-    render_device: str = 'auto'
+    render_device: str = None # Select the task affinity. (Not used to change active devices).
     use_full_precision: bool = False
     use_face_correction: str = None # or "GFPGANv1.3"
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"

File 4 of 4:

@@ -161,6 +161,8 @@ async def setAppConfig(req : SetAppConfigRequest):
     config = getConfig()
     if req.update_branch:
         config['update_branch'] = req.update_branch
+    if req.render_devices:
+        update_render_threads_from_request(req.render_devices)
     try:
         setConfig(config)
         return JSONResponse({'status': 'OK'}, headers=NOCACHE_HEADERS)
@@ -287,27 +289,18 @@ def save_render_devices_to_config(render_devices):
     setConfig(config)
 
-def update_render_threads_on_request(req : task_manager.ImageRequest):
-    if req.use_cpu: # TODO Remove after transition.
-        print('WARNING Replace {use_cpu: true} by {render_device: "cpu"}')
-        req.render_device = 'cpu'
-        del req.use_cpu
-
-    if req.render_device not in ('cpu', 'auto') and not req.render_device.startswith('cuda:'):
-        raise HTTPException(status_code=400, detail=f'Invalid render device requested: {req.render_device}')
-
-    if req.render_device.startswith('cuda:'):
-        req.render_device = req.render_device.split(',')
-
-    save_render_devices_to_config(req.render_device)
-    del req.render_device
+def update_render_threads_from_request(render_device):
+    if render_device not in ('cpu', 'auto') and not render_device.startswith('cuda:'):
+        raise HTTPException(status_code=400, detail=f'Invalid render device requested: {render_device}')
 
+    if render_device.startswith('cuda:'):
+        render_device = render_device.split(',')
+
+    save_render_devices_to_config(render_device)
     update_render_threads()
 
 @app.post('/render')
 def render(req : task_manager.ImageRequest):
-    update_render_threads_on_request(req)
-
     try:
         save_model_to_config(req.use_stable_diffusion_model, req.use_vae_model)
         req.use_stable_diffusion_model = resolve_ckpt_to_use(req.use_stable_diffusion_model)
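
For reference, the new `update_render_threads_from_request` accepts exactly what the UI's `getCurrentRenderDeviceSelection()` produces: `'cpu'`, `'auto'`, or a comma-joined `'cuda:N'` string, which it splits back into a list before saving and refreshing the render threads. A small hedged sketch of that validate-and-split step in isolation (hypothetical helper, no FastAPI wiring):

```python
# Hedged sketch of the server-side validate-and-split step, isolated from the
# FastAPI handler; the helper name is made up for illustration.
def parse_render_device(render_device):
    if render_device not in ('cpu', 'auto') and not render_device.startswith('cuda:'):
        raise ValueError(f'Invalid render device requested: {render_device}')
    if render_device.startswith('cuda:'):
        # The UI joins multiple GPUs with ',', so split back into a list.
        return render_device.split(',')
    return render_device

assert parse_render_device('cpu') == 'cpu'
assert parse_render_device('auto') == 'auto'
assert parse_render_device('cuda:0,cuda:1') == ['cuda:0', 'cuda:1']
```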