Removed CPU from the devices allowed to run GFPGANer.

Added a clear error message for the user.
This commit is contained in:
Marc-Andre Ferland 2022-10-19 03:02:26 -04:00
parent 3fc66ec525
commit 21afe077d7
3 changed files with 28 additions and 24 deletions
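
The net effect of the three changes below: a task that requests face correction now fails fast with an explicit error when cuda:0 is unavailable or when the task is forced to CPU, instead of having the GFPGANer filter silently stripped or the task rerouted. A minimal sketch of the new guard, using simplified stand-ins for the project's task type and is_alive() helper (names here are illustrative, not the repo's exact API):

# Sketch of the new scheduling guard (simplified stand-ins, not the
# repo's exact types): reject GFPGANer tasks with a clear error instead
# of silently rewriting the request.
class QueuedTask:
    def __init__(self, use_face_correction, use_cpu):
        self.use_face_correction = use_face_correction
        self.use_cpu = use_cpu
        self.error = None

def guard(task, cuda0_alive):
    if task.use_face_correction:
        if not cuda0_alive:  # mirrors is_alive(0) <= 0
            task.error = Exception('cuda:0 is not available with the current config. Remove GFPGANer filter to run task.')
        elif task.use_cpu:
            task.error = Exception('Cpu cannot be used to run this task. Remove GFPGANer filter to run task.')
    return task

task = guard(QueuedTask(use_face_correction=True, use_cpu=True), cuda0_alive=True)
print(task.error)  # -> Cpu cannot be used to run this task. ...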

View File

@@ -524,11 +524,15 @@ async function doMakeImage(task) {
                 throw new Error('Connexion with server lost.')
             }
         } while (serverState.time > (Date.now() - (10 * 1000)) && serverState.task !== renderRequest.task)
-        if (serverState.session !== 'pending' && serverState.session !== 'running' && serverState.session !== 'buffer') {
-            if (serverState.session === 'stopped') {
-                return false
-            }
-            throw new Error('Unexpected server task state: ' + serverState.session || 'Undefined')
+        switch(serverState.session) {
+            case 'pending':
+            case 'running':
+            case 'buffer':
+            case 'error': // Still valid, Update UI with error message
+                break
+            case 'stopped':
+                return false
+            default:
+                throw new Error('Unexpected server task state: ' + serverState.session || 'Undefined')
         }
         while (serverState.task === renderRequest.task && serverState.session === 'pending') {
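
For context on the client change above: the switch deliberately lets 'pending', 'running', 'buffer', and now 'error' fall through to a single break (so the UI can keep polling and surface the error message), stops cleanly on 'stopped', and only throws for unrecognized states. A rough Python restatement of the same classification, for illustration only (the real logic is the JavaScript above):

# Illustrative restatement of the client-side state handling
# (stand-in names; the actual code is the JavaScript switch above).
def classify_session(session):
    if session in ('pending', 'running', 'buffer', 'error'):
        return 'continue'   # still valid; 'error' lets the UI show a message
    if session == 'stopped':
        return 'stop'       # the JS returns false here
    raise ValueError('Unexpected server task state: %s' % (session or 'Undefined'))

assert classify_session('error') == 'continue'
assert classify_session('stopped') == 'stop'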

View File

@@ -250,9 +250,9 @@ def load_model_gfpgan():
     if thread_data.gfpgan_file is None:
         print('load_model_gfpgan called without setting gfpgan_file')
         return
-    if thread_data.device != 'cpu' and not is_first_cuda_device(thread_data.device):
+    if not is_first_cuda_device(thread_data.device):
         #TODO Remove when fixed - A bug with GFPGANer and facexlib needs to be fixed before use on other devices.
-        raise Exception(f'Current device {torch.device(thread_data.device)} is not {torch.device(0)}.')
+        raise Exception(f'Current device {torch.device(thread_data.device)} is not {torch.device(0)}. Cannot run GFPGANer.')
     model_path = thread_data.gfpgan_file + ".pth"
     thread_data.model_gfpgan = GFPGANer(device=torch.device(thread_data.device), model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
     print('loaded', thread_data.gfpgan_file, 'to', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
@@ -369,10 +369,10 @@ def do_mk_img(req: Request):
     if needs_model_reload:
         load_model_ckpt()
-    if req.use_face_correction != thread_data.gfpgan_file:
+    if req.use_face_correction is not None and req.use_face_correction != thread_data.gfpgan_file:
         thread_data.gfpgan_file = req.use_face_correction
         load_model_gfpgan()
-    if req.use_upscale != thread_data.real_esrgan_file:
+    if req.use_upscale is not None and req.use_upscale != thread_data.real_esrgan_file:
         thread_data.real_esrgan_file = req.use_upscale
         load_model_real_esrgan()

View File

@@ -225,23 +225,18 @@ def thread_render(device):
             continue
         try: # Select a render task.
             for queued_task in tasks_queue:
-                cpu_alive = is_alive('cpu')
                 if queued_task.request.use_face_correction: #TODO Remove when fixed - A bug with GFPGANer and facexlib needs to be fixed before use on other devices.
-                    # Allows GFPGANer on cuda:0 and use cpu only when cuda:0 is not available.
-                    first_device_alive = True if is_alive(0) >= 1 else False
-                    if cpu_alive <= 0 and not first_device_alive:
-                        queued_task.request.use_face_correction = False
-                        print('cuda:0 and cpu are not available with the current config. Removed GFPGANer filter to run task.')
-                        continue
-                    if not queued_task.request.use_cpu:
-                        if first_device_alive:
-                            if not runtime.is_first_cuda_device(runtime.thread_data.device):
-                                continue # Wait for cuda:0
-                        elif cpu_alive > 0:
-                            print('cuda:0 is not available with the current config. Forcing task requiring GFPGANer to cpu.')
-                            queued_task.request.use_cpu = True
-                            continue
-                if queued_task.request.use_cpu and runtime.thread_data.device != 'cpu' and cpu_alive > 0:
+                    if is_alive(0) <= 0: # Allows GFPGANer only on cuda:0.
+                        queued_task.error = Exception('cuda:0 is not available with the current config. Remove GFPGANer filter to run task.')
+                        task = queued_task
+                        continue
+                    if queued_task.request.use_cpu:
+                        queued_task.error = Exception('Cpu cannot be used to run this task. Remove GFPGANer filter to run task.')
+                        task = queued_task
+                        continue
+                    if not runtime.is_first_cuda_device(runtime.thread_data.device):
+                        continue # Wait for cuda:0
+                if queued_task.request.use_cpu and runtime.thread_data.device != 'cpu' and is_alive('cpu') > 0:
                     continue # CPU Tasks, Skip GPU device
                 if not queued_task.request.use_cpu and runtime.thread_data.device == 'cpu' and is_alive() > 1: # cpu is alive, so need more than one.
                     continue # GPU Tasks, don't run on CPU unless there is nothing else.
@@ -254,6 +249,11 @@ def thread_render(device):
         if task is None:
             time.sleep(1)
             continue
+        if task.error is not None:
+            print(task.error)
+            task.response = { "status": 'failed', "detail": str(task.error) }
+            task.buffer_queue.put(json.dumps(task.response))
+            continue
         #if current_model_path != task.request.use_stable_diffusion_model:
         #    preload_model(task.request.use_stable_diffusion_model)
         if current_state_error:
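
When the scheduler attaches an error, the render thread now short-circuits as shown above and pushes a failure payload onto the task's buffer queue for the client to read. A minimal sketch of what ends up on the wire, assuming a plain queue.Queue as the buffer (the repo's actual queue type may differ):

import json
import queue

# Sketch of the new failure path in thread_render: serialize the error
# and hand it to the streaming buffer the client polls.
buffer_queue = queue.Queue()
error = Exception('cuda:0 is not available with the current config. Remove GFPGANer filter to run task.')
response = { "status": 'failed', "detail": str(error) }
buffer_queue.put(json.dumps(response))

print(buffer_queue.get())
# {"status": "failed", "detail": "cuda:0 is not available with the current config. Remove GFPGANer filter to run task."}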