Display the failure detail at that step when one is present.

Was checking the status code on the parsed JSON object, not on the server response.
This commit is contained in:
Marc-Andre Ferland 2022-10-19 05:10:37 -04:00
parent 3bdc90451a
commit 4e5ddca3bd
2 changed files with 16 additions and 6 deletions

View File

@ -509,10 +509,10 @@ async function doMakeImage(task) {
}) })
renderRequest = await res.json() renderRequest = await res.json()
// status_code 503, already a task running. // status_code 503, already a task running.
} while (renderRequest.status_code === 503 && await asyncDelay(30 * 1000)) } while (res.status === 503 && await asyncDelay(30 * 1000))
if (typeof renderRequest?.stream !== 'string') { if (typeof renderRequest?.stream !== 'string') {
console.log('Endpoint response: ', renderRequest) console.log('Endpoint response: ', renderRequest)
throw new Error('Endpoint response does not contains a response stream url.') throw new Error(renderRequest.detail || 'Endpoint response does not contains a response stream url.')
} }
task['taskStatusLabel'].innerText = "Waiting" task['taskStatusLabel'].innerText = "Waiting"
task['taskStatusLabel'].classList.add('waitingTaskLabel') task['taskStatusLabel'].classList.add('waitingTaskLabel')

View File

@ -237,10 +237,20 @@ def thread_render(device):
continue continue
if not runtime.is_first_cuda_device(runtime.thread_data.device): if not runtime.is_first_cuda_device(runtime.thread_data.device):
continue # Wait for cuda:0 continue # Wait for cuda:0
if queued_task.request.use_cpu and runtime.thread_data.device != 'cpu' and is_alive('cpu') > 0: if queued_task.request.use_cpu and runtime.thread_data.device != 'cpu':
continue # CPU Tasks, Skip GPU device if is_alive('cpu') > 0:
if not queued_task.request.use_cpu and runtime.thread_data.device == 'cpu' and is_alive() > 1: # cpu is alive, so need more than one. continue # CPU Tasks, Skip GPU device
continue # GPU Tasks, don't run on CPU unless there is nothing else. else:
queued_task.error = Exception('Cpu is not enabled in render_devices.')
task = queued_task
continue
if not queued_task.request.use_cpu and runtime.thread_data.device == 'cpu':
if is_alive() > 1: # cpu is alive, so need more than one.
continue # GPU Tasks, don't run on CPU unless there is nothing else.
else:
queued_task.error = Exception('No active gpu found. Please check the error message in the command-line window at startup.')
task = queued_task
continue
task = queued_task task = queued_task
break break
if task is not None: if task is not None: