Better reporting of model load errors — the error report is now sent to the browser UI during the next image-rendering task

This commit is contained in:
cmdr2 2023-05-24 16:02:53 +05:30
parent f641e6e69d
commit 8554b0eab2
3 changed files with 24 additions and 1 deletion

View File

@ -53,15 +53,21 @@ def load_default_models(context: Context):
scan_model=context.model_paths[model_type] != None scan_model=context.model_paths[model_type] != None
and not context.model_paths[model_type].endswith(".safetensors"), and not context.model_paths[model_type].endswith(".safetensors"),
) )
if model_type in context.model_load_errors:
del context.model_load_errors[model_type]
except Exception as e: except Exception as e:
log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]") log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
log.exception(e) log.exception(e)
del context.model_paths[model_type] del context.model_paths[model_type]
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def unload_all(context: Context):
    """Unload every known model type from *context*.

    Also discards any load error previously recorded for a model type,
    since the failed model is no longer resident after the unload.
    """
    for kind in KNOWN_MODEL_TYPES:
        unload_model(context, kind)
        # forget a stale load error once the model slot has been released
        context.model_load_errors.pop(kind, None)
def resolve_model_to_use(model_name: str = None, model_type: str = None): def resolve_model_to_use(model_name: str = None, model_type: str = None):
@ -132,7 +138,14 @@ def reload_models_if_necessary(context: Context, task_data: TaskData):
context.model_paths[model_type] = model_path_in_req context.model_paths[model_type] = model_path_in_req
action_fn = unload_model if context.model_paths[model_type] is None else load_model action_fn = unload_model if context.model_paths[model_type] is None else load_model
try:
action_fn(context, model_type, scan_model=False) # we've scanned them already action_fn(context, model_type, scan_model=False) # we've scanned them already
if model_type in context.model_load_errors:
del context.model_load_errors[model_type]
except Exception as e:
log.exception(e)
if action_fn == load_model:
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
def resolve_model_paths(task_data: TaskData): def resolve_model_paths(task_data: TaskData):
@ -149,6 +162,14 @@ def resolve_model_paths(task_data: TaskData):
task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan") task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")
def fail_if_models_did_not_load(context: Context):
    """Raise if any known model type previously failed to load.

    Load failures are recorded as plain strings in
    ``context.model_load_errors`` (only ``str(e)`` is stored, to avoid
    keeping exception tracebacks — and everything they reference — alive).
    The first recorded failure is surfaced here so the rendering task can
    report it to the browser UI.

    Raises:
        Exception: for the first model type found with a recorded load error.
    """
    for model_type in KNOWN_MODEL_TYPES:
        if model_type in context.model_load_errors:
            e = context.model_load_errors[model_type]
            # NOTE: interpolating a runtime value into an f-string is not an
            # injection risk in Python — the format template is fixed at
            # compile time and `e` is treated purely as data. The earlier
            # "concat, don't format (injection attack)" comment was misleading.
            raise Exception(f"Could not load the {model_type} model! Reason: {e}")
def set_vram_optimizations(context: Context): def set_vram_optimizations(context: Context):
config = app.getConfig() config = app.getConfig()
vram_usage_level = config.get("vram_usage_level", "balanced") vram_usage_level = config.get("vram_usage_level", "balanced")

View File

@ -33,6 +33,7 @@ def init(device):
context.stop_processing = False context.stop_processing = False
context.temp_images = {} context.temp_images = {}
context.partial_x_samples = None context.partial_x_samples = None
context.model_load_errors = {}
from easydiffusion import app from easydiffusion import app

View File

@ -336,6 +336,7 @@ def thread_render(device):
current_state = ServerStates.LoadingModel current_state = ServerStates.LoadingModel
model_manager.resolve_model_paths(task.task_data) model_manager.resolve_model_paths(task.task_data)
model_manager.reload_models_if_necessary(renderer.context, task.task_data) model_manager.reload_models_if_necessary(renderer.context, task.task_data)
model_manager.fail_if_models_did_not_load(renderer.context)
current_state = ServerStates.Rendering current_state = ServerStates.Rendering
task.response = renderer.make_images( task.response = renderer.make_images(