Use the turbo setting if requested

Author: cmdr2
Date:   2022-12-11 20:42:31 +05:30
Parent: 1a5b6ef260
Commit: e45cbbf1ca
2 changed files with 10 additions and 0 deletions

Changed file 1 of 2:

@@ -37,6 +37,9 @@ def load_default_models(context: Context):
     for model_type in KNOWN_MODEL_TYPES:
         context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)

+    # disable TURBO initially (this should be read from the config eventually)
+    context.vram_optimizations -= {'TURBO'}
+
     # load mandatory models
     model_loader.load_model(context, 'stable-diffusion')
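
The `-=` above is Python's in-place set difference, which silently ignores members that are not present, so clearing 'TURBO' at startup is safe even if it was never enabled. A minimal sketch of that behaviour, assuming `vram_optimizations` is a plain `set` (the other optimization name is purely illustrative):

# In-place set difference ignores missing members, unlike set.remove(),
# which raises KeyError. 'SOME_OTHER_OPT' is an illustrative placeholder.
vram_optimizations = {'SOME_OTHER_OPT'}
vram_optimizations -= {'TURBO'}        # no error even though 'TURBO' is absent
print(vram_optimizations)              # {'SOME_OTHER_OPT'}
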
@@ -116,6 +119,12 @@ def resolve_model_paths(task_data: TaskData):
     if task_data.use_face_correction: task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, 'gfpgan')
     if task_data.use_upscale: task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, 'gfpgan')

+def set_vram_optimizations(context: Context, task_data: TaskData):
+    if task_data.turbo:
+        context.vram_optimizations += {'TURBO'}
+    else:
+        context.vram_optimizations -= {'TURBO'}
+
 def make_model_folders():
     for model_type in KNOWN_MODEL_TYPES:
         model_dir_path = os.path.join(app.MODELS_DIR, model_type)
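
One caveat with the new function: Python's `set` type does not define `+=` between two sets (it raises TypeError), so if `context.vram_optimizations` is a plain set, as the `-= {'TURBO'}` form suggests, the in-place union spelling would be `|=` or `set.add()`. A hedged sketch of the same toggle using operators that `set` actually supports; this is not the project's code, just the equivalent logic:

# Equivalent toggle written with supported set operators.
# `turbo` stands in for task_data.turbo; the set mirrors context.vram_optimizations.
def toggle_turbo(vram_optimizations: set, turbo: bool) -> set:
    if turbo:
        vram_optimizations |= {'TURBO'}   # in-place union; `+=` is not defined for sets
    else:
        vram_optimizations -= {'TURBO'}   # in-place difference; safe if 'TURBO' is absent
    return vram_optimizations

print(toggle_turbo(set(), turbo=True))        # {'TURBO'}
print(toggle_turbo({'TURBO'}, turbo=False))   # set()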

Changed file 2 of 2:

@@ -278,6 +278,7 @@ def thread_render(device):
         current_state = ServerStates.LoadingModel
         model_manager.resolve_model_paths(task.task_data)
+        model_manager.set_vram_optimizations(renderer.context, task.task_data)
         model_manager.reload_models_if_necessary(renderer.context, task.task_data)

         current_state = ServerStates.Rendering
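
The call order in the render thread matters: the turbo setting is applied to the renderer context before `reload_models_if_necessary`, so any reload already sees the updated optimization set. A stand-in sketch of that sequencing with stub functions; the names mirror the calls in the diff, but none of this is the project's implementation:

# Stubs that mirror the call order in thread_render above.
class Context:
    def __init__(self):
        self.vram_optimizations = set()

def set_vram_optimizations(context: Context, turbo: bool):
    if turbo:
        context.vram_optimizations.add('TURBO')      # enable turbo for this task
    else:
        context.vram_optimizations.discard('TURBO')  # disable it; no error if absent

def reload_models_if_necessary(context: Context):
    # The real function reloads models; this stub only shows that it runs
    # after the optimization set has been updated.
    print('reloading with optimizations:', context.vram_optimizations)

context = Context()
set_vram_optimizations(context, turbo=True)   # apply the task's turbo flag first
reload_models_if_necessary(context)           # then reload, seeing {'TURBO'}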