Don't set specific VRAM optimizations; instead, use the new sdkit API to set the VRAM usage level directly

cmdr2 2023-01-17 21:33:15 +05:30
parent 0a1197055c
commit 42f9abdfe3


@@ -24,11 +24,6 @@ DEFAULT_MODELS = {
     'gfpgan': ['GFPGANv1.3'],
     'realesrgan': ['RealESRGAN_x4plus'],
 }
-VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
-    'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
-    'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
-    'high': {},
-}
 MODELS_TO_LOAD_ON_START = ['stable-diffusion', 'vae', 'hypernetwork']

 known_models = {}
@@ -133,10 +128,8 @@ def set_vram_optimizations(context: Context):
                   f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
         vram_usage_level = max_usage_level

-    vram_optimizations = VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS[vram_usage_level]
-
-    if vram_optimizations != context.vram_optimizations:
-        context.vram_optimizations = vram_optimizations
+    if vram_usage_level != context.vram_usage_level:
+        context.vram_usage_level = vram_usage_level
         return True

     return False
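
The net effect of the change can be sketched as follows. This is a minimal, self-contained sketch rather than the actual project code: FakeContext and set_vram_usage_level are hypothetical names used for illustration, modelling only the vram_usage_level and vram_optimizations attributes that appear in the diff above. Before this commit the caller translated the usage level into a set of optimization flags itself; after it, the level string is handed straight to the sdkit context, which applies the optimizations internally.

# Hypothetical stand-in for sdkit's Context, modelling only the attributes
# touched by this diff.
class FakeContext:
    def __init__(self):
        self.device = 'cuda:0'
        self.vram_usage_level = 'balanced'   # new style: sdkit picks the optimizations from this
        self.vram_optimizations = set()      # old style: caller had to choose the flags itself

def set_vram_usage_level(context: FakeContext, requested_level: str) -> bool:
    """Store the requested level ('low', 'balanced' or 'high') on the context.

    Returns True only when the level actually changed, so the caller knows
    whether the already-loaded models need to be reloaded with the new setting.
    """
    if requested_level != context.vram_usage_level:
        context.vram_usage_level = requested_level
        return True
    return False

if __name__ == '__main__':
    ctx = FakeContext()
    print(set_vram_usage_level(ctx, 'low'))   # True  -> level changed, reload models
    print(set_vram_usage_level(ctx, 'low'))   # False -> nothing to do
    print(ctx.vram_usage_level)               # 'low'

The True/False return value mirrors the diffed function's behaviour: the boolean signals a change so the caller can decide whether to reapply the setting, without the caller needing to know anything about individual optimization flags.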