Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-06-20 18:08:00 +02:00)

Commit 7982a9ae25 (parent: aa01fd058e)

Change the performance field to GPU Memory Usage instead, and use the 'balanced' profile by default, since it's just 5% slower than 'high' and uses nearly 50% less VRAM.
@@ -36,7 +36,7 @@ const SETTINGS_IDS_LIST = [
     "save_to_disk",
     "diskPath",
     "sound_toggle",
-    "performance_level",
+    "vram_usage_level",
     "confirm_dangerous_actions",
     "metadata_output_format",
     "auto_save_settings",
@@ -602,7 +602,7 @@ function onTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
                     <b>Suggestions</b>:
                     <br/>
                     1. If you have set an initial image, please try reducing its dimension to ${MAX_INIT_IMAGE_DIMENSION}x${MAX_INIT_IMAGE_DIMENSION} or smaller.<br/>
-                    2. Try picking a lower performance level in the '<em>Performance Level</em>' setting (in the '<em>Settings</em>' tab).<br/>
+                    2. Try picking a lower level in the '<em>GPU Memory Usage</em>' setting (in the '<em>Settings</em>' tab).<br/>
                     3. Try generating a smaller image.<br/>`
         }
     } else {
@@ -887,7 +887,7 @@ function getCurrentUserRequest() {
             width: parseInt(widthField.value),
             height: parseInt(heightField.value),
             // allow_nsfw: allowNSFWField.checked,
-            performance_level: perfLevelField.value,
+            vram_usage_level: vramUsageLevelField.value,
             //render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
             use_stable_diffusion_model: stableDiffusionModelField.value,
             use_vae_model: vaeModelField.value,
@@ -94,18 +94,18 @@ var PARAMETERS = [
         default: true,
     },
     {
-        id: "performance_level",
+        id: "vram_usage_level",
         type: ParameterType.select,
-        label: "Performance Level",
+        label: "GPU Memory Usage",
         note: "Faster performance requires more GPU memory<br/><br/>" +
+              "<b>Balanced:</b> almost as fast as High, significantly lower GPU memory usage<br/>" +
               "<b>High:</b> fastest, maximum GPU memory usage</br>" +
-              "<b>Medium:</b> decent speed, uses 1 GB more memory than Low<br/>" +
-              "<b>Low:</b> slowest, for GPUs with 4 GB (or less) memory",
+              "<b>Low:</b> slowest, force-used for GPUs with 4 GB (or less) memory",
         icon: "fa-forward",
-        default: "high",
+        default: "balanced",
         options: [
+            {value: "balanced", label: "Balanced"},
             {value: "high", label: "High"},
-            {value: "medium", label: "Medium"},
             {value: "low", label: "Low"}
         ],
     },
@@ -227,7 +227,7 @@ function initParameters() {
 
 initParameters()
 
-let perfLevelField = document.querySelector('#performance_level')
+let vramUsageLevelField = document.querySelector('#vram_usage_level')
 let useCPUField = document.querySelector('#use_cpu')
 let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
 let useGPUsField = document.querySelector('#use_gpus')
@@ -6,7 +6,7 @@ class TaskData(BaseModel):
     request_id: str = None
     session_id: str = "session"
     save_to_disk_path: str = None
-    performance_level: str = "high" # or "low" or "medium"
+    vram_usage_level: str = "balanced" # or "low" or "medium"
 
     use_face_correction: str = None # or "GFPGANv1.3"
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
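For context, TaskData is a pydantic model, so the renamed field can be exercised directly. A minimal sketch (not part of the commit; the import path and request body are assumptions for illustration):

from easydiffusion.types import TaskData  # assumed module path

# Hypothetical request body after this commit; an old "performance_level" key
# would simply be ignored under pydantic's default model config.
task_data = TaskData.parse_obj({
    "session_id": "session",
    "vram_usage_level": "balanced",  # "low", "balanced" (the new default) or "high"
})
assert task_data.vram_usage_level == "balanced"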
@@ -110,7 +110,7 @@ def setConfig(config):
     except:
         log.error(traceback.format_exc())
 
-def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, performance_level):
+def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
     config = getConfig()
     if 'model' not in config:
         config['model'] = {}
@@ -124,7 +124,7 @@ def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, per
     if hypernetwork_model_name is None or hypernetwork_model_name == "":
         del config['model']['hypernetwork']
 
-    config['performance_level'] = performance_level
+    config['vram_usage_level'] = vram_usage_level
 
     setConfig(config)
 
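The net effect on the saved settings file is that the performance_level key is replaced by vram_usage_level. A sketch of the resulting config shape (model names and values are illustrative, not from the diff):

# Illustrative shape of the config after save_to_config() runs:
config = {
    'model': {'stable-diffusion': 'sd-v1-4'},
    'vram_usage_level': 'balanced',
}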
@@ -128,7 +128,7 @@ def needs_to_force_full_precision(context):
     device_name = context.device_name.lower()
     return (('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)) or ('Quadro T2000' in device_name)
 
-def get_max_perf_level(device):
+def get_max_vram_usage_level(device):
     if device != 'cpu':
         _, mem_total = torch.cuda.mem_get_info(device)
         mem_total /= float(10**9)
@@ -136,7 +136,7 @@ def get_max_perf_level(device):
         if mem_total < 4.5:
             return 'low'
         elif mem_total < 6.5:
-            return 'medium'
+            return 'balanced'
 
     return 'high'
 
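A standalone sketch of the same threshold rule (the helper name pick_level and the card sizes are hypothetical), making the byte-to-GB conversion and the two cutoffs concrete:

def pick_level(mem_total_bytes):
    mem_total = mem_total_bytes / float(10**9)  # bytes -> GB, as in the diff above
    if mem_total < 4.5:
        return 'low'       # e.g. a 4 GB card
    elif mem_total < 6.5:
        return 'balanced'  # e.g. a 6 GB card (previously reported 'medium')
    return 'high'          # e.g. 8 GB and up

assert pick_level(4 * 10**9) == 'low'
assert pick_level(6 * 10**9) == 'balanced'
assert pick_level(8 * 10**9) == 'high'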
@@ -25,9 +25,9 @@ DEFAULT_MODELS = {
     'gfpgan': ['GFPGANv1.3'],
     'realesrgan': ['RealESRGAN_x4plus'],
 }
-PERF_LEVEL_TO_VRAM_OPTIMIZATIONS = {
+VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
+    'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
     'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
-    'medium': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
     'high': {},
 }
 
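Note that 'balanced' takes over exactly the optimization set that 'medium' used before this commit, and resolving a level is a plain dict lookup. A self-contained sketch (restating the renamed table so it runs in isolation):

VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS = {
    'balanced': {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'},
    'low': {'KEEP_ENTIRE_MODEL_IN_CPU'},
    'high': {},  # {} in the source is an empty dict, but it still means "no flags"
}

assert VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS['balanced'] == {'KEEP_FS_AND_CS_IN_CPU', 'SET_ATTENTION_STEP_TO_4'}
assert not VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS['high']  # 'high' applies no optimizations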
@@ -125,9 +125,24 @@ def resolve_model_paths(task_data: TaskData):
     if task_data.use_upscale: task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, 'gfpgan')
 
 def set_vram_optimizations(context: Context):
+    def is_greater(a, b): # is a > b?
+        if a == "low": # b will be "low", "balanced" or "high"
+            return False
+        elif a == "balanced" and b != "low": # b will be "balanced" or "high"
+            return False
+        return True
+
     config = app.getConfig()
-    perf_level = config.get('performance_level', device_manager.get_max_perf_level(context.device))
-    vram_optimizations = PERF_LEVEL_TO_VRAM_OPTIMIZATIONS[perf_level]
+    max_usage_level = device_manager.get_max_vram_usage_level(context.device)
+    vram_usage_level = config.get('vram_usage_level', 'balanced')
+
+    if is_greater(vram_usage_level, max_usage_level):
+        log.error(f'Requested GPU Memory Usage level ({vram_usage_level}) is higher than what is ' + \
+                  f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
+        vram_usage_level = max_usage_level
+
+    vram_optimizations = VRAM_USAGE_LEVEL_TO_OPTIMIZATIONS[vram_usage_level]
 
     if vram_optimizations != context.vram_optimizations:
         context.vram_optimizations = vram_optimizations
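A self-contained sketch of the clamping rule introduced here, restating the nested is_greater() helper so the ordering low < balanced < high can be checked in isolation:

def is_greater(a, b):  # is a > b, under the ordering low < balanced < high?
    if a == "low":                        # nothing is smaller than "low"
        return False
    elif a == "balanced" and b != "low":  # "balanced" only exceeds "low"
        return False
    return True

# A config asking for "high" on a device capped at "balanced" gets clamped down:
requested, max_level = "high", "balanced"
if is_greater(requested, max_level):
    requested = max_level
assert requested == "balanced"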
@@ -341,7 +341,7 @@ def get_devices():
             'name': torch.cuda.get_device_name(device),
             'mem_free': mem_free,
             'mem_total': mem_total,
-            'max_perf_level': device_manager.get_max_perf_level(device),
+            'max_vram_usage_level': device_manager.get_max_vram_usage_level(device),
         }
 
     # list the compatible devices
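For reference, a hypothetical per-device payload after this rename (all values are illustrative; the mem figures assume the same byte-to-GB scaling used in device_manager above):

device_info = {
    'name': 'NVIDIA GeForce RTX 3060',
    'mem_free': 11.2,
    'mem_total': 12.0,
    'max_vram_usage_level': 'high',  # renamed from 'max_perf_level'
}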
@@ -134,7 +134,7 @@ def render(req: dict):
 
     render_req.init_image_mask = req.get('mask') # hack: will rename this in the HTTP API in a future revision
 
-    app.save_to_config(task_data.use_stable_diffusion_model, task_data.use_vae_model, task_data.use_hypernetwork_model, task_data.performance_level)
+    app.save_to_config(task_data.use_stable_diffusion_model, task_data.use_vae_model, task_data.use_hypernetwork_model, task_data.vram_usage_level)
 
     # enqueue the task
     new_task = task_manager.render(render_req, task_data)