diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index 0ca61c5d..70bcdacf 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -118,16 +118,8 @@ def resolve_model_paths(task_data: TaskData):
def set_vram_optimizations(context: Context):
config = app.getConfig()
-
- max_usage_level = device_manager.get_max_vram_usage_level(context.device)
vram_usage_level = config.get('vram_usage_level', 'balanced')
- v = {'low': 0, 'balanced': 1, 'high': 2}
- if v[vram_usage_level] > v[max_usage_level]:
- log.error(f'Requested GPU Memory Usage level ({vram_usage_level}) is higher than what is ' + \
- f'possible ({max_usage_level}) on this device ({context.device}). Using "{max_usage_level}" instead')
- vram_usage_level = max_usage_level
-
if vram_usage_level != context.vram_usage_level:
context.vram_usage_level = vram_usage_level
return True
diff --git a/ui/media/js/parameters.js b/ui/media/js/parameters.js
index 522706af..7f54591a 100644
--- a/ui/media/js/parameters.js
+++ b/ui/media/js/parameters.js
@@ -101,7 +101,7 @@ var PARAMETERS = [
note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
"Balanced: nearly as fast as High, much lower VRAM usage<br/>" +
"High: fastest, maximum GPU memory usage<br/>" +
- "Low: slowest, force-used for GPUs with 3 to 4 GB memory",
+ "Low: slowest, recommended for GPUs with 3 to 4 GB memory",
icon: "fa-forward",
default: "balanced",
options: [