mirror of https://github.com/easydiffusion/easydiffusion.git
synced 2025-06-21 10:27:47 +02:00

Merge remote-tracking branch 'upstream' into feat-change-img-count

Commit: 8d6021e6c3
@ -17,6 +17,9 @@
 - **Major rewrite of the code** - We've switched to using diffusers under the hood, which allows us to release new features faster, and focus on making the UI and installer even easier to use.

 ### Detailed changelog
+* 3.0.9c - 6 Feb 2025 - (Internal code change) Remove hardcoded references to `torch.cuda`, and replace them with torchruntime's device utilities.
+* 3.0.9b - 28 Jan 2025 - Fix a bug affecting older versions of Easy Diffusion, which tried to upgrade to an incompatible version of PyTorch.
+* 3.0.9b - 4 Jan 2025 - Replace the use of WMIC (deprecated) with a PowerShell call.
 * 3.0.9 - 28 May 2024 - Slider for controlling the strength of ControlNets.
 * 3.0.8 - 27 May 2024 - SDXL ControlNets for Img2Img and Inpainting.
 * 3.0.7 - 11 Dec 2023 - Setting to enable/disable VAE tiling (in the Image Settings panel). Sometimes VAE tiling reduces the quality of the image, so this setting helps control that.
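The 3.0.9c entry above refers to the migration visible in the Python hunks later in this diff: direct `torch.cuda` calls are replaced with torchruntime's device utilities. A minimal sketch of that pattern, using only the functions this commit imports (`get_installed_torch_platform`, `get_device`, `get_device_count`, `get_device_name` from torchruntime, and `mem_get_info`, `is_cpu_device` from sdkit); an illustration, not the exact Easy Diffusion implementation:

```python
# Sketch: enumerate render devices via torchruntime instead of torch.cuda.
from torchruntime.utils import get_installed_torch_platform, get_device, get_device_count, get_device_name
from sdkit.utils import mem_get_info, is_cpu_device

def list_render_devices():
    torch_platform_name = get_installed_torch_platform()[0]  # e.g. "cuda" or "cpu"
    if is_cpu_device(torch_platform_name):
        return [torch_platform_name]

    device_ids = []
    device_count = get_device_count()
    for i in range(device_count):
        # single-GPU installs use the bare platform name (e.g. "cuda"), as in this commit
        device_id = f"{torch_platform_name}:{i}" if device_count > 1 else torch_platform_name
        device = get_device(device_id)
        mem_free, mem_total = mem_get_info(device)  # bytes
        print(f"{device_id}: {get_device_name(device)} - {mem_free / 1e9:.2f} / {mem_total / 1e9:.2f} GB free")
        device_ids.append(device_id)
    return device_ids
```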
@ -3,7 +3,7 @@
 Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.

-🔥🎉 **New!** Support for SDXL, ControlNet, multiple LoRA files, embeddings (and a lot more) has been added!
+🔥🎉 **New!** Support for Flux has been added in the beta branch (v3.5 engine)!

 [Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | [User guide](https://github.com/easydiffusion/easydiffusion/wiki) | <sub>[Discord](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
@ -21,15 +21,15 @@ Click the download button for your operating system:
 </p>

 **Hardware requirements:**
-- **Windows:** NVIDIA graphics card¹ (minimum 2 GB RAM), or run on your CPU.
+- **Windows:** NVIDIA¹ or AMD graphics card (minimum 2 GB RAM), or run on your CPU.
 - **Linux:** NVIDIA¹ or AMD² graphics card (minimum 2 GB RAM), or run on your CPU.
-- **Mac:** M1 or M2, or run on your CPU.
+- **Mac:** M1/M2/M3/M4 or AMD graphics card (Intel Mac), or run on your CPU.
 - Minimum 8 GB of system RAM.
 - At least 25 GB of space on the hard disk.

 ¹) [CUDA Compute capability](https://en.wikipedia.org/wiki/CUDA#GPUs_supported) level of 3.7 or higher required.

-²) ROCm 5.2 support required.
+²) ROCm 5.2 (or newer) support required.

 The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
@ -4,7 +4,7 @@ echo "Opening Stable Diffusion UI - Developer Console.." & echo.
 cd /d %~dp0

-set PATH=C:\Windows\System32;%PATH%
+set PATH=C:\Windows\System32;C:\Windows\System32\WindowsPowerShell\v1.0;%PATH%

 @rem set legacy and new installer's PATH, if they exist
 if exist "installer" set PATH=%cd%\installer;%cd%\installer\Library\bin;%cd%\installer\Scripts;%cd%\installer\Library\usr\bin;%PATH%
@ -26,6 +26,7 @@ call conda --version
 echo.
 echo COMSPEC=%COMSPEC%
 echo.
+powershell -Command "(Get-WmiObject Win32_VideoController | Select-Object Name, AdapterRAM, DriverDate, DriverVersion)"

 @rem activate the legacy environment (if present) and set PYTHONPATH
 if exist "installer_files\env" (
@ -3,7 +3,7 @@
 cd /d %~dp0
 echo Install dir: %~dp0

-set PATH=C:\Windows\System32;%PATH%
+set PATH=C:\Windows\System32;C:\Windows\System32\WindowsPowerShell\v1.0;%PATH%
 set PYTHONHOME=

 if exist "on_sd_start.bat" (
@ -39,7 +39,7 @@ call where conda
 call conda --version
 echo .
 echo COMSPEC=%COMSPEC%
-wmic path win32_VideoController get name,AdapterRAM,DriverDate,DriverVersion
+powershell -Command "(Get-WmiObject Win32_VideoController | Select-Object Name, AdapterRAM, DriverDate, DriverVersion)"

 @rem Download the rest of the installer and UI
 call scripts\on_env_start.bat
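Both batch-script hunks above swap the deprecated WMIC query for a PowerShell `Get-WmiObject` one-liner (which is why `WindowsPowerShell\v1.0` is added to `PATH` earlier in this diff). For reference, a hedged Python equivalent; the command string is taken verbatim from the diff, and running it via `subprocess` is purely illustrative, not part of this commit:

```python
# Sketch: the same GPU query the updated scripts run, invoked from Python.
import subprocess

cmd = "(Get-WmiObject Win32_VideoController | Select-Object Name, AdapterRAM, DriverDate, DriverVersion)"
result = subprocess.run(["powershell", "-Command", cmd], capture_output=True, text=True)
print(result.stdout)  # name, VRAM and driver details for each video controller
```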
File diff suppressed because it is too large.
@ -70,6 +70,8 @@ echo PYTHONPATH=%PYTHONPATH%
 call where python
 call python --version

+call python -m pip install -q torchruntime
+
 call python scripts\check_modules.py --launch-uvicorn
 pause
 exit /b
@ -46,6 +46,8 @@ fi
 if [ -e "src" ]; then mv src src-old; fi
 if [ -e "ldm" ]; then mv ldm ldm-old; fi

+python -m pip install -q torchruntime
+
 cd ..
 # Download the required packages
 python scripts/check_modules.py --launch-uvicorn
@ -54,8 +54,7 @@ OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
 PRESERVE_CONFIG_VARS = ["FORCE_FULL_PRECISION"]
 TASK_TTL = 15 * 60  # Discard last session's task timeout
 APP_CONFIG_DEFAULTS = {
-    # auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
-    "render_devices": "auto",  # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
+    "render_devices": "auto",
     "update_branch": "main",
     "ui": {
         "open_browser_on_start": True,
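The `render_devices` default above is read back from the user's `config.json`. A hypothetical example of the accepted shapes, taken from the valid values named in the removed comment here and in the error messages later in this diff:

```python
# Sketch: hypothetical config.json contents accepted by this code path.
import json

config = json.loads('{"render_devices": ["cuda:0", "cuda:1"], "update_branch": "main"}')
# "render_devices" may also be "auto" (the default) or "cpu".
```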
@ -6,6 +6,15 @@ import traceback
 import torch
 from easydiffusion.utils import log

+from torchruntime.utils import (
+    get_installed_torch_platform,
+    get_device,
+    get_device_count,
+    get_device_name,
+    SUPPORTED_BACKENDS,
+)
+from sdkit.utils import mem_get_info, is_cpu_device, has_half_precision_bug

 """
 Set `FORCE_FULL_PRECISION` in the environment variables, or in `config.bat`/`config.sh` to set full precision (i.e. float32).
 Otherwise the models will load at half-precision (i.e. float16).
@ -22,33 +31,15 @@ mem_free_threshold = 0

 def get_device_delta(render_devices, active_devices):
     """
-    render_devices: 'cpu', or 'auto', or 'mps' or ['cuda:N'...]
-    active_devices: ['cpu', 'mps', 'cuda:N'...]
+    render_devices: 'auto' or backends listed in `torchruntime.utils.SUPPORTED_BACKENDS`
+    active_devices: [backends listed in `torchruntime.utils.SUPPORTED_BACKENDS`]
     """

-    if render_devices in ("cpu", "auto", "mps"):
-        render_devices = [render_devices]
-    elif render_devices is not None:
-        if isinstance(render_devices, str):
-            render_devices = [render_devices]
-        if isinstance(render_devices, list) and len(render_devices) > 0:
-            render_devices = list(filter(lambda x: x.startswith("cuda:") or x == "mps", render_devices))
-            if len(render_devices) == 0:
-                raise Exception(
-                    'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "mps"} or {"render_devices": "auto"}'
-                )
+    render_devices = render_devices or "auto"
+    render_devices = [render_devices] if isinstance(render_devices, str) else render_devices

-            render_devices = list(filter(lambda x: is_device_compatible(x), render_devices))
-            if len(render_devices) == 0:
-                raise Exception(
-                    "Sorry, none of the render_devices configured in config.json are compatible with Stable Diffusion"
-                )
-        else:
-            raise Exception(
-                'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}'
-            )
-    else:
-        render_devices = ["auto"]
+    # check for backend support
+    validate_render_devices(render_devices)

     if "auto" in render_devices:
         render_devices = auto_pick_devices(active_devices)
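A short hedged sketch of how the simplified flow above behaves: the config value is normalized, then validation is delegated to `validate_render_devices()` (defined in the next hunk). The `"tpu:0"` value is a made-up example of an unsupported backend:

```python
# Sketch: behaviour of the new validation path (based on validate_render_devices below).
render_devices = None
render_devices = render_devices or "auto"  # -> "auto"
render_devices = [render_devices] if isinstance(render_devices, str) else render_devices  # -> ["auto"]

validate_render_devices(["auto"])              # passes
validate_render_devices(["cuda:0", "cuda:1"])  # passes, since "cuda" is a supported backend
validate_render_devices(["tpu:0"])             # raises ValueError listing the valid backends
```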
@ -64,47 +55,39 @@ def get_device_delta(render_devices, active_devices):
     return devices_to_start, devices_to_stop


-def is_mps_available():
-    return (
-        platform.system() == "Darwin"
-        and hasattr(torch.backends, "mps")
-        and torch.backends.mps.is_available()
-        and torch.backends.mps.is_built()
-    )
+def validate_render_devices(render_devices):
+    supported_backends = ("auto",) + SUPPORTED_BACKENDS
+    unsupported_render_devices = [d for d in render_devices if not d.lower().startswith(supported_backends)]

-
-def is_cuda_available():
-    return torch.cuda.is_available()
+    if unsupported_render_devices:
+        raise ValueError(
+            f"Invalid render devices in config: {unsupported_render_devices}. Valid render devices: {supported_backends}"
+        )


 def auto_pick_devices(currently_active_devices):
     global mem_free_threshold

-    if is_mps_available():
-        return ["mps"]
+    torch_platform_name = get_installed_torch_platform()[0]

-    if not is_cuda_available():
-        return ["cpu"]
-
-    device_count = torch.cuda.device_count()
-    if device_count == 1:
-        return ["cuda:0"] if is_device_compatible("cuda:0") else ["cpu"]
+    if is_cpu_device(torch_platform_name):
+        return [torch_platform_name]

+    device_count = get_device_count()
     log.debug("Autoselecting GPU. Using most free memory.")
     devices = []
-    for device in range(device_count):
-        device = f"cuda:{device}"
-        if not is_device_compatible(device):
-            continue
+    for device_id in range(device_count):
+        device_id = f"{torch_platform_name}:{device_id}" if device_count > 1 else torch_platform_name
+        device = get_device(device_id)

-        mem_free, mem_total = torch.cuda.mem_get_info(device)
+        mem_free, mem_total = mem_get_info(device)
         mem_free /= float(10**9)
         mem_total /= float(10**9)
-        device_name = torch.cuda.get_device_name(device)
+        device_name = get_device_name(device)
         log.debug(
-            f"{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb"
+            f"{device_id} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb"
         )
-        devices.append({"device": device, "device_name": device_name, "mem_free": mem_free})
+        devices.append({"device": device_id, "device_name": device_name, "mem_free": mem_free})

     devices.sort(key=lambda x: x["mem_free"], reverse=True)
     max_mem_free = devices[0]["mem_free"]
@ -117,69 +100,45 @@ def auto_pick_devices(currently_active_devices):
     # always be very low (since their VRAM contains the model).
     # These already-running devices probably aren't terrible, since they were picked in the past.
     # Worst case, the user can restart the program and that'll get rid of them.
-    devices = list(
-        filter(
-            (lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices),
-            devices,
-        )
-    )
-    devices = list(map(lambda x: x["device"], devices))
+    devices = [
+        x["device"] for x in devices if x["mem_free"] >= mem_free_threshold or x["device"] in currently_active_devices
+    ]
     return devices


-def device_init(context, device):
-    """
-    This function assumes the 'device' has already been verified to be compatible.
-    `get_device_delta()` has already filtered out incompatible devices.
-    """
+def device_init(context, device_id):
+    context.device = device_id

-    validate_device_id(device, log_prefix="device_init")
-
-    if "cuda" not in device:
-        context.device = device
+    if is_cpu_device(context.torch_device):
         context.device_name = get_processor_name()
         context.half_precision = False
         log.debug(f"Render device available as {context.device_name}")
         return
+    else:
+        context.device_name = get_device_name(context.torch_device)

-    context.device_name = torch.cuda.get_device_name(device)
-    context.device = device
+    # Some graphics cards have bugs in their firmware that prevent image generation at half precision
+    if needs_to_force_full_precision(context.device_name):
+        log.warn(f"forcing full precision on this GPU, to avoid corrupted images. GPU: {context.device_name}")
+        context.half_precision = False

-    # Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
-    if needs_to_force_full_precision(context):
-        log.warn(f"forcing full precision on this GPU, to avoid green images. GPU detected: {context.device_name}")
-        # Apply force_full_precision now before models are loaded.
-        context.half_precision = False
-
-    log.info(f'Setting {device} as active, with precision: {"half" if context.half_precision else "full"}')
-    torch.cuda.device(device)
+    log.info(f'Setting {device_id} as active, with precision: {"half" if context.half_precision else "full"}')


-def needs_to_force_full_precision(context):
+def needs_to_force_full_precision(device_name):
     if "FORCE_FULL_PRECISION" in os.environ:
         return True

-    device_name = context.device_name.lower()
-    return (
-        ("nvidia" in device_name or "geforce" in device_name or "quadro" in device_name)
-        and (
-            " 1660" in device_name
-            or " 1650" in device_name
-            or " 1630" in device_name
-            or " t400" in device_name
-            or " t550" in device_name
-            or " t600" in device_name
-            or " t1000" in device_name
-            or " t1200" in device_name
-            or " t2000" in device_name
-        )
-    ) or ("tesla k40m" in device_name)
+    return has_half_precision_bug(device_name.lower())


 def get_max_vram_usage_level(device):
-    if "cuda" in device:
-        _, mem_total = torch.cuda.mem_get_info(device)
-    else:
+    "Expects a torch.device as the argument"
+
+    if is_cpu_device(device):
         return "high"

+    _, mem_total = mem_get_info(device)
+
+    if mem_total < 0.001:  # probably a torch platform without a mem_get_info() implementation
+        return "high"
+
     mem_total /= float(10**9)
@ -191,51 +150,6 @@ def get_max_vram_usage_level(device):
     return "high"


-def validate_device_id(device, log_prefix=""):
-    def is_valid():
-        if not isinstance(device, str):
-            return False
-        if device == "cpu" or device == "mps":
-            return True
-        if not device.startswith("cuda:") or not device[5:].isnumeric():
-            return False
-        return True
-
-    if not is_valid():
-        raise EnvironmentError(
-            f"{log_prefix}: device id should be 'cpu', 'mps', or 'cuda:N' (where N is an integer index for the GPU). Got: {device}"
-        )
-
-
-def is_device_compatible(device):
-    """
-    Returns True/False, and prints any compatibility errors
-    """
-    # static variable "history".
-    is_device_compatible.history = getattr(is_device_compatible, "history", {})
-    try:
-        validate_device_id(device, log_prefix="is_device_compatible")
-    except:
-        log.error(str(e))
-        return False
-
-    if device in ("cpu", "mps"):
-        return True
-    # Memory check
-    try:
-        _, mem_total = torch.cuda.mem_get_info(device)
-        mem_total /= float(10**9)
-        if mem_total < 1.9:
-            if is_device_compatible.history.get(device) == None:
-                log.warn(f"GPU {device} with less than 2 GB of VRAM is not compatible with Stable Diffusion")
-                is_device_compatible.history[device] = 1
-            return False
-    except RuntimeError as e:
-        log.error(str(e))
-        return False
-    return True


 def get_processor_name():
     try:
         import subprocess
@ -243,7 +157,8 @@ def get_processor_name():
         if platform.system() == "Windows":
             return platform.processor()
         elif platform.system() == "Darwin":
-            os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
+            if "/usr/sbin" not in os.environ["PATH"].split(os.pathsep):
+                os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
             command = "sysctl -n machdep.cpu.brand_string"
             return subprocess.check_output(command, shell=True).decode().strip()
         elif platform.system() == "Linux":
@ -37,7 +37,7 @@ MODEL_EXTENSIONS = {
 }
 DEFAULT_MODELS = {
     "stable-diffusion": [
-        {"file_name": "sd-v1-5.safetensors", "model_id": "1.5-pruned-emaonly-fp16"},
+        {"file_name": "sd-v1-4.ckpt", "model_id": "1.4"},
     ],
     "gfpgan": [
         {"file_name": "GFPGANv1.4.pth", "model_id": "1.4"},
@ -76,7 +76,7 @@ def load_default_models(context: Context):
                 scan_model=context.model_paths[model_type] != None
                 and not context.model_paths[model_type].endswith(".safetensors"),
             )
-            if model_type in context.model_load_errors:
+            if hasattr(context, "model_load_errors") and model_type in context.model_load_errors:
                 del context.model_load_errors[model_type]
         except Exception as e:
             log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
@ -88,6 +88,8 @@ def load_default_models(context: Context):
             log.exception(e)
             del context.model_paths[model_type]

+            if not hasattr(context, "model_load_errors"):
+                context.model_load_errors = {}
             context.model_load_errors[model_type] = str(e)  # storing the entire Exception can lead to memory leaks
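The `# storing the entire Exception can lead to memory leaks` comment above reflects a general Python pattern: an exception object references its traceback, and the traceback references every stack frame and its locals, so caching exceptions can pin large objects (here, potentially model tensors) in memory. A minimal hedged illustration:

```python
# Sketch: store the message, not the Exception object.
errors = {}
try:
    raise RuntimeError("could not load model")  # stand-in for a failing model load
except Exception as e:
    errors["stable-diffusion"] = str(e)  # e.__traceback__ (and its frames) can now be freed
```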
@ -179,11 +181,13 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
             extra_params = models_data.model_params.get(model_type, {})
             try:
                 action_fn(context, model_type, scan_model=False, **extra_params)  # we've scanned them already
-                if model_type in context.model_load_errors:
+                if hasattr(context, "model_load_errors") and model_type in context.model_load_errors:
                     del context.model_load_errors[model_type]
             except Exception as e:
                 log.exception(e)
                 if action_fn == load_model:
+                    if not hasattr(context, "model_load_errors"):
+                        context.model_load_errors = {}
                     context.model_load_errors[model_type] = str(e)  # storing the entire Exception can lead to memory leaks
@ -207,7 +211,7 @@ def resolve_model_paths(models_data: ModelsData):

 def fail_if_models_did_not_load(context: Context):
     for model_type in KNOWN_MODEL_TYPES:
-        if model_type in context.model_load_errors:
+        if hasattr(context, "model_load_errors") and model_type in context.model_load_errors:
             e = context.model_load_errors[model_type]
             raise Exception(f"Could not load the {model_type} model! Reason: " + e)
@ -196,11 +196,13 @@ def set_app_config_internal(req: SetAppConfigRequest):


 def update_render_devices_in_config(config, render_devices):
-    if render_devices not in ("cpu", "auto") and not render_devices.startswith("cuda:"):
-        raise HTTPException(status_code=400, detail=f"Invalid render device requested: {render_devices}")
+    from easydiffusion.device_manager import validate_render_devices

-    if render_devices.startswith("cuda:"):
+    try:
         render_devices = render_devices.split(",")
+        validate_render_devices(render_devices)
+    except ValueError as e:
+        raise HTTPException(status_code=400, detail=str(e))

     config["render_devices"] = render_devices
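A short sketch of what the rewritten handler above does with a typical request; the `"cuda:0,cuda:1"` value is hypothetical:

```python
# Sketch: update_render_devices_in_config() with a comma-separated device list.
render_devices = "cuda:0,cuda:1"
render_devices = render_devices.split(",")  # -> ["cuda:0", "cuda:1"]
validate_render_devices(render_devices)     # a ValueError here is reported as HTTP 400
config = {}
config["render_devices"] = render_devices
```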
@ -21,6 +21,9 @@ from easydiffusion.tasks import Task
 from easydiffusion.utils import log
 from sdkit.utils import gc

+from torchruntime.utils import get_device_count, get_device, get_device_name, get_installed_torch_platform
+from sdkit.utils import is_cpu_device, mem_get_info
+
 THREAD_NAME_PREFIX = ""
 ERR_LOCK_FAILED = " failed to acquire lock within timeout."
 LOCK_TIMEOUT = 15  # Maximum locking time in seconds before failing a task.
@ -329,34 +332,33 @@ def get_devices():
         "active": {},
     }

-    def get_device_info(device):
-        if device in ("cpu", "mps"):
+    def get_device_info(device_id):
+        if is_cpu_device(device_id):
             return {"name": device_manager.get_processor_name()}

-        mem_free, mem_total = torch.cuda.mem_get_info(device)
+        device = get_device(device_id)
+
+        mem_free, mem_total = mem_get_info(device)
         mem_free /= float(10**9)
         mem_total /= float(10**9)

         return {
-            "name": torch.cuda.get_device_name(device),
+            "name": get_device_name(device),
             "mem_free": mem_free,
             "mem_total": mem_total,
             "max_vram_usage_level": device_manager.get_max_vram_usage_level(device),
         }

     # list the compatible devices
-    cuda_count = torch.cuda.device_count()
-    for device in range(cuda_count):
-        device = f"cuda:{device}"
-        if not device_manager.is_device_compatible(device):
-            continue
+    torch_platform_name = get_installed_torch_platform()[0]
+    device_count = get_device_count()
+    for device_id in range(device_count):
+        device_id = f"{torch_platform_name}:{device_id}" if device_count > 1 else torch_platform_name

-        devices["all"].update({device: get_device_info(device)})
+        devices["all"].update({device_id: get_device_info(device_id)})

-    if device_manager.is_mps_available():
-        devices["all"].update({"mps": get_device_info("mps")})
-
-    devices["all"].update({"cpu": get_device_info("cpu")})
+    if torch_platform_name != "cpu":
+        devices["all"].update({"cpu": get_device_info("cpu")})

     # list the activated devices
     if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
@ -368,8 +370,8 @@ def get_devices():
             weak_data = weak_thread_data.get(rthread)
             if not weak_data or not "device" in weak_data or not "device_name" in weak_data:
                 continue
-            device = weak_data["device"]
-            devices["active"].update({device: get_device_info(device)})
+            device_id = weak_data["device"]
+            devices["active"].update({device_id: get_device_info(device_id)})
     finally:
         manager_lock.release()
@ -427,12 +429,6 @@ def start_render_thread(device):


 def stop_render_thread(device):
-    try:
-        device_manager.validate_device_id(device, log_prefix="stop_render_thread")
-    except:
-        log.error(traceback.format_exc())
-        return False
-
     if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
         raise Exception("stop_render_thread" + ERR_LOCK_FAILED)
     log.info(f"Stopping Rendering Thread on device: {device}")
@ -20,8 +20,8 @@ class GenerateImageRequest(BaseModel):
     control_image: Any = None
     control_alpha: Union[float, List[float]] = None
     prompt_strength: float = 0.8
-    preserve_init_image_color_profile = False
-    strict_mask_border = False
+    preserve_init_image_color_profile: bool = False
+    strict_mask_border: bool = False

     sampler_name: str = None  # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
     hypernetwork_strength: float = 0
|
     model1: str = None
     ratio: float = None
     out_path: str = "mix"
-    use_fp16 = True
+    use_fp16: bool = True


 class Image:
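Both hunks above add explicit type annotations to Pydantic model attributes. A likely rationale (my assumption; the commit doesn't state it): Pydantic v1 infers a field from a bare `use_fp16 = True`, but Pydantic v2 rejects non-annotated attributes, so the annotated form is required for the field to keep working. A minimal sketch:

```python
# Sketch: annotated attributes are proper Pydantic fields (assumed rationale).
from pydantic import BaseModel

class MergeRequest(BaseModel):
    out_path: str = "mix"
    use_fp16: bool = True  # without ": bool", Pydantic v2 raises a PydanticUserError

req = MergeRequest(use_fp16=False)
print(req.use_fp16)  # False
```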
@ -35,7 +35,7 @@
     <h1>
         <img id="logo_img" src="/media/images/icon-512x512.png">
         Easy Diffusion
-        <small><span id="version">v3.0.9</span> <span id="updateBranchLabel"></span></small>
+        <small><span id="version">v3.0.9c</span> <span id="updateBranchLabel"></span></small>
     </h1>
 </div>
 <div id="server-status">
@ -178,23 +178,6 @@ function loadSettings() {
         }
     })
     CURRENTLY_LOADING_SETTINGS = false
-    } else if (localStorage.length < 2) {
-        // localStorage is too short for OldSettings
-        // So this is likely the first time Easy Diffusion is running.
-        // Initialize vram_usage_level based on the available VRAM
-        function initGPUProfile(event) {
-            if (
-                "detail" in event &&
-                "active" in event.detail &&
-                "cuda:0" in event.detail.active &&
-                event.detail.active["cuda:0"].mem_total < 4.5
-            ) {
-                vramUsageLevelField.value = "low"
-                vramUsageLevelField.dispatchEvent(new Event("change"))
-            }
-            document.removeEventListener("system_info_update", initGPUProfile)
-        }
-        document.addEventListener("system_info_update", initGPUProfile)
     } else {
         CURRENTLY_LOADING_SETTINGS = true
         tryLoadOldSettings()
@ -611,6 +611,13 @@ function onUseAsInputClick(req, img) {
     initImagePreview.src = imgData

     maskSetting.checked = false
+
+    // Force the image settings size to match the input, as inpaint currently only works correctly
+    // if input image and generate sizes match.
+    addImageSizeOption(img.naturalWidth);
+    addImageSizeOption(img.naturalHeight);
+    widthField.value = img.naturalWidth;
+    heightField.value = img.naturalHeight;
 }

 function onUseForControlnetClick(req, img) {
@ -642,7 +642,7 @@ function setDeviceInfo(devices) {

     function ID_TO_TEXT(d) {
         let info = devices.all[d]
-        if ("mem_free" in info && "mem_total" in info) {
+        if ("mem_free" in info && "mem_total" in info && info["mem_total"] > 0) {
             return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(
                 1
             )} Gb total)</small>`
80  ui/plugins/ui/snow.plugin.js  Normal file
@ -0,0 +1,80 @@
+// christmas hack, courtesy: https://pajasevi.github.io/CSSnowflakes/
+
+;(function(){
+    "use strict";
+
+    function makeItSnow() {
+        const styleSheet = document.createElement("style")
+        styleSheet.textContent = `
+            /* customizable snowflake styling */
+            .snowflake {
+                color: #fff;
+                font-size: 1em;
+                font-family: Arial, sans-serif;
+                text-shadow: 0 0 5px #000;
+            }
+
+            .snowflake,.snowflake .inner{animation-iteration-count:infinite;animation-play-state:running}@keyframes snowflakes-fall{0%{transform:translateY(0)}100%{transform:translateY(110vh)}}@keyframes snowflakes-shake{0%,100%{transform:translateX(0)}50%{transform:translateX(80px)}}.snowflake{position:fixed;top:-10%;z-index:9999;-webkit-user-select:none;user-select:none;cursor:default;animation-name:snowflakes-shake;animation-duration:3s;animation-timing-function:ease-in-out}.snowflake .inner{animation-duration:10s;animation-name:snowflakes-fall;animation-timing-function:linear}.snowflake:nth-of-type(0){left:1%;animation-delay:0s}.snowflake:nth-of-type(0) .inner{animation-delay:0s}.snowflake:first-of-type{left:10%;animation-delay:1s}.snowflake:first-of-type .inner,.snowflake:nth-of-type(8) .inner{animation-delay:1s}.snowflake:nth-of-type(2){left:20%;animation-delay:.5s}.snowflake:nth-of-type(2) .inner,.snowflake:nth-of-type(6) .inner{animation-delay:6s}.snowflake:nth-of-type(3){left:30%;animation-delay:2s}.snowflake:nth-of-type(11) .inner,.snowflake:nth-of-type(3) .inner{animation-delay:4s}.snowflake:nth-of-type(4){left:40%;animation-delay:2s}.snowflake:nth-of-type(10) .inner,.snowflake:nth-of-type(4) .inner{animation-delay:2s}.snowflake:nth-of-type(5){left:50%;animation-delay:3s}.snowflake:nth-of-type(5) .inner{animation-delay:8s}.snowflake:nth-of-type(6){left:60%;animation-delay:2s}.snowflake:nth-of-type(7){left:70%;animation-delay:1s}.snowflake:nth-of-type(7) .inner{animation-delay:2.5s}.snowflake:nth-of-type(8){left:80%;animation-delay:0s}.snowflake:nth-of-type(9){left:90%;animation-delay:1.5s}.snowflake:nth-of-type(9) .inner{animation-delay:3s}.snowflake:nth-of-type(10){left:25%;animation-delay:0s}.snowflake:nth-of-type(11){left:65%;animation-delay:2.5s}
+        `
+        document.head.appendChild(styleSheet)
+
+        const snowflakes = document.createElement("div")
+        snowflakes.id = "snowflakes-container"
+        snowflakes.innerHTML = `
+            <div class="snowflakes" aria-hidden="true">
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+                <div class="snowflake">
+                    <div class="inner">❅</div>
+                </div>
+            </div>`
+
+        document.body.appendChild(snowflakes)
+
+        const script = document.createElement("script")
+        script.innerHTML = `
+            $(document).ready(function() {
+                setTimeout(function() {
+                    $("#snowflakes-container").fadeOut("slow", function() {$(this).remove()})
+                }, 10 * 1000)
+            })
+        `
+        document.body.appendChild(script)
+    }
+
+    let date = new Date()
+    if (date.getMonth() === 11 && date.getDate() >= 12) {
+        makeItSnow()
+    }
+})()