forked from extern/easydiffusion
Don't install xformers for AMD on Linux; changelog
parent 534bb2dd84
commit 6c148f1791
@@ -21,6 +21,7 @@
 Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
 
 ### Detailed changelog
+* 2.5.33 - 20 Apr 2023 - Use `xformers` to speed up image generation.
 * 2.5.32 - 19 Apr 2023 - Automatically check for black images, and set full-precision if necessary (for attn). This means custom models based on Stable Diffusion v2.1 will just work, without needing special command-line arguments or editing of yaml config files.
 * 2.5.31 - 10 Apr 2023 - Reduce VRAM usage while upscaling.
 * 2.5.31 - 6 Apr 2023 - Allow seeds upto `4,294,967,295`. Thanks @ogmaresca.
@@ -35,7 +35,7 @@ def version(module_name: str) -> str:
 
 
 def install(module_name: str, module_version: str):
-    if module_name == "xformers" and os_name == "Darwin":  # xformers is not available on mac
+    if module_name == "xformers" and (os_name == "Darwin" or is_amd_on_linux()):
         return
 
     index_url = None
@@ -87,10 +87,7 @@ def apply_torch_install_overrides(module_version: str):
     if os_name == "Windows":
         module_version += "+cu117"
         index_url = "https://download.pytorch.org/whl/cu117"
-    elif os_name == "Linux":
-        with open("/proc/bus/pci/devices", "r") as f:
-            device_info = f.read()
-            if "amdgpu" in device_info and "nvidia" not in device_info:
-                index_url = "https://download.pytorch.org/whl/rocm5.4.2"
+    elif is_amd_on_linux():
+        index_url = "https://download.pytorch.org/whl/rocm5.4.2"
 
     return module_version, index_url
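For context, `apply_torch_install_overrides` returns the (possibly suffixed) version string together with an `index_url`, the alternate wheel index that the installer passes to pip so that AMD-only Linux machines pull ROCm builds of torch instead of the default CUDA ones. Below is a minimal sketch of how such an override could feed a pip invocation; the `run_pip` helper name and the exact flags are illustrative assumptions, not the installer's real code:

```python
import subprocess
import sys


def run_pip(module_name: str, module_version: str, index_url: str = None):
    # Builds a command like:
    #   python -m pip install "torch==2.0.0+cu117" --index-url https://download.pytorch.org/whl/cu117
    cmd = [sys.executable, "-m", "pip", "install", f"{module_name}=={module_version}"]
    if index_url:
        cmd += ["--index-url", index_url]
    subprocess.run(cmd, check=True)
```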
@@ -107,6 +104,16 @@ def include_cuda_versions(module_versions: tuple) -> tuple:
     return allowed_versions
 
 
+def is_amd_on_linux():
+    if os_name == "Linux":
+        with open("/proc/bus/pci/devices", "r") as f:
+            device_info = f.read()
+            if "amdgpu" in device_info and "nvidia" not in device_info:
+                return True
+
+    return False
+
+
 def fail(module_name):
     print(
         f"""Error installing {module_name}. Sorry about that, please try to:
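The new `is_amd_on_linux()` helper treats a machine as "AMD on Linux" when `/proc/bus/pci/devices` lists an `amdgpu` device and no `nvidia` one; it now gates both the `xformers` skip and the ROCm torch index override. A standalone sketch for sanity-checking that detection outside the installer, assuming `os_name` comes from `platform.system()` (as the "Darwin"/"Windows"/"Linux" comparisons above suggest); it is only meaningful on Linux, where that proc file exists:

```python
import platform

os_name = platform.system()


def is_amd_on_linux() -> bool:
    # AMD GPU listed and no NVIDIA GPU, on Linux only.
    if os_name == "Linux":
        with open("/proc/bus/pci/devices", "r") as f:
            device_info = f.read()
        return "amdgpu" in device_info and "nvidia" not in device_info
    return False


if __name__ == "__main__":
    print("AMD-only Linux system:", is_amd_on_linux())
```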
@@ -30,7 +30,7 @@
             <h1>
                 <img id="logo_img" src="/media/images/icon-512x512.png" >
                 Easy Diffusion
-                <small>v2.5.32 <span id="updateBranchLabel"></span></small>
+                <small>v2.5.33 <span id="updateBranchLabel"></span></small>
             </h1>
         </div>
         <div id="server-status">