Mirror of https://github.com/easydiffusion/easydiffusion.git

Merge branch 'beta' into serverip

Commit 3452d7852a
@@ -23,6 +23,7 @@

 ### Detailed changelog
 * 2.4.14 - 23 Nov 2022 - Button to retrieve the network addresses of the server in the systems setting dialog
+* 2.4.14 - 22 Nov 2022 - Change the backend to a custom fork of Stable Diffusion
 * 2.4.13 - 21 Nov 2022 - Change the modifier weight via mouse wheel, drag to reorder selected modifiers, and some more modifier-related fixes. Thanks @patriceac
 * 2.4.12 - 21 Nov 2022 - Another fix for improving how long images take to generate. Reduces the time taken for an enqueued task to start processing.
 * 2.4.11 - 21 Nov 2022 - Installer improvements: avoid crashing if the username contains a space or special characters, allow moving/renaming the folder after installation on Windows, whitespace fix on git apply
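The 2.4.14 entry above refers to a button that lists the server's network addresses in the system settings dialog. The UI and backend code for that feature is not part of this commit, so the snippet below is only a hypothetical sketch of how such addresses could be enumerated with the Python standard library; the function name and the UDP-connect fallback are illustrative assumptions, not the project's actual implementation.

```python
# Hypothetical sketch -- not the actual Easy Diffusion implementation.
# Enumerates candidate LAN addresses that a "server IP" button could display.
import socket

def get_network_addresses():
    addresses = set()
    hostname = socket.gethostname()
    # Addresses bound to the local hostname (may cover several interfaces).
    try:
        for info in socket.getaddrinfo(hostname, None, family=socket.AF_INET):
            addresses.add(info[4][0])
    except socket.gaierror:
        pass
    # Fallback: the address used for outbound traffic (no packets are sent).
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            addresses.add(s.getsockname()[0])
    except OSError:
        pass
    return sorted(a for a in addresses if not a.startswith("127."))

if __name__ == "__main__":
    print(get_network_addresses())
```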
@@ -71,7 +71,7 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
 You don't need to install or struggle with Python, Anaconda, Docker etc. The installer will take care of whatever is needed.

 # Installation
-1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-linux.zip).
+1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.4.13/stable-diffusion-ui-linux.zip).

 2. **Extract**:
 - For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
@@ -42,11 +42,11 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" (
 mkdir "%MAMBA_ROOT_PREFIX%"
 call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"

-@REM if "%ERRORLEVEL%" NEQ "0" (
-@REM echo "There was a problem downloading micromamba. Cannot continue."
-@REM pause
-@REM exit /b
-@REM )
+if "%ERRORLEVEL%" NEQ "0" (
+echo "There was a problem downloading micromamba. Cannot continue."
+pause
+exit /b
+)

 mkdir "%APPDATA%"
 mkdir "%USERPROFILE%"
@@ -16,11 +16,11 @@ if exist "%cd%\profile" (

 @rem activate the installer env
 call conda activate
-@rem @if "%ERRORLEVEL%" NEQ "0" (
-@rem @echo. & echo "Error activating conda for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-@rem pause
-@rem exit /b
-@rem )
+@if "%ERRORLEVEL%" NEQ "0" (
+@echo. & echo "Error activating conda for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
+pause
+exit /b
+)

 @REM remove the old version of the dev console script, if it's still present
 if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
@@ -33,18 +33,19 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"

 @cd stable-diffusion

+@call git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git

 @call git reset --hard
 @call git pull
-@call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
+@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a

-@call git apply --whitespace=nowarn ..\ui\sd_internal\ddim_callback.patch
-@call git apply --whitespace=nowarn ..\ui\sd_internal\env_yaml.patch
+@call git apply --whitespace=warn ..\ui\sd_internal\ddim_callback.patch

 @cd ..
 ) else (
 @echo. & echo "Downloading Stable Diffusion.." & echo.

-@call git clone https://github.com/basujindal/stable-diffusion.git && (
+@call git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion && (
 @echo sd_git_cloned >> scripts\install_status.txt
 ) || (
 @echo "Error downloading Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
@@ -53,10 +54,9 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
 )

 @cd stable-diffusion
-@call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
+@call git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a

-@call git apply --whitespace=nowarn ..\ui\sd_internal\ddim_callback.patch
-@call git apply --whitespace=nowarn ..\ui\sd_internal\env_yaml.patch
+@call git apply --whitespace=warn ..\ui\sd_internal\ddim_callback.patch

 @cd ..
 )
@@ -88,12 +88,6 @@ if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"

 @call conda activate .\env

-@call conda install -c conda-forge -y --prefix env antlr4-python3-runtime=4.8 || (
-@echo. & echo "Error installing antlr4-python3-runtime for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-pause
-exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"') do if "%%a" NEQ "42" (
 @echo. & echo "Dependency test failed! Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
 pause
@@ -117,18 +111,6 @@ set PATH=C:\Windows\System32;%PATH%

 set PYTHONPATH=%cd%;%cd%\env\lib\site-packages

-@call pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN || (
-@echo. & echo "Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-pause
-exit /b
-)
-
-@call pip install basicsr==1.4.2 || (
-@echo. & echo "Error installing the basicsr package necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-pause
-exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "from gfpgan import GFPGANer; print(42)"') do if "%%a" NEQ "42" (
 @echo. & echo "Dependency test failed! Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
 pause
@@ -150,12 +132,6 @@ set PATH=C:\Windows\System32;%PATH%

 set PYTHONPATH=%cd%;%cd%\env\lib\site-packages

-@call pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan || (
-@echo. & echo "Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
-pause
-exit /b
-)
-
 for /f "tokens=*" %%a in ('python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"') do if "%%a" NEQ "42" (
 @echo. & echo "Dependency test failed! Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
 pause
scripts/on_sd_start.sh (32 changes, Executable file → Normal file)
@@ -26,28 +26,28 @@ if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/insta

 cd stable-diffusion

+git remote set-url origin https://github.com/easydiffusion/diffusion-kit.git

 git reset --hard
 git pull
-git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
+git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a

-git apply --whitespace=nowarn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
-git apply --whitespace=nowarn ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
+git apply --whitespace=warn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"

 cd ..
 else
 printf "\n\nDownloading Stable Diffusion..\n\n"

-if git clone https://github.com/basujindal/stable-diffusion.git ; then
+if git clone https://github.com/easydiffusion/diffusion-kit.git stable-diffusion ; then
 echo sd_git_cloned >> scripts/install_status.txt
 else
 fail "git clone of basujindal/stable-diffusion.git failed"
 fi

 cd stable-diffusion
-git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
+git -c advice.detachedHead=false checkout 7f32368ed1030a6e710537047bacd908adea183a

-git apply --whitespace=nowarn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
-git apply --whitespace=nowarn ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
+git apply --whitespace=warn ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"

 cd ..
 fi
@@ -74,12 +74,6 @@ else

 conda activate ./env || fail "conda activate failed"

-if conda install -c conda-forge --prefix ./env -y antlr4-python3-runtime=4.8 ; then
-echo "Installed. Testing.."
-else
-fail "Error installing antlr4-python3-runtime"
-fi
-
 out_test=`python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"`
 if [ "$out_test" != "42" ]; then
 fail "Dependency test failed"
@@ -96,12 +90,6 @@ else
 export PYTHONNOUSERSITE=1
 export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"

-if pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN ; then
-echo "Installed. Testing.."
-else
-fail "Error installing the packages necessary for GFPGAN (Face Correction)."
-fi
-
 out_test=`python -c "from gfpgan import GFPGANer; print(42)"`
 if [ "$out_test" != "42" ]; then
 echo "EE The dependency check has failed. This usually means that some system libraries are missing."
@@ -121,12 +109,6 @@ else
 export PYTHONNOUSERSITE=1
 export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"

-if pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan ; then
-echo "Installed. Testing.."
-else
-fail "Error installing the packages necessary for ESRGAN"
-fi
-
 out_test=`python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"`
 if [ "$out_test" != "42" ]; then
 fail "ESRGAN dependency test failed"
@@ -20,7 +20,10 @@
 <div id="container">
 <div id="top-nav">
 <div id="logo">
-<h1>Stable Diffusion UI <small>v2.4.13 <span id="updateBranchLabel"></span></small></h1>
+<h1>
+Stable Diffusion UI
+<small>v2.4.14 <span id="updateBranchLabel"></span></small>
+</h1>
 </div>
 <div id="server-status">
 <div id="server-status-color">●</div>
@@ -51,6 +51,13 @@ const TASK_MAPPING = {
 readUI: () => negativePromptField.value,
 parse: (val) => val
 },
+active_tags: { name: "Image Modifiers",
+setUI: (active_tags) => {
+refreshModifiersState(active_tags)
+},
+readUI: () => activeTags.map(x => x.name),
+parse: (val) => val
+},
 width: { name: 'Width',
 setUI: (width) => {
 const oldVal = widthField.value
@@ -267,11 +274,6 @@ function restoreTaskToUI(task, fieldsToSkip) {
 // restore the original tag
 promptField.value = task.reqBody.original_prompt || task.reqBody.prompt

-// Restore modifiers
-if (task.reqBody.active_tags) {
-refreshModifiersState(task.reqBody.active_tags)
-}
-
 // properly reset checkboxes
 if (!('use_face_correction' in task.reqBody)) {
 useFaceCorrectionField.checked = false
@@ -406,7 +408,7 @@ async function parseContent(text) {
 }

 async function readFile(file, i) {
-console.log(`Event %o reading file[${i}]:${file.name}...`, e)
+console.log(`Event %o reading file[${i}]:${file.name}...`)
 const fileContent = (await file.text()).trim()
 return await parseContent(fileContent)
 }
@@ -1,72 +1,13 @@
 diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
-index b967b55..35ef520 100644
+index 79058bc..a473411 100644
 --- a/optimizedSD/ddpm.py
 +++ b/optimizedSD/ddpm.py
-@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
+@@ -564,12 +564,12 @@ class UNet(DDPM):
-from ldm.modules.diffusionmodules.util import make_beta_schedule
+unconditional_guidance_scale=unconditional_guidance_scale,
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
+callback=callback, img_callback=img_callback)
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
--from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
-+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff

-def disabled_train(self):
-"""Overwrite model.train with this function to make sure train/eval mode
-@@ -506,6 +506,8 @@ class UNet(DDPM):

-x_latent = noise if x0 is None else x0
-# sampling
-+ if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
-+ self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)

-if sampler == "plms":
-self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
-@@ -528,39 +530,46 @@ class UNet(DDPM):
-elif sampler == "ddim":
-samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
-unconditional_conditioning=unconditional_conditioning,
-- mask = mask,init_latent=x_T,use_original_steps=False)
-+ mask = mask,init_latent=x_T,use_original_steps=False,
-+ callback=callback, img_callback=img_callback)

-elif sampler == "euler":
-self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
-samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)
-elif sampler == "euler_a":
-self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
-samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)

-elif sampler == "dpm2":
-samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)
-elif sampler == "heun":
-samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)

-elif sampler == "dpm2_a":
-samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)


-elif sampler == "lms":
-samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-- unconditional_guidance_scale=unconditional_guidance_scale)
-+ unconditional_guidance_scale=unconditional_guidance_scale,
-+ img_callback=img_callback)
-+
 + yield from samples
++
 if(self.turbo):
 self.model1.to("cpu")
 self.model2.to("cpu")
@@ -76,7 +17,7 @@ index b967b55..35ef520 100644
 @torch.no_grad()
 def plms_sampling(self, cond,b, img,
 ddim_use_original_steps=False,
-@@ -599,10 +608,10 @@ class UNet(DDPM):
+@@ -608,10 +608,10 @@ class UNet(DDPM):
 old_eps.append(e_t)
 if len(old_eps) >= 4:
 old_eps.pop(0)
@@ -90,23 +31,15 @@ index b967b55..35ef520 100644

 @torch.no_grad()
 def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-@@ -706,7 +715,8 @@ class UNet(DDPM):
+@@ -740,13 +740,13 @@ class UNet(DDPM):

-@torch.no_grad()
-def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-- mask = None,init_latent=None,use_original_steps=False):
-+ mask = None,init_latent=None,use_original_steps=False,
-+ callback=None, img_callback=None):

-timesteps = self.ddim_timesteps
-timesteps = timesteps[:t_start]
-@@ -730,10 +740,13 @@ class UNet(DDPM):
 unconditional_guidance_scale=unconditional_guidance_scale,
 unconditional_conditioning=unconditional_conditioning)

+- if callback: callback(i)
+- if img_callback: img_callback(x_dec, i)
 + if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x_dec, i)
-+
 if mask is not None:
 - return x0 * mask + (1. - mask) * x_dec
 + x_dec = x0 * mask + (1. - mask) * x_dec
@@ -116,217 +49,114 @@ index b967b55..35ef520 100644


 @torch.no_grad()
-@@ -779,13 +792,16 @@ class UNet(DDPM):
+@@ -820,12 +820,12 @@ class UNet(DDPM):


-@torch.no_grad()
-- def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
-+ def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
-+ img_callback=None):
-"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
-extra_args = {} if extra_args is None else extra_args
-cvd = CompVisDenoiser(ac)
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
-+
-s_in = x.new_ones([x.shape[0]]).half()
-for i in trange(len(sigmas) - 1, disable=disable):
-gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
-@@ -807,13 +823,18 @@ class UNet(DDPM):
 d = to_d(x, sigma_hat, denoised)
-if callback is not None:
+- if callback: callback(i)
-callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+- if img_callback: img_callback(x, i)
-+
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)
-+
 dt = sigmas[i + 1] - sigma_hat
 # Euler method
 x = x + d * dt
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)

 @torch.no_grad()
-- def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
+def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, img_callback=None):
-+ def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
+@@ -852,14 +852,14 @@ class UNet(DDPM):
-+ img_callback=None):
+denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-"""Ancestral sampling with Euler method steps."""
-extra_args = {} if extra_args is None else extra_args

-@@ -822,6 +843,8 @@ class UNet(DDPM):
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
-+
-s_in = x.new_ones([x.shape[0]]).half()
-for i in trange(len(sigmas) - 1, disable=disable):

-@@ -837,17 +860,22 @@ class UNet(DDPM):
 sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
-if callback is not None:
+- if callback: callback(i)
-callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+- if img_callback: img_callback(x, i)
-+
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)
-+
 d = to_d(x, sigmas[i], denoised)
 # Euler method
 dt = sigma_down - sigmas[i]
 x = x + d * dt
 x = x + torch.randn_like(x) * sigma_up
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)



-@torch.no_grad()
+@@ -892,8 +892,8 @@ class UNet(DDPM):
-- def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-+ def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
-+ img_callback=None):
-"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
-extra_args = {} if extra_args is None else extra_args

-@@ -855,6 +883,8 @@ class UNet(DDPM):
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
-+

-s_in = x.new_ones([x.shape[0]]).half()
-for i in trange(len(sigmas) - 1, disable=disable):
-@@ -876,6 +906,9 @@ class UNet(DDPM):
 d = to_d(x, sigma_hat, denoised)
-if callback is not None:
+- if callback: callback(i)
-callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+- if img_callback: img_callback(x, i)
-+
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)
-+
 dt = sigmas[i + 1] - sigma_hat
 if sigmas[i + 1] == 0:
 # Euler method
-@@ -895,11 +928,13 @@ class UNet(DDPM):
+@@ -913,7 +913,7 @@ class UNet(DDPM):
 d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
 d_prime = (d + d_2) / 2
 x = x + d_prime * dt
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)


 @torch.no_grad()
-- def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+@@ -944,8 +944,8 @@ class UNet(DDPM):
-+ def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
-+ img_callback=None):
-"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
-extra_args = {} if extra_args is None else extra_args

-@@ -907,6 +942,8 @@ class UNet(DDPM):
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
-+
-s_in = x.new_ones([x.shape[0]]).half()
-for i in trange(len(sigmas) - 1, disable=disable):
-gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
-@@ -924,7 +961,7 @@ class UNet(DDPM):
 e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
 denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

--
+- if callback: callback(i)
+- if img_callback: img_callback(x, i)
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)

 d = to_d(x, sigma_hat, denoised)
 # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
-@@ -945,11 +982,13 @@ class UNet(DDPM):
+@@ -966,7 +966,7 @@ class UNet(DDPM):

 d_2 = to_d(x_2, sigma_mid, denoised_2)
 x = x + d_2 * dt_2
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)


 @torch.no_grad()
-- def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
+@@ -994,8 +994,8 @@ class UNet(DDPM):
-+ def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
-+ img_callback=None):
-"""Ancestral sampling with DPM-Solver inspired second-order steps."""
-extra_args = {} if extra_args is None else extra_args

-@@ -957,6 +996,8 @@ class UNet(DDPM):
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
-+
-s_in = x.new_ones([x.shape[0]]).half()
-for i in trange(len(sigmas) - 1, disable=disable):

-@@ -973,6 +1014,9 @@ class UNet(DDPM):
 sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
-if callback is not None:
+- if callback: callback(i)
-callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+- if img_callback: img_callback(x, i)
-+
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)
-+
 d = to_d(x, sigmas[i], denoised)
 # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
 sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
-@@ -993,11 +1037,13 @@ class UNet(DDPM):
+@@ -1016,7 +1016,7 @@ class UNet(DDPM):
 d_2 = to_d(x_2, sigma_mid, denoised_2)
 x = x + d_2 * dt_2
 x = x + torch.randn_like(x) * sigma_up
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)


 @torch.no_grad()
-- def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
+@@ -1042,8 +1042,8 @@ class UNet(DDPM):
-+ def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
-+ img_callback=None):
-extra_args = {} if extra_args is None else extra_args
-s_in = x.new_ones([x.shape[0]])

-@@ -1005,6 +1051,8 @@ class UNet(DDPM):
-sigmas = cvd.get_sigmas(S)
-x = x*sigmas[0]

-+ print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
-+
-ds = []
-for i in trange(len(sigmas) - 1, disable=disable):

-@@ -1017,6 +1065,7 @@ class UNet(DDPM):
 e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
 denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

+- if callback: callback(i)
+- if img_callback: img_callback(x, i)
++ if callback: yield from callback(i)
 + if img_callback: yield from img_callback(x, i)

 d = to_d(x, sigmas[i], denoised)
 ds.append(d)
-@@ -1027,4 +1076,5 @@ class UNet(DDPM):
+@@ -1054,4 +1054,4 @@ class UNet(DDPM):
 cur_order = min(i + 1, order)
 coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
 x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
 - return x
-+
 + yield from img_callback(x, len(sigmas)-1)
-diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
-index abc3098..7a32ffe 100644
---- a/optimizedSD/openaimodelSplit.py
-+++ b/optimizedSD/openaimodelSplit.py
-@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
-normalization,
-timestep_embedding,
-)
--from splitAttention import SpatialTransformer
-+from .splitAttention import SpatialTransformer


-class AttentionPool2d(nn.Module):
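The rewritten ddim_callback.patch above turns the samplers' progress callbacks into generator calls: per-step `callback(i)` / `img_callback(x, i)` calls become `yield from` expressions, and the final `return x` is replaced by a last `yield from img_callback(x, len(sigmas)-1)`, so the caller can stream intermediate images while sampling runs. Below is a minimal, self-contained sketch of that pattern; `fake_sampler` and the dictionary it yields are invented for illustration and are not Easy Diffusion's actual API.

```python
import torch

def img_callback(x, i):
    # In the real backend this would decode the latent into a preview image;
    # here we just yield a small progress record to the consumer.
    yield {"step": i, "latent_norm": float(x.norm())}

def fake_sampler(steps, img_callback=None):
    """Stand-in for a sampler loop rewritten as a generator."""
    x = torch.zeros(1, 4, 8, 8)
    for i in range(steps):
        x = x + 0.1  # placeholder for one denoising step
        if img_callback:
            yield from img_callback(x, i)   # stream the intermediate state
    if img_callback:
        yield from img_callback(x, steps)   # final image, instead of `return x`

# The caller simply iterates the sampler and receives previews as they appear.
for update in fake_sampler(3, img_callback=img_callback):
    print(update)
```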
@@ -1,13 +0,0 @@
-diff --git a/environment.yaml b/environment.yaml
-index 7f25da8..306750f 100644
---- a/environment.yaml
-+++ b/environment.yaml
-@@ -23,6 +23,8 @@ dependencies:
-- torch-fidelity==0.3.0
-- transformers==4.19.2
-- torchmetrics==0.6.0
-+ - pywavelets==1.3.0
-+ - pandas==1.4.4
-- kornia==0.6
-- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
-- -e git+https://github.com/openai/CLIP.git@main#egg=clip