Merge pull request #489 from cmdr2/main

v2.4.6 - merge hotfix from main - bring the VRAM usage during startup back to what it was in the previous main version (v2.3.5)
cmdr2 2022-11-16 19:36:49 +05:30 committed by GitHub
commit ee31519552
2 changed files with 13 additions and 13 deletions

@@ -20,7 +20,7 @@
 <div id="container">
     <div id="top-nav">
         <div id="logo">
-            <h1>Stable Diffusion UI <small>v2.4.5 <span id="updateBranchLabel"></span></small></h1>
+            <h1>Stable Diffusion UI <small>v2.4.6 <span id="updateBranchLabel"></span></small></h1>
         </div>
         <div id="server-status">
             <div id="server-status-color"></div>

@@ -118,8 +118,8 @@ def load_model_ckpt():
     model.cdevice = torch.device(thread_data.device)
     model.unet_bs = thread_data.unet_bs
     model.turbo = thread_data.turbo
-    if thread_data.device != 'cpu':
-        model.to(thread_data.device)
+    # if thread_data.device != 'cpu':
+    #     model.to(thread_data.device)
     #if thread_data.reduced_memory:
     #model.model1.to("cpu")
     #model.model2.to("cpu")
@@ -129,11 +129,11 @@ def load_model_ckpt():
     _, _ = modelCS.load_state_dict(sd, strict=False)
     modelCS.eval()
     modelCS.cond_stage_model.device = torch.device(thread_data.device)
-    if thread_data.device != 'cpu':
-        if thread_data.reduced_memory:
-            modelCS.to('cpu')
-        else:
-            modelCS.to(thread_data.device) # Preload on device if not already there.
+    # if thread_data.device != 'cpu':
+    #     if thread_data.reduced_memory:
+    #         modelCS.to('cpu')
+    #     else:
+    #         modelCS.to(thread_data.device) # Preload on device if not already there.
     thread_data.modelCS = modelCS

     modelFS = instantiate_from_config(config.modelFirstStage)
@@ -151,11 +151,11 @@ def load_model_ckpt():
         print(f'Cannot find VAE file: {thread_data.vae_file}{model_extension}')

     modelFS.eval()
-    if thread_data.device != 'cpu':
-        if thread_data.reduced_memory:
-            modelFS.to('cpu')
-        else:
-            modelFS.to(thread_data.device) # Preload on device if not already there.
+    # if thread_data.device != 'cpu':
+    #     if thread_data.reduced_memory:
+    #         modelFS.to('cpu')
+    #     else:
+    #         modelFS.to(thread_data.device) # Preload on device if not already there.
     thread_data.modelFS = modelFS

     del sd
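
Note on the change: the only functional edit in this diff is that load_model_ckpt() no longer calls .to(thread_data.device) on the UNet, modelCS and modelFS while the checkpoint is being loaded, so nothing is pushed to the GPU at startup and VRAM usage returns to the v2.3.5 behaviour. Below is a minimal, hypothetical sketch of that lazy-placement pattern; the helper names (load_submodel, move_for_inference, release_after_inference) and parameters are illustrative, not the project's actual runtime code.

import torch
import torch.nn as nn

def load_submodel(factory, state_dict, target_device='cuda'):
    """Build a sub-model and keep its weights in system RAM.

    Not calling .to(target_device) here is the whole point: startup allocates
    no VRAM for this model. `factory` and `target_device` are illustrative.
    """
    model = factory()
    model.load_state_dict(state_dict, strict=False)
    model.eval()
    model.target_device = torch.device(target_device)  # remember where it should run
    return model

def move_for_inference(model: nn.Module) -> nn.Module:
    """Move the model to its target device only when a render actually needs it."""
    if next(model.parameters()).device != model.target_device:
        model.to(model.target_device)
    return model

def release_after_inference(model: nn.Module, reduced_memory: bool) -> nn.Module:
    """Optionally park the model back on the CPU to free VRAM between renders."""
    if reduced_memory:
        model.to('cpu')
    return model

The project's own runtime presumably performs the equivalent device moves at render time (outside this diff), which is why only the eager preloads in load_model_ckpt() needed to be commented out.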