Use full precision automatically for NVIDIA 1650 and 1660

This commit is contained in:
cmdr2 2022-09-07 15:32:34 +05:30
parent f44c1d4536
commit 1d4e06b884
2 changed files with 16 additions and 5 deletions

View File

@@ -269,7 +269,7 @@
<div id="server-status-color">&nbsp;</div>
<span id="server-status-msg">Stable Diffusion is starting..</span>
</div>
<h1>Stable Diffusion UI <small>v2.07 (beta)</small></h1>
<h1>Stable Diffusion UI <small>v2.08 (beta)</small></h1>
</div>
<div id="editor-inputs">
<div id="editor-inputs-prompt" class="row">
@@ -347,7 +347,7 @@
<li><input id="sound_toggle" name="sound_toggle" type="checkbox" checked> <label for="sound_toggle">Play sound on task completion</label></li>
<li><input id="turbo" name="turbo" type="checkbox" checked> <label for="turbo">Turbo mode (generates images faster, but uses an additional 1 GB of GPU memory)</label></li>
<li><input id="use_cpu" name="use_cpu" type="checkbox"> <label for="use_cpu">Use CPU instead of GPU (warning: this will be *very* slow)</label></li>
<li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision (for GPU-only. warning: this will consume more VRAM. Use this for NVIDIA 1650 and 1660)</label></li>
<li><input id="use_full_precision" name="use_full_precision" type="checkbox"> <label for="use_full_precision">Use full precision (for GPU-only. warning: this will consume more VRAM)</label></li>
<!-- <li><input id="allow_nsfw" name="allow_nsfw" type="checkbox"> <label for="allow_nsfw">Allow NSFW Content (You confirm you are above 18 years of age)</label></li> -->
</ul>
</div>

View File

@@ -43,13 +43,22 @@ precision = 'autocast'
sampler_plms = None
sampler_ddim = None
force_full_precision = False
try:
gpu_name = torch.cuda.get_device_name(torch.cuda.current_device())
force_full_precision = ('nvidia' in gpu_name.lower()) and (' 1660' in gpu_name or ' 1650' in gpu_name) # otherwise these NVIDIA cards create green images
if force_full_precision:
print('forcing full precision on NVIDIA 16xx cards, to avoid green images')
except:
pass
# api
def load_model(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast', half_model_fs=False):
global ckpt, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half
ckpt = ckpt_to_use
device = device_to_use
precision = precision_to_use
precision = precision_to_use if not force_full_precision else 'full'
unet_bs = unet_bs_to_use
sd = load_model_from_config(f"{ckpt}")
@@ -119,9 +128,9 @@ def mk_img(req: Request):
device = 'cuda'
if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
(precision == 'full' and not req.use_full_precision) or \
(precision == 'full' and not req.use_full_precision and not force_full_precision) or \
(req.init_image is None and model_fs_is_half) or \
(req.init_image is not None and not model_fs_is_half):
(req.init_image is not None and not model_fs_is_half and not force_full_precision):
print('reloading model for cuda')
load_model(ckpt, device, model.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))
@@ -147,6 +156,8 @@ def mk_img(req: Request):
print(req.to_string(), '\n device', device)
print('\n\n Using precision:', precision)
seed_everything(opt_seed)
batch_size = opt_n_samples