Revert "Merge pull request #294 from cmdr2/main"

This reverts commit 1e3b4f9969, reversing
changes made to e0c0935d3a.
cmdr2
2022-10-07 20:07:15 +05:30
parent 1e3b4f9969
commit 355870a750
531 changed files with 17640 additions and 478 deletions

View File

@@ -22,9 +22,7 @@ class Request:
     use_full_precision: bool = False
     use_face_correction: str = None # or "GFPGANv1.3"
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
-    use_stable_diffusion_model: str = "sd-v1-4"
     show_only_filtered_image: bool = False
-    output_format: str = "jpeg" # or "png"
     stream_progress_updates: bool = False
     stream_image_progress: bool = False
@@ -44,8 +42,6 @@ class Request:
             "sampler": self.sampler,
             "use_face_correction": self.use_face_correction,
             "use_upscale": self.use_upscale,
-            "use_stable_diffusion_model": self.use_stable_diffusion_model,
-            "output_format": self.output_format,
         }

     def to_string(self):
@@ -66,9 +62,7 @@ class Request:
     use_full_precision: {self.use_full_precision}
     use_face_correction: {self.use_face_correction}
     use_upscale: {self.use_upscale}
-    use_stable_diffusion_model: {self.use_stable_diffusion_model}
     show_only_filtered_image: {self.show_only_filtered_image}
-    output_format: {self.output_format}
     stream_progress_updates: {self.stream_progress_updates}
     stream_image_progress: {self.stream_image_progress}'''
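Note: taken together, these three hunks strip the same two fields from the class attributes, the json() dict, and the to_string() dump. A minimal sketch of the reverted class shape, using only the fields visible above (the rest of the class is assumed):

class Request:
    use_full_precision: bool = False
    use_face_correction: str = None    # e.g. "GFPGANv1.3"
    use_upscale: str = None            # e.g. "RealESRGAN_x4plus"
    show_only_filtered_image: bool = False
    stream_progress_updates: bool = False
    stream_image_progress: bool = False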

View File

@@ -79,7 +79,7 @@ except:
     print('WARNING: No compatible GPU found. Using the CPU, but this will be very slow!')
     pass

-def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast'):
+def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_use=1, precision_to_use='autocast', half_model_fs=False):
     global ckpt_file, model, modelCS, modelFS, model_is_half, device, unet_bs, precision, model_fs_is_half
     ckpt_file = ckpt_to_use
@@ -130,11 +130,14 @@ def load_model_ckpt(ckpt_to_use, device_to_use='cuda', turbo=False, unet_bs_to_u
     if device != "cpu" and precision == "autocast":
         model.half()
         modelCS.half()
-        modelFS.half()
         model_is_half = True
-        model_fs_is_half = True
     else:
         model_is_half = False

+    if half_model_fs:
+        modelFS.half()
+        model_fs_is_half = True
+    else:
+        model_fs_is_half = False

     print('loaded ', ckpt_file, 'to', device, 'precision', precision)
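The restored load_model_ckpt() decouples the precision of the first-stage model (modelFS) from the main model: model and modelCS follow the global autocast setting, while modelFS is halved only when the caller passes half_model_fs=True. A minimal standalone sketch of that decision with a hypothetical helper (PyTorch-style modules with a .half() method are assumed):

def apply_precision(model, modelCS, modelFS, device, precision, half_model_fs):
    # Main model and conditioning stage: half precision under autocast on GPU.
    model_is_half = (device != 'cpu' and precision == 'autocast')
    if model_is_half:
        model.half()
        modelCS.half()
    # First-stage model: halved independently, only on request.
    if half_model_fs:
        modelFS.half()
    return model_is_half, half_model_fs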
@@ -205,7 +208,6 @@ def mk_img(req: Request):
         })

 def do_mk_img(req: Request):
-    global ckpt_file
     global model, modelCS, modelFS, device
     global model_gfpgan, model_real_esrgan
     global stop_processing
@@ -218,15 +220,6 @@ def do_mk_img(req: Request):
     temp_images.clear()

-    # custom model support:
-    #  the req.use_stable_diffusion_model needs to be a valid path
-    #  to the ckpt file (without the extension).
-
-    needs_model_reload = False
-    if ckpt_file != req.use_stable_diffusion_model:
-        ckpt_file = req.use_stable_diffusion_model
-        needs_model_reload = True
-
     model.turbo = req.turbo

     if req.use_cpu:
         if device != 'cpu':
@@ -235,7 +228,6 @@ def do_mk_img(req: Request):
             if model_is_half:
                 del model, modelCS, modelFS
                 load_model_ckpt(ckpt_file, device)
-                needs_model_reload = False

             load_model_gfpgan(gfpgan_file)
             load_model_real_esrgan(real_esrgan_file)
@@ -245,19 +237,17 @@ def do_mk_img(req: Request):
             device = 'cuda'

             if (precision == 'autocast' and (req.use_full_precision or not model_is_half)) or \
-                (precision == 'full' and not req.use_full_precision and not force_full_precision):
+                (precision == 'full' and not req.use_full_precision and not force_full_precision) or \
+                (req.init_image is None and model_fs_is_half) or \
+                (req.init_image is not None and not model_fs_is_half and not force_full_precision):

                 del model, modelCS, modelFS

-                load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'))
-                needs_model_reload = False
+                load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))

             if prev_device != device:
                 load_model_gfpgan(gfpgan_file)
                 load_model_real_esrgan(real_esrgan_file)

-    if needs_model_reload:
-        load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, precision)
-

     if req.use_face_correction != gfpgan_file:
         load_model_gfpgan(req.use_face_correction)
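With needs_model_reload gone, the reload decision collapses back into the single condition restored above: reload when the loaded precision disagrees with the request, or when modelFS's precision doesn't suit the request type (txt2img wants a full-precision modelFS, img2img wants it halved unless full precision is forced). A sketch of that predicate as a hypothetical helper, using the same globals the diff references:

def should_reload(req, precision, model_is_half, model_fs_is_half, force_full_precision):
    return (
        # loaded with autocast, but the request needs full precision (or halving failed)
        (precision == 'autocast' and (req.use_full_precision or not model_is_half))
        # loaded in full precision, but autocast would do
        or (precision == 'full' and not req.use_full_precision and not force_full_precision)
        # txt2img request while modelFS is still halved from an earlier img2img
        or (req.init_image is None and model_fs_is_half)
        # img2img request while modelFS is still in full precision
        or (req.init_image is not None and not model_fs_is_half and not force_full_precision)
    )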
@@ -284,7 +274,7 @@ def do_mk_img(req: Request):
     opt_use_face_correction = req.use_face_correction
     opt_use_upscale = req.use_upscale
     opt_show_only_filtered = req.show_only_filtered_image
-    opt_format = req.output_format
+    opt_format = 'png'
     opt_sampler_name = req.sampler

     print(req.to_string(), '\n device', device)
@@ -380,7 +370,13 @@ def do_mk_img(req: Request):

             if req.stream_progress_updates:
                 n_steps = opt_ddim_steps if req.init_image is None else t_enc
-                progress = {"step": i, "total_steps": n_steps}
+                progress = {
+                    "status": "progress",
+                    #"progress": (i + 1) / n_steps,
+                    "progress": {
+                        "step": i, "total_steps": n_steps
+                    }
+                }

                 if req.stream_image_progress and i % 5 == 0:
                     partial_images = []
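This restores the richer streaming payload: a "status" field plus a nested "progress" object, in place of the flat step counter. A client consuming the stream would see messages shaped like this (values illustrative):

import json

message = {
    "status": "progress",
    "progress": {"step": 12, "total_steps": 50},
}
print(json.dumps(message))
# {"status": "progress", "progress": {"step": 12, "total_steps": 50}}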
@@ -454,10 +450,10 @@ def do_mk_img(req: Request):
                     if return_orig_img:
                         save_image(img, img_out_path)

-                    save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file)
+                    save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt)

                     if return_orig_img:
-                        img_data = img_to_base64_str(img, opt_format)
+                        img_data = img_to_base64_str(img)
                         res_image_orig = ResponseImage(data=img_data, seed=opt_seed)
                         res.images.append(res_image_orig)
@@ -484,7 +480,7 @@ def do_mk_img(req: Request):
                             filtered_image = Image.fromarray(x_sample)
-                            filtered_img_data = img_to_base64_str(filtered_image, opt_format)
+                            filtered_img_data = img_to_base64_str(filtered_image)
                             res_image_filtered = ResponseImage(data=filtered_img_data, seed=opt_seed)
                             res.images.append(res_image_filtered)
@@ -515,8 +511,8 @@ def save_image(img, img_out_path):
     except:
         print('could not save the file', traceback.format_exc())

-def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file):
-    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}"
+def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt):
+    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}"

     try:
         with open(meta_out_path, 'w') as f:
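After the revert, the metadata sidecar written next to each image no longer records the checkpoint name. With illustrative values, the file produced by the restored f-string would read:

a photograph of an astronaut riding a horse
Width: 512
Height: 512
Seed: 42
Steps: 50
Guidance Scale: 7.5
Prompt Strength: 0.8
Use Face Correction: None
Use Upscaling: None
Sampler: plms
Negative Prompt: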
@@ -652,9 +648,9 @@ def load_mask(mask_str, h0, w0, newH, newW, invert=False):
     return image

 # https://stackoverflow.com/a/61114178
-def img_to_base64_str(img, output_format="PNG"):
+def img_to_base64_str(img):
     buffered = BytesIO()
-    img.save(buffered, format=output_format)
+    img.save(buffered, format="PNG")
     buffered.seek(0)
     img_byte = buffered.getvalue()
     img_str = "data:image/png;base64," + base64.b64encode(img_byte).decode()