Move color correction to diffusionkit; Rename color correction to 'Preserve color profile'

cmdr2 2022-12-11 19:34:07 +05:30
parent 6ce6dc3ff6
commit 0aa7968503
3 changed files with 6 additions and 20 deletions


@@ -213,7 +213,7 @@
 <div><ul>
     <li><b class="settings-subheader">Render Settings</b></li>
     <li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
-    <li id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Apply color correction <small>(helps during inpainting)</small></label></li>
+    <li id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></li>
     <li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
     <li class="pl-5">
         <input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>


@@ -761,8 +761,8 @@ function createTask(task) {
        taskConfig += `, <b>Hypernetwork:</b> ${task.reqBody.use_hypernetwork_model}`
        taskConfig += `, <b>Hypernetwork Strength:</b> ${task.reqBody.hypernetwork_strength}`
    }
-   if (task.reqBody.apply_color_correction) {
-       taskConfig += `, <b>Color Correction:</b> true`
+   if (task.reqBody.preserve_init_image_color_profile) {
+       taskConfig += `, <b>Preserve Color Profile:</b> true`
    }
 
    let taskEntry = document.createElement('div')
@@ -871,7 +871,7 @@ function getCurrentUserRequest() {
        if (maskSetting.checked) {
            newTask.reqBody.mask = imageInpainter.getImg()
        }
-       newTask.reqBody.apply_color_correction = applyColorCorrectionField.checked
+       newTask.reqBody.preserve_init_image_color_profile = applyColorCorrectionField.checked
        newTask.reqBody.sampler = 'ddim'
    } else {
        newTask.reqBody.sampler = samplerField.value
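
Only the request-body key changes here (apply_color_correction becomes preserve_init_image_color_profile); the checkbox id and name in the HTML keep their old values. As a minimal sketch, assuming the reqBody arrives server-side as a plain dict, a hypothetical helper like the one below could let older clients keep working during the rename (normalize_request_fields is illustrative only and not part of this commit):

def normalize_request_fields(req_body: dict) -> dict:
    # Accept the pre-rename key from older clients and translate it to the
    # new one, so both generations of the UI produce the same reqBody shape.
    if "apply_color_correction" in req_body and "preserve_init_image_color_profile" not in req_body:
        req_body["preserve_init_image_color_profile"] = req_body.pop("apply_color_correction")
    return req_body

# An older payload still using the previous key:
print(normalize_request_fields({"prompt": "a cat", "apply_color_correction": True}))
# {'prompt': 'a cat', 'preserve_init_image_color_profile': True}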


@@ -62,10 +62,8 @@ def reload_models_if_necessary(task_data: TaskData):
         if thread_data.model_paths.get(model_type) != model_path_in_req:
             thread_data.model_paths[model_type] = model_path_in_req
-            if thread_data.model_paths[model_type] is not None:
-                model_loader.load_model(thread_data, model_type)
-            else:
-                model_loader.unload_model(thread_data, model_type)
+            action_fn = model_loader.unload_model if thread_data.model_paths[model_type] is None else model_loader.load_model
+            action_fn(thread_data, model_type)
 
 def make_images(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback):
     try:
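
The refactor in reload_models_if_necessary collapses the four-line if/else into a conditional expression that selects load_model or unload_model and then calls whichever was chosen, with the same arguments either way. A small standalone sketch of the same pattern, using stand-in functions rather than the real model_loader API:

def load_model(ctx, model_type):
    print(f"loading {model_type}")

def unload_model(ctx, model_type):
    print(f"unloading {model_type}")

def apply_model_change(ctx, model_type, new_path):
    # None means "no model selected for this type", so unload; otherwise (re)load.
    action_fn = unload_model if new_path is None else load_model
    action_fn(ctx, model_type)

apply_model_change({}, "gfpgan", None)                      # prints: unloading gfpgan
apply_model_change({}, "stable-diffusion", "sd-v1-4.ckpt")  # prints: loading stable-diffusion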
@@ -94,7 +92,6 @@ def _make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_q
     print(metadata)
 
     images, user_stopped = generate_images(req, data_queue, task_temp_images, step_callback, task_data.stream_image_progress)
-    images = apply_color_correction(req, images, user_stopped)
     images = apply_filters(task_data, images, user_stopped, task_data.show_only_filtered_image)
 
     if task_data.save_to_disk_path is not None:
@@ -139,17 +136,6 @@ def generate_images(req: GenerateImageRequest, data_queue: queue.Queue, task_tem
     return images, user_stopped
 
-def apply_color_correction(req: GenerateImageRequest, images: list, user_stopped):
-    if user_stopped or req.init_image is None or not req.apply_color_correction:
-        return images
-
-    for i, img_info in enumerate(images):
-        img, seed, filtered = img_info
-        img = image_utils.apply_color_correction(orig_image=req.init_image, image_to_correct=img)
-        images[i] = (img, seed, filtered)
-
-    return images
-
 def apply_filters(task_data: TaskData, images: list, user_stopped, show_only_filtered_image):
     if user_stopped or (task_data.use_face_correction is None and task_data.use_upscale is None):
         return images
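
The renderer-side apply_color_correction helper is deleted because the correction now runs inside diffusionkit, driven by the renamed preserve_init_image_color_profile flag. For context, color-profile preservation of this kind is commonly implemented as histogram matching of the generated image against the init image; the sketch below shows that technique as an assumption, not the actual diffusionkit code (which this diff does not contain):

import numpy as np
from PIL import Image
from skimage.exposure import match_histograms

def preserve_color_profile(init_image: Image.Image, generated: Image.Image) -> Image.Image:
    # Match the generated image's per-channel histograms to the init image's,
    # pulling the overall color balance back toward the original photo.
    reference = np.asarray(init_image.convert("RGB"))
    target = np.asarray(generated.convert("RGB"))
    matched = match_histograms(target, reference, channel_axis=-1)
    return Image.fromarray(np.clip(matched, 0, 255).astype(np.uint8))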