diff --git a/CHANGES.md b/CHANGES.md
index a581127a..b3fe2564 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -18,6 +18,7 @@ Our focus continues to remain on an easy installation experience, and an easy
 user-interface. While still remaining pretty powerful, in terms of features and speed.
 
 ### Detailed changelog
+* 2.5.5 - 9 Jan 2023 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
 * 2.5.4 - 29 Dec 2022 - Press Esc key on the keyboard to close the Image Editor. Thanks @patriceac.
 * 2.5.4 - 29 Dec 2022 - Lots of bug fixes in the UI. Thanks @patriceac.
 * 2.5.4 - 28 Dec 2022 - Full support for running tasks in parallel on multiple GPUs. Warning: 'Euler Ancestral', 'DPM2 Ancestral' and 'DPM++ 2s Ancestral' may produce slight variations in the image (if run in parallel), so we recommend using the other samplers.
@@ -59,6 +60,8 @@ Our focus continues to remain on an easy installation experience, and an easy us
 - Support loading models in the safetensor format, for improved safety
 
 ### Detailed changelog
+* 2.4.24 - 9 Jan 2023 - Urgent fix for failures on old/long-term-support browsers. Thanks @JeLuf.
+* 2.4.23/22 - 29 Dec 2022 - Allow rolling back from the upcoming v2.5 change (in beta).
 * 2.4.21 - 23 Dec 2022 - Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking the `Make Image` button and calling the server.
 * 2.4.20 - 22 Dec 2022 - `Pause All` button to pause all the pending tasks. Thanks @JeLuf
 * 2.4.20 - 22 Dec 2022 - `Undo`/`Redo` buttons in the image editor. Thanks @JeLuf
diff --git a/ui/easydiffusion/device_manager.py b/ui/easydiffusion/device_manager.py
index b7406408..a261a62f 100644
--- a/ui/easydiffusion/device_manager.py
+++ b/ui/easydiffusion/device_manager.py
@@ -156,6 +156,8 @@ def is_device_compatible(device):
     '''
     Returns True/False, and prints any compatibility errors
     '''
+    # "history" acts as a static variable for this function, so the low-VRAM warning below is only logged once per device
+    is_device_compatible.history = getattr(is_device_compatible, 'history', {})
     try:
         validate_device_id(device, log_prefix='is_device_compatible')
     except:
         return False
@@ -168,7 +170,9 @@ def is_device_compatible(device):
             _, mem_total = torch.cuda.mem_get_info(device)
             mem_total /= float(10**9)
             if mem_total < 3.0:
-                log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
+                if is_device_compatible.history.get(device) is None:
+                    log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
+                    is_device_compatible.history[device] = 1
                 return False
     except RuntimeError as e:
         log.error(str(e))
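The device_manager.py change above stores state as an attribute on the function object, a lightweight "static variable", so the low-VRAM warning is logged only once per device instead of on every compatibility check. A minimal sketch of that pattern, with a hypothetical `warn_once` helper and made-up device names (not part of this PR):

```python
# Warn-once pattern via a function attribute: the dict survives across calls,
# so repeated calls for the same device skip the duplicate log line.
def warn_once(device, message):
    warn_once.history = getattr(warn_once, 'history', {})  # lazily created "static" dict
    if device not in warn_once.history:
        print(message)
        warn_once.history[device] = 1

warn_once('cuda:0', 'GPU cuda:0 with less than 3 GB of VRAM is not compatible')  # logged
warn_once('cuda:0', 'GPU cuda:0 with less than 3 GB of VRAM is not compatible')  # skipped
```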
diff --git a/ui/easydiffusion/model_manager.py b/ui/easydiffusion/model_manager.py
index fefcc916..4f7cea22 100644
--- a/ui/easydiffusion/model_manager.py
+++ b/ui/easydiffusion/model_manager.py
@@ -44,7 +44,13 @@ def load_default_models(context: Context):
     for model_type in MODELS_TO_LOAD_ON_START:
         context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
         set_model_config_path(context, model_type)
-        load_model(context, model_type)
+        try:
+            load_model(context, model_type)
+        except Exception as e:
+            log.error(f'[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]')
+            log.error(f'[red]Error: {e}[/red]')
+            log.error(f'[red]Consider removing the model from the model folder.[/red]')
+
 
 def unload_all(context: Context):
     for model_type in KNOWN_MODEL_TYPES:
@@ -190,6 +196,30 @@ def getModels():
     }
 
     models_scanned = 0
+
+    class MaliciousModelException(Exception):
+        "Raised when picklescan reports a problem with a model"
+        pass
+
+    def scan_directory(directory, suffixes):
+        nonlocal models_scanned
+        tree = []
+        for entry in os.scandir(directory):
+            if entry.is_file() and any(entry.name.endswith(s) for s in suffixes):
+                mtime = entry.stat().st_mtime
+                mod_time = known_models[entry.path] if entry.path in known_models else -1
+                if mod_time != mtime:
+                    models_scanned += 1
+                    if is_malicious_model(entry.path):
+                        raise MaliciousModelException(entry.path)
+                known_models[entry.path] = mtime
+                tree.append(entry.name.rsplit('.', 1)[0])
+            elif entry.is_dir():
+                scan = scan_directory(entry.path, suffixes)
+                if len(scan) != 0:
+                    tree.append((entry.name, scan))
+        return tree
+
 
     def listModels(model_type):
         nonlocal models_scanned
@@ -198,26 +228,10 @@ def getModels():
         if not os.path.exists(models_dir):
             os.makedirs(models_dir)
 
-        for file in os.listdir(models_dir):
-            for model_extension in model_extensions:
-                if not file.endswith(model_extension):
-                    continue
-
-                model_path = os.path.join(models_dir, file)
-                mtime = os.path.getmtime(model_path)
-                mod_time = known_models[model_path] if model_path in known_models else -1
-                if mod_time != mtime:
-                    models_scanned += 1
-                    if is_malicious_model(model_path):
-                        models['scan-error'] = file
-                        return
-                known_models[model_path] = mtime
-
-                model_name = file[:-len(model_extension)]
-                models['options'][model_type].append(model_name)
-
-        models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates
-        models['options'][model_type].sort()
+        try:
+            models['options'][model_type] = scan_directory(models_dir, model_extensions)
+        except MaliciousModelException as e:
+            models['scan-error'] = e
 
     # custom models
     listModels(model_type='stable-diffusion')
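The new `scan_directory()` walks the models folder recursively and returns a mixed tree: plain strings for model files and `(folder_name, children)` tuples for sub-folders, raising `MaliciousModelException` as soon as picklescan flags a file. A trimmed-down sketch of the returned shape, dropping the mtime cache and the malicious-model check; the file names here are hypothetical:

```python
import os
import tempfile

# Simplified scan_directory(): strings for model files, (name, children) tuples for sub-folders.
def scan_directory(directory, suffixes):
    tree = []
    for entry in os.scandir(directory):
        if entry.is_file() and any(entry.name.endswith(s) for s in suffixes):
            tree.append(entry.name.rsplit('.', 1)[0])
        elif entry.is_dir():
            children = scan_directory(entry.path, suffixes)
            if children:
                tree.append((entry.name, children))
    return tree

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'anime'))
    open(os.path.join(root, 'sd-v1-4.ckpt'), 'w').close()
    open(os.path.join(root, 'anime', 'model-a.safetensors'), 'w').close()
    print(scan_directory(root, ('.ckpt', '.safetensors')))
    # e.g. ['sd-v1-4', ('anime', ['model-a'])]  (order depends on the filesystem)
```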
diff --git a/ui/easydiffusion/renderer.py b/ui/easydiffusion/renderer.py
index b2eb2e0c..9bbe5d89 100644
--- a/ui/easydiffusion/renderer.py
+++ b/ui/easydiffusion/renderer.py
@@ -31,9 +31,9 @@ def make_images(req: GenerateImageRequest, task_data: TaskData, data_queue: queu
     context.stop_processing = False
     print_task_info(req, task_data)
 
-    images = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
+    images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
 
-    res = Response(req, task_data, images=construct_response(images, task_data, base_seed=req.seed))
+    res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
     res = res.json()
     data_queue.put(json.dumps(res))
     log.info('Task completed')
@@ -53,7 +53,11 @@ def make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_qu
     if task_data.save_to_disk_path is not None:
         save_images_to_disk(images, filtered_images, req, task_data)
 
-    return filtered_images if task_data.show_only_filtered_image or (task_data.use_face_correction is None and task_data.use_upscale is None) else images + filtered_images
+    seeds = [*range(req.seed, req.seed + len(images))]
+    if task_data.show_only_filtered_image or filtered_images is images:
+        return filtered_images, seeds
+    else:
+        return images + filtered_images, seeds + seeds
 
 def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
     context.temp_images.clear()
@@ -84,12 +88,12 @@ def filter_images(task_data: TaskData, images: list, user_stopped):
 
     return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)
 
-def construct_response(images: list, task_data: TaskData, base_seed: int):
+def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
     return [
         ResponseImage(
             data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
-            seed=base_seed + i
-        ) for i, img in enumerate(images)
+            seed=seed,
+        ) for img, seed in zip(images, seeds)
     ]
 
 def make_step_callback(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
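renderer.py now returns a list of seeds alongside the images, so a face-corrected or upscaled copy keeps the seed of the original it was derived from; when both the originals and the filtered copies are sent back, the seed list is doubled to stay aligned with `images + filtered_images`. A small sketch of that bookkeeping, with stand-in strings instead of real images:

```python
# Stand-in values only; in the real code these are PIL images produced by the renderer.
base_seed = 42
images = ['img_a', 'img_b']
filtered_images = ['img_a_upscaled', 'img_b_upscaled']

seeds = [*range(base_seed, base_seed + len(images))]   # [42, 43]

# Returning originals + filtered copies doubles the seed list, so the zip() in
# construct_response() keeps every copy paired with the seed it came from.
for img, seed in zip(images + filtered_images, seeds + seeds):
    print(img, seed)
# img_a 42 / img_b 43 / img_a_upscaled 42 / img_b_upscaled 43
```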
diff --git a/ui/easydiffusion/utils/save_utils.py b/ui/easydiffusion/utils/save_utils.py
index 977697c6..9a59e73c 100644
--- a/ui/easydiffusion/utils/save_utils.py
+++ b/ui/easydiffusion/utils/save_utils.py
@@ -32,7 +32,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
     save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub('_', task_data.session_id))
     metadata_entries = get_metadata_entries_for_request(req, task_data)
 
-    if task_data.show_only_filtered_image or filtered_images == images:
+    if task_data.show_only_filtered_image or filtered_images is images:
         make_filename = make_filename_callback(req)
         save_images(filtered_images, save_dir_path, file_name=make_filename, output_format=task_data.output_format, output_quality=task_data.output_quality)
         save_dicts(metadata_entries, save_dir_path, file_name=make_filename, output_format=task_data.metadata_output_format)
diff --git a/ui/index.html b/ui/index.html
index a927ba1f..9f2fc14f 100644
--- a/ui/index.html
+++ b/ui/index.html
@@ -25,7 +25,7 @@
diff --git a/ui/media/css/image-editor.css b/ui/media/css/image-editor.css
index 5fa9e7c7..37bf3a23 100644
--- a/ui/media/css/image-editor.css
+++ b/ui/media/css/image-editor.css
@@ -2,12 +2,12 @@
     padding-left: 32px;
     text-align: left;
     padding-bottom: 20px;
+    max-width: min-content;
 }
 
 .editor-options-container {
     display: flex;
     row-gap: 10px;
-    max-width: 210px;
 }
 
 .editor-options-container > * {
diff --git a/ui/media/css/main.css b/ui/media/css/main.css
index dc2d6b8c..261c5673 100644
--- a/ui/media/css/main.css
+++ b/ui/media/css/main.css
@@ -251,6 +251,10 @@ button#resume {
 img {
     box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
 }
+div.img-preview img {
+    width: 100%;
+    height: 100%;
+}
 .line-separator {
     background: var(--background-color3);
     height: 1pt;
diff --git a/ui/media/js/dnd.js b/ui/media/js/dnd.js
index e0487822..573e068c 100644
--- a/ui/media/js/dnd.js
+++ b/ui/media/js/dnd.js
@@ -144,7 +144,14 @@ const TASK_MAPPING = {
         readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
         parse: (val) => val
     },
-
+    preserve_init_image_color_profile: { name: 'Preserve Color Profile',
+        setUI: (preserve_init_image_color_profile) => {
+            applyColorCorrectionField.checked = parseBoolean(preserve_init_image_color_profile)
+        },
+        readUI: () => applyColorCorrectionField.checked,
+        parse: (val) => parseBoolean(val)
+    },
+
     use_face_correction: { name: 'Use Face Correction',
         setUI: (use_face_correction) => {
             useFaceCorrectionField.checked = parseBoolean(use_face_correction)
@@ -282,6 +289,7 @@ const TASK_MAPPING = {
         parse: (val) => val
     }
 }
+
 function restoreTaskToUI(task, fieldsToSkip) {
     fieldsToSkip = fieldsToSkip || []
 
@@ -320,20 +328,26 @@ function restoreTaskToUI(task, fieldsToSkip) {
     if (!('use_upscale' in task.reqBody)) {
         useUpscalingField.checked = false
    }
-    if (!('mask' in task.reqBody)) {
+    if (!('mask' in task.reqBody) && maskSetting.checked) {
         maskSetting.checked = false
+        maskSetting.dispatchEvent(new Event("click"))
     }
     upscaleModelField.disabled = !useUpscalingField.checked
     upscaleAmountField.disabled = !useUpscalingField.checked
 
-    // Show the source picture if present
-    initImagePreview.src = (task.reqBody.init_image == undefined ? '' : task.reqBody.init_image)
-    if (IMAGE_REGEX.test(initImagePreview.src)) {
-        if (Boolean(task.reqBody.mask)) {
-            setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
+    // hide/show source picture as needed
+    if (IMAGE_REGEX.test(initImagePreview.src) && task.reqBody.init_image == undefined) {
+        // hide source image
+        initImageClearBtn.dispatchEvent(new Event("click"))
+    }
+    else if (task.reqBody.init_image !== undefined) {
+        // listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
+        initImagePreview.addEventListener('load', function() {
+            if (Boolean(task.reqBody.mask)) {
                 imageInpainter.setImg(task.reqBody.mask)
-            }, 250)
-        }
+            }
+        }, { once: true })
+        initImagePreview.src = task.reqBody.init_image
     }
 }
 function readUI() {
@@ -451,7 +465,7 @@ async function parseContent(text) {
     }
     // Normal txt file
     const task = parseTaskFromText(text)
-    if (task) {
+    if (text.toLowerCase().includes('seed:') && task) { // only parse valid task content
         restoreTaskToUI(task)
         return true
     } else {
diff --git a/ui/media/js/engine.js b/ui/media/js/engine.js
index 865b8c72..cec2eb36 100644
--- a/ui/media/js/engine.js
+++ b/ui/media/js/engine.js
@@ -835,10 +835,13 @@
          * @memberof Task
          */
         async post(timeout=-1) {
-            performance.mark('make-render-request')
-            if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
-                console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
+            if (typeof performance == "object" && performance.mark && performance.measure) {
+                performance.mark('make-render-request')
+                if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
+                    console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
+                }
             }
+
             let jsonResponse = await super.post('/render', timeout)
             if (typeof jsonResponse?.task !== 'number') {
                 console.warn('Endpoint error response: ', jsonResponse)
diff --git a/ui/media/js/main.js b/ui/media/js/main.js
index 7d70233e..cf366d33 100644
--- a/ui/media/js/main.js
+++ b/ui/media/js/main.js
@@ -288,7 +288,7 @@ function showImages(reqBody, res, outputContainer, livePreview) {
             imageSeedLabel.innerText = 'Seed: ' + req.seed
 
             let buttons = [
-                { text: 'Remove', on_click: onRemoveClick, class: 'secondaryButton' },
+                { text: 'Remove', on_click: onRemoveClick, class: 'secondaryButton' },
                 { text: 'Use as Input', on_click: onUseAsInputClick },
                 { text: 'Download', on_click: onDownloadImageClick },
                 { text: 'Make Similar Images', on_click: onMakeSimilarClick },
@@ -440,7 +440,10 @@ function getUncompletedTaskEntries() {
 }
 
 function makeImage() {
-    performance.mark('click-makeImage')
+    if (typeof performance == "object" && performance.mark) {
+        performance.mark('click-makeImage')
+    }
+
     if (!SD.isServerAvailable()) {
         alert('The server is not available.')
         return
@@ -1303,17 +1306,23 @@ async function getModels() {
     vaeOptions.unshift('') // add a None option
     hypernetworkOptions.unshift('') // add a None option
 
-    function createModelOptions(modelField, selectedModel) {
-        return function(modelName) {
-            const modelOption = document.createElement('option')
-            modelOption.value = modelName
-            modelOption.innerText = modelName !== '' ? modelName : 'None'
+    function createModelOptions(modelField, selectedModel, path="") {
+        return function fn(modelName) {
+            if (typeof(modelName) == 'string') {
+                const modelOption = document.createElement('option')
+                modelOption.value = path + modelName
+                modelOption.innerHTML = modelName !== '' ? (path != "" ? "&nbsp;&nbsp;" + modelName : modelName) : 'None'
 
-            if (modelName === selectedModel) {
-                modelOption.selected = true
+                if (modelName === selectedModel) {
+                    modelOption.selected = true
+                }
+                modelField.appendChild(modelOption)
+            } else {
+                const modelGroup = document.createElement('optgroup')
+                modelGroup.label = path + modelName[0]
+                modelField.appendChild(modelGroup)
+                modelName[1].forEach( createModelOptions(modelField, selectedModel, path + modelName[0] + "/" ) )
             }
-
-            modelField.appendChild(modelOption)
         }
     }
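The reworked `createModelOptions()` above consumes the nested tree produced by `scan_directory()` on the Python side: plain strings become `<option>` elements and `(folder, children)` tuples become `<optgroup>` labels, with the folder name prefixed onto each option's value. A hypothetical Python helper (not part of this PR) that flattens the same tree into the `folder/model` values those options end up carrying:

```python
# flatten() is illustrative only; it mirrors what the new createModelOptions()
# does in the browser when it builds the model dropdown.
def flatten(tree, path=''):
    for node in tree:
        if isinstance(node, str):
            yield path + node                       # a model file
        else:
            folder, children = node                 # a (folder_name, children) tuple
            yield from flatten(children, path + folder + '/')

print(list(flatten(['sd-v1-4', ('anime', ['model-a', 'model-b'])])))
# ['sd-v1-4', 'anime/model-a', 'anime/model-b']
```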