Merge branch 'beta' into sync-fn

cmdr2 committed 2023-01-10 20:06:58 +05:30 (committed by GitHub)
commit 9ea51b174a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 110 additions and 55 deletions

View File

@@ -18,6 +18,7 @@
 Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.

 ### Detailed changelog
+* 2.5.5 - 9 Jan 2023 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
 * 2.5.4 - 29 Dec 2022 - Press Esc key on the keyboard to close the Image Editor. Thanks @patriceac.
 * 2.5.4 - 29 Dec 2022 - Lots of bug fixes in the UI. Thanks @patriceac.
 * 2.5.4 - 28 Dec 2022 - Full support for running tasks in parallel on multiple GPUs. Warning: 'Euler Ancestral', 'DPM2 Ancestral' and 'DPM++ 2s Ancestral' may produce slight variations in the image (if run in parallel), so we recommend using the other samplers.
@@ -59,6 +60,8 @@ Our focus continues to remain on an easy installation experience, and an easy us
 - Support loading models in the safetensor format, for improved safety

 ### Detailed changelog
+* 2.4.24 - 9 Jan 2023 - Urgent fix for failures on old/long-term-support browsers. Thanks @JeLuf.
+* 2.4.23/22 - 29 Dec 2022 - Allow rolling back from the upcoming v2.5 change (in beta).
 * 2.4.21 - 23 Dec 2022 - Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking the `Make Image` button and calling the server.
 * 2.4.20 - 22 Dec 2022 - `Pause All` button to pause all the pending tasks. Thanks @JeLuf
 * 2.4.20 - 22 Dec 2022 - `Undo`/`Redo` buttons in the image editor. Thanks @JeLuf

View File

@@ -156,6 +156,8 @@ def is_device_compatible(device):
     '''
     Returns True/False, and prints any compatibility errors
     '''
+    # static variable "history", kept as a function attribute across calls
+    is_device_compatible.history = getattr(is_device_compatible, 'history', {})
     try:
         validate_device_id(device, log_prefix='is_device_compatible')
     except:
@@ -168,7 +170,9 @@ def is_device_compatible(device):
         _, mem_total = torch.cuda.mem_get_info(device)
         mem_total /= float(10**9)
         if mem_total < 3.0:
-            log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
+            if is_device_compatible.history.get(device) is None:
+                log.warn(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
+                is_device_compatible.history[device] = 1
             return False
     except RuntimeError as e:
         log.error(str(e))
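
The hunk above makes the low-VRAM warning fire only once per device, by caching seen devices in a dict stored on the function object itself. A minimal standalone sketch of that function-attribute pattern (names here are illustrative, not from the codebase):

```python
import logging

log = logging.getLogger(__name__)

def warn_once(key, message):
    '''Log a warning only the first time it is seen for a given key.'''
    # Function attributes persist across calls, acting like a C-style static variable.
    warn_once.history = getattr(warn_once, 'history', {})
    if key not in warn_once.history:
        log.warning(message)
        warn_once.history[key] = 1

warn_once('cuda:0', 'GPU cuda:0 has less than 3 GB of VRAM')  # logs
warn_once('cuda:0', 'GPU cuda:0 has less than 3 GB of VRAM')  # silent
```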

View File

@@ -44,7 +44,13 @@ def load_default_models(context: Context):
     for model_type in MODELS_TO_LOAD_ON_START:
         context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
         set_model_config_path(context, model_type)
-        load_model(context, model_type)
+        try:
+            load_model(context, model_type)
+        except Exception as e:
+            log.error(f'[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]')
+            log.error(f'[red]Error: {e}[/red]')
+            log.error(f'[red]Consider removing the model from the models folder.[/red]')

 def unload_all(context: Context):
     for model_type in KNOWN_MODEL_TYPES:
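
This change wraps each default-model load in try/except so that one corrupt or missing model no longer aborts server startup; the failure is logged (with Rich-style `[red]` markup) and loading continues with the next model type. A rough sketch of the keep-going pattern, assuming hypothetical loader callables:

```python
import logging

log = logging.getLogger(__name__)

def load_all(loaders: dict):
    '''Call each loader, collecting failures instead of aborting on the first one.'''
    failures = {}
    for name, loader in loaders.items():
        try:
            loader()
        except Exception as e:
            log.error('Error while loading %s model: %s', name, e)
            failures[name] = e
    return failures

# usage (hypothetical loaders): load_all({'stable-diffusion': load_sd, 'vae': load_vae})
```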
@@ -190,6 +196,30 @@ def getModels():
         }
     }

     models_scanned = 0
+
+    class MaliciousModelException(Exception):
+        "Raised when picklescan reports a problem with a model"
+        pass
+
+    def scan_directory(directory, suffixes):
+        nonlocal models_scanned
+        tree = []
+        for entry in os.scandir(directory):
+            if entry.is_file() and any(entry.name.endswith(s) for s in suffixes):
+                mtime = entry.stat().st_mtime
+                mod_time = known_models[entry.path] if entry.path in known_models else -1
+                if mod_time != mtime:
+                    models_scanned += 1
+                    if is_malicious_model(entry.path):
+                        raise MaliciousModelException(entry.path)
+                known_models[entry.path] = mtime
+                tree.append(entry.name.rsplit('.', 1)[0])
+            elif entry.is_dir():
+                scan = scan_directory(entry.path, suffixes)
+                if len(scan) != 0:
+                    tree.append((entry.name, scan))
+        return tree
+
     def listModels(model_type):
         nonlocal models_scanned
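
`scan_directory` replaces the flat `os.listdir` loop removed in the next hunk: it recurses into subfolders and returns a nested tree in which files appear as bare names (suffix stripped) and folders as `(name, children)` tuples, raising `MaliciousModelException` when picklescan flags a file. A self-contained sketch of the same tree shape, without the mtime cache or the malware scan:

```python
import os

def scan_tree(directory, suffixes=('.ckpt', '.safetensors')):
    '''Return files as bare names and subfolders as (name, children) tuples.'''
    tree = []
    for entry in os.scandir(directory):
        if entry.is_file() and any(entry.name.endswith(s) for s in suffixes):
            tree.append(entry.name.rsplit('.', 1)[0])
        elif entry.is_dir():
            children = scan_tree(entry.path, suffixes)
            if children:
                tree.append((entry.name, children))
    return tree

# e.g. scan_tree('models') -> ['sd-v1-4', ('anime', ['model-a', 'model-b'])]
```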
@@ -198,26 +228,10 @@ def getModels():
         if not os.path.exists(models_dir):
             os.makedirs(models_dir)

-        for file in os.listdir(models_dir):
-            for model_extension in model_extensions:
-                if not file.endswith(model_extension):
-                    continue
-
-                model_path = os.path.join(models_dir, file)
-                mtime = os.path.getmtime(model_path)
-                mod_time = known_models[model_path] if model_path in known_models else -1
-                if mod_time != mtime:
-                    models_scanned += 1
-                    if is_malicious_model(model_path):
-                        models['scan-error'] = file
-                        return
-                known_models[model_path] = mtime
-
-                model_name = file[:-len(model_extension)]
-                models['options'][model_type].append(model_name)
-
-        models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates
-        models['options'][model_type].sort()
+        try:
+            models['options'][model_type] = scan_directory(models_dir, model_extensions)
+        except MaliciousModelException as e:
+            models['scan-error'] = e

     # custom models
     listModels(model_type='stable-diffusion')

View File

@@ -31,9 +31,9 @@ def make_images(req: GenerateImageRequest, task_data: TaskData, data_queue: queu
     context.stop_processing = False
     print_task_info(req, task_data)

-    images = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)
+    images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)

-    res = Response(req, task_data, images=construct_response(images, task_data, base_seed=req.seed))
+    res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
     res = res.json()
     data_queue.put(json.dumps(res))
     log.info('Task completed')
@@ -53,7 +53,11 @@ def make_images_internal(req: GenerateImageRequest, task_data: TaskData, data_qu
     if task_data.save_to_disk_path is not None:
         save_images_to_disk(images, filtered_images, req, task_data)

-    return filtered_images if task_data.show_only_filtered_image or (task_data.use_face_correction is None and task_data.use_upscale is None) else images + filtered_images
+    seeds = [*range(req.seed, req.seed + len(images))]
+    if task_data.show_only_filtered_image or filtered_images is images:
+        return filtered_images, seeds
+    else:
+        return images + filtered_images, seeds + seeds

 def generate_images_internal(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
     context.temp_images.clear()
@@ -84,12 +88,12 @@ def filter_images(task_data: TaskData, images: list, user_stopped):
     return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)

-def construct_response(images: list, task_data: TaskData, base_seed: int):
+def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
     return [
         ResponseImage(
             data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
-            seed=base_seed + i
-        ) for i, img in enumerate(images)
+            seed=seed,
+        ) for img, seed in zip(images, seeds)
     ]

 def make_step_callback(req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback, stream_image_progress: bool):
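
`make_images_internal` now returns the seed list alongside the images. When both the unfiltered originals and their filtered copies are returned, the list is doubled (`seeds + seeds`) so each copy keeps the seed of its original, and `construct_response` zips the two lists instead of recomputing `base_seed + i`. A small worked example with made-up values:

```python
base_seed = 42
images = ['img0', 'img1']                 # originals
filtered = ['img0_fixed', 'img1_fixed']   # face-fixed / upscaled copies

seeds = [*range(base_seed, base_seed + len(images))]  # [42, 43]
combined = images + filtered
combined_seeds = seeds + seeds                        # [42, 43, 42, 43]

# zip keeps each filtered copy paired with the seed of its original:
for img, seed in zip(combined, combined_seeds):
    print(img, seed)
```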

View File

@@ -32,7 +32,7 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
     save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub('_', task_data.session_id))
     metadata_entries = get_metadata_entries_for_request(req, task_data)

-    if task_data.show_only_filtered_image or filtered_images == images:
+    if task_data.show_only_filtered_image or filtered_images is images:
         make_filename = make_filename_callback(req)
         save_images(filtered_images, save_dir_path, file_name=make_filename, output_format=task_data.output_format, output_quality=task_data.output_quality)
         save_dicts(metadata_entries, save_dir_path, file_name=make_filename, output_format=task_data.metadata_output_format)

View File

@@ -25,7 +25,7 @@
     <div id="logo">
         <h1>
             Easy Diffusion
-            <small>v2.5.4 <span id="updateBranchLabel"></span></small>
+            <small>v2.5.5 <span id="updateBranchLabel"></span></small>
         </h1>
     </div>
     <div id="server-status">

View File

@@ -2,12 +2,12 @@
     padding-left: 32px;
     text-align: left;
     padding-bottom: 20px;
-    max-width: min-content;
 }

 .editor-options-container {
     display: flex;
     row-gap: 10px;
+    max-width: 210px;
 }

 .editor-options-container > * {
.editor-options-container > * { .editor-options-container > * {

View File

@@ -251,6 +251,10 @@ button#resume {
 img {
     box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
 }

+div.img-preview img {
+    width: 100%;
+    height: 100%;
+}
+
 .line-separator {
     background: var(--background-color3);
     height: 1pt;

View File

@@ -144,6 +144,13 @@ const TASK_MAPPING = {
         readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
         parse: (val) => val
     },
+    preserve_init_image_color_profile: { name: 'Preserve Color Profile',
+        setUI: (preserve_init_image_color_profile) => {
+            applyColorCorrectionField.checked = parseBoolean(preserve_init_image_color_profile)
+        },
+        readUI: () => applyColorCorrectionField.checked,
+        parse: (val) => parseBoolean(val)
+    },

     use_face_correction: { name: 'Use Face Correction',
         setUI: (use_face_correction) => {
@@ -282,6 +289,7 @@ const TASK_MAPPING = {
         parse: (val) => val
     }
 }
+
 function restoreTaskToUI(task, fieldsToSkip) {
     fieldsToSkip = fieldsToSkip || []
@@ -320,20 +328,26 @@ function restoreTaskToUI(task, fieldsToSkip) {
     if (!('use_upscale' in task.reqBody)) {
         useUpscalingField.checked = false
     }
-    if (!('mask' in task.reqBody)) {
+    if (!('mask' in task.reqBody) && maskSetting.checked) {
         maskSetting.checked = false
+        maskSetting.dispatchEvent(new Event("click"))
     }

     upscaleModelField.disabled = !useUpscalingField.checked
     upscaleAmountField.disabled = !useUpscalingField.checked

-    // Show the source picture if present
-    initImagePreview.src = (task.reqBody.init_image == undefined ? '' : task.reqBody.init_image)
-    if (IMAGE_REGEX.test(initImagePreview.src)) {
-        if (Boolean(task.reqBody.mask)) {
-            setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
-                imageInpainter.setImg(task.reqBody.mask)
-            }, 250)
-        }
+    // hide/show source picture as needed
+    if (IMAGE_REGEX.test(initImagePreview.src) && task.reqBody.init_image == undefined) {
+        // hide source image
+        initImageClearBtn.dispatchEvent(new Event("click"))
+    }
+    else if (task.reqBody.init_image !== undefined) {
+        // listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
+        initImagePreview.addEventListener('load', function() {
+            if (Boolean(task.reqBody.mask)) {
+                imageInpainter.setImg(task.reqBody.mask)
+            }
+        }, { once: true })
+        initImagePreview.src = task.reqBody.init_image
     }
 }
function readUI() { function readUI() {
@@ -451,7 +465,7 @@ async function parseContent(text) {
     }

     // Normal txt file.
     const task = parseTaskFromText(text)
-    if (task) {
+    if (text.toLowerCase().includes('seed:') && task) { // only parse valid task content
         restoreTaskToUI(task)
         return true
     } else {

View File

@@ -835,10 +835,13 @@
         * @memberof Task
         */
        async post(timeout=-1) {
-            performance.mark('make-render-request')
-            if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
-                console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
+            if (typeof performance == "object" && performance.mark && performance.measure) {
+                performance.mark('make-render-request')
+                if (performance.getEntriesByName('click-makeImage', 'mark').length > 0) {
+                    console.log('delay between clicking and making the server request:', performance.measure('diff', 'click-makeImage', 'make-render-request').duration + ' ms')
+                }
             }
            let jsonResponse = await super.post('/render', timeout)
            if (typeof jsonResponse?.task !== 'number') {
                console.warn('Endpoint error response: ', jsonResponse)

View File

@@ -288,7 +288,7 @@ function showImages(reqBody, res, outputContainer, livePreview) {
     imageSeedLabel.innerText = 'Seed: ' + req.seed

     let buttons = [
         { text: 'Remove', on_click: onRemoveClick, class: 'secondaryButton' },
-        { text: 'Use as Input', on_click: onUseAsInputClick },
+        { text: 'Use as Input', on_click: onUseAsInputClick },
         { text: 'Download', on_click: onDownloadImageClick },
         { text: 'Make Similar Images', on_click: onMakeSimilarClick },
@@ -440,7 +440,10 @@ function getUncompletedTaskEntries() {
 }

 function makeImage() {
-    performance.mark('click-makeImage')
+    if (typeof performance == "object" && performance.mark) {
+        performance.mark('click-makeImage')
+    }

     if (!SD.isServerAvailable()) {
         alert('The server is not available.')
         return
@@ -1303,17 +1306,23 @@ async function getModels() {
     vaeOptions.unshift('') // add a None option
     hypernetworkOptions.unshift('') // add a None option

-    function createModelOptions(modelField, selectedModel) {
-        return function(modelName) {
-            const modelOption = document.createElement('option')
-            modelOption.value = modelName
-            modelOption.innerText = modelName !== '' ? modelName : 'None'
-
-            if (modelName === selectedModel) {
-                modelOption.selected = true
-            }
-
-            modelField.appendChild(modelOption)
-        }
-    }
+    function createModelOptions(modelField, selectedModel, path="") {
+        return function fn(modelName) {
+            if (typeof(modelName) == 'string') {
+                const modelOption = document.createElement('option')
+                modelOption.value = path + modelName
+                modelOption.innerHTML = modelName !== '' ? (path != "" ? "&nbsp;&nbsp;" + modelName : modelName) : 'None'
+
+                if (modelName === selectedModel) {
+                    modelOption.selected = true
+                }
+
+                modelField.appendChild(modelOption)
+            } else {
+                const modelGroup = document.createElement('optgroup')
+                modelGroup.label = path + modelName[0]
+                modelField.appendChild(modelGroup)
+                modelName[1].forEach(createModelOptions(modelField, selectedModel, path + modelName[0] + "/"))
+            }
+        }
+    }
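
The reworked `createModelOptions` consumes the nested tree that the backend's `scan_directory` now produces: plain strings become `<option>` elements whose value carries the subfolder path, and `(folder, children)` tuples become `<optgroup>` labels, recursing with an extended path prefix. A rough Python analogue of that traversal, purely to illustrate the recursion (the flat row format here is invented for the example):

```python
def build_options(tree, path=''):
    '''Flatten a scan_directory-style tree into (value, label, is_group) rows.'''
    rows = []
    for node in tree:
        if isinstance(node, str):
            label = node if node != '' else 'None'
            rows.append((path + node, label, False))
        else:
            folder, children = node
            rows.append((path + folder, folder, True))  # optgroup header
            rows.extend(build_options(children, path + folder + '/'))
    return rows

print(build_options(['sd-v1-4', ('anime', ['model-a'])]))
# [('sd-v1-4', 'sd-v1-4', False), ('anime', 'anime', True), ('anime/model-a', 'model-a', False)]
```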