Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-02-09 06:59:30 +01:00)
Commit 3045f5211f
@@ -22,6 +22,7 @@
 Our focus continues to remain on an easy installation experience, and an easy user-interface. While still remaining pretty powerful, in terms of features and speed.
 
 ### Detailed changelog
+* 2.5.39 - 25 May 2023 - (beta-only) Seamless Tiling - make seamlessly tiled images, e.g. rock and grass textures. Thanks @JeLuf.
 * 2.5.38 - 24 May 2023 - Better reporting of errors, and show an explanation if the user cannot disable the "Use CPU" setting.
 * 2.5.38 - 23 May 2023 - Add Latent Upscaler as another option for upscaling images. Thanks @JeLuf for the implementation of the Latent Upscaler model.
 * 2.5.37 - 19 May 2023 - (beta-only) Two more samplers: DDPM and DEIS. Also disables the samplers that aren't working yet in the Diffusers version. Thanks @ogmaresca.
@@ -18,7 +18,7 @@ os_name = platform.system()
 modules_to_check = {
     "torch": ("1.11.0", "1.13.1", "2.0.0"),
     "torchvision": ("0.12.0", "0.14.1", "0.15.1"),
-    "sdkit": "1.0.97",
+    "sdkit": "1.0.98",
     "stable-diffusion-sdkit": "2.1.4",
     "rich": "12.6.0",
     "uvicorn": "0.19.0",
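The only change in this hunk is bumping the sdkit pin from 1.0.97 to 1.0.98. As an illustration only (this is not the repo's actual check_modules.py logic), such a pin can be verified against the installed package with the standard library:

```python
# Illustrative sketch, not Easy Diffusion's check_modules.py: verify that the
# installed sdkit matches the version pinned in the hunk above.
from importlib.metadata import PackageNotFoundError, version

EXPECTED_SDKIT = "1.0.98"  # value taken from the diff above

try:
    installed = version("sdkit")
except PackageNotFoundError:
    installed = None

if installed != EXPECTED_SDKIT:
    print(f"sdkit mismatch: found {installed!r}, expected {EXPECTED_SDKIT!r}")
```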
@@ -23,6 +23,7 @@ class GenerateImageRequest(BaseModel):
     sampler_name: str = None  # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
     hypernetwork_strength: float = 0
     lora_alpha: float = 0
+    tiling: str = "none"  # "none", "x", "y", "xy"
 
 
 class TaskData(BaseModel):
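A minimal sketch of what the new field looks like on a request object. The class below is a trimmed-down stand-in containing only the fields visible in this hunk, not the real GenerateImageRequest model:

```python
# Trimmed-down stand-in for GenerateImageRequest (only the fields shown in the
# hunk above), just to illustrate the accepted "tiling" values.
from pydantic import BaseModel

class GenerateImageRequest(BaseModel):
    sampler_name: str = None
    hypernetwork_strength: float = 0
    lora_alpha: float = 0
    tiling: str = "none"  # "none", "x", "y", "xy"

req = GenerateImageRequest(tiling="xy")  # tile along both axes
print(req.dict())  # includes 'tiling': 'xy'; the default "none" keeps the old behaviour
```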
@@ -30,9 +30,11 @@ TASK_TEXT_MAPPING = {
     "lora_alpha": "LoRA Strength",
     "use_hypernetwork_model": "Hypernetwork model",
     "hypernetwork_strength": "Hypernetwork Strength",
+    "tiling": "Seamless Tiling",
     "use_face_correction": "Use Face Correction",
     "use_upscale": "Use Upscaling",
     "upscale_amount": "Upscale By",
+    "latent_upscaler_steps": "Latent Upscaler Steps"
 }
 
 time_placeholders = {
@@ -169,21 +171,23 @@ def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageR
             output_quality=task_data.output_quality,
             output_lossless=task_data.output_lossless,
         )
-        if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
-            save_dicts(
-                metadata_entries,
-                save_dir_path,
-                file_name=make_filter_filename,
-                output_format=task_data.metadata_output_format,
-                file_format=task_data.output_format,
-            )
+        if task_data.metadata_output_format:
+            for metadata_output_format in task_data.metadata_output_format.split(","):
+                if metadata_output_format.lower() in ["json", "txt", "embed"]:
+                    save_dicts(
+                        metadata_entries,
+                        save_dir_path,
+                        file_name=make_filter_filename,
+                        output_format=task_data.metadata_output_format,
+                        file_format=task_data.output_format,
+                    )
 
 
 def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
     metadata = get_printable_request(req, task_data)
 
     # if text, format it in the text format expected by the UI
-    is_txt_format = task_data.metadata_output_format.lower() == "txt"
+    is_txt_format = task_data.metadata_output_format and "txt" in task_data.metadata_output_format.lower().split(",")
     if is_txt_format:
         metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}
 
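The behavioural change above: metadata_output_format may now hold a comma-separated list, and metadata is written for each recognised entry. A small standalone sketch of that parsing (the helper name is made up for illustration; the split/filter mirrors the loop added in this hunk):

```python
# Standalone sketch of the new parsing rule; "formats_to_write" is a
# hypothetical helper, not a function in the repo.
def formats_to_write(metadata_output_format: str) -> list:
    if not metadata_output_format:
        return []
    return [
        fmt for fmt in metadata_output_format.split(",")
        if fmt.lower() in ["json", "txt", "embed"]
    ]

print(formats_to_write("json,txt"))  # ['json', 'txt'] -> two metadata outputs
print(formats_to_write("none"))      # [] -> nothing written
print(formats_to_write(""))          # [] -> setting left empty
```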
@@ -215,10 +219,12 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData):
         del metadata["hypernetwork_strength"]
     if task_data.use_lora_model is None and "lora_alpha" in metadata:
         del metadata["lora_alpha"]
+    if task_data.use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
+        del metadata["latent_upscaler_steps"]
 
     app_config = app.getConfig()
     if not app_config.get("test_diffusers", False):
-        for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip"] if x in metadata):
+        for key in (x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps"] if x in metadata):
             del metadata[key]
 
     return metadata
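The new pruning rule hides latent_upscaler_steps from the printable metadata unless the latent upscaler is the selected upscale mode. A quick standalone illustration (the non-latent upscaler value below is hypothetical):

```python
# Standalone illustration of the pruning rule added above; the value
# "some_other_upscaler" is hypothetical.
metadata = {"use_upscale": "some_other_upscaler", "latent_upscaler_steps": 10}
use_upscale = metadata["use_upscale"]

if use_upscale != "latent_upscaler" and "latent_upscaler_steps" in metadata:
    del metadata["latent_upscaler_steps"]

print(metadata)  # {'use_upscale': 'some_other_upscaler'} -- steps shown only for the latent upscaler
```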
@@ -30,7 +30,7 @@
             <h1>
                 <img id="logo_img" src="/media/images/icon-512x512.png" >
                 Easy Diffusion
-                <small>v2.5.38 <span id="updateBranchLabel"></span></small>
+                <small>v2.5.39 <span id="updateBranchLabel"></span></small>
             </h1>
         </div>
         <div id="server-status">
@@ -167,7 +167,7 @@
                         <option value="unipc_snr" class="k_diffusion-only">UniPC SNR</option>
                         <option value="unipc_tu">UniPC TU</option>
                         <option value="unipc_snr_2" class="k_diffusion-only">UniPC SNR 2</option>
-                        <option value="unipc_tu_2">UniPC TU 2</option>
+                        <option value="unipc_tu_2" class="k_diffusion-only">UniPC TU 2</option>
                         <option value="unipc_tq" class="k_diffusion-only">UniPC TQ</option>
                     </select>
                     <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
@@ -236,6 +236,15 @@
                     <td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
                     <td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
                 </tr>
+                <tr id="tiling_container" class="pl-5"><td><label for="tiling">Seamless Tiling:</label></td><td>
+                    <select id="tiling" name="tiling">
+                        <option value="none" selected>None</option>
+                        <option value="x">Horizontal</option>
+                        <option value="y">Vertical</option>
+                        <option value="xy">Both</option>
+                    </select>
+                    <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Seamless-Tiling" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Seamless Tiling</span></i></a>
+                </td></tr>
                 <tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
                     <select id="output_format" name="output_format">
                         <option value="jpeg" selected>jpeg</option>
@@ -25,6 +25,7 @@ const SETTINGS_IDS_LIST = [
     "prompt_strength",
     "hypernetwork_strength",
     "lora_alpha",
+    "tiling",
     "output_format",
     "output_quality",
     "output_lossless",
@@ -34,6 +35,7 @@ const SETTINGS_IDS_LIST = [
     "gfpgan_model",
     "use_upscale",
     "upscale_amount",
+    "latent_upscaler_steps",
     "block_nsfw",
     "show_only_filtered_image",
     "upscale_model",
@@ -79,6 +79,7 @@ const TASK_MAPPING = {
             if (!widthField.value) {
                 widthField.value = oldVal
             }
+            widthField.dispatchEvent(new Event("change"))
         },
         readUI: () => parseInt(widthField.value),
         parse: (val) => parseInt(val),
@@ -91,6 +92,7 @@ const TASK_MAPPING = {
             if (!heightField.value) {
                 heightField.value = oldVal
             }
+            heightField.dispatchEvent(new Event("change"))
         },
         readUI: () => parseInt(heightField.value),
         parse: (val) => parseInt(val),
@@ -172,16 +174,22 @@ const TASK_MAPPING = {
         name: "Use Face Correction",
         setUI: (use_face_correction) => {
             const oldVal = gfpganModelField.value
-            gfpganModelField.value = getModelPath(use_face_correction, [".pth"])
-            if (gfpganModelField.value) {
-                // Is a valid value for the field.
-                useFaceCorrectionField.checked = true
-                gfpganModelField.disabled = false
-            } else {
-                // Not a valid value, restore the old value and disable the filter.
-                gfpganModelField.disabled = true
-                gfpganModelField.value = oldVal
-                useFaceCorrectionField.checked = false
+            console.log("use face correction", use_face_correction)
+            if (use_face_correction == null || use_face_correction == "None") {
+                gfpganModelField.disabled = true
+                useFaceCorrectionField.checked = false
+            } else {
+                gfpganModelField.value = getModelPath(use_face_correction, [".pth"])
+                if (gfpganModelField.value) {
+                    // Is a valid value for the field.
+                    useFaceCorrectionField.checked = true
+                    gfpganModelField.disabled = false
+                } else {
+                    // Not a valid value, restore the old value and disable the filter.
+                    gfpganModelField.disabled = true
+                    gfpganModelField.value = oldVal
+                    useFaceCorrectionField.checked = false
+                }
             }
 
             //useFaceCorrectionField.checked = parseBoolean(use_face_correction)
@@ -218,6 +226,14 @@ const TASK_MAPPING = {
         readUI: () => upscaleAmountField.value,
         parse: (val) => val,
     },
+    latent_upscaler_steps: {
+        name: "Latent Upscaler Steps",
+        setUI: (latent_upscaler_steps) => {
+            latentUpscalerStepsField.value = latent_upscaler_steps
+        },
+        readUI: () => latentUpscalerStepsField.value,
+        parse: (val) => val,
+    },
     sampler_name: {
         name: "Sampler",
         setUI: (sampler_name) => {
@@ -249,6 +265,14 @@ const TASK_MAPPING = {
         readUI: () => clip_skip.checked,
         parse: (val) => Boolean(val),
     },
+    tiling: {
+        name: "Tiling",
+        setUI: (val) => {
+            tilingField.value = val
+        },
+        readUI: () => tilingField.value,
+        parse: (val) => val,
+    },
     use_vae_model: {
         name: "VAE model",
         setUI: (use_vae_model) => {
@@ -411,6 +435,7 @@ function restoreTaskToUI(task, fieldsToSkip) {
     if (!("original_prompt" in task.reqBody)) {
         promptField.value = task.reqBody.prompt
     }
+    promptField.dispatchEvent(new Event("input"))
 
     // properly reset checkboxes
     if (!("use_face_correction" in task.reqBody)) {
@@ -789,9 +789,10 @@
     use_hypernetwork_model: "string",
     hypernetwork_strength: "number",
     output_lossless: "boolean",
+    tiling: "string",
 }
 
-// Higer values will result in...
+// Higher values will result in...
 // pytorch_lightning/utilities/seed.py:60: UserWarning: X is not in bounds, numpy accepts from 0 to 4294967295
 const MAX_SEED_VALUE = 4294967295
 
@@ -18,6 +18,11 @@ const taskConfigSetup = {
         visible: ({ reqBody }) => reqBody?.clip_skip,
         value: ({ reqBody }) => "yes",
     },
+    tiling: {
+        label: "Tiling",
+        visible: ({ reqBody }) => reqBody?.tiling != "none",
+        value: ({ reqBody }) => reqBody?.tiling,
+    },
     use_vae_model: {
         label: "VAE",
         visible: ({ reqBody }) => reqBody?.use_vae_model !== undefined && reqBody?.use_vae_model.trim() !== "",
@@ -91,6 +96,7 @@ let latentUpscalerStepsSlider = document.querySelector("#latent_upscaler_steps_s
 let latentUpscalerStepsField = document.querySelector("#latent_upscaler_steps")
 let stableDiffusionModelField = new ModelDropdown(document.querySelector("#stable_diffusion_model"), "stable-diffusion")
 let clipSkipField = document.querySelector("#clip_skip")
+let tilingField = document.querySelector("#tiling")
 let vaeModelField = new ModelDropdown(document.querySelector("#vae_model"), "vae", "None")
 let hypernetworkModelField = new ModelDropdown(document.querySelector("#hypernetwork_model"), "hypernetwork", "None")
 let hypernetworkStrengthSlider = document.querySelector("#hypernetwork_strength_slider")
@@ -1221,6 +1227,7 @@ function getCurrentUserRequest() {
             //render_device: undefined, // Set device affinity. Prefer this device, but wont activate.
             use_stable_diffusion_model: stableDiffusionModelField.value,
             clip_skip: clipSkipField.checked,
+            tiling: tilingField.value,
             use_vae_model: vaeModelField.value,
             stream_progress_updates: true,
             stream_image_progress: numOutputsTotal > 50 ? false : streamImageProgressField.checked,
@@ -396,6 +396,7 @@ async function getAppConfig() {
     if (!testDiffusersEnabled) {
         document.querySelector("#lora_model_container").style.display = "none"
         document.querySelector("#lora_alpha_container").style.display = "none"
+        document.querySelector("#tiling_container").style.display = "none"
 
         document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
             option.style.display = "none"
@@ -403,6 +404,7 @@
     } else {
         document.querySelector("#lora_model_container").style.display = ""
         document.querySelector("#lora_alpha_container").style.display = loraModelField.value ? "" : "none"
+        document.querySelector("#tiling_container").style.display = ""
 
         document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
             option.disabled = true
@@ -403,16 +403,19 @@
             // Batch main loop
             for (let i = 0; i < iterations; i++) {
                 let alpha = (start + i * step) / 100
-                switch (document.querySelector("#merge-interpolation").value) {
-                    case "SmoothStep":
-                        alpha = smoothstep(alpha)
-                        break
-                    case "SmootherStep":
-                        alpha = smootherstep(alpha)
-                        break
-                    case "SmoothestStep":
-                        alpha = smootheststep(alpha)
-                        break
+
+                if (isTabActive(tabSettingsBatch)) {
+                    switch (document.querySelector("#merge-interpolation").value) {
+                        case "SmoothStep":
+                            alpha = smoothstep(alpha)
+                            break
+                        case "SmootherStep":
+                            alpha = smootherstep(alpha)
+                            break
+                        case "SmoothestStep":
+                            alpha = smootheststep(alpha)
+                            break
+                    }
                 }
                 addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
 
@@ -420,7 +423,8 @@
                 request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
                 addLogMessage(` filename: ${request["out_path"]}`)
 
-                request["ratio"] = alpha
+                // sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
+                request["ratio"] = 1-alpha
                 let res = await fetch("/model/merge", {
                     method: "POST",
                     headers: { "Content-Type": "application/json" },
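Since sdkit's ratio is documented (per the comment added above) as the share of the second model, the alpha computed in the batch loop is now inverted before being sent. A tiny worked check of that mapping, with arbitrary alpha values:

```python
# Worked check of the alpha -> ratio mapping introduced above: the request's
# "ratio" becomes 1 - alpha, matching sdkit's "ratio of the second model".
for alpha in (0.0, 0.25, 0.5, 0.75, 1.0):
    ratio = 1 - alpha
    print(f"alpha={alpha:.2f} -> request ratio={ratio:.2f}")
```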