Mirror of https://github.com/easydiffusion/easydiffusion.git
Support an arbitrary number of custom models placed in the models/stable-diffusion folder, and show an option in the UI to select which model to use.
commit 201a053025
parent 703f987825
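For orientation, a sketch (not part of the commit) of the data this change moves around, based on the getModels() handler and save_model_to_config() added in ui/server.py below. The model name 'my-custom-model' is a hypothetical example of a .ckpt file dropped into models/stable-diffusion:

    # Shape of the response served by the new GET /models endpoint (sketch):
    models = {
        'active':  {'stable-diffusion': 'my-custom-model'},                   # last model saved to scripts/config.json
        'options': {'stable-diffusion': ['sd-v1-4', 'my-custom-model']},      # sd-v1-4 plus every *.ckpt found in models/stable-diffusion
    }

    # Entry written to scripts/config.json after a render with that model selected (sketch):
    config = {'model': {'stable-diffusion': 'my-custom-model'}}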
@@ -165,6 +165,8 @@ call WHERE uvicorn > .tmp
+if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
+
 @if exist "sd-v1-4.ckpt" (
     for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
         echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
@@ -159,6 +159,8 @@ fi
+mkdir -p "../models/stable-diffusion"
+
 if [ -f "sd-v1-4.ckpt" ]; then
     model_size=`find "sd-v1-4.ckpt" -printf "%s"`
@@ -15,7 +15,7 @@
 <div id="container">
     <div id="top-nav">
         <div id="logo">
-            <h1>Stable Diffusion UI <small>v2.195 <span id="updateBranchLabel"></span></small></h1>
+            <h1>Stable Diffusion UI <small>v2.2 <span id="updateBranchLabel"></span></small></h1>
         </div>
         <ul id="top-nav-items">
             <li class="dropdown">
@@ -89,6 +89,11 @@
 <li><b class="settings-subheader">Image Settings</b></li>
 <li class="pl-5"><label for="seed">Seed:</label> <input id="seed" name="seed" size="10" value="30000"> <input id="random_seed" name="random_seed" type="checkbox" checked> <label for="random_seed">Random Image</label></li>
 <li class="pl-5"><label for="num_outputs_total">Number of images to make:</label> <input id="num_outputs_total" name="num_outputs_total" value="1" size="1"> <label for="num_outputs_parallel">Generate in parallel:</label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1"> (images at once)</li>
+<li class="pl-5"><label for="stable_diffusion_model">Model:</label>
+    <select id="stable_diffusion_model" name="stable_diffusion_model">
+        <!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
+    </select>
+</li>
 <li id="samplerSelection" class="pl-5"><label for="sampler">Sampler:</label>
     <select id="sampler" name="sampler">
         <option value="plms" selected>plms</option>
@@ -213,12 +218,13 @@
     </div>
 </body>
 
-<script src="media/main.js?v=22"></script>
+<script src="media/main.js?v=30"></script>
 <script>
 async function init() {
     await loadModifiers()
     await getDiskPath()
     await getAppConfig()
+    await getModels()
 
     setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
     healthCheck()
@@ -46,6 +46,7 @@ let samplerSelectionContainer = document.querySelector("#samplerSelection")
 let useFaceCorrectionField = document.querySelector("#use_face_correction")
 let useUpscalingField = document.querySelector("#use_upscale")
 let upscaleModelField = document.querySelector("#upscale_model")
+let stableDiffusionModelField = document.querySelector('#stable_diffusion_model')
 let showOnlyFilteredImageField = document.querySelector("#show_only_filtered_image")
 let updateBranchLabel = document.querySelector("#updateBranchLabel")
 let streamImageProgressField = document.querySelector("#stream_image_progress")
@@ -624,6 +625,7 @@ async function makeImage() {
         turbo: turboField.checked,
         use_cpu: useCPUField.checked,
         use_full_precision: useFullPrecisionField.checked,
+        use_stable_diffusion_model: stableDiffusionModelField.value,
         stream_progress_updates: true,
         stream_image_progress: streamImageProgress,
         show_only_filtered_image: showOnlyFilteredImageField.checked
@@ -657,7 +659,7 @@ async function makeImage() {
         reqBody['use_upscale'] = upscaleModelField.value
     }
 
-    let taskConfig = `Seed: ${seed}, Sampler: ${reqBody['sampler']}, Inference Steps: ${numInferenceStepsField.value}, Guidance Scale: ${guidanceScaleField.value}`
+    let taskConfig = `Seed: ${seed}, Sampler: ${reqBody['sampler']}, Inference Steps: ${numInferenceStepsField.value}, Guidance Scale: ${guidanceScaleField.value}, Model: ${stableDiffusionModelField.value}`
 
     if (negativePromptField.value.trim() !== '') {
         taskConfig += `, Negative Prompt: ${negativePromptField.value.trim()}`
@@ -933,6 +935,33 @@ async function getAppConfig() {
     }
 }
 
+async function getModels() {
+    try {
+        let res = await fetch('/models')
+        models = await res.json()
+
+        let activeModel = models['active']
+        let modelOptions = models['options']
+        let stableDiffusionOptions = modelOptions['stable-diffusion']
+
+        stableDiffusionOptions.forEach(modelName => {
+            let modelOption = document.createElement('option')
+            modelOption.value = modelName
+            modelOption.innerText = modelName
+
+            if (modelName === activeModel['stable-diffusion']) {
+                modelOption.selected = true
+            }
+
+            stableDiffusionModelField.appendChild(modelOption)
+        })
+
+        console.log('get models response', config)
+    } catch (e) {
+        console.log('get models error', e)
+    }
+}
+
 function checkRandomSeed() {
     if (randomSeedField.checked) {
         seedField.disabled = true
@@ -22,6 +22,7 @@ class Request:
     use_full_precision: bool = False
     use_face_correction: str = None # or "GFPGANv1.3"
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
+    use_stable_diffusion_model: str = "sd-v1-4"
     show_only_filtered_image: bool = False
 
     stream_progress_updates: bool = False
@@ -42,6 +43,7 @@ class Request:
             "sampler": self.sampler,
             "use_face_correction": self.use_face_correction,
             "use_upscale": self.use_upscale,
+            "use_stable_diffusion_model": self.use_stable_diffusion_model,
         }
 
     def to_string(self):
@@ -62,6 +64,7 @@ class Request:
     use_full_precision: {self.use_full_precision}
     use_face_correction: {self.use_face_correction}
     use_upscale: {self.use_upscale}
+    use_stable_diffusion_model: {self.use_stable_diffusion_model}
     show_only_filtered_image: {self.show_only_filtered_image}
 
     stream_progress_updates: {self.stream_progress_updates}
@@ -208,6 +208,7 @@ def mk_img(req: Request):
         })
 
 def do_mk_img(req: Request):
+    global ckpt_file
     global model, modelCS, modelFS, device
     global model_gfpgan, model_real_esrgan
     global stop_processing
@@ -220,6 +221,15 @@ def do_mk_img(req: Request):
 
     temp_images.clear()
 
+    # custom model support:
+    #  the req.use_stable_diffusion_model needs to be a valid path
+    #  to the ckpt file (without the extension).
+
+    needs_model_reload = False
+    if ckpt_file != req.use_stable_diffusion_model:
+        ckpt_file = req.use_stable_diffusion_model
+        needs_model_reload = True
+
     model.turbo = req.turbo
     if req.use_cpu:
         if device != 'cpu':
@@ -228,6 +238,7 @@ def do_mk_img(req: Request):
             if model_is_half:
                 del model, modelCS, modelFS
                 load_model_ckpt(ckpt_file, device)
+                needs_model_reload = False
 
             load_model_gfpgan(gfpgan_file)
             load_model_real_esrgan(real_esrgan_file)
@@ -243,11 +254,15 @@ def do_mk_img(req: Request):
 
                 del model, modelCS, modelFS
                 load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, ('full' if req.use_full_precision else 'autocast'), half_model_fs=(req.init_image is not None and not req.use_full_precision))
+                needs_model_reload = False
 
                 if prev_device != device:
                     load_model_gfpgan(gfpgan_file)
                     load_model_real_esrgan(real_esrgan_file)
 
+    if needs_model_reload:
+        load_model_ckpt(ckpt_file, device, req.turbo, unet_bs, precision, model_fs_is_half)
+
     if req.use_face_correction != gfpgan_file:
         load_model_gfpgan(req.use_face_correction)
 
@@ -444,7 +459,7 @@ def do_mk_img(req: Request):
                 if return_orig_img:
                     save_image(img, img_out_path)
 
-                save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt)
+                save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_strength, opt_use_face_correction, opt_use_upscale, opt_sampler_name, req.negative_prompt, ckpt_file)
 
                 if return_orig_img:
                     img_data = img_to_base64_str(img)
@@ -505,8 +520,8 @@ def save_image(img, img_out_path):
     except:
         print('could not save the file', traceback.format_exc())
 
-def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt):
-    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}"
+def save_metadata(meta_out_path, prompts, opt_seed, opt_W, opt_H, opt_ddim_steps, opt_scale, opt_prompt_strength, opt_correct_face, opt_upscale, sampler_name, negative_prompt, ckpt_file):
+    metadata = f"{prompts[0]}\nWidth: {opt_W}\nHeight: {opt_H}\nSeed: {opt_seed}\nSteps: {opt_ddim_steps}\nGuidance Scale: {opt_scale}\nPrompt Strength: {opt_prompt_strength}\nUse Face Correction: {opt_correct_face}\nUse Upscaling: {opt_upscale}\nSampler: {sampler_name}\nNegative Prompt: {negative_prompt}\nStable Diffusion Model: {ckpt_file + '.ckpt'}"
 
     try:
         with open(meta_out_path, 'w') as f:
ui/server.py
@@ -4,13 +4,14 @@ import traceback
 import sys
 import os
 
-SCRIPT_DIR = os.getcwd()
-print('started in ', SCRIPT_DIR)
+SD_DIR = os.getcwd()
+print('started in ', SD_DIR)
 
 SD_UI_DIR = os.getenv('SD_UI_PATH', None)
 sys.path.append(os.path.dirname(SD_UI_DIR))
 
-CONFIG_DIR = os.path.join(SD_UI_DIR, '..', 'scripts')
+CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
+MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))
 
 OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
 
@@ -57,6 +58,7 @@ class ImageRequest(BaseModel):
     use_full_precision: bool = False
     use_face_correction: str = None # or "GFPGANv1.3"
     use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
+    use_stable_diffusion_model: str = "sd-v1-4"
     show_only_filtered_image: bool = False
 
     stream_progress_updates: bool = False
@@ -85,9 +87,7 @@ async def ping():
 
         from sd_internal import runtime
 
-        custom_weight_path = os.path.join(SCRIPT_DIR, 'custom-model.ckpt')
-        ckpt_to_use = "sd-v1-4" if not os.path.exists(custom_weight_path) else "custom-model"
-        runtime.load_model_ckpt(ckpt_to_use=ckpt_to_use)
+        runtime.load_model_ckpt(ckpt_to_use=get_initial_model_to_load())
 
         model_loaded = True
         model_is_loading = False
@@ -97,6 +97,46 @@ async def ping():
         print(traceback.format_exc())
         return HTTPException(status_code=500, detail=str(e))
 
+# needs to support the legacy installations
+def get_initial_model_to_load():
+    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
+    ckpt_to_use = "sd-v1-4" if not os.path.exists(custom_weight_path) else "custom-model"
+
+    ckpt_to_use = os.path.join(SD_DIR, ckpt_to_use)
+
+    config = getConfig()
+    if 'model' in config and 'stable-diffusion' in config['model']:
+        model_name = config['model']['stable-diffusion']
+        model_path = resolve_model_to_use(model_name)
+
+        if os.path.exists(model_path + '.ckpt'):
+            ckpt_to_use = model_path
+        else:
+            print('Could not find the configured custom model at:', model_path + '.ckpt', '. Using the default one:', ckpt_to_use + '.ckpt')
+
+    return ckpt_to_use
+
+def resolve_model_to_use(model_name):
+    if model_name in ('sd-v1-4', 'custom-model'):
+        model_path = os.path.join(MODELS_DIR, 'stable-diffusion', model_name)
+
+        legacy_model_path = os.path.join(SD_DIR, model_name)
+        if not os.path.exists(model_path + '.ckpt') and os.path.exists(legacy_model_path + '.ckpt'):
+            model_path = legacy_model_path
+    else:
+        model_path = os.path.join(MODELS_DIR, 'stable-diffusion', model_name)
+
+    return model_path
+
+def save_model_to_config(model_name):
+    config = getConfig()
+    if 'model' not in config:
+        config['model'] = {}
+
+    config['model']['stable-diffusion'] = model_name
+
+    setConfig(config)
+
 @app.post('/image')
 def image(req : ImageRequest):
     from sd_internal import runtime
@@ -127,6 +167,10 @@ def image(req : ImageRequest):
     r.stream_progress_updates = True # the underlying implementation only supports streaming
     r.stream_image_progress = req.stream_image_progress
 
+    r.use_stable_diffusion_model = resolve_model_to_use(req.use_stable_diffusion_model)
+
+    save_model_to_config(req.use_stable_diffusion_model)
+
     try:
         if not req.stream_progress_updates:
             r.stream_image_progress = False
@@ -205,13 +249,62 @@ def getAppConfig():
             return HTTPException(status_code=500, detail="No config file")
 
         with open(config_json_path, 'r') as f:
-            config_json_str = f.read()
-            config = json.loads(config_json_str)
-            return config
+            return json.load(f)
     except Exception as e:
         print(traceback.format_exc())
         return HTTPException(status_code=500, detail=str(e))
 
+def getConfig():
+    try:
+        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
+
+        if not os.path.exists(config_json_path):
+            return {}
+
+        with open(config_json_path, 'r') as f:
+            return json.load(f)
+    except Exception as e:
+        return {}
+
+def setConfig(config):
+    try:
+        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
+
+        with open(config_json_path, 'w') as f:
+            return json.dump(config, f)
+    except:
+        print(traceback.format_exc())
+
+@app.get('/models')
+def getModels():
+    models = {
+        'active': {
+            'stable-diffusion': 'sd-v1-4',
+        },
+        'options': {
+            'stable-diffusion': ['sd-v1-4'],
+        },
+    }
+
+    # custom models
+    sd_models_dir = os.path.join(MODELS_DIR, 'stable-diffusion')
+    for file in os.listdir(sd_models_dir):
+        if file.endswith('.ckpt'):
+            model_name = os.path.splitext(file)[0]
+            models['options']['stable-diffusion'].append(model_name)
+
+    # legacy
+    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
+    if os.path.exists(custom_weight_path):
+        models['active']['stable-diffusion'] = 'custom-model'
+        models['options']['stable-diffusion'].append('custom-model')
+
+    config = getConfig()
+    if 'model' in config and 'stable-diffusion' in config['model']:
+        models['active']['stable-diffusion'] = config['model']['stable-diffusion']
+
+    return models
+
 @app.get('/modifiers.json')
 def read_modifiers():
     headers = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
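A minimal sketch (not part of the commit) of how the selected model name travels server-side, restating what the diff above does. The file name 'my-custom-model' is hypothetical; MODELS_DIR is defined exactly as in ui/server.py:

    import os

    SD_DIR = os.getcwd()                                                # as in ui/server.py
    MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))

    # The UI dropdown sends use_stable_diffusion_model = 'my-custom-model';
    # resolve_model_to_use() maps it to a path without the '.ckpt' extension:
    model_path = os.path.join(MODELS_DIR, 'stable-diffusion', 'my-custom-model')
    # the runtime then loads model_path + '.ckpt' whenever the selection changes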