Mirror of https://github.com/easydiffusion/easydiffusion.git (synced 2025-08-13 17:57:20 +02:00)

Compare commits: v2.5.48-fi...forge (389 commits)
4  .gitignore (vendored)

@@ -3,4 +3,6 @@ installer
installer.tar
dist
.idea/*
node_modules/*
.tmp1
.tmp2
@@ -712,3 +712,411 @@ FileSaver.js is licensed under the MIT license:
SOFTWARE.

[1]: http://eligrey.com

croppr.js
=========
https://github.com/jamesssooi/Croppr.js

croppr.js is licensed under the MIT license:

MIT License

Copyright (c) 2017 James Ooi

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

ExifReader
==========
https://github.com/mattiasw/ExifReader

ExifReader is licensed under the Mozilla Public License:

Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.

1.5. "Incompatible With Secondary Licenses"
    means

    (a) that the initial Contributor has attached the notice described
        in Exhibit B to the Covered Software; or

    (b) that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the
        terms of a Secondary License.

1.6. "Executable Form"
    means any form of the work other than Source Code Form.

1.7. "Larger Work"
    means a work that combines Covered Software with other material, in
    a separate file or files, that is not Covered Software.

1.8. "License"
    means this document.

1.9. "Licensable"
    means having the right to grant, to the maximum extent possible,
    whether at the time of the initial grant or subsequently, any and
    all of the rights conveyed by this License.

1.10. "Modifications"
    means any of the following:

    (a) any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered
        Software; or

    (b) any new file in Source Code Form that contains any Covered
        Software.

1.11. "Patent Claims" of a Contributor
    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the
    License, by the making, using, selling, offering for sale, having
    made, import, or transfer of either its Contributions or its
    Contributor Version.

1.12. "Secondary License"
    means either the GNU General Public License, Version 2.0, the GNU
    Lesser General Public License, Version 2.1, the GNU Affero General
    Public License, Version 3.0, or any later versions of those
    licenses.

1.13. "Source Code Form"
    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")
    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that
    controls, is controlled by, or is under common control with You. For
    purposes of this definition, "control" means (a) the power, direct
    or indirect, to cause the direction or management of such entity,
    whether by contract or otherwise, or (b) ownership of more than
    fifty percent (50%) of the outstanding shares or beneficial
    ownership of such entity.

2. License Grants and Conditions
--------------------------------

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

(a) under intellectual property rights (other than patent or trademark)
    Licensable by such Contributor to use, reproduce, make available,
    modify, display, perform, distribute, and otherwise exploit its
    Contributions, either on an unmodified basis, with Modifications, or
    as part of a Larger Work; and

(b) under Patent Claims of such Contributor to make, use, sell, offer
    for sale, have made, import, and otherwise transfer either its
    Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

(a) for any code that a Contributor has removed from Covered Software;
    or

(b) for infringements caused by: (i) Your and any other third party's
    modifications of Covered Software, or (ii) the combination of its
    Contributions with other software (except as part of its Contributor
    Version); or

(c) under Patent Claims infringed by Covered Software in the absence of
    its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.

3. Responsibilities
-------------------

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

(a) such Covered Software must also be made available in Source Code
    Form, as described in Section 3.1, and You must inform recipients of
    the Executable Form how they can obtain a copy of such Source Code
    Form by reasonable means in a timely manner, at a charge no more
    than the cost of distribution to the recipient; and

(b) You may distribute such Executable Form under the terms of this
    License, or sublicense it under different terms, provided that the
    license for the Executable Form does not attempt to limit or alter
    the recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
70  CHANGES.md

@@ -1,5 +1,75 @@
# What's new?

## v3.5 (preview)
### Major Changes
- **Flux** - full support for the Flux model, including quantized bnb and nf4 models.
- **LyCORIS** - including `LoCon`, `Hada`, `IA3` and `Lokr`.
- **11 new samplers** - `DDIM CFG++`, `DPM Fast`, `DPM++ 2m SDE Heun`, `DPM++ 3M SDE`, `Restart`, `Heun PP2`, `IPNDM`, `IPNDM_V`, `LCM`, `[Forge] Flux Realistic`, `[Forge] Flux Realistic (Slow)`.
- **15 new schedulers** - `Uniform`, `Karras`, `Exponential`, `Polyexponential`, `SGM Uniform`, `KL Optimal`, `Align Your Steps`, `Normal`, `DDIM`, `Beta`, `Turbo`, `Align Your Steps GITS`, `Align Your Steps 11`, `Align Your Steps 32`.
- **42 new Controlnet filters, and support for lots of new ControlNet models** (including QR ControlNets).
- **5 upscalers** - `SwinIR`, `ScuNET`, `Nearest`, `Lanczos`, `ESRGAN`.
- **Faster than v3.0**
- **Major rewrite of the code** - We've switched to `Forge WebUI` under the hood, which brings a lot of new features, faster image generation, and support for all the extensions in the Forge/Automatic1111 community. This allows Easy Diffusion to stay up-to-date with the latest features, and focus on making the UI and installation experience even easier.

v3.5 is currently an optional upgrade, and you can switch between the v3.0 (diffusers) engine and the v3.5 (webui) engine using the `Settings` tab in the UI.

### Detailed changelog
* 3.5.0 - 11 Oct 2024 - **Preview release** of the new v3.5 engine, powered by Forge WebUI (a fork of Automatic1111). This enables Flux, SD3, LyCORIS and lots of new features, while using the same familiar Easy Diffusion interface.

## v3.0
### Major Changes
- **ControlNet** - Full support for ControlNet, with native integration of the common ControlNet models. Just select a control image, then choose the ControlNet filter/model and run. No additional configuration or download necessary. Supports custom ControlNets as well.
- **SDXL** - Full support for SDXL. No configuration necessary, just put the SDXL model in the `models/stable-diffusion` folder.
- **Multiple LoRAs** - Use multiple LoRAs, including SDXL and SD2-compatible LoRAs. Put them in the `models/lora` folder.
- **Embeddings** - Use textual inversion embeddings easily, by putting them in the `models/embeddings` folder and using their names in the prompt (or by clicking the `+ Embeddings` button to select embeddings visually). Thanks @JeLuf.
- **Seamless Tiling** - Generate repeating textures that can be useful for games and other art projects. Works best in 512x512 resolution. Thanks @JeLuf.
- **Inpainting Models** - Full support for inpainting models, including custom inpainting models. No configuration (or yaml files) necessary.
- **Faster than v2.5** - Nearly 40% faster than Easy Diffusion v2.5, and can be even faster if you enable xFormers.
- **Even less VRAM usage** - Less than 2 GB for 512x512 images on 'low' VRAM usage setting (SD 1.5). Can generate large images with SDXL.
- **WebP images** - Supports saving images in the lossless webp format.
- **Undo/Redo in the UI** - Remove tasks or images from the queue easily, and undo the action if you removed anything accidentally. Thanks @JeLuf.
- **Three new samplers, and latent upscaler** - Added `DEIS`, `DDPM` and `DPM++ 2m SDE` as additional samplers. Thanks @ogmaresca and @rbertus2000.
- **Significantly faster 'Upscale' and 'Fix Faces' buttons on the images**
- **Major rewrite of the code** - We've switched to using diffusers under-the-hood, which allows us to release new features faster, and focus on making the UI and installer even easier to use.

### Detailed changelog
* 3.0.10 - 11 Oct 2024 - **Major Update** - An option to upgrade to v3.5, which enables Flux, Stable Diffusion 3, LyCORIS models and lots more.
* 3.0.9 - 28 May 2024 - Slider for controlling the strength of controlnets.
* 3.0.8 - 27 May 2024 - SDXL ControlNets for Img2Img and Inpainting.
* 3.0.7 - 11 Dec 2023 - Setting to enable/disable VAE tiling (in the Image Settings panel). Sometimes VAE tiling reduces the quality of the image, so this setting will help control that.
* 3.0.6 - 18 Sep 2023 - Add thumbnails to embeddings from the UI, using the new `Upload Thumbnail` button in the Embeddings popup. Thanks @JeLuf.
* 3.0.6 - 15 Sep 2023 - Fix broken embeddings dialog when LoRA information couldn't be fetched.
* 3.0.6 - 14 Sep 2023 - UI for adding notes to LoRA files (to help you remember which prompts to use). Also added a button to automatically fetch prompts from Civitai for a LoRA file, using the `Import from Civitai` button. Thanks @JeLuf.
* 3.0.5 - 2 Sep 2023 - Support SDXL ControlNets.
* 3.0.4 - 1 Sep 2023 - Fix incorrect metadata generated for embeddings, when the exact word doesn't match the case, or is part of a larger word.
* 3.0.4 - 1 Sep 2023 - Simplify the installation for AMD users on Linux. Thanks @JeLuf.
* 3.0.4 - 1 Sep 2023 - Allow using a different folder for models. This is useful if you want to share a models folder across different software, or on a different drive. You can change this path in the Settings tab.
* 3.0.3 - 31 Aug 2023 - Auto-save images to disk (if enabled by the user) when upscaling/fixing using the buttons on the image.
* 3.0.3 - 30 Aug 2023 - Allow loading NovelAI-based custom models.
* 3.0.3 - 30 Aug 2023 - Fix broken VAE tiling. This allows you to create larger images with lesser VRAM usage.
* 3.0.3 - 30 Aug 2023 - Allow blocking NSFW images using a server-side config. This prevents the browser from generating NSFW images or changing the config. Open `config.yaml` in a text editor (e.g. Notepad), and add `block_nsfw: true` at the end, and save the file.
* 3.0.2 - 29 Aug 2023 - Fixed incorrect matching of embeddings from prompts.
* 3.0.2 - 24 Aug 2023 - Fix broken seamless tiling.
* 3.0.2 - 23 Aug 2023 - Fix styling on mobile devices.
* 3.0.2 - 22 Aug 2023 - Full support for inpainting models, including custom models. Support SD 1.x and SD 2.x inpainting models. Does not require you to specify a yaml config file.
* 3.0.2 - 22 Aug 2023 - Reduce VRAM consumption of controlnet in 'low' VRAM mode, and allow accelerating controlnets using xformers.
* 3.0.2 - 22 Aug 2023 - Improve auto-detection of SD 2.0 and 2.1 models, removing the need for custom yaml files for SD 2.x models. Improve the model load time by speeding-up the black image test.
* 3.0.1 - 18 Aug 2023 - Rotate an image if EXIF rotation is present. For e.g. this is common in images taken with a smartphone.
* 3.0.1 - 18 Aug 2023 - Resize control images to the task dimensions, to avoid memory errors with high-res control images.
* 3.0.1 - 18 Aug 2023 - Show controlnet filter preview in the task entry.
* 3.0.1 - 18 Aug 2023 - Fix drag-and-drop and 'Use these Settings' for LoRA and ControlNet.
* 3.0.1 - 18 Aug 2023 - Auto-save LoRA models and strengths.
* 3.0.1 - 17 Aug 2023 - Automatically use the correct yaml config file for custom SDXL models, even if a yaml file isn't present in the folder.
* 3.0.1 - 17 Aug 2023 - Fix broken embeddings with SDXL.
* 3.0.1 - 16 Aug 2023 - Fix broken LoRA with SDXL.
* 3.0.1 - 15 Aug 2023 - Fix broken seamless tiling.
* 3.0.1 - 15 Aug 2023 - Fix textual inversion embeddings not working in `low` VRAM usage mode.
* 3.0.1 - 15 Aug 2023 - Fix for custom VAEs not working in `low` VRAM usage mode.
* 3.0.1 - 14 Aug 2023 - Slider to change the image dimensions proportionally (in Image Settings). Thanks @JeLuf.
* 3.0.1 - 14 Aug 2023 - Show an error to the user if an embedding isn't compatible with the model, instead of failing silently without informing the user. Thanks @JeLuf.
* 3.0.1 - 14 Aug 2023 - Disable watermarking for SDXL img2img. Thanks @AvidGameFan.
* 3.0.0 - 3 Aug 2023 - Enabled diffusers for everyone by default. The old v2 engine can be used by disabling the "Use v3 engine" option in the Settings tab.

## v2.5
### Major Changes
- **Nearly twice as fast** - significantly faster speed of image generation. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
@@ -47,3 +47,5 @@ Build the Windows installer using Windows, and the Linux installer using Linux.

1. Run `build.bat` or `./build.sh` depending on whether you're in Windows or Linux.
2. Make a new GitHub release and upload the Windows and Linux installer builds created inside the `dist` folder (see the sketch after this hunk).

For NSIS (on Windows), you need to have these plugins in the `nsis/Plugins` folder: `amd64-unicode`, `x86-ansi`, `x86-unicode`
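A minimal sketch of the Linux/Mac side of that release flow, assuming you run it from the repository root and have `zip` installed (the archive names come from `build.sh`, shown further down in this comparison):

```bash
# Sketch only: Linux/Mac release flow from the steps above.
./build.sh      # answer "Y" at the developer prompt
ls dist/        # Easy-Diffusion-Linux.zip and Easy-Diffusion-Mac.zip land here
# Create a new GitHub release and upload the zip files from dist/
```

The Windows side uses `build.bat` plus NSIS (`makensis`), which is why the NSIS plugins listed above must be present.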
@@ -1,18 +1,18 @@
Congrats on downloading Stable Diffusion UI, version 2!
Congrats on downloading Easy Diffusion, version 3!

If you haven't downloaded Stable Diffusion UI yet, please download from https://github.com/easydiffusion/easydiffusion#installation
If you haven't downloaded Easy Diffusion yet, please download from https://github.com/easydiffusion/easydiffusion#installation

After downloading, to install please follow these instructions:

For Windows:
- Please double-click the "Easy-Diffusion-Windows.exe" file and follow the instructions.

For Linux:
- Please open a terminal, unzip the Easy-Diffusion-Linux.zip file and go to the "easy-diffusion" directory. Then run ./start.sh
For Linux and Mac:
- Please open a terminal, and go to the "easy-diffusion" directory. Then run ./start.sh

That file will automatically install everything. After that it will start the Stable Diffusion interface in a web browser.
That file will automatically install everything. After that it will start the Easy Diffusion interface in a web browser.

To start the UI in the future, please run the same command mentioned above.
To start Easy Diffusion in the future, please run the same command mentioned above.


If you have any problems, please:
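For reference, the Linux/Mac instructions above amount to a couple of shell commands. This is a sketch only; the folder name is the one produced by unzipping the downloaded archive:

```bash
# Sketch of the Linux/Mac steps described above.
unzip Easy-Diffusion-Linux.zip   # or Easy-Diffusion-Mac.zip on macOS
cd easy-diffusion
./start.sh                       # installs everything, then opens the UI in a web browser
```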
BIN  NSIS/astro.bmp — binary file not shown (before: 288 KiB)

@@ -1 +0,0 @@
!define EXISTING_INSTALLATION_DIR "D:\path\to\installed\easy-diffusion"

BIN  NSIS/sd.ico — binary file not shown (before: 200 KiB)
@@ -7,9 +7,9 @@ RequestExecutionLevel user
!AddPluginDir /amd64-unicode "."
; HM NIS Edit Wizard helper defines
!define PRODUCT_NAME "Easy Diffusion"
!define PRODUCT_VERSION "2.5"
!define PRODUCT_VERSION "3.0"
!define PRODUCT_PUBLISHER "cmdr2 and contributors"
!define PRODUCT_WEB_SITE "https://stable-diffusion-ui.github.io"
!define PRODUCT_WEB_SITE "https://easydiffusion.github.io"
!define PRODUCT_DIR_REGKEY "Software\Microsoft\Easy Diffusion\App Paths\installer.exe"

; MUI 1.67 compatible ------

@@ -165,9 +165,9 @@ FunctionEnd
; MUI Settings
;---------------------------------------------------------------------------------------------------------
!define MUI_ABORTWARNING
!define MUI_ICON "cyborg_flower_girl.ico"
!define MUI_ICON "${EXISTING_INSTALLATION_DIR}\installer_files\cyborg_flower_girl.ico"

!define MUI_WELCOMEFINISHPAGE_BITMAP "cyborg_flower_girl.bmp"
!define MUI_WELCOMEFINISHPAGE_BITMAP "${EXISTING_INSTALLATION_DIR}\installer_files\cyborg_flower_girl.bmp"

; Welcome page
!define MUI_WELCOMEPAGE_TEXT "This installer will guide you through the installation of Easy Diffusion.$\n$\n\

@@ -176,8 +176,8 @@ Click Next to continue."
Page custom MediaPackDialog

; License page
!insertmacro MUI_PAGE_LICENSE "..\LICENSE"
!insertmacro MUI_PAGE_LICENSE "..\CreativeML Open RAIL-M License"
!insertmacro MUI_PAGE_LICENSE "${EXISTING_INSTALLATION_DIR}\LICENSE"
!insertmacro MUI_PAGE_LICENSE "${EXISTING_INSTALLATION_DIR}\CreativeML Open RAIL-M License"
; Directory page
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE "DirectoryLeave"
!insertmacro MUI_PAGE_DIRECTORY

@@ -210,29 +210,33 @@ ShowInstDetails show
; List of files to be installed
Section "MainSection" SEC01
SetOutPath "$INSTDIR"
File "..\CreativeML Open RAIL-M License"
File "..\How to install and run.txt"
File "..\LICENSE"
File "..\scripts\Start Stable Diffusion UI.cmd"
File "${EXISTING_INSTALLATION_DIR}\CreativeML Open RAIL-M License"
File "${EXISTING_INSTALLATION_DIR}\How to install and run.txt"
File "${EXISTING_INSTALLATION_DIR}\LICENSE"
File "${EXISTING_INSTALLATION_DIR}\Start Stable Diffusion UI.cmd"
File /r "${EXISTING_INSTALLATION_DIR}\installer_files"
File /r "${EXISTING_INSTALLATION_DIR}\profile"
File /r "${EXISTING_INSTALLATION_DIR}\sd-ui-files"
SetOutPath "$INSTDIR\installer_files"
File "cyborg_flower_girl.ico"

SetOutPath "$INSTDIR\scripts"
File "${EXISTING_INSTALLATION_DIR}\scripts\install_status.txt"
File "..\scripts\on_env_start.bat"
File "${EXISTING_INSTALLATION_DIR}\scripts\on_env_start.bat"
File "C:\windows\system32\curl.exe"
CreateDirectory "$INSTDIR\models"
File "${EXISTING_INSTALLATION_DIR}\scripts\config.yaml.sample"

CreateDirectory "$INSTDIR\models\stable-diffusion"
CreateDirectory "$INSTDIR\models\gfpgan"
CreateDirectory "$INSTDIR\models\realesrgan"
CreateDirectory "$INSTDIR\models\vae"

CreateDirectory "$INSTDIR\profile\.cache\huggingface\hub"
SetOutPath "$INSTDIR\profile\.cache\huggingface\hub"
File /r /x pytorch_model.bin "${EXISTING_INSTALLATION_DIR}\profile\.cache\huggingface\hub\models--openai--clip-vit-large-patch14"

CreateDirectory "$SMPROGRAMS\Easy Diffusion"
CreateShortCut "$SMPROGRAMS\Easy Diffusion\Easy Diffusion.lnk" "$INSTDIR\Start Stable Diffusion UI.cmd" "" "$INSTDIR\installer_files\cyborg_flower_girl.ico"

DetailPrint 'Downloading the Stable Diffusion 1.4 model...'
NScurl::http get "https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt" "$INSTDIR\models\stable-diffusion\sd-v1-4.ckpt" /CANCEL /INSIST /END
DetailPrint 'Downloading the Stable Diffusion 1.5 model...'
NScurl::http get "https://github.com/easydiffusion/sdkit-test-data/releases/download/assets/sd-v1-5.safetensors" "$INSTDIR\models\stable-diffusion\sd-v1-5.safetensors" /CANCEL /INSIST /END

DetailPrint 'Downloading the GFPGAN model...'
NScurl::http get "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" "$INSTDIR\models\gfpgan\GFPGANv1.4.pth" /CANCEL /INSIST /END
59  README.md

@@ -1,19 +1,23 @@
# Easy Diffusion 2.5
# Easy Diffusion 3.0
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your computer.

Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.

[Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | <sub>[](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
🔥🎉 **New!** Support for SDXL, ControlNet, multiple LoRA files, embeddings (and a lot more) have been added!

[Installation guide](#installation) | [Troubleshooting guide](https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting) | [User guide](https://github.com/easydiffusion/easydiffusion/wiki) | <sub>[](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>

---




# Installation
Click the download button for your operating system:

<p float="left">
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Windows.exe"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.41a/Easy-Diffusion-Mac.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-mac.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/latest/download/Easy-Diffusion-Linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/latest/download/Easy-Diffusion-Mac.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-mac.png" width="200" /></a>
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/latest/download/Easy-Diffusion-Windows.exe"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
</p>

**Hardware requirements:**

@@ -62,17 +66,19 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
- **UI Themes**: Customize the program to your liking.
- **Searchable models dropdown**: organize your models into sub-folders, and search through them in the UI.

### Image generation
- **Supports**: "*Text to Image*" and "*Image to Image*".
- **21 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `ddpm`, `deis`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
- **In-Painting**: Specify areas of your image to paint into.
### Powerful image generation
- **Supports**: "*Text to Image*", "*Image to Image*" and "*InPainting*"
- **ControlNet**: For advanced control over the image, e.g. by setting the pose or drawing the outline for the AI to fill in.
- **16 Samplers**: `PLMS`, `DDIM`, `DEIS`, `Heun`, `Euler`, `Euler Ancestral`, `DPM2`, `DPM2 Ancestral`, `LMS`, `DPM Solver`, `DPM++ 2s Ancestral`, `DPM++ 2m`, `DPM++ 2m SDE`, `DPM++ SDE`, `DDPM`, `UniPC`.
- **Stable Diffusion XL and 2.1**: Generate higher-quality images using the latest Stable Diffusion XL models.
- **Textual Inversion Embeddings**: For guiding the AI strongly towards a particular concept.
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
- **Face Correction (GFPGAN)**
- **Upscaling (RealESRGAN)**
- **Loopback**: Use the output image as the input image for the next img2img task.
- **Loopback**: Use the output image as the input image for the next image task.
- **Negative Prompt**: Specify aspects of the image to *remove*.
- **Attention/Emphasis**: () in the prompt increases the model's attention to enclosed words, and [] decreases it.
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`.
- **Attention/Emphasis**: `+` in the prompt increases the model's attention to enclosed words, and `-` decreases it. E.g. `apple++ falling from a tree`.
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `(red)2.4 (dragon)1.2`.
- **Prompt Matrix**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`.
- **Prompt Set**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut on the {moon,earth}`
- **1-click Upscale/Face Correction**: Upscale or correct an image after it has been generated.

@@ -82,10 +88,11 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.

### Advanced features
- **Custom Models**: Use your own `.ckpt` or `.safetensors` file, by placing it inside the `models/stable-diffusion` folder!
- **Stable Diffusion 2.1 support**
- **Stable Diffusion XL and 2.1 support**
- **Merge Models**
- **Use custom VAE models**
- **Use pre-trained Hypernetworks**
- **Textual Inversion Embeddings**
- **ControlNet**
- **Use custom GFPGAN models**
- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins), or write your own plugin to add features to the project!

@@ -97,24 +104,14 @@ Just delete the `EasyDiffusion` folder to uninstall all the downloaded packages.
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
- **Safetensors support**: Support loading models in the safetensor format, for improved safety.
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, modify packages, and edit the conda environment.

**(and a lot more)**

----

## Easy for new users:



## Powerful features for advanced users:



## Live Preview
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.


## Easy for new users, powerful features for advanced users:


## Task Queue


@@ -128,12 +125,6 @@ Please refer to our [guide](https://github.com/easydiffusion/easydiffusion/wiki/
# Bugs reports and code contributions welcome
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/easydiffusion/easydiffusion/issues).

We could really use help on these aspects (click to view tasks that need your help):
* [User Interface](https://github.com/users/cmdr2/projects/1/views/1)
* [Engine](https://github.com/users/cmdr2/projects/3/views/1)
* [Installer](https://github.com/users/cmdr2/projects/4/views/1)
* [Documentation](https://github.com/users/cmdr2/projects/5/views/1)

If you have any code contributions in mind, please feel free to say Hi to us on the [discord server](https://discord.com/invite/u9yhsFmEkB). We use the Discord server for development-related discussions, and for helping users.

# Credits
94  build.bat

@@ -1,48 +1,78 @@
@echo off
setlocal enabledelayedexpansion

@echo "Hi there, what you are running is meant for the developers of this project, not for users." & echo.
@echo "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file."
@echo "If you only want to use Easy Diffusion, you've downloaded the wrong file."
@echo "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation" & echo.
@echo "If you are actually a developer of this project, please type Y and press enter" & echo.

set /p answer=Are you a developer of this project (Y/N)?
if /i "%answer:~,1%" NEQ "Y" exit /b

mkdir dist\win\stable-diffusion-ui\scripts
@REM mkdir dist\linux-mac\stable-diffusion-ui\scripts
@rem verify dependencies
call makensis /VERSION >.tmp1 2>.tmp2
if "!ERRORLEVEL!" NEQ "0" (
echo makensis.exe not found! Download it from https://sourceforge.net/projects/nsisbi/files/ and set it on the PATH variable.
pause
exit
)

@rem copy the installer files for Windows
set /p OUT_DIR=Output folder path (will create the installer files inside this, e.g. F:\EasyDiffusion):

copy scripts\on_env_start.bat dist\win\stable-diffusion-ui\scripts\
copy scripts\bootstrap.bat dist\win\stable-diffusion-ui\scripts\
copy scripts\config.yaml.sample dist\win\stable-diffusion-ui\scripts\config.yaml
copy "scripts\Start Stable Diffusion UI.cmd" dist\win\stable-diffusion-ui\
copy LICENSE dist\win\stable-diffusion-ui\
copy "CreativeML Open RAIL-M License" dist\win\stable-diffusion-ui\
copy "How to install and run.txt" dist\win\stable-diffusion-ui\
echo. > dist\win\stable-diffusion-ui\scripts\install_status.txt
mkdir "%OUT_DIR%\scripts"
mkdir "%OUT_DIR%\installer_files"

@rem copy the installer files for Linux and Mac
set BASE_DIR=%cd%

@REM copy scripts\on_env_start.sh dist\linux-mac\stable-diffusion-ui\scripts\
@REM copy scripts\bootstrap.sh dist\linux-mac\stable-diffusion-ui\scripts\
@REM copy scripts\start.sh dist\linux-mac\stable-diffusion-ui\
@REM copy LICENSE dist\linux-mac\stable-diffusion-ui\
@REM copy "CreativeML Open RAIL-M License" dist\linux-mac\stable-diffusion-ui\
@REM copy "How to install and run.txt" dist\linux-mac\stable-diffusion-ui\
@REM echo. > dist\linux-mac\stable-diffusion-ui\scripts\install_status.txt
@rem STEP 1: copy the installer files for Windows

@rem make the zip

cd dist\win
call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-windows.zip
cd ..\..

@REM cd dist\linux-mac
@REM call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-linux.zip
@REM call powershell Compress-Archive -Path stable-diffusion-ui -DestinationPath ..\stable-diffusion-ui-mac.zip
@REM cd ..\..

echo "Build ready. Upload the zip files inside the 'dist' folder."
copy "%BASE_DIR%\scripts\on_env_start.bat" "%OUT_DIR%\scripts\"
copy "%BASE_DIR%\scripts\config.yaml.sample" "%OUT_DIR%\scripts\config.yaml.sample"
copy "%BASE_DIR%\scripts\Start Stable Diffusion UI.cmd" "%OUT_DIR%\"
copy "%BASE_DIR%\LICENSE" "%OUT_DIR%\"
copy "%BASE_DIR%\CreativeML Open RAIL-M License" "%OUT_DIR%\"
copy "%BASE_DIR%\How to install and run.txt" "%OUT_DIR%\"
copy "%BASE_DIR%\NSIS\cyborg_flower_girl.ico" "%OUT_DIR%\installer_files\"
copy "%BASE_DIR%\NSIS\cyborg_flower_girl.bmp" "%OUT_DIR%\installer_files\"
echo. > "%OUT_DIR%\scripts\install_status.txt"

echo ----
echo Basic files ready. Verify the files in %OUT_DIR%, then press Enter to initialize the environment, or close to quit.
echo ----
pause

@rem STEP 2: Initialize the environment with git, python and conda

cd /d "%OUT_DIR%\"
call "%BASE_DIR%\scripts\bootstrap.bat"

echo ----
echo Environment ready. Verify the environment, then press Enter to download the necessary packages, or close to quit.
echo ----
pause

@rem STEP 3: Download the packages and create a working installation

cd /d "%OUT_DIR%\"
start "Install Easy Diffusion" /D "%OUT_DIR%" "Start Stable Diffusion UI.cmd"

echo ----
echo Installation in progress (in a new window). Once complete, verify the installation, then press Enter to create an installer from these files, or close to quit.
echo ----
pause

@rem STEP 4: Build the installer from a working installation

cd /d "%OUT_DIR%\"

echo ^^!define EXISTING_INSTALLATION_DIR "%OUT_DIR%" > nsisconf.nsh
call makensis /NOCD /V4 "%BASE_DIR%\NSIS\sdui.nsi"

echo ----
if "!ERRORLEVEL!" EQU "0" (
echo Installer built successfully at %OUT_DIR%
) else (
echo Installer failed to build at %OUT_DIR%
)
echo ----
pause
46  build.sh

@@ -1,7 +1,7 @@
#!/bin/bash

printf "Hi there, what you are running is meant for the developers of this project, not for users.\n\n"
printf "If you only want to use the Stable Diffusion UI, you've downloaded the wrong file.\n"
printf "If you only want to use Easy Diffusion, you've downloaded the wrong file.\n"
printf "Please download and follow the instructions at https://github.com/easydiffusion/easydiffusion#installation \n\n"
printf "If you are actually a developer of this project, please type Y and press enter\n\n"

@@ -11,40 +11,30 @@ case $yn in
* ) exit;;
esac

# mkdir -p dist/win/stable-diffusion-ui/scripts
mkdir -p dist/linux-mac/stable-diffusion-ui/scripts

# copy the installer files for Windows

# cp scripts/on_env_start.bat dist/win/stable-diffusion-ui/scripts/
# cp scripts/bootstrap.bat dist/win/stable-diffusion-ui/scripts/
# cp "scripts/Start Stable Diffusion UI.cmd" dist/win/stable-diffusion-ui/
# cp LICENSE dist/win/stable-diffusion-ui/
# cp "CreativeML Open RAIL-M License" dist/win/stable-diffusion-ui/
# cp "How to install and run.txt" dist/win/stable-diffusion-ui/
# echo "" > dist/win/stable-diffusion-ui/scripts/install_status.txt
mkdir -p dist/linux-mac/easy-diffusion/scripts

# copy the installer files for Linux and Mac

cp scripts/on_env_start.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/bootstrap.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/functions.sh dist/linux-mac/stable-diffusion-ui/scripts/
cp scripts/config.yaml.sample dist/linux-mac/stable-diffusion-ui/scripts/config.yaml
cp scripts/start.sh dist/linux-mac/stable-diffusion-ui/
cp LICENSE dist/linux-mac/stable-diffusion-ui/
cp "CreativeML Open RAIL-M License" dist/linux-mac/stable-diffusion-ui/
cp "How to install and run.txt" dist/linux-mac/stable-diffusion-ui/
echo "" > dist/linux-mac/stable-diffusion-ui/scripts/install_status.txt
cp scripts/on_env_start.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/bootstrap.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/functions.sh dist/linux-mac/easy-diffusion/scripts/
cp scripts/config.yaml.sample dist/linux-mac/easy-diffusion/scripts/config.yaml.sample
cp scripts/start.sh dist/linux-mac/easy-diffusion/
cp LICENSE dist/linux-mac/easy-diffusion/
cp "CreativeML Open RAIL-M License" dist/linux-mac/easy-diffusion/
cp "How to install and run.txt" dist/linux-mac/easy-diffusion/
echo "" > dist/linux-mac/easy-diffusion/scripts/install_status.txt

# set the permissions
chmod u+x dist/linux-mac/easy-diffusion/scripts/on_env_start.sh
chmod u+x dist/linux-mac/easy-diffusion/scripts/bootstrap.sh
chmod u+x dist/linux-mac/easy-diffusion/start.sh

# make the zip

# cd dist/win
# zip -r ../stable-diffusion-ui-windows.zip stable-diffusion-ui
# cd ../..

cd dist/linux-mac
zip -r ../stable-diffusion-ui-linux.zip stable-diffusion-ui
zip -r ../stable-diffusion-ui-mac.zip stable-diffusion-ui
zip -r ../Easy-Diffusion-Linux.zip easy-diffusion
zip -r ../Easy-Diffusion-Mac.zip easy-diffusion
cd ../..

echo "Build ready. Upload the zip files inside the 'dist' folder."
@@ -3,7 +3,8 @@
cd /d %~dp0
echo Install dir: %~dp0

set PATH=C:\Windows\System32;%PATH%
set PATH=C:\Windows\System32;C:\Windows\System32\wbem;%PATH%
set PYTHONHOME=

if exist "on_sd_start.bat" (
echo ================================================================================
@@ -14,7 +15,7 @@ if exist "on_sd_start.bat" (
echo download. This will not work.
echo.
echo Recommended: Please close this window and download the installer from
echo https://stable-diffusion-ui.github.io/docs/installation/
echo https://easydiffusion.github.io/docs/installation/
echo.
echo ================================================================================
echo.
@@ -38,6 +39,7 @@ call where conda
call conda --version
echo .
echo COMSPEC=%COMSPEC%
wmic path win32_VideoController get name,AdapterRAM,DriverDate,DriverVersion

@rem Download the rest of the installer and UI
call scripts\on_env_start.bat
@@ -14,6 +14,8 @@ set LEGACY_INSTALL_ENV_DIR=%cd%\installer
set MICROMAMBA_DOWNLOAD_URL=https://github.com/easydiffusion/easydiffusion/releases/download/v1.1/micromamba.exe
set umamba_exists=F

set PYTHONHOME=

set OLD_APPDATA=%APPDATA%
set OLD_USERPROFILE=%USERPROFILE%
set APPDATA=%cd%\installer_files\appdata
@@ -22,15 +24,12 @@ set USERPROFILE=%cd%\profile
@rem figure out whether git and conda needs to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

set PACKAGES_TO_INSTALL=
set PACKAGES_TO_INSTALL=git python=3.8.5

if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda python=3.8.5
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
)

call git --version >.tmp1 2>.tmp2
if "!ERRORLEVEL!" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "!ERRORLEVEL!" EQU "0" set umamba_exists=T
File diff suppressed because it is too large.
@@ -6,6 +6,7 @@ import shutil
# The config file is in the same directory as this script
config_directory = os.path.dirname(__file__)
config_yaml = os.path.join(config_directory, "..", "config.yaml")
config_yaml = os.path.abspath(config_yaml)
config_json = os.path.join(config_directory, "config.json")

parser = argparse.ArgumentParser(description='Get values from config file')
@@ -1,6 +1,6 @@
@echo off

@echo. & echo "Easy Diffusion - v2" & echo.
@echo. & echo "Easy Diffusion - v3" & echo.

set PATH=C:\Windows\System32;%PATH%

@@ -46,6 +46,8 @@ if "%update_branch%"=="" (

@cd sd-ui-files

@call git add -A .
@call git stash
@call git reset --hard
@call git -c advice.detachedHead=false checkout "%update_branch%"
@call git pull
@@ -69,6 +71,7 @@ if "%update_branch%"=="" (
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
@copy sd-ui-files\scripts\config.yaml.sample scripts\ /Y
@copy sd-ui-files\scripts\webui_console.py scripts\ /Y
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
@@ -2,7 +2,7 @@

source ./scripts/functions.sh

printf "\n\nEasy Diffusion\n\n"
printf "\n\nEasy Diffusion - v3\n\n"

export PYTHONNOUSERSITE=y

@@ -29,6 +29,8 @@ if [ -f "scripts/install_status.txt" ] && [ `grep -c sd_ui_git_cloned scripts/in

cd sd-ui-files

git add -A .
git stash
git reset --hard
git -c advice.detachedHead=false checkout "$update_branch"
git pull
@@ -52,6 +54,7 @@ cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/
cp sd-ui-files/scripts/get_config.py scripts/
cp sd-ui-files/scripts/config.yaml.sample scripts/
cp sd-ui-files/scripts/webui_console.py scripts/
cp sd-ui-files/scripts/start.sh .
cp sd-ui-files/scripts/developer_console.sh .
cp sd-ui-files/scripts/functions.sh scripts/
@ -7,6 +7,7 @@
|
||||
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
|
||||
@copy sd-ui-files\scripts\get_config.py scripts\ /Y
|
||||
@copy sd-ui-files\scripts\config.yaml.sample scripts\ /Y
|
||||
@copy sd-ui-files\scripts\webui_console.py scripts\ /Y
|
||||
|
||||
if exist "%cd%\profile" (
|
||||
set HF_HOME=%cd%\profile\.cache\huggingface
|
||||
@ -34,6 +35,7 @@ call conda activate
|
||||
|
||||
@REM remove the old version of the dev console script, if it's still present
|
||||
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
|
||||
if exist "ui\plugins\ui\merge.plugin.js" del "ui\plugins\ui\merge.plugin.js"
|
||||
|
||||
@rem create the stable-diffusion folder, to work with legacy installations
|
||||
if not exist "stable-diffusion" mkdir stable-diffusion
|
||||
@ -52,73 +54,24 @@ if exist ldm rename ldm ldm-old
|
||||
if not exist "%INSTALL_ENV_DIR%\DLLs\libssl-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libssl-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
|
||||
if not exist "%INSTALL_ENV_DIR%\DLLs\libcrypto-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libcrypto-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
|
||||
|
||||
cd ..
|
||||
|
||||
@rem set any overrides
|
||||
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
|
||||
|
||||
@rem install or upgrade the required modules
|
||||
set PATH=C:\Windows\System32;%PATH%
|
||||
|
||||
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||
set PYTHONNOUSERSITE=1
|
||||
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||
|
||||
@rem Download the required packages
|
||||
call python ..\scripts\check_modules.py
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
call WHERE uvicorn > .tmp
|
||||
@>nul findstr /m "uvicorn" .tmp
|
||||
@if "%ERRORLEVEL%" NEQ "0" (
|
||||
@echo. & echo "UI packages not found! Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/easydiffusion/easydiffusion/issues" & echo "Thanks!" & echo.
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
||||
@if "%ERRORLEVEL%" NEQ "0" (
|
||||
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
||||
)
|
||||
|
||||
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
|
||||
@if "%ERRORLEVEL%" NEQ "0" (
|
||||
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
||||
@echo sd_install_complete >> ..\scripts\install_status.txt
|
||||
)
|
||||
|
||||
@echo. & echo "Easy Diffusion installation complete! Starting the server!" & echo.
|
||||
|
||||
@set SD_DIR=%cd%
|
||||
|
||||
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||
echo PYTHONPATH=%PYTHONPATH%
|
||||
|
||||
@rem Download the required packages
|
||||
call where python
|
||||
call python --version
|
||||
|
||||
@cd ..
|
||||
@set SD_UI_PATH=%cd%\ui
|
||||
call python scripts\check_modules.py --launch-uvicorn
|
||||
pause
|
||||
exit /b
|
||||
|
||||
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=9000 net listen_port`) DO (
|
||||
@SET ED_BIND_PORT=%%F
|
||||
)
|
||||
|
||||
@FOR /F "tokens=* USEBACKQ" %%F IN (`python scripts\get_config.py --default=False net listen_to_network`) DO (
|
||||
if "%%F" EQU "True" (
|
||||
@FOR /F "tokens=* USEBACKQ" %%G IN (`python scripts\get_config.py --default=0.0.0.0 net bind_ip`) DO (
|
||||
@SET ED_BIND_IP=%%G
|
||||
)
|
||||
) else (
|
||||
@SET ED_BIND_IP=127.0.0.1
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@cd stable-diffusion
|
||||
|
||||
@rem set any overrides
|
||||
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
|
||||
|
||||
@python -m uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %ED_BIND_PORT% --host %ED_BIND_IP% --log-level error
|
||||
|
||||
|
||||
@pause
|
||||
|
@ -6,20 +6,29 @@ cp sd-ui-files/scripts/bootstrap.sh scripts/
|
||||
cp sd-ui-files/scripts/check_modules.py scripts/
|
||||
cp sd-ui-files/scripts/get_config.py scripts/
|
||||
cp sd-ui-files/scripts/config.yaml.sample scripts/
|
||||
cp sd-ui-files/scripts/webui_console.py scripts/
|
||||
|
||||
|
||||
source ./scripts/functions.sh
|
||||
|
||||
# activate the installer env
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
export CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # avoids the 'shell not initialized' error
|
||||
|
||||
conda activate || fail "Failed to activate conda"
|
||||
|
||||
# hack to fix conda 4.14 on older installations
|
||||
cp $CONDA_BASEPATH/condabin/conda $CONDA_BASEPATH/bin/conda
|
||||
|
||||
# remove the old version of the dev console script, if it's still present
|
||||
if [ -e "open_dev_console.sh" ]; then
|
||||
rm "open_dev_console.sh"
|
||||
fi
|
||||
|
||||
if [ -e "ui/plugins/ui/merge.plugin.js" ]; then
|
||||
rm "ui/plugins/ui/merge.plugin.js"
|
||||
fi
|
||||
|
||||
# set the correct installer path (current vs legacy)
|
||||
if [ -e "installer_files/env" ]; then
|
||||
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
@ -41,45 +50,8 @@ fi
|
||||
if [ -e "src" ]; then mv src src-old; fi
|
||||
if [ -e "ldm" ]; then mv ldm ldm-old; fi
|
||||
|
||||
# Download the required packages
|
||||
if ! python ../scripts/check_modules.py; then
|
||||
read -p "Press any key to continue"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v uvicorn &> /dev/null; then
|
||||
fail "UI packages not found!"
|
||||
fi
|
||||
|
||||
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
|
||||
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
||||
echo sd_install_complete >> ../scripts/install_status.txt
|
||||
fi
|
||||
|
||||
printf "\n\nEasy Diffusion installation complete, starting the server!\n\n"
|
||||
|
||||
SD_PATH=`pwd`
|
||||
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||
echo "PYTHONPATH=$PYTHONPATH"
|
||||
|
||||
which python
|
||||
python --version
|
||||
|
||||
cd ..
|
||||
export SD_UI_PATH=`pwd`/ui
|
||||
export ED_BIND_PORT="$( python scripts/get_config.py --default=9000 net listen_port )"
|
||||
case "$( python scripts/get_config.py --default=False net listen_to_network )" in
|
||||
"True")
|
||||
export ED_BIND_IP=$( python scripts/get_config.py --default=0.0.0.0 net bind_ip)
|
||||
;;
|
||||
"False")
|
||||
export ED_BIND_IP=127.0.0.1
|
||||
;;
|
||||
esac
|
||||
cd stable-diffusion
|
||||
|
||||
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port "$ED_BIND_PORT" --host "$ED_BIND_IP" --log-level error
|
||||
# Download the required packages
|
||||
python scripts/check_modules.py --launch-uvicorn
|
||||
|
||||
read -p "Press any key to continue"
|
||||
|
@ -11,7 +11,7 @@ if [ -f "on_sd_start.bat" ]; then
|
||||
echo download. This will not work.
|
||||
echo
|
||||
echo Recommended: Please close this window and download the installer from
|
||||
echo https://stable-diffusion-ui.github.io/docs/installation/
|
||||
echo https://easydiffusion.github.io/docs/installation/
|
||||
echo
|
||||
echo ================================================================================
|
||||
echo
|
||||
@ -19,6 +19,7 @@ if [ -f "on_sd_start.bat" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
unset PYTHONHOME
|
||||
|
||||
# set legacy installer's PATH, if it exists
|
||||
if [ -e "installer" ]; then export PATH="$(pwd)/installer/bin:$PATH"; fi
|
||||
|
scripts/webui_console.py (new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
|
||||
|
||||
def configure_env(dir):
|
||||
env_entries = {
|
||||
"PATH": [
|
||||
f"{dir}",
|
||||
f"{dir}/bin",
|
||||
f"{dir}/Library/bin",
|
||||
f"{dir}/Scripts",
|
||||
f"{dir}/usr/bin",
|
||||
],
|
||||
"PYTHONPATH": [
|
||||
f"{dir}",
|
||||
f"{dir}/lib/site-packages",
|
||||
f"{dir}/lib/python3.10/site-packages",
|
||||
],
|
||||
"PYTHONHOME": [],
|
||||
"PY_LIBS": [
|
||||
f"{dir}/Scripts/Lib",
|
||||
f"{dir}/Scripts/Lib/site-packages",
|
||||
f"{dir}/lib",
|
||||
f"{dir}/lib/python3.10/site-packages",
|
||||
],
|
||||
"PY_PIP": [f"{dir}/Scripts", f"{dir}/bin"],
|
||||
}
|
||||
|
||||
if platform.system() == "Windows":
|
||||
env_entries["PATH"].append("C:/Windows/System32")
|
||||
env_entries["PATH"].append("C:/Windows/System32/wbem")
|
||||
env_entries["PYTHONNOUSERSITE"] = ["1"]
|
||||
env_entries["PYTHON"] = [f"{dir}/python"]
|
||||
env_entries["GIT"] = [f"{dir}/Library/bin/git"]
|
||||
else:
|
||||
env_entries["PATH"].append("/bin")
|
||||
env_entries["PATH"].append("/usr/bin")
|
||||
env_entries["PATH"].append("/usr/sbin")
|
||||
env_entries["PYTHONNOUSERSITE"] = ["y"]
|
||||
env_entries["PYTHON"] = [f"{dir}/bin/python"]
|
||||
env_entries["GIT"] = [f"{dir}/bin/git"]
|
||||
|
||||
env = {}
|
||||
for key, paths in env_entries.items():
|
||||
paths = [p.replace("/", os.path.sep) for p in paths]
|
||||
paths = os.pathsep.join(paths)
|
||||
|
||||
os.environ[key] = paths
|
||||
|
||||
return env
|
||||
|
||||
|
||||
def print_env_info():
|
||||
which_cmd = "where" if platform.system() == "Windows" else "which"
|
||||
|
||||
python = "python"
|
||||
|
||||
def locate_python():
|
||||
nonlocal python
|
||||
|
||||
python = subprocess.getoutput(f"{which_cmd} python")
|
||||
python = python.split("\n")
|
||||
python = python[0].strip()
|
||||
print("python: ", python)
|
||||
|
||||
locate_python()
|
||||
|
||||
def run(cmd):
|
||||
with subprocess.Popen(cmd) as p:
|
||||
p.wait()
|
||||
|
||||
run([which_cmd, "git"])
|
||||
run(["git", "--version"])
|
||||
run([which_cmd, "python"])
|
||||
run([python, "--version"])
|
||||
|
||||
print(f"PATH={os.environ['PATH']}")
|
||||
|
||||
if platform.system() == "Windows":
|
||||
print(f"COMSPEC={os.environ['COMSPEC']}")
|
||||
print("")
|
||||
run("wmic path win32_VideoController get name,AdapterRAM,DriverDate,DriverVersion".split(" "))
|
||||
|
||||
print(f"PYTHONPATH={os.environ['PYTHONPATH']}")
|
||||
print("")
|
||||
|
||||
|
||||
def open_dev_shell():
|
||||
if platform.system() == "Windows":
|
||||
subprocess.Popen("cmd").communicate()
|
||||
else:
|
||||
subprocess.Popen("bash").communicate()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
env_dir = os.path.abspath(os.path.join("webui", "system"))
|
||||
|
||||
configure_env(env_dir)
|
||||
print_env_info()
|
||||
open_dev_shell()
|
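A rough usage sketch for the console helper above (assuming it is launched from the Easy Diffusion install root, since __main__ hard-codes the webui/system path):

    # the updated on_env_start scripts copy this file into scripts/, so from the install root:
    #   python scripts/webui_console.py
    # sets PATH/PYTHONPATH to the webui/system environment, prints git/python info, then opens a shell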
@ -11,7 +11,7 @@ from ruamel.yaml import YAML
|
||||
import urllib
|
||||
import warnings
|
||||
|
||||
from easydiffusion import task_manager
|
||||
from easydiffusion import task_manager, backend_manager
|
||||
from easydiffusion.utils import log
|
||||
from rich.logging import RichHandler
|
||||
from rich.console import Console
|
||||
@ -36,10 +36,10 @@ ROOT_DIR = os.path.abspath(os.path.join(SD_DIR, ".."))
|
||||
|
||||
SD_UI_DIR = os.getenv("SD_UI_PATH", None)
|
||||
|
||||
CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "..", "scripts"))
|
||||
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "models"))
|
||||
CONFIG_DIR = os.path.abspath(os.path.join(ROOT_DIR, "scripts"))
|
||||
BUCKET_DIR = os.path.abspath(os.path.join(ROOT_DIR, "bucket"))
|
||||
|
||||
USER_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "plugins"))
|
||||
USER_PLUGINS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "plugins"))
|
||||
CORE_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "plugins"))
|
||||
|
||||
USER_UI_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "ui")
|
||||
@ -60,6 +60,7 @@ APP_CONFIG_DEFAULTS = {
|
||||
"ui": {
|
||||
"open_browser_on_start": True,
|
||||
},
|
||||
"backend": "ed_diffusers",
|
||||
}
|
||||
|
||||
IMAGE_EXTENSIONS = [
|
||||
@ -76,7 +77,7 @@ IMAGE_EXTENSIONS = [
|
||||
".avif",
|
||||
".svg",
|
||||
]
|
||||
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
|
||||
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "modifiers"))
|
||||
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [
|
||||
".portrait",
|
||||
"_portrait",
|
||||
@ -90,14 +91,25 @@ CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [
|
||||
"-landscape",
|
||||
]
|
||||
|
||||
MODELS_DIR = os.path.abspath(os.path.join(ROOT_DIR, "models"))
|
||||
|
||||
|
||||
def init():
|
||||
global MODELS_DIR
|
||||
|
||||
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
|
||||
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
|
||||
|
||||
# https://pytorch.org/docs/stable/storage.html
|
||||
warnings.filterwarnings("ignore", category=UserWarning, message="TypedStorage is deprecated")
|
||||
|
||||
config = getConfig()
|
||||
config_models_dir = config.get("models_dir", None)
|
||||
if config_models_dir is not None and config_models_dir != "":
|
||||
MODELS_DIR = config_models_dir
|
||||
|
||||
backend_manager.start_backend()
|
||||
|
||||
|
||||
def init_render_threads():
|
||||
load_server_plugins()
|
||||
@ -107,6 +119,7 @@ def init_render_threads():
|
||||
|
||||
def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||
config_yaml_path = os.path.join(CONFIG_DIR, "..", "config.yaml")
|
||||
config_yaml_path = os.path.abspath(config_yaml_path)
|
||||
|
||||
# migrate the old config yaml location
|
||||
config_legacy_yaml = os.path.join(CONFIG_DIR, "config.yaml")
|
||||
@ -114,9 +127,9 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||
shutil.move(config_legacy_yaml, config_yaml_path)
|
||||
|
||||
def set_config_on_startup(config: dict):
|
||||
if getConfig.__test_diffusers_on_startup is None:
|
||||
getConfig.__test_diffusers_on_startup = config.get("test_diffusers", False)
|
||||
config["config_on_startup"] = {"test_diffusers": getConfig.__test_diffusers_on_startup}
|
||||
if getConfig.__use_backend_on_startup is None:
|
||||
getConfig.__use_backend_on_startup = config.get("backend", "ed_diffusers")
|
||||
config["config_on_startup"] = {"backend": getConfig.__use_backend_on_startup}
|
||||
|
||||
if os.path.isfile(config_yaml_path):
|
||||
try:
|
||||
@ -134,6 +147,15 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||
else:
|
||||
config["net"]["listen_to_network"] = True
|
||||
|
||||
if "backend" not in config:
|
||||
if "use_v3_engine" in config:
|
||||
config["backend"] = "ed_diffusers" if config["use_v3_engine"] else "ed_classic"
|
||||
else:
|
||||
config["backend"] = "ed_diffusers"
|
||||
# this default will need to be smarter when WebUI becomes the main backend, but needs to maintain backwards
|
||||
# compatibility with existing ED 3.0 installations that haven't opted into the WebUI backend, and haven't
|
||||
# set a "use_v3_engine" flag in their config
|
||||
|
||||
set_config_on_startup(config)
|
||||
|
||||
return config
|
||||
@ -164,12 +186,15 @@ def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||
return default_val
|
||||
|
||||
|
||||
getConfig.__test_diffusers_on_startup = None
|
||||
getConfig.__use_backend_on_startup = None
|
||||
|
||||
|
||||
def setConfig(config):
|
||||
global MODELS_DIR
|
||||
|
||||
try: # config.yaml
|
||||
config_yaml_path = os.path.join(CONFIG_DIR, "..", "config.yaml")
|
||||
config_yaml_path = os.path.abspath(config_yaml_path)
|
||||
yaml = YAML()
|
||||
|
||||
if not hasattr(config, "_yaml_comment"):
|
||||
@ -203,6 +228,9 @@ def setConfig(config):
|
||||
except:
|
||||
log.error(traceback.format_exc())
|
||||
|
||||
if config.get("models_dir"):
|
||||
MODELS_DIR = config["models_dir"]
|
||||
|
||||
|
||||
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
|
||||
config = getConfig()
|
||||
@ -291,28 +319,43 @@ def getIPConfig():
|
||||
|
||||
|
||||
def open_browser():
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
config = getConfig()
|
||||
ui = config.get("ui", {})
|
||||
net = config.get("net", {})
|
||||
port = net.get("listen_port", 9000)
|
||||
|
||||
if ui.get("open_browser_on_start", True):
|
||||
import webbrowser
|
||||
if backend.is_installed():
|
||||
if ui.get("open_browser_on_start", True):
|
||||
import webbrowser
|
||||
|
||||
log.info("Opening browser..")
|
||||
log.info("Opening browser..")
|
||||
|
||||
webbrowser.open(f"http://localhost:{port}")
|
||||
webbrowser.open(f"http://localhost:{port}")
|
||||
|
||||
Console().print(
|
||||
Panel(
|
||||
"\n"
|
||||
+ "[white]Easy Diffusion is ready to serve requests.\n\n"
|
||||
+ "A new browser tab should have been opened by now.\n"
|
||||
+ f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
|
||||
title="Easy Diffusion is ready",
|
||||
style="bold yellow on blue",
|
||||
Console().print(
|
||||
Panel(
|
||||
"\n"
|
||||
+ "[white]Easy Diffusion is ready to serve requests.\n\n"
|
||||
+ "A new browser tab should have been opened by now.\n"
|
||||
+ f"If not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
|
||||
title="Easy Diffusion is ready",
|
||||
style="bold yellow on blue",
|
||||
)
|
||||
)
|
||||
else:
|
||||
backend_name = config["backend"]
|
||||
Console().print(
|
||||
Panel(
|
||||
"\n"
|
||||
+ f"[white]Backend: {backend_name} is still installing..\n\n"
|
||||
+ "A new browser tab will open automatically after it finishes.\n"
|
||||
+ f"If it does not, please open your web browser and navigate to [bold yellow underline]http://localhost:{port}/\n",
|
||||
title=f"Backend engine is installing",
|
||||
style="bold yellow on blue",
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fail_and_die(fail_type: str, data: str):
|
||||
|
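As a rough illustration of the backend migration logic in getConfig() above (hypothetical config contents, not taken from the diff):

    # {"use_v3_engine": True}    -> config["backend"] == "ed_diffusers"
    # {"use_v3_engine": False}   -> config["backend"] == "ed_classic"
    # {}  (neither key present)  -> config["backend"] == "ed_diffusers"  (the default)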
ui/easydiffusion/backend_manager.py (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
import os
|
||||
import ast
|
||||
import sys
|
||||
import importlib.util
|
||||
import traceback
|
||||
|
||||
from easydiffusion.utils import log
|
||||
|
||||
backend = None
|
||||
curr_backend_name = None
|
||||
|
||||
|
||||
def is_valid_backend(file_path):
|
||||
with open(file_path, "r", encoding="utf-8") as file:
|
||||
node = ast.parse(file.read())
|
||||
|
||||
# Check for presence of a dictionary named 'ed_info'
|
||||
for item in node.body:
|
||||
if isinstance(item, ast.Assign):
|
||||
for target in item.targets:
|
||||
if isinstance(target, ast.Name) and target.id == "ed_info":
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def find_valid_backends(root_dir) -> dict:
|
||||
backends_path = os.path.join(root_dir, "backends")
|
||||
valid_backends = {}
|
||||
|
||||
if not os.path.exists(backends_path):
|
||||
return valid_backends
|
||||
|
||||
for item in os.listdir(backends_path):
|
||||
item_path = os.path.join(backends_path, item)
|
||||
|
||||
if os.path.isdir(item_path):
|
||||
init_file = os.path.join(item_path, "__init__.py")
|
||||
if os.path.exists(init_file) and is_valid_backend(init_file):
|
||||
valid_backends[item] = item_path
|
||||
elif item.endswith(".py"):
|
||||
if is_valid_backend(item_path):
|
||||
backend_name = os.path.splitext(item)[0] # strip the .py extension
|
||||
valid_backends[backend_name] = item_path
|
||||
|
||||
return valid_backends
|
||||
|
||||
|
||||
def load_backend_module(backend_name, backend_dict):
|
||||
if backend_name not in backend_dict:
|
||||
raise ValueError(f"Backend '{backend_name}' not found.")
|
||||
|
||||
module_path = backend_dict[backend_name]
|
||||
|
||||
mod_dir = os.path.dirname(module_path)
|
||||
|
||||
sys.path.insert(0, mod_dir)
|
||||
|
||||
# If it's a package (directory), add its parent directory to sys.path
|
||||
if os.path.isdir(module_path):
|
||||
module_path = os.path.join(module_path, "__init__.py")
|
||||
|
||||
spec = importlib.util.spec_from_file_location(backend_name, module_path)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
if mod_dir in sys.path:
|
||||
sys.path.remove(mod_dir)
|
||||
|
||||
log.info(f"Loaded backend: {module}")
|
||||
|
||||
return module
|
||||
|
||||
|
||||
def start_backend():
|
||||
global backend, curr_backend_name
|
||||
|
||||
from easydiffusion.app import getConfig, ROOT_DIR
|
||||
|
||||
curr_dir = os.path.dirname(__file__)
|
||||
|
||||
backends = find_valid_backends(curr_dir)
|
||||
plugin_backends = find_valid_backends(ROOT_DIR)
|
||||
backends.update(plugin_backends)
|
||||
|
||||
config = getConfig()
|
||||
backend_name = config["backend"]
|
||||
|
||||
if backend_name not in backends:
|
||||
raise RuntimeError(
|
||||
f"Couldn't find the backend configured in config.yaml: {backend_name}. Please check the name!"
|
||||
)
|
||||
|
||||
if backend is not None and backend_name != curr_backend_name:
|
||||
try:
|
||||
backend.stop_backend()
|
||||
except:
|
||||
log.exception(traceback.format_exc())
|
||||
|
||||
log.info(f"Loading backend: {backend_name}")
|
||||
backend = load_backend_module(backend_name, backends)
|
||||
|
||||
try:
|
||||
backend.start_backend()
|
||||
except:
|
||||
log.exception(traceback.format_exc())
|
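For context, find_valid_backends() accepts any module or package inside a "backends" folder whose top level assigns an ed_info dictionary. A minimal sketch of a plugin backend it would load (hypothetical file name and contents, using the lifecycle hooks called above):

    # hypothetical <ED root>/backends/my_backend.py -- minimal stub accepted by find_valid_backends()
    ed_info = {
        "name": "Example backend",
        "version": (0, 1, 0),
        "type": "backend",
    }

    def install_backend():
        pass  # prepare whatever the backend needs

    def start_backend():
        print("Example backend started")

    def stop_backend():
        pass

    def is_installed():
        return True  # app.open_browser() uses this to decide which banner to show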
ui/easydiffusion/backends/ed_classic.py (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
from sdkit_common import (
|
||||
start_backend,
|
||||
stop_backend,
|
||||
install_backend,
|
||||
uninstall_backend,
|
||||
is_installed,
|
||||
create_sdkit_context,
|
||||
ping,
|
||||
load_model,
|
||||
unload_model,
|
||||
set_options,
|
||||
generate_images,
|
||||
filter_images,
|
||||
get_url,
|
||||
stop_rendering,
|
||||
refresh_models,
|
||||
list_controlnet_filters,
|
||||
)
|
||||
|
||||
ed_info = {
|
||||
"name": "Classic backend for Easy Diffusion v2",
|
||||
"version": (1, 0, 0),
|
||||
"type": "backend",
|
||||
}
|
||||
|
||||
|
||||
def create_context():
|
||||
return create_sdkit_context(use_diffusers=False)
|
ui/easydiffusion/backends/ed_diffusers.py (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
from sdkit_common import (
|
||||
start_backend,
|
||||
stop_backend,
|
||||
install_backend,
|
||||
uninstall_backend,
|
||||
is_installed,
|
||||
create_sdkit_context,
|
||||
ping,
|
||||
load_model,
|
||||
unload_model,
|
||||
set_options,
|
||||
generate_images,
|
||||
filter_images,
|
||||
get_url,
|
||||
stop_rendering,
|
||||
refresh_models,
|
||||
list_controlnet_filters,
|
||||
)
|
||||
|
||||
ed_info = {
|
||||
"name": "Diffusers Backend for Easy Diffusion v3",
|
||||
"version": (1, 0, 0),
|
||||
"type": "backend",
|
||||
}
|
||||
|
||||
|
||||
def create_context():
|
||||
return create_sdkit_context(use_diffusers=True)
|
ui/easydiffusion/backends/sdkit_common.py (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
from sdkit import Context
|
||||
|
||||
from easydiffusion.types import UserInitiatedStop
|
||||
|
||||
from sdkit.utils import (
|
||||
diffusers_latent_samples_to_images,
|
||||
gc,
|
||||
img_to_base64_str,
|
||||
latent_samples_to_images,
|
||||
)
|
||||
|
||||
opts = {}
|
||||
|
||||
|
||||
def install_backend():
|
||||
pass
|
||||
|
||||
|
||||
def start_backend():
|
||||
print("Started sdkit backend")
|
||||
|
||||
|
||||
def stop_backend():
|
||||
pass
|
||||
|
||||
|
||||
def uninstall_backend():
|
||||
pass
|
||||
|
||||
|
||||
def is_installed():
|
||||
return True
|
||||
|
||||
|
||||
def create_sdkit_context(use_diffusers):
|
||||
c = Context()
|
||||
c.test_diffusers = use_diffusers
|
||||
return c
|
||||
|
||||
|
||||
def ping(timeout=1):
|
||||
return True
|
||||
|
||||
|
||||
def load_model(context, model_type, **kwargs):
|
||||
from sdkit.models import load_model
|
||||
|
||||
load_model(context, model_type, **kwargs)
|
||||
|
||||
|
||||
def unload_model(context, model_type, **kwargs):
|
||||
from sdkit.models import unload_model
|
||||
|
||||
unload_model(context, model_type, **kwargs)
|
||||
|
||||
|
||||
def set_options(context, **kwargs):
|
||||
if "vae_tiling" in kwargs and context.test_diffusers:
|
||||
pipe = context.models["stable-diffusion"]["default"]
|
||||
vae_tiling = kwargs["vae_tiling"]
|
||||
|
||||
if vae_tiling:
|
||||
if hasattr(pipe, "enable_vae_tiling"):
|
||||
pipe.enable_vae_tiling()
|
||||
else:
|
||||
if hasattr(pipe, "disable_vae_tiling"):
|
||||
pipe.disable_vae_tiling()
|
||||
|
||||
for key in (
|
||||
"output_format",
|
||||
"output_quality",
|
||||
"output_lossless",
|
||||
"stream_image_progress",
|
||||
"stream_image_progress_interval",
|
||||
):
|
||||
if key in kwargs:
|
||||
opts[key] = kwargs[key]
|
||||
|
||||
|
||||
def generate_images(
|
||||
context: Context,
|
||||
callback=None,
|
||||
controlnet_filter=None,
|
||||
distilled_guidance_scale: float = 3.5,
|
||||
scheduler_name: str = "simple",
|
||||
output_type="pil",
|
||||
**req,
|
||||
):
|
||||
from sdkit.generate import generate_images
|
||||
|
||||
if req["init_image"] is not None and not context.test_diffusers:
|
||||
req["sampler_name"] = "ddim"
|
||||
|
||||
gc(context)
|
||||
|
||||
context.stop_processing = False
|
||||
|
||||
if req["control_image"] and controlnet_filter:
|
||||
controlnet_filter = convert_ED_controlnet_filter_name(controlnet_filter)
|
||||
req["control_image"] = filter_images(context, req["control_image"], controlnet_filter)[0]
|
||||
|
||||
callback = make_step_callback(context, callback)
|
||||
|
||||
try:
|
||||
images = generate_images(context, callback=callback, **req)
|
||||
except UserInitiatedStop:
|
||||
images = []
|
||||
if context.partial_x_samples is not None:
|
||||
if context.test_diffusers:
|
||||
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
|
||||
else:
|
||||
images = latent_samples_to_images(context, context.partial_x_samples)
|
||||
finally:
|
||||
if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
|
||||
if not context.test_diffusers:
|
||||
del context.partial_x_samples
|
||||
context.partial_x_samples = None
|
||||
|
||||
gc(context)
|
||||
|
||||
if output_type == "base64":
|
||||
output_format = opts.get("output_format", "jpeg")
|
||||
output_quality = opts.get("output_quality", 75)
|
||||
output_lossless = opts.get("output_lossless", False)
|
||||
images = [img_to_base64_str(img, output_format, output_quality, output_lossless) for img in images]
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def filter_images(context: Context, images, filters, filter_params={}, input_type="pil"):
|
||||
gc(context)
|
||||
|
||||
if "nsfw_checker" in filters:
|
||||
filters.remove("nsfw_checker") # handled by ED directly
|
||||
|
||||
if len(filters) == 0:
|
||||
return images
|
||||
|
||||
images = _filter_images(context, images, filters, filter_params)
|
||||
|
||||
if input_type == "base64":
|
||||
output_format = opts.get("output_format", "jpg")
|
||||
output_quality = opts.get("output_quality", 75)
|
||||
output_lossless = opts.get("output_lossless", False)
|
||||
images = [img_to_base64_str(img, output_format, output_quality, output_lossless) for img in images]
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def _filter_images(context, images, filters, filter_params={}):
|
||||
from sdkit.filter import apply_filters
|
||||
|
||||
filters = filters if isinstance(filters, list) else [filters]
|
||||
filters = convert_ED_controlnet_filter_name(filters)
|
||||
|
||||
for filter_name in filters:
|
||||
params = filter_params.get(filter_name, {})
|
||||
|
||||
previous_state = before_filter(context, filter_name, params)
|
||||
|
||||
try:
|
||||
images = apply_filters(context, filter_name, images, **params)
|
||||
finally:
|
||||
after_filter(context, filter_name, params, previous_state)
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def before_filter(context, filter_name, filter_params):
|
||||
if filter_name == "codeformer":
|
||||
from easydiffusion.model_manager import DEFAULT_MODELS, resolve_model_to_use
|
||||
|
||||
default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
|
||||
prev_realesrgan_path = None
|
||||
|
||||
upscale_faces = filter_params.get("upscale_faces", False)
|
||||
if upscale_faces and default_realesrgan not in context.model_paths["realesrgan"]:
|
||||
prev_realesrgan_path = context.model_paths.get("realesrgan")
|
||||
context.model_paths["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
|
||||
load_model(context, "realesrgan")
|
||||
|
||||
return prev_realesrgan_path
|
||||
|
||||
|
||||
def after_filter(context, filter_name, filter_params, previous_state):
|
||||
if filter_name == "codeformer":
|
||||
prev_realesrgan_path = previous_state
|
||||
if prev_realesrgan_path:
|
||||
context.model_paths["realesrgan"] = prev_realesrgan_path
|
||||
load_model(context, "realesrgan")
|
||||
|
||||
|
||||
def get_url():
|
||||
pass
|
||||
|
||||
|
||||
def stop_rendering(context):
|
||||
context.stop_processing = True
|
||||
|
||||
|
||||
def refresh_models():
|
||||
pass
|
||||
|
||||
|
||||
def list_controlnet_filters():
|
||||
from sdkit.models.model_loader.controlnet_filters import filters as cn_filters
|
||||
|
||||
return cn_filters
|
||||
|
||||
|
||||
def make_step_callback(context, callback):
|
||||
def on_step(x_samples, i, *args):
|
||||
stream_image_progress = opts.get("stream_image_progress", False)
|
||||
stream_image_progress_interval = opts.get("stream_image_progress_interval", 3)
|
||||
|
||||
if context.test_diffusers:
|
||||
context.partial_x_samples = (x_samples, args[0])
|
||||
else:
|
||||
context.partial_x_samples = x_samples
|
||||
|
||||
if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
|
||||
if context.test_diffusers:
|
||||
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
|
||||
else:
|
||||
images = latent_samples_to_images(context, context.partial_x_samples)
|
||||
else:
|
||||
images = None
|
||||
|
||||
if callback:
|
||||
callback(images, i, *args)
|
||||
|
||||
if context.stop_processing:
|
||||
raise UserInitiatedStop("User requested that we stop processing")
|
||||
|
||||
return on_step
|
||||
|
||||
|
||||
def convert_ED_controlnet_filter_name(filter):
|
||||
def cn(n):
|
||||
if n.startswith("controlnet_"):
|
||||
return n[len("controlnet_") :]
|
||||
return n
|
||||
|
||||
if isinstance(filter, list):
|
||||
return [cn(f) for f in filter]
|
||||
return cn(filter)
|
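For illustration, the filter-name conversion at the end of this file behaves roughly as follows (hypothetical inputs):

    # convert_ED_controlnet_filter_name("controlnet_canny")               -> "canny"
    # convert_ED_controlnet_filter_name(["controlnet_openpose", "depth"]) -> ["openpose", "depth"]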
ui/easydiffusion/backends/webui/__init__.py (new file, 450 lines)
@@ -0,0 +1,450 @@
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import threading
|
||||
from threading import local
|
||||
import psutil
|
||||
import time
|
||||
import shutil
|
||||
|
||||
from easydiffusion.app import ROOT_DIR, getConfig
|
||||
from easydiffusion.model_manager import get_model_dirs
|
||||
from easydiffusion.utils import log
|
||||
|
||||
from . import impl
|
||||
from .impl import (
|
||||
ping,
|
||||
load_model,
|
||||
unload_model,
|
||||
set_options,
|
||||
generate_images,
|
||||
filter_images,
|
||||
get_url,
|
||||
stop_rendering,
|
||||
refresh_models,
|
||||
list_controlnet_filters,
|
||||
)
|
||||
|
||||
|
||||
ed_info = {
|
||||
"name": "WebUI backend for Easy Diffusion",
|
||||
"version": (1, 0, 0),
|
||||
"type": "backend",
|
||||
}
|
||||
|
||||
WEBUI_REPO = "https://github.com/lllyasviel/stable-diffusion-webui-forge.git"
|
||||
WEBUI_COMMIT = "f4d5e8cac16a42fa939e78a0956b4c30e2b47bb5"
|
||||
|
||||
BACKEND_DIR = os.path.abspath(os.path.join(ROOT_DIR, "webui"))
|
||||
SYSTEM_DIR = os.path.join(BACKEND_DIR, "system")
|
||||
WEBUI_DIR = os.path.join(BACKEND_DIR, "webui")
|
||||
|
||||
OS_NAME = platform.system()
|
||||
|
||||
MODELS_TO_OVERRIDE = {
|
||||
"stable-diffusion": "--ckpt-dir",
|
||||
"vae": "--vae-dir",
|
||||
"hypernetwork": "--hypernetwork-dir",
|
||||
"gfpgan": "--gfpgan-models-path",
|
||||
"realesrgan": "--realesrgan-models-path",
|
||||
"lora": "--lora-dir",
|
||||
"codeformer": "--codeformer-models-path",
|
||||
"embeddings": "--embeddings-dir",
|
||||
"controlnet": "--controlnet-dir",
|
||||
}
|
||||
|
||||
backend_process = None
|
||||
conda = "conda"
|
||||
|
||||
|
||||
def locate_conda():
|
||||
global conda
|
||||
|
||||
which = "where" if OS_NAME == "Windows" else "which"
|
||||
conda = subprocess.getoutput(f"{which} conda")
|
||||
conda = conda.split("\n")
|
||||
conda = conda[0].strip()
|
||||
print("conda: ", conda)
|
||||
|
||||
|
||||
locate_conda()
|
||||
|
||||
|
||||
def install_backend():
|
||||
print("Installing the WebUI backend..")
|
||||
|
||||
# create the conda env
|
||||
run([conda, "create", "-y", "--prefix", SYSTEM_DIR], cwd=ROOT_DIR)
|
||||
|
||||
print("Installing packages..")
|
||||
|
||||
# install python 3.10 and git in the conda env
|
||||
run([conda, "install", "-y", "--prefix", SYSTEM_DIR, "-c", "conda-forge", "python=3.10", "git"], cwd=ROOT_DIR)
|
||||
|
||||
# print info
|
||||
run_in_conda(["git", "--version"], cwd=ROOT_DIR)
|
||||
run_in_conda(["python", "--version"], cwd=ROOT_DIR)
|
||||
|
||||
# clone webui
|
||||
run_in_conda(["git", "clone", WEBUI_REPO, WEBUI_DIR], cwd=ROOT_DIR)
|
||||
|
||||
# install cpu-only torch if the PC doesn't have a graphics card (for Windows and Linux).
|
||||
# this avoids WebUI installing a CUDA version and trying to activate it
|
||||
if OS_NAME in ("Windows", "Linux") and not has_discrete_graphics_card():
|
||||
run_in_conda(["python", "-m", "pip", "install", "torch", "torchvision"], cwd=WEBUI_DIR)
|
||||
|
||||
|
||||
def start_backend():
|
||||
config = getConfig()
|
||||
backend_config = config.get("backend_config", {})
|
||||
|
||||
if not os.path.exists(BACKEND_DIR):
|
||||
install_backend()
|
||||
|
||||
was_still_installing = not is_installed()
|
||||
|
||||
if backend_config.get("auto_update", True):
|
||||
run_in_conda(["git", "add", "-A", "."], cwd=WEBUI_DIR)
|
||||
run_in_conda(["git", "stash"], cwd=WEBUI_DIR)
|
||||
run_in_conda(["git", "reset", "--hard"], cwd=WEBUI_DIR)
|
||||
run_in_conda(["git", "fetch"], cwd=WEBUI_DIR)
|
||||
run_in_conda(["git", "-c", "advice.detachedHead=false", "checkout", WEBUI_COMMIT], cwd=WEBUI_DIR)
|
||||
|
||||
# hack to prevent webui-macos-env.sh from overwriting the COMMANDLINE_ARGS env variable
|
||||
mac_webui_file = os.path.join(WEBUI_DIR, "webui-macos-env.sh")
|
||||
if os.path.exists(mac_webui_file):
|
||||
os.remove(mac_webui_file)
|
||||
|
||||
impl.WEBUI_HOST = backend_config.get("host", "localhost")
|
||||
impl.WEBUI_PORT = backend_config.get("port", "7860")
|
||||
|
||||
env = dict(os.environ)
|
||||
env.update(get_env())
|
||||
|
||||
def restart_if_webui_dies_after_starting():
|
||||
has_started = False
|
||||
|
||||
while True:
|
||||
try:
|
||||
impl.ping(timeout=1)
|
||||
|
||||
is_first_start = not has_started
|
||||
has_started = True
|
||||
|
||||
if was_still_installing and is_first_start:
|
||||
ui = config.get("ui", {})
|
||||
net = config.get("net", {})
|
||||
port = net.get("listen_port", 9000)
|
||||
|
||||
if ui.get("open_browser_on_start", True):
|
||||
import webbrowser
|
||||
|
||||
log.info("Opening browser..")
|
||||
|
||||
webbrowser.open(f"http://localhost:{port}")
|
||||
except (TimeoutError, ConnectionError):
|
||||
if has_started: # process probably died
|
||||
print("######################## WebUI probably died. Restarting...")
|
||||
stop_backend()
|
||||
backend_thread = threading.Thread(target=target)
|
||||
backend_thread.start()
|
||||
break
|
||||
except Exception:
|
||||
import traceback
|
||||
|
||||
log.exception(traceback.format_exc())
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
def target():
|
||||
global backend_process
|
||||
|
||||
cmd = "webui.bat" if OS_NAME == "Windows" else "./webui.sh"
|
||||
|
||||
print("starting", cmd, WEBUI_DIR)
|
||||
backend_process = run_in_conda([cmd], cwd=WEBUI_DIR, env=env, wait=False, output_prefix="[WebUI] ")
|
||||
|
||||
restart_if_dead_thread = threading.Thread(target=restart_if_webui_dies_after_starting)
|
||||
restart_if_dead_thread.start()
|
||||
|
||||
backend_process.wait()
|
||||
|
||||
backend_thread = threading.Thread(target=target)
|
||||
backend_thread.start()
|
||||
|
||||
start_proxy()
|
||||
|
||||
|
||||
def start_proxy():
|
||||
# proxy
|
||||
from easydiffusion.server import server_api
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.responses import Response
|
||||
import json
|
||||
|
||||
URI_PREFIX = "/webui"
|
||||
|
||||
webui_proxy = FastAPI(root_path=f"{URI_PREFIX}", docs_url="/swagger")
|
||||
|
||||
@webui_proxy.get("{uri:path}")
|
||||
def proxy_get(uri: str, req: Request):
|
||||
if uri == "/openapi-proxy.json":
|
||||
uri = "/openapi.json"
|
||||
|
||||
res = impl.webui_get(uri, headers=req.headers)
|
||||
|
||||
content = res.content
|
||||
headers = dict(res.headers)
|
||||
|
||||
if uri == "/docs":
|
||||
content = res.text.replace("url: '/openapi.json'", f"url: '{URI_PREFIX}/openapi-proxy.json'")
|
||||
elif uri == "/openapi.json":
|
||||
content = res.json()
|
||||
content["paths"] = {f"{URI_PREFIX}{k}": v for k, v in content["paths"].items()}
|
||||
content = json.dumps(content)
|
||||
|
||||
if isinstance(content, str):
|
||||
content = bytes(content, encoding="utf-8")
|
||||
headers["content-length"] = str(len(content))
|
||||
|
||||
# Return the same response back to the client
|
||||
return Response(content=content, status_code=res.status_code, headers=headers)
|
||||
|
||||
@webui_proxy.post("{uri:path}")
|
||||
async def proxy_post(uri: str, req: Request):
|
||||
body = await req.body()
|
||||
res = impl.webui_post(uri, data=body, headers=req.headers)
|
||||
|
||||
# Return the same response back to the client
|
||||
return Response(content=res.content, status_code=res.status_code, headers=dict(res.headers))
|
||||
|
||||
server_api.mount(f"{URI_PREFIX}", webui_proxy)
|
||||
|
||||
|
||||
def stop_backend():
|
||||
global backend_process
|
||||
|
||||
if backend_process:
|
||||
try:
|
||||
kill(backend_process.pid)
|
||||
except:
|
||||
pass
|
||||
|
||||
backend_process = None
|
||||
|
||||
|
||||
def uninstall_backend():
|
||||
shutil.rmtree(BACKEND_DIR)
|
||||
|
||||
|
||||
def is_installed():
|
||||
if not os.path.exists(BACKEND_DIR) or not os.path.exists(SYSTEM_DIR) or not os.path.exists(WEBUI_DIR):
|
||||
return True
|
||||
|
||||
env = dict(os.environ)
|
||||
env.update(get_env())
|
||||
|
||||
try:
|
||||
out = check_output_in_conda(["python", "-m", "pip", "show", "torch"], env=env)
|
||||
return "Version" in out.decode()
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def read_output(pipe, prefix=""):
|
||||
while True:
|
||||
output = pipe.readline()
|
||||
if output:
|
||||
print(f"{prefix}{output.decode('utf-8')}", end="")
|
||||
else:
|
||||
break # Pipe is closed, subprocess has likely exited
|
||||
|
||||
|
||||
def run(cmds: list, cwd=None, env=None, stream_output=True, wait=True, output_prefix=""):
|
||||
p = subprocess.Popen(cmds, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
if stream_output:
|
||||
output_thread = threading.Thread(target=read_output, args=(p.stdout, output_prefix))
|
||||
output_thread.start()
|
||||
|
||||
if wait:
|
||||
p.wait()
|
||||
|
||||
return p
|
||||
|
||||
|
||||
def run_in_conda(cmds: list, *args, **kwargs):
|
||||
cmds = [conda, "run", "--no-capture-output", "--prefix", SYSTEM_DIR] + cmds
|
||||
return run(cmds, *args, **kwargs)
|
||||
|
||||
|
||||
def check_output_in_conda(cmds: list, cwd=None, env=None):
|
||||
cmds = [conda, "run", "--no-capture-output", "--prefix", SYSTEM_DIR] + cmds
|
||||
return subprocess.check_output(cmds, cwd=cwd, env=env, stderr=subprocess.PIPE)
|
||||
|
||||
|
||||
def create_context():
|
||||
context = local()
|
||||
|
||||
# temp hack, throws an attribute not found error otherwise
|
||||
context.device = "cuda:0"
|
||||
context.half_precision = True
|
||||
context.vram_usage_level = None
|
||||
|
||||
context.models = {}
|
||||
context.model_paths = {}
|
||||
context.model_configs = {}
|
||||
context.device_name = None
|
||||
context.vram_optimizations = set()
|
||||
context.vram_usage_level = "balanced"
|
||||
context.test_diffusers = False
|
||||
context.enable_codeformer = False
|
||||
|
||||
return context
|
||||
|
||||
|
||||
def get_env():
|
||||
dir = os.path.abspath(SYSTEM_DIR)
|
||||
|
||||
if not os.path.exists(dir):
|
||||
raise RuntimeError("The system folder is missing!")
|
||||
|
||||
config = getConfig()
|
||||
models_dir = config.get("models_dir", os.path.join(ROOT_DIR, "models"))
|
||||
|
||||
model_path_args = get_model_path_args()
|
||||
|
||||
env_entries = {
|
||||
"PATH": [
|
||||
f"{dir}",
|
||||
f"{dir}/bin",
|
||||
f"{dir}/Library/bin",
|
||||
f"{dir}/Scripts",
|
||||
f"{dir}/usr/bin",
|
||||
],
|
||||
"PYTHONPATH": [
|
||||
f"{dir}",
|
||||
f"{dir}/lib/site-packages",
|
||||
f"{dir}/lib/python3.10/site-packages",
|
||||
],
|
||||
"PYTHONHOME": [],
|
||||
"PY_LIBS": [
|
||||
f"{dir}/Scripts/Lib",
|
||||
f"{dir}/Scripts/Lib/site-packages",
|
||||
f"{dir}/lib",
|
||||
f"{dir}/lib/python3.10/site-packages",
|
||||
],
|
||||
"PY_PIP": [f"{dir}/Scripts", f"{dir}/bin"],
|
||||
"PIP_INSTALLER_LOCATION": [], # [f"{dir}/python/get-pip.py"],
|
||||
"TRANSFORMERS_CACHE": [f"{dir}/transformers-cache"],
|
||||
"HF_HUB_DISABLE_SYMLINKS_WARNING": ["true"],
|
||||
"COMMANDLINE_ARGS": [f'--api --models-dir "{models_dir}" {model_path_args} --skip-torch-cuda-test'],
|
||||
"SKIP_VENV": ["1"],
|
||||
"SD_WEBUI_RESTARTING": ["1"],
|
||||
}
|
||||
|
||||
if OS_NAME == "Windows":
|
||||
env_entries["PATH"].append("C:/Windows/System32")
|
||||
env_entries["PATH"].append("C:/Windows/System32/wbem")
|
||||
env_entries["PYTHONNOUSERSITE"] = ["1"]
|
||||
env_entries["PYTHON"] = [f"{dir}/python"]
|
||||
env_entries["GIT"] = [f"{dir}/Library/bin/git"]
|
||||
else:
|
||||
env_entries["PATH"].append("/bin")
|
||||
env_entries["PATH"].append("/usr/bin")
|
||||
env_entries["PATH"].append("/usr/sbin")
|
||||
env_entries["PYTHONNOUSERSITE"] = ["y"]
|
||||
env_entries["PYTHON"] = [f"{dir}/bin/python"]
|
||||
env_entries["GIT"] = [f"{dir}/bin/git"]
|
||||
env_entries["venv_dir"] = ["-"]
|
||||
|
||||
if OS_NAME == "Darwin":
|
||||
# based on https://github.com/lllyasviel/stable-diffusion-webui-forge/blob/e26abf87ecd1eefd9ab0a198eee56f9c643e4001/webui-macos-env.sh
|
||||
# hack - have to define these here, otherwise webui-macos-env.sh will overwrite COMMANDLINE_ARGS
|
||||
env_entries["COMMANDLINE_ARGS"][0] += " --upcast-sampling --no-half-vae --use-cpu interrogate"
|
||||
env_entries["PYTORCH_ENABLE_MPS_FALLBACK"] = ["1"]
|
||||
|
||||
cpu_name = str(subprocess.check_output(["sysctl", "-n", "machdep.cpu.brand_string"]))
|
||||
if "Intel" in cpu_name:
|
||||
env_entries["TORCH_COMMAND"] = ["pip install torch==2.1.2 torchvision==0.16.2"]
|
||||
else:
|
||||
env_entries["TORCH_COMMAND"] = ["pip install torch==2.3.1 torchvision==0.18.1"]
|
||||
else:
|
||||
import torch
|
||||
from easydiffusion.device_manager import needs_to_force_full_precision, is_cuda_available
|
||||
|
||||
vram_usage_level = config.get("vram_usage_level", "balanced")
|
||||
if config.get("render_devices", "auto") == "cpu" or not has_discrete_graphics_card() or not is_cuda_available():
|
||||
env_entries["COMMANDLINE_ARGS"][0] += " --always-cpu"
|
||||
else:
|
||||
c = local()
|
||||
c.device_name = torch.cuda.get_device_name()
|
||||
|
||||
if needs_to_force_full_precision(c):
|
||||
env_entries["COMMANDLINE_ARGS"][0] += " --no-half --precision full"
|
||||
|
||||
if vram_usage_level == "low":
|
||||
env_entries["COMMANDLINE_ARGS"][0] += " --always-low-vram"
|
||||
elif vram_usage_level == "high":
|
||||
env_entries["COMMANDLINE_ARGS"][0] += " --always-high-vram"
|
||||
|
||||
env = {}
|
||||
for key, paths in env_entries.items():
|
||||
paths = [p.replace("/", os.path.sep) for p in paths]
|
||||
paths = os.pathsep.join(paths)
|
||||
|
||||
env[key] = paths
|
||||
|
||||
return env
|
||||
|
||||
|
||||
def has_discrete_graphics_card():
|
||||
system = OS_NAME
|
||||
|
||||
if system == "Windows":
|
||||
try:
|
||||
output = subprocess.check_output(
|
||||
["wmic", "path", "win32_videocontroller", "get", "name"], stderr=subprocess.STDOUT
|
||||
)
|
||||
# Filter for discrete graphics cards (NVIDIA, AMD, etc.)
|
||||
discrete_gpus = ["NVIDIA", "AMD", "ATI"]
|
||||
return any(gpu in output.decode() for gpu in discrete_gpus)
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
elif system == "Linux":
|
||||
try:
|
||||
output = subprocess.check_output(["lspci"], stderr=subprocess.STDOUT)
|
||||
# Check for discrete GPUs (NVIDIA, AMD)
|
||||
discrete_gpus = ["NVIDIA", "AMD", "Advanced Micro Devices"]
|
||||
return any(gpu in line for line in output.decode().splitlines() for gpu in discrete_gpus)
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
elif system == "Darwin": # macOS
|
||||
try:
|
||||
output = subprocess.check_output(["system_profiler", "SPDisplaysDataType"], stderr=subprocess.STDOUT)
|
||||
# Check for discrete GPU in the output
|
||||
return "NVIDIA" in output.decode() or "AMD" in output.decode()
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
|
||||
# https://stackoverflow.com/a/25134985
|
||||
def kill(proc_pid):
|
||||
process = psutil.Process(proc_pid)
|
||||
for proc in process.children(recursive=True):
|
||||
proc.kill()
|
||||
process.kill()
|
||||
|
||||
|
||||
def get_model_path_args():
|
||||
args = []
|
||||
for model_type, flag in MODELS_TO_OVERRIDE.items():
|
||||
model_dir = get_model_dirs(model_type)[0]
|
||||
args.append(f'{flag} "{model_dir}"')
|
||||
|
||||
return " ".join(args)
|
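As a sketch, get_env() above composes a WebUI command line along these lines (paths and the VRAM flag below are made up for illustration, assuming a Windows PC with an NVIDIA card and the "low" VRAM setting):

    # COMMANDLINE_ARGS (illustrative only):
    #   --api --models-dir "C:\EasyDiffusion\models" --ckpt-dir "C:\EasyDiffusion\models\stable-diffusion"
    #   --vae-dir "C:\EasyDiffusion\models\vae" ... --skip-torch-cuda-test --always-low-vram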
ui/easydiffusion/backends/webui/impl.py (new file, 654 lines)
@@ -0,0 +1,654 @@
|
||||
import os
|
||||
import requests
|
||||
from requests.exceptions import ConnectTimeout, ConnectionError
|
||||
from typing import Union, List
|
||||
from threading import local as Context
|
||||
from threading import Thread
|
||||
import uuid
|
||||
import time
|
||||
from copy import deepcopy
|
||||
|
||||
from sdkit.utils import base64_str_to_img, img_to_base64_str
|
||||
|
||||
WEBUI_HOST = "localhost"
|
||||
WEBUI_PORT = "7860"
|
||||
|
||||
DEFAULT_WEBUI_OPTIONS = {
|
||||
"show_progress_every_n_steps": 3,
|
||||
"show_progress_grid": True,
|
||||
"live_previews_enable": False,
|
||||
"forge_additional_modules": [],
|
||||
}
|
||||
|
||||
|
||||
webui_opts: dict = None
|
||||
|
||||
|
||||
curr_models = {
|
||||
"stable-diffusion": None,
|
||||
"vae": None,
|
||||
}
|
||||
|
||||
|
||||
def set_options(context, **kwargs):
|
||||
changed_opts = {}
|
||||
|
||||
opts_mapping = {
|
||||
"stream_image_progress": ("live_previews_enable", bool),
|
||||
"stream_image_progress_interval": ("show_progress_every_n_steps", int),
|
||||
"clip_skip": ("CLIP_stop_at_last_layers", int),
|
||||
"clip_skip_sdxl": ("sdxl_clip_l_skip", bool),
|
||||
"output_format": ("samples_format", str),
|
||||
}
|
||||
|
||||
for ed_key, webui_key in opts_mapping.items():
|
||||
webui_key, webui_type = webui_key
|
||||
|
||||
if ed_key in kwargs and (webui_opts is None or webui_opts.get(webui_key, False) != webui_type(kwargs[ed_key])):
|
||||
changed_opts[webui_key] = webui_type(kwargs[ed_key])
|
||||
|
||||
if changed_opts:
|
||||
changed_opts["sd_model_checkpoint"] = curr_models["stable-diffusion"]
|
||||
|
||||
print(f"Got options: {kwargs}. Sending options: {changed_opts}")
|
||||
|
||||
try:
|
||||
res = webui_post("/sdapi/v1/options", json=changed_opts)
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
|
||||
webui_opts.update(changed_opts)
|
||||
except Exception as e:
|
||||
print(f"Error setting options: {e}")
|
||||
|
||||
|
||||
def ping(timeout=1):
|
||||
"timeout (in seconds)"
|
||||
|
||||
global webui_opts
|
||||
|
||||
try:
|
||||
res = webui_get("/internal/ping", timeout=timeout)
|
||||
|
||||
if res.status_code != 200:
|
||||
raise ConnectTimeout(res.text)
|
||||
|
||||
if webui_opts is None:
|
||||
try:
|
||||
res = webui_post("/sdapi/v1/options", json=DEFAULT_WEBUI_OPTIONS)
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
except Exception as e:
|
||||
print(f"Error setting options: {e}")
|
||||
|
||||
try:
|
||||
res = webui_get("/sdapi/v1/options")
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
|
||||
webui_opts = res.json()
|
||||
except Exception as e:
|
||||
print(f"Error getting options: {e}")
|
||||
|
||||
return True
|
||||
except (ConnectTimeout, ConnectionError) as e:
|
||||
raise TimeoutError(e)
|
||||
|
||||
|
||||
def load_model(context, model_type, **kwargs):
|
||||
model_path = context.model_paths[model_type]
|
||||
|
||||
if webui_opts is None:
|
||||
print("Server not ready, can't set the model")
|
||||
return
|
||||
|
||||
if model_type == "stable-diffusion":
|
||||
model_name = os.path.basename(model_path)
|
||||
model_name = os.path.splitext(model_name)[0]
|
||||
print(f"setting sd model: {model_name}")
|
||||
if curr_models[model_type] != model_name:
|
||||
try:
|
||||
res = webui_post("/sdapi/v1/options", json={"sd_model_checkpoint": model_name})
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"The engine failed to set the required options. Please check the logs in the command line window for more details."
|
||||
)
|
||||
|
||||
curr_models[model_type] = model_name
|
||||
elif model_type == "vae":
|
||||
if curr_models[model_type] != model_path:
|
||||
vae_model = [model_path] if model_path else []
|
||||
|
||||
opts = {"sd_model_checkpoint": curr_models["stable-diffusion"], "forge_additional_modules": vae_model}
|
||||
print("setting opts 2", opts)
|
||||
|
||||
try:
|
||||
res = webui_post("/sdapi/v1/options", json=opts)
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
except Exception as e:
|
||||
raise RuntimeError(
|
||||
f"The engine failed to set the required options. Please check the logs in the command line window for more details."
|
||||
)
|
||||
|
||||
curr_models[model_type] = model_path
|
||||
|
||||
|
||||
def unload_model(context, model_type, **kwargs):
|
||||
if model_type == "vae":
|
||||
context.model_paths[model_type] = None
|
||||
load_model(context, model_type)
|
||||
|
||||
|
||||
def generate_images(
|
||||
context: Context,
|
||||
prompt: str = "",
|
||||
negative_prompt: str = "",
|
||||
seed: int = 42,
|
||||
width: int = 512,
|
||||
height: int = 512,
|
||||
num_outputs: int = 1,
|
||||
num_inference_steps: int = 25,
|
||||
guidance_scale: float = 7.5,
|
||||
distilled_guidance_scale: float = 3.5,
|
||||
init_image=None,
|
||||
init_image_mask=None,
|
||||
control_image=None,
|
||||
control_alpha=1.0,
|
||||
controlnet_filter=None,
|
||||
prompt_strength: float = 0.8,
|
||||
preserve_init_image_color_profile=False,
|
||||
strict_mask_border=False,
|
||||
sampler_name: str = "euler_a",
|
||||
scheduler_name: str = "simple",
|
||||
hypernetwork_strength: float = 0,
|
||||
tiling=None,
|
||||
lora_alpha: Union[float, List[float]] = 0,
|
||||
sampler_params={},
|
||||
callback=None,
|
||||
output_type="pil",
|
||||
):
|
||||
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
sampler_name = convert_ED_sampler_names(sampler_name)
|
||||
controlnet_filter = convert_ED_controlnet_filter_name(controlnet_filter)
|
||||
|
||||
cmd = {
|
||||
"force_task_id": task_id,
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
"sampler_name": sampler_name,
|
||||
"scheduler": scheduler_name,
|
||||
"steps": num_inference_steps,
|
||||
"seed": seed,
|
||||
"cfg_scale": guidance_scale,
|
||||
"distilled_cfg_scale": distilled_guidance_scale,
|
||||
"batch_size": num_outputs,
|
||||
"width": width,
|
||||
"height": height,
|
||||
}
|
||||
|
||||
if init_image:
|
||||
cmd["init_images"] = [init_image]
|
||||
cmd["denoising_strength"] = prompt_strength
|
||||
if init_image_mask:
|
||||
cmd["mask"] = init_image_mask
|
||||
cmd["include_init_images"] = True
|
||||
cmd["inpainting_fill"] = 1
|
||||
cmd["initial_noise_multiplier"] = 1
|
||||
cmd["inpaint_full_res"] = 1
|
||||
|
||||
if context.model_paths.get("lora"):
|
||||
lora_model = context.model_paths["lora"]
|
||||
lora_model = lora_model if isinstance(lora_model, list) else [lora_model]
|
||||
lora_alpha = lora_alpha if isinstance(lora_alpha, list) else [lora_alpha]
|
||||
|
||||
for lora, alpha in zip(lora_model, lora_alpha):
|
||||
lora = os.path.basename(lora)
|
||||
lora = os.path.splitext(lora)[0]
|
||||
cmd["prompt"] += f" <lora:{lora}:{alpha}>"
|
||||
|
||||
if controlnet_filter and control_image and context.model_paths.get("controlnet"):
|
||||
controlnet_model = context.model_paths["controlnet"]
|
||||
|
||||
model_hash = auto1111_hash(controlnet_model)
|
||||
controlnet_model = os.path.basename(controlnet_model)
|
||||
controlnet_model = os.path.splitext(controlnet_model)[0]
|
||||
print(f"setting controlnet model: {controlnet_model}")
|
||||
controlnet_model = f"{controlnet_model} [{model_hash}]"
|
||||
|
||||
cmd["alwayson_scripts"] = {
|
||||
"controlnet": {
|
||||
"args": [
|
||||
{
|
||||
"image": control_image,
|
||||
"weight": control_alpha,
|
||||
"module": controlnet_filter,
|
||||
"model": controlnet_model,
|
||||
"resize_mode": "Crop and Resize",
|
||||
"threshold_a": 50,
|
||||
"threshold_b": 130,
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
operation_to_apply = "img2img" if init_image else "txt2img"
|
||||
|
||||
stream_image_progress = webui_opts.get("live_previews_enable", False)
|
||||
|
||||
progress_thread = Thread(
|
||||
target=image_progress_thread, args=(task_id, callback, stream_image_progress, num_outputs, num_inference_steps)
|
||||
)
|
||||
progress_thread.start()
|
||||
|
||||
print(f"task id: {task_id}")
|
||||
print_request(operation_to_apply, cmd)
|
||||
|
||||
res = webui_post(f"/sdapi/v1/{operation_to_apply}", json=cmd)
|
||||
if res.status_code == 200:
|
||||
res = res.json()
|
||||
else:
|
||||
raise Exception(
|
||||
"The engine failed while generating this image. Please check the logs in the command-line window for more details."
|
||||
)
|
||||
|
||||
import json
|
||||
|
||||
print(json.loads(res["info"])["infotexts"])
|
||||
|
||||
images = res["images"]
|
||||
if output_type == "pil":
|
||||
images = [base64_str_to_img(img) for img in images]
|
||||
elif output_type == "base64":
|
||||
images = [base64_buffer_to_base64_img(img) for img in images]
|
||||
|
||||
return images
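
# Editor's note: a minimal usage sketch (not called anywhere in this module), assuming a
# `context` prepared by the runtime and a running Forge/WebUI backend. The output file
# name is hypothetical.
def _example_txt2img(context):
    images = generate_images(
        context,
        prompt="a photograph of an astronaut riding a horse",
        seed=42,
        width=512,
        height=512,
        num_inference_steps=25,
        guidance_scale=7.5,
        sampler_name="euler_a",
        scheduler_name="simple",
        callback=lambda imgs, step: None,  # no-op progress callback for the progress thread
    )
    images[0].save("astronaut.png")  # output_type defaults to "pil"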
|
||||
|
||||
|
||||
def filter_images(context: Context, images, filters, filter_params={}, input_type="pil"):
|
||||
"""
|
||||
* context: Context
|
||||
* images: str or PIL.Image or list of str/PIL.Image - image to filter. if a string is passed, it needs to be a base64-encoded image
|
||||
* filters: filter_type (string) or list of strings
|
||||
* filter_params: dict
|
||||
|
||||
returns: [PIL.Image] - list of filtered images
|
||||
"""
|
||||
images = images if isinstance(images, list) else [images]
|
||||
filters = filters if isinstance(filters, list) else [filters]
|
||||
|
||||
if "nsfw_checker" in filters:
|
||||
filters.remove("nsfw_checker") # handled by ED directly
|
||||
|
||||
args = {}
|
||||
controlnet_filters = []
|
||||
|
||||
print(filter_params)
|
||||
|
||||
for filter_name in filters:
|
||||
params = filter_params.get(filter_name, {})
|
||||
|
||||
if filter_name == "gfpgan":
|
||||
args["gfpgan_visibility"] = 1
|
||||
|
||||
if filter_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
|
||||
args["upscaler_1"] = params.get("upscaler", "RealESRGAN_x4plus")
|
||||
args["upscaling_resize"] = params.get("scale", 4)
|
||||
|
||||
if args["upscaler_1"] == "RealESRGAN_x4plus":
|
||||
args["upscaler_1"] = "R-ESRGAN 4x+"
|
||||
elif args["upscaler_1"] == "RealESRGAN_x4plus_anime_6B":
|
||||
args["upscaler_1"] = "R-ESRGAN 4x+ Anime6B"
|
||||
|
||||
if filter_name == "codeformer":
|
||||
args["codeformer_visibility"] = 1
|
||||
args["codeformer_weight"] = params.get("codeformer_fidelity", 0.5)
|
||||
|
||||
if filter_name.startswith("controlnet_"):
|
||||
filter_name = convert_ED_controlnet_filter_name(filter_name)
|
||||
controlnet_filters.append(filter_name)
|
||||
|
||||
print(f"filtering {len(images)} images with {args}. {controlnet_filters=}")
|
||||
|
||||
if len(filters) > len(controlnet_filters):
|
||||
filtered_images = extra_batch_images(images, input_type=input_type, **args)
|
||||
else:
|
||||
filtered_images = images
|
||||
|
||||
for filter_name in controlnet_filters:
|
||||
filtered_images = controlnet_filter(filtered_images, module=filter_name, input_type=input_type)
|
||||
|
||||
return filtered_images
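
# Editor's note: a minimal usage sketch for filter_images (not called anywhere in this
# module); the file names are hypothetical and `context` is assumed to be ready.
def _example_face_fix_and_upscale(context):
    from PIL import Image

    img = Image.open("input.png")
    out = filter_images(
        context,
        img,
        ["gfpgan", "realesrgan"],
        filter_params={"realesrgan": {"upscaler": "RealESRGAN_x4plus", "scale": 2}},
    )
    out[0].save("output.png")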
|
||||
|
||||
|
||||
def get_url():
|
||||
return f"//{WEBUI_HOST}:{WEBUI_PORT}/?__theme=dark"
|
||||
|
||||
|
||||
def stop_rendering(context):
|
||||
try:
|
||||
res = webui_post("/sdapi/v1/interrupt")
|
||||
if res.status_code != 200:
|
||||
raise Exception(res.text)
|
||||
except Exception as e:
|
||||
print(f"Error interrupting webui: {e}")
|
||||
|
||||
|
||||
def refresh_models():
|
||||
def make_refresh_call(type):
|
||||
try:
|
||||
webui_post(f"/sdapi/v1/refresh-{type}")
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
for type in ("checkpoints", "vae"):
|
||||
t = Thread(target=make_refresh_call, args=(type,))
|
||||
t.start()
|
||||
except Exception as e:
|
||||
print(f"Error refreshing models: {e}")
|
||||
|
||||
|
||||
def list_controlnet_filters():
|
||||
return [
|
||||
"openpose",
|
||||
"openpose_face",
|
||||
"openpose_faceonly",
|
||||
"openpose_hand",
|
||||
"openpose_full",
|
||||
"animal_openpose",
|
||||
"densepose_parula (black bg & blue torso)",
|
||||
"densepose (pruple bg & purple torso)",
|
||||
"dw_openpose_full",
|
||||
"mediapipe_face",
|
||||
"instant_id_face_keypoints",
|
||||
"InsightFace+CLIP-H (IPAdapter)",
|
||||
"InsightFace (InstantID)",
|
||||
"canny",
|
||||
"mlsd",
|
||||
"scribble_hed",
|
||||
"scribble_hedsafe",
|
||||
"scribble_pidinet",
|
||||
"scribble_pidsafe",
|
||||
"scribble_xdog",
|
||||
"softedge_hed",
|
||||
"softedge_hedsafe",
|
||||
"softedge_pidinet",
|
||||
"softedge_pidsafe",
|
||||
"softedge_teed",
|
||||
"normal_bae",
|
||||
"depth_midas",
|
||||
"normal_midas",
|
||||
"depth_zoe",
|
||||
"depth_leres",
|
||||
"depth_leres++",
|
||||
"depth_anything_v2",
|
||||
"depth_anything",
|
||||
"depth_hand_refiner",
|
||||
"depth_marigold",
|
||||
"lineart_coarse",
|
||||
"lineart_realistic",
|
||||
"lineart_anime",
|
||||
"lineart_standard (from white bg & black line)",
|
||||
"lineart_anime_denoise",
|
||||
"reference_adain",
|
||||
"reference_only",
|
||||
"reference_adain+attn",
|
||||
"tile_colorfix",
|
||||
"tile_resample",
|
||||
"tile_colorfix+sharp",
|
||||
"CLIP-ViT-H (IPAdapter)",
|
||||
"CLIP-G (Revision)",
|
||||
"CLIP-G (Revision ignore prompt)",
|
||||
"CLIP-ViT-bigG (IPAdapter)",
|
||||
"InsightFace+CLIP-H (IPAdapter)",
|
||||
"inpaint_only",
|
||||
"inpaint_only+lama",
|
||||
"inpaint_global_harmonious",
|
||||
"seg_ufade20k",
|
||||
"seg_ofade20k",
|
||||
"seg_anime_face",
|
||||
"seg_ofcoco",
|
||||
"shuffle",
|
||||
"segment",
|
||||
"invert (from white bg & black line)",
|
||||
"threshold",
|
||||
"t2ia_sketch_pidi",
|
||||
"t2ia_color_grid",
|
||||
"recolor_intensity",
|
||||
"recolor_luminance",
|
||||
"blur_gaussian",
|
||||
]
|
||||
|
||||
|
||||
def controlnet_filter(images, module="none", processor_res=512, threshold_a=64, threshold_b=64, input_type="pil"):
|
||||
if input_type == "pil":
|
||||
images = [img_to_base64_str(x) for x in images]
|
||||
|
||||
payload = {
|
||||
"controlnet_module": module,
|
||||
"controlnet_input_images": images,
|
||||
"controlnet_processor_res": processor_res,
|
||||
"controlnet_threshold_a": threshold_a,
|
||||
"controlnet_threshold_b": threshold_b,
|
||||
}
|
||||
res = webui_post("/controlnet/detect", json=payload)
|
||||
res = res.json()
|
||||
filtered_images = res["images"]
|
||||
|
||||
if input_type == "pil":
|
||||
filtered_images = [base64_str_to_img(img) for img in filtered_images]
|
||||
elif input_type == "base64":
|
||||
filtered_images = [base64_buffer_to_base64_img(img) for img in filtered_images]
|
||||
|
||||
return filtered_images
|
||||
|
||||
|
||||
def image_progress_thread(task_id, callback, stream_image_progress, total_images, total_steps):
|
||||
from PIL import Image
|
||||
|
||||
last_preview_id = -1
|
||||
|
||||
EMPTY_IMAGE = Image.new("RGB", (1, 1))
|
||||
|
||||
while True:
|
||||
res = webui_post(
|
||||
f"/internal/progress",
|
||||
json={"id_task": task_id, "live_preview": stream_image_progress, "id_live_preview": last_preview_id},
|
||||
)
|
||||
if res.status_code == 200:
|
||||
res = res.json()
|
||||
else:
|
||||
raise RuntimeError(f"Unexpected progress response. Status code: {res.status_code}. Res: {res.text}")
|
||||
|
||||
last_preview_id = res["id_live_preview"]
|
||||
|
||||
if res["progress"] is not None:
|
||||
step_num = int(res["progress"] * total_steps)
|
||||
|
||||
if res["live_preview"] is not None:
|
||||
img = res["live_preview"]
|
||||
img = base64_str_to_img(img)
|
||||
images = [EMPTY_IMAGE] * total_images
|
||||
images[0] = img
|
||||
else:
|
||||
images = None
|
||||
|
||||
callback(images, step_num)
|
||||
|
||||
if res["completed"] == True:
|
||||
print("Complete!")
|
||||
break
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
|
||||
def webui_get(uri, *args, **kwargs):
    url = f"http://{WEBUI_HOST}:{WEBUI_PORT}{uri}"
    return requests.get(url, *args, **kwargs)


def webui_post(uri, *args, **kwargs):
    url = f"http://{WEBUI_HOST}:{WEBUI_PORT}{uri}"
    return requests.post(url, *args, **kwargs)
|
||||
|
||||
|
||||
def print_request(operation_to_apply, args):
|
||||
args = deepcopy(args)
|
||||
if "init_images" in args:
|
||||
args["init_images"] = ["img" for _ in args["init_images"]]
|
||||
if "mask" in args:
|
||||
args["mask"] = "mask_img"
|
||||
|
||||
controlnet_args = args.get("alwayson_scripts", {}).get("controlnet", {}).get("args", [])
|
||||
if controlnet_args:
|
||||
controlnet_args[0]["image"] = "control_image"
|
||||
|
||||
print(f"operation: {operation_to_apply}, args: {args}")
|
||||
|
||||
|
||||
def auto1111_hash(file_path):
    import hashlib

    # Short "model hash" in the style used by Automatic1111-era WebUIs: sha256 of the
    # 64 KiB block starting at offset 1 MiB in the file, truncated to 8 hex characters.
    with open(file_path, "rb") as f:
        f.seek(0x100000)
        b = f.read(0x10000)
        return hashlib.sha256(b).hexdigest()[:8]
|
||||
|
||||
|
||||
def extra_batch_images(
|
||||
images, # list of PIL images
|
||||
name_list=None, # list of image names
|
||||
resize_mode=0,
|
||||
show_extras_results=True,
|
||||
gfpgan_visibility=0,
|
||||
codeformer_visibility=0,
|
||||
codeformer_weight=0,
|
||||
upscaling_resize=2,
|
||||
upscaling_resize_w=512,
|
||||
upscaling_resize_h=512,
|
||||
upscaling_crop=True,
|
||||
upscaler_1="None",
|
||||
upscaler_2="None",
|
||||
extras_upscaler_2_visibility=0,
|
||||
upscale_first=False,
|
||||
use_async=False,
|
||||
input_type="pil",
|
||||
):
|
||||
if name_list is not None:
|
||||
if len(name_list) != len(images):
|
||||
raise RuntimeError("len(images) != len(name_list)")
|
||||
else:
|
||||
name_list = [f"image{i + 1:05}" for i in range(len(images))]
|
||||
|
||||
if input_type == "pil":
|
||||
images = [img_to_base64_str(x) for x in images]
|
||||
|
||||
image_list = []
|
||||
for name, image in zip(name_list, images):
|
||||
image_list.append({"data": image, "name": name})
|
||||
|
||||
payload = {
|
||||
"resize_mode": resize_mode,
|
||||
"show_extras_results": show_extras_results,
|
||||
"gfpgan_visibility": gfpgan_visibility,
|
||||
"codeformer_visibility": codeformer_visibility,
|
||||
"codeformer_weight": codeformer_weight,
|
||||
"upscaling_resize": upscaling_resize,
|
||||
"upscaling_resize_w": upscaling_resize_w,
|
||||
"upscaling_resize_h": upscaling_resize_h,
|
||||
"upscaling_crop": upscaling_crop,
|
||||
"upscaler_1": upscaler_1,
|
||||
"upscaler_2": upscaler_2,
|
||||
"extras_upscaler_2_visibility": extras_upscaler_2_visibility,
|
||||
"upscale_first": upscale_first,
|
||||
"imageList": image_list,
|
||||
}
|
||||
|
||||
res = webui_post("/sdapi/v1/extra-batch-images", json=payload)
|
||||
if res.status_code == 200:
|
||||
res = res.json()
|
||||
else:
|
||||
raise Exception(
|
||||
"The engine failed while filtering this image. Please check the logs in the command-line window for more details."
|
||||
)
|
||||
|
||||
images = res["images"]
|
||||
|
||||
if input_type == "pil":
|
||||
images = [base64_str_to_img(img) for img in images]
|
||||
elif input_type == "base64":
|
||||
images = [base64_buffer_to_base64_img(img) for img in images]
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def base64_buffer_to_base64_img(img):
|
||||
output_format = webui_opts.get("samples_format", "jpeg")
|
||||
mime_type = f"image/{output_format.lower()}"
|
||||
return f"data:{mime_type};base64," + img
|
||||
|
||||
|
||||
def convert_ED_sampler_names(sampler_name):
|
||||
name_mapping = {
|
||||
"dpmpp_2m": "DPM++ 2M",
|
||||
"dpmpp_sde": "DPM++ SDE",
|
||||
"dpmpp_2m_sde": "DPM++ 2M SDE",
|
||||
"dpmpp_2m_sde_heun": "DPM++ 2M SDE Heun",
|
||||
"dpmpp_2s_a": "DPM++ 2S a",
|
||||
"dpmpp_3m_sde": "DPM++ 3M SDE",
|
||||
"euler_a": "Euler a",
|
||||
"euler": "Euler",
|
||||
"lms": "LMS",
|
||||
"heun": "Heun",
|
||||
"dpm2": "DPM2",
|
||||
"dpm2_a": "DPM2 a",
|
||||
"dpm_fast": "DPM fast",
|
||||
"dpm_adaptive": "DPM adaptive",
|
||||
"restart": "Restart",
|
||||
"heun_pp2": "HeunPP2",
|
||||
"ipndm": "IPNDM",
|
||||
"ipndm_v": "IPNDM_V",
|
||||
"deis": "DEIS",
|
||||
"ddim": "DDIM",
|
||||
"ddim_cfgpp": "DDIM CFG++",
|
||||
"plms": "PLMS",
|
||||
"unipc": "UniPC",
|
||||
"lcm": "LCM",
|
||||
"ddpm": "DDPM",
|
||||
"forge_flux_realistic": "[Forge] Flux Realistic",
|
||||
"forge_flux_realistic_slow": "[Forge] Flux Realistic (Slow)",
|
||||
# deprecated samplers in 3.5
|
||||
"dpm_solver_stability": None,
|
||||
"unipc_snr": None,
|
||||
"unipc_tu": None,
|
||||
"unipc_snr_2": None,
|
||||
"unipc_tu_2": None,
|
||||
"unipc_tq": None,
|
||||
}
|
||||
return name_mapping.get(sampler_name)
|
||||
|
||||
|
||||
def convert_ED_controlnet_filter_name(filter):
|
||||
if filter is None:
|
||||
return None
|
||||
|
||||
def cn(n):
|
||||
if n.startswith("controlnet_"):
|
||||
return n[len("controlnet_") :]
|
||||
return n
|
||||
|
||||
mapping = {
|
||||
"controlnet_scribble_hedsafe": None,
|
||||
"controlnet_scribble_pidsafe": None,
|
||||
"controlnet_softedge_pidsafe": "controlnet_softedge_pidisafe",
|
||||
"controlnet_normal_bae": "controlnet_normalbae",
|
||||
"controlnet_segment": None,
|
||||
}
|
||||
if isinstance(filter, list):
|
||||
return [cn(mapping.get(f, f)) for f in filter]
|
||||
return cn(mapping.get(filter, filter))
|
ui/easydiffusion/bucket_manager.py (new file, 107 lines)
@ -0,0 +1,107 @@
|
||||
from typing import List
|
||||
|
||||
from fastapi import Depends, FastAPI, HTTPException, Response, File
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from easydiffusion.easydb import crud, models, schemas
|
||||
from easydiffusion.easydb.database import SessionLocal, engine
|
||||
|
||||
from requests.compat import urlparse
|
||||
|
||||
import base64, json
|
||||
|
||||
MIME_TYPES = {
|
||||
"jpg": "image/jpeg",
|
||||
"jpeg": "image/jpeg",
|
||||
"gif": "image/gif",
|
||||
"png": "image/png",
|
||||
"webp": "image/webp",
|
||||
"js": "text/javascript",
|
||||
"htm": "text/html",
|
||||
"html": "text/html",
|
||||
"css": "text/css",
|
||||
"json": "application/json",
|
||||
"mjs": "application/json",
|
||||
"yaml": "application/yaml",
|
||||
"svg": "image/svg+xml",
|
||||
"txt": "text/plain",
|
||||
}
|
||||
|
||||
def init():
|
||||
from easydiffusion.server import server_api
|
||||
|
||||
models.BucketBase.metadata.create_all(bind=engine)
|
||||
|
||||
|
||||
# Dependency
|
||||
def get_db():
|
||||
db = SessionLocal()
|
||||
try:
|
||||
yield db
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
@server_api.get("/bucket/{obj_path:path}")
|
||||
def bucket_get_object(obj_path: str, db: Session = Depends(get_db)):
|
||||
filename = get_filename_from_url(obj_path)
|
||||
path = get_path_from_url(obj_path)
|
||||
|
||||
    if filename is None:
|
||||
bucket = crud.get_bucket_by_path(db, path=path)
|
||||
        if bucket is None:
|
||||
raise HTTPException(status_code=404, detail="Bucket not found")
|
||||
bucketfiles = db.query(models.BucketFile).with_entities(models.BucketFile.filename).filter(models.BucketFile.bucket_id == bucket.id).all()
|
||||
bucketfiles = [ x.filename for x in bucketfiles ]
|
||||
return bucketfiles
|
||||
|
||||
else:
|
||||
bucket = crud.get_bucket_by_path(db, path)
|
||||
        if bucket is None:
|
||||
raise HTTPException(status_code=404, detail="Bucket not found")
|
||||
bucket_id = bucket.id
|
||||
bucketfile = db.query(models.BucketFile).filter(models.BucketFile.bucket_id == bucket_id, models.BucketFile.filename == filename).first()
|
||||
        if bucketfile is None:
|
||||
raise HTTPException(status_code=404, detail="File not found")
|
||||
|
||||
suffix = get_suffix_from_filename(filename)
|
||||
|
||||
return Response(content=bucketfile.data, media_type=MIME_TYPES.get(suffix, "application/octet-stream"))
|
||||
|
||||
@server_api.post("/bucket/{obj_path:path}")
|
||||
def bucket_post_object(obj_path: str, file: bytes = File(), db: Session = Depends(get_db)):
|
||||
filename = get_filename_from_url(obj_path)
|
||||
path = get_path_from_url(obj_path)
|
||||
bucket = crud.get_bucket_by_path(db, path)
|
||||
|
||||
    if bucket is None:
|
||||
bucket = crud.create_bucket(db=db, bucket=schemas.BucketCreate(path=path))
|
||||
bucket_id = bucket.id
|
||||
|
||||
bucketfile = schemas.BucketFileCreate(filename=filename, data=file)
|
||||
result = crud.create_bucketfile(db=db, bucketfile=bucketfile, bucket_id=bucket_id)
|
||||
    result.data = base64.encodebytes(result.data)
|
||||
return result
|
||||
|
||||
|
||||
@server_api.post("/buckets/{bucket_id}/items/", response_model=schemas.BucketFile)
|
||||
def create_bucketfile_in_bucket(
|
||||
bucket_id: int, bucketfile: schemas.BucketFileCreate, db: Session = Depends(get_db)
|
||||
):
|
||||
    bucketfile.data = base64.decodebytes(bucketfile.data)
|
||||
result = crud.create_bucketfile(db=db, bucketfile=bucketfile, bucket_id=bucket_id)
|
||||
    result.data = base64.encodebytes(result.data)
|
||||
return result
|
||||
|
||||
|
||||
def get_filename_from_url(url):
|
||||
path = urlparse(url).path
|
||||
name = path[path.rfind('/')+1:]
|
||||
return name or None
|
||||
|
||||
def get_path_from_url(url):
|
||||
path = urlparse(url).path
|
||||
path = path[0:path.rfind('/')]
|
||||
return path or None
|
||||
|
||||
def get_suffix_from_filename(filename):
|
||||
return filename[filename.rfind('.')+1:]
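
# Editor's note: an illustrative client-side sketch for the bucket endpoints above (not
# called anywhere); assumes the Easy Diffusion server listens on http://localhost:9000
# (the port is an assumption) and that the `requests` library is available.
def _example_bucket_roundtrip():
    import requests

    base = "http://localhost:9000/bucket"

    # upload a file into the "profile/thumbs" bucket (the multipart form field must be named "file")
    with open("thumb.png", "rb") as f:
        requests.post(f"{base}/profile/thumbs/thumb.png", files={"file": f})

    # list the bucket's file names, then fetch one file back
    names = requests.get(f"{base}/profile/thumbs/").json()
    data = requests.get(f"{base}/profile/thumbs/thumb.png").content
    return names, data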
|
@ -243,7 +243,8 @@ def get_processor_name():
     if platform.system() == "Windows":
         return platform.processor()
     elif platform.system() == "Darwin":
-        os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
+        if "/usr/sbin" not in os.environ["PATH"].split(os.pathsep):
+            os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
         command = "sysctl -n machdep.cpu.brand_string"
         return subprocess.check_output(command, shell=True).decode().strip()
     elif platform.system() == "Linux":

ui/easydiffusion/easydb/crud.py (new file, 24 lines)
@ -0,0 +1,24 @@
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from easydiffusion.easydb import models, schemas
|
||||
|
||||
|
||||
def get_bucket_by_path(db: Session, path: str):
|
||||
return db.query(models.Bucket).filter(models.Bucket.path == path).first()
|
||||
|
||||
|
||||
def create_bucket(db: Session, bucket: schemas.BucketCreate):
|
||||
db_bucket = models.Bucket(path=bucket.path)
|
||||
db.add(db_bucket)
|
||||
db.commit()
|
||||
db.refresh(db_bucket)
|
||||
return db_bucket
|
||||
|
||||
|
||||
def create_bucketfile(db: Session, bucketfile: schemas.BucketFileCreate, bucket_id: int):
|
||||
db_bucketfile = models.BucketFile(**bucketfile.dict(), bucket_id=bucket_id)
|
||||
db.merge(db_bucketfile)
|
||||
db.commit()
|
||||
db_bucketfile = db.query(models.BucketFile).filter(models.BucketFile.bucket_id==bucket_id, models.BucketFile.filename==bucketfile.filename).first()
|
||||
return db_bucketfile
|
||||
|
ui/easydiffusion/easydb/database.py (new file, 14 lines)
@ -0,0 +1,14 @@
|
||||
import os
|
||||
from easydiffusion import app
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
os.makedirs(app.BUCKET_DIR, exist_ok=True)
|
||||
SQLALCHEMY_DATABASE_URL = "sqlite:///"+os.path.join(app.BUCKET_DIR, "bucket.db")
|
||||
|
||||
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
|
||||
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
||||
|
||||
BucketBase = declarative_base()
|
ui/easydiffusion/easydb/models.py (new file, 25 lines)
@ -0,0 +1,25 @@
|
||||
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, BLOB
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from easydiffusion.easydb.database import BucketBase
|
||||
|
||||
|
||||
class Bucket(BucketBase):
|
||||
__tablename__ = "bucket"
|
||||
|
||||
id = Column(Integer, primary_key=True, index=True)
|
||||
path = Column(String, unique=True, index=True)
|
||||
|
||||
bucketfiles = relationship("BucketFile", back_populates="bucket")
|
||||
|
||||
|
||||
class BucketFile(BucketBase):
|
||||
__tablename__ = "bucketfile"
|
||||
|
||||
filename = Column(String, index=True, primary_key=True)
|
||||
bucket_id = Column(Integer, ForeignKey("bucket.id"), primary_key=True)
|
||||
|
||||
data = Column(BLOB, index=False)
|
||||
|
||||
bucket = relationship("Bucket", back_populates="bucketfiles")
|
||||
|
ui/easydiffusion/easydb/schemas.py (new file, 35 lines)
@ -0,0 +1,35 @@
|
||||
from typing import List, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class BucketFileBase(BaseModel):
|
||||
filename: str
|
||||
data: bytes
|
||||
|
||||
|
||||
class BucketFileCreate(BucketFileBase):
|
||||
pass
|
||||
|
||||
|
||||
class BucketFile(BucketFileBase):
|
||||
bucket_id: int
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
||||
|
||||
|
||||
class BucketBase(BaseModel):
|
||||
path: str
|
||||
|
||||
|
||||
class BucketCreate(BucketBase):
|
||||
pass
|
||||
|
||||
|
||||
class Bucket(BucketBase):
|
||||
id: int
|
||||
bucketfiles: List[BucketFile] = []
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
@ -8,9 +8,10 @@ from easydiffusion import app
|
||||
from easydiffusion.types import ModelsData
|
||||
from easydiffusion.utils import log
|
||||
from sdkit import Context
|
||||
from sdkit.models import load_model, scan_model, unload_model, download_model, get_model_info_from_db
|
||||
from sdkit.models import scan_model, download_model, get_model_info_from_db
|
||||
from sdkit.models.model_loader.controlnet_filters import filters as cn_filters
|
||||
from sdkit.utils import hash_file_quick
|
||||
from sdkit.models.model_loader.embeddings import get_embedding_token
|
||||
|
||||
KNOWN_MODEL_TYPES = [
|
||||
"stable-diffusion",
|
||||
@ -24,15 +25,15 @@ KNOWN_MODEL_TYPES = [
|
||||
"controlnet",
|
||||
]
|
||||
MODEL_EXTENSIONS = {
|
||||
"stable-diffusion": [".ckpt", ".safetensors"],
|
||||
"vae": [".vae.pt", ".ckpt", ".safetensors"],
|
||||
"hypernetwork": [".pt", ".safetensors"],
|
||||
"stable-diffusion": [".ckpt", ".safetensors", ".sft", ".gguf"],
|
||||
"vae": [".vae.pt", ".ckpt", ".safetensors", ".sft"],
|
||||
"hypernetwork": [".pt", ".safetensors", ".sft"],
|
||||
"gfpgan": [".pth"],
|
||||
"realesrgan": [".pth"],
|
||||
"lora": [".ckpt", ".safetensors"],
|
||||
"lora": [".ckpt", ".safetensors", ".sft", ".pt"],
|
||||
"codeformer": [".pth"],
|
||||
"embeddings": [".pt", ".bin", ".safetensors"],
|
||||
"controlnet": [".pth", ".safetensors"],
|
||||
"embeddings": [".pt", ".bin", ".safetensors", ".sft"],
|
||||
"controlnet": [".pth", ".safetensors", ".sft"],
|
||||
}
|
||||
DEFAULT_MODELS = {
|
||||
"stable-diffusion": [
|
||||
@ -50,6 +51,16 @@ DEFAULT_MODELS = {
|
||||
],
|
||||
}
|
||||
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork", "lora"]
|
||||
ALTERNATE_FOLDER_NAMES = { # for WebUI compatibility
|
||||
"stable-diffusion": "Stable-diffusion",
|
||||
"vae": "VAE",
|
||||
"hypernetwork": "hypernetworks",
|
||||
"codeformer": "Codeformer",
|
||||
"gfpgan": "GFPGAN",
|
||||
"realesrgan": "RealESRGAN",
|
||||
"lora": "Lora",
|
||||
"controlnet": "ControlNet",
|
||||
}
|
||||
|
||||
known_models = {}
|
||||
|
||||
@ -62,17 +73,15 @@ def init():
|
||||
|
||||
def load_default_models(context: Context):
|
||||
from easydiffusion import runtime
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
runtime.set_vram_optimizations(context)
|
||||
|
||||
config = app.getConfig()
|
||||
context.embeddings_path = os.path.join(app.MODELS_DIR, "embeddings")
|
||||
|
||||
# init default model paths
|
||||
for model_type in MODELS_TO_LOAD_ON_START:
|
||||
context.model_paths[model_type] = resolve_model_to_use(model_type=model_type, fail_if_not_found=False)
|
||||
try:
|
||||
load_model(
|
||||
backend.load_model(
|
||||
context,
|
||||
model_type,
|
||||
scan_model=context.model_paths[model_type] != None
|
||||
@ -94,15 +103,26 @@ def load_default_models(context: Context):
|
||||
|
||||
|
||||
def unload_all(context: Context):
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
for model_type in KNOWN_MODEL_TYPES:
|
||||
unload_model(context, model_type)
|
||||
if model_type in context.model_load_errors:
|
||||
backend.unload_model(context, model_type)
|
||||
if hasattr(context, "model_load_errors") and model_type in context.model_load_errors:
|
||||
del context.model_load_errors[model_type]
|
||||
|
||||
|
||||
def resolve_model_to_use(model_name: Union[str, list] = None, model_type: str = None, fail_if_not_found: bool = True):
|
||||
model_names = model_name if isinstance(model_name, list) else [model_name]
|
||||
model_paths = [resolve_model_to_use_single(m, model_type, fail_if_not_found) for m in model_names]
|
||||
model_paths = []
|
||||
for m in model_names:
|
||||
if model_type == "embeddings":
|
||||
try:
|
||||
resolve_model_to_use_single(m, model_type)
|
||||
except FileNotFoundError: # try with spaces
|
||||
m = m.replace("_", " ")
|
||||
|
||||
path = resolve_model_to_use_single(m, model_type, fail_if_not_found)
|
||||
model_paths.append(path)
|
||||
|
||||
return model_paths[0] if len(model_paths) == 1 else model_paths
|
||||
|
||||
@ -112,39 +132,43 @@ def resolve_model_to_use_single(model_name: str = None, model_type: str = None,
|
||||
default_models = DEFAULT_MODELS.get(model_type, [])
|
||||
config = app.getConfig()
|
||||
|
||||
model_dir = os.path.join(app.MODELS_DIR, model_type)
|
||||
if not model_name: # When None try user configured model.
|
||||
# config = getConfig()
|
||||
if "model" in config and model_type in config["model"]:
|
||||
model_name = config["model"][model_type]
|
||||
|
||||
if model_name:
|
||||
# Check models directory
|
||||
model_path = os.path.join(model_dir, model_name)
|
||||
if os.path.exists(model_path):
|
||||
return model_path
|
||||
for model_extension in model_extensions:
|
||||
if os.path.exists(model_path + model_extension):
|
||||
return model_path + model_extension
|
||||
if os.path.exists(model_name + model_extension):
|
||||
return os.path.abspath(model_name + model_extension)
|
||||
for model_dir in get_model_dirs(model_type):
|
||||
if model_name:
|
||||
# Check models directory
|
||||
model_path = os.path.join(model_dir, model_name)
|
||||
if os.path.exists(model_path):
|
||||
return model_path
|
||||
for model_extension in model_extensions:
|
||||
if os.path.exists(model_path + model_extension):
|
||||
return model_path + model_extension
|
||||
if os.path.exists(model_name + model_extension):
|
||||
return os.path.abspath(model_name + model_extension)
|
||||
|
||||
# Can't find requested model, check the default paths.
|
||||
if model_type == "stable-diffusion" and not fail_if_not_found:
|
||||
for default_model in default_models:
|
||||
default_model_path = os.path.join(model_dir, default_model["file_name"])
|
||||
if os.path.exists(default_model_path):
|
||||
if model_name is not None:
|
||||
log.warn(
|
||||
f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
|
||||
)
|
||||
return default_model_path
|
||||
# Can't find requested model, check the default paths.
|
||||
if model_type == "stable-diffusion" and not fail_if_not_found:
|
||||
for default_model in default_models:
|
||||
default_model_path = os.path.join(model_dir, default_model["file_name"])
|
||||
if os.path.exists(default_model_path):
|
||||
if model_name is not None:
|
||||
log.warn(
|
||||
f"Could not find the configured custom model {model_name}. Using the default one: {default_model_path}"
|
||||
)
|
||||
return default_model_path
|
||||
|
||||
if model_name and fail_if_not_found:
|
||||
raise Exception(f"Could not find the desired model {model_name}! Is it present in the {model_dir} folder?")
|
||||
raise FileNotFoundError(
|
||||
f"Could not find the desired model {model_name}! Is it present in the {model_dir} folder?"
|
||||
)
|
||||
|
||||
|
||||
def reload_models_if_necessary(context: Context, models_data: ModelsData, models_to_force_reload: list = []):
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
models_to_reload = {
|
||||
model_type: path
|
||||
for model_type, path in models_data.model_paths.items()
|
||||
@ -166,7 +190,7 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
|
||||
for model_type, model_path_in_req in models_to_reload.items():
|
||||
context.model_paths[model_type] = model_path_in_req
|
||||
|
||||
action_fn = unload_model if context.model_paths[model_type] is None else load_model
|
||||
action_fn = backend.unload_model if context.model_paths[model_type] is None else backend.load_model
|
||||
extra_params = models_data.model_params.get(model_type, {})
|
||||
try:
|
||||
action_fn(context, model_type, scan_model=False, **extra_params) # we've scanned them already
|
||||
@ -174,19 +198,28 @@ def reload_models_if_necessary(context: Context, models_data: ModelsData, models
|
||||
del context.model_load_errors[model_type]
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
if action_fn == load_model:
|
||||
if action_fn == backend.load_model:
|
||||
context.model_load_errors[model_type] = str(e) # storing the entire Exception can lead to memory leaks
|
||||
|
||||
|
||||
def resolve_model_paths(models_data: ModelsData):
|
||||
model_paths = models_data.model_paths
|
||||
skip_models = cn_filters + [
|
||||
"latent_upscaler",
|
||||
"nsfw_checker",
|
||||
"esrgan_4x",
|
||||
"lanczos",
|
||||
"nearest",
|
||||
"scunet",
|
||||
"swinir",
|
||||
]
|
||||
|
||||
for model_type in model_paths:
|
||||
skip_models = cn_filters + ["latent_upscaler", "nsfw_checker"]
|
||||
if model_type in skip_models: # doesn't use model paths
|
||||
continue
|
||||
if model_type == "codeformer":
|
||||
if model_type == "codeformer" and model_paths[model_type]:
|
||||
download_if_necessary("codeformer", "codeformer.pth", "codeformer-0.1.0")
|
||||
elif model_type == "controlnet":
|
||||
elif model_type == "controlnet" and model_paths[model_type]:
|
||||
model_id = model_paths[model_type]
|
||||
model_info = get_model_info_from_db(model_type=model_type, model_id=model_id)
|
||||
if model_info:
|
||||
@ -216,7 +249,8 @@ def download_default_models_if_necessary():
|
||||
|
||||
|
||||
def download_if_necessary(model_type: str, file_name: str, model_id: str, skip_if_others_exist=True):
|
||||
model_path = os.path.join(app.MODELS_DIR, model_type, file_name)
|
||||
model_dir = get_model_dirs(model_type)[0]
|
||||
model_path = os.path.join(model_dir, file_name)
|
||||
expected_hash = get_model_info_from_db(model_type=model_type, model_id=model_id)["quick_hash"]
|
||||
|
||||
other_models_exist = any_model_exists(model_type) and skip_if_others_exist
|
||||
@ -236,23 +270,42 @@ def migrate_legacy_model_location():
|
||||
file_name = model["file_name"]
|
||||
legacy_path = os.path.join(app.SD_DIR, file_name)
|
||||
if os.path.exists(legacy_path):
|
||||
shutil.move(legacy_path, os.path.join(app.MODELS_DIR, model_type, file_name))
|
||||
model_dir = get_model_dirs(model_type)[0]
|
||||
shutil.move(legacy_path, os.path.join(model_dir, file_name))
|
||||
|
||||
|
||||
def any_model_exists(model_type: str) -> bool:
|
||||
extensions = MODEL_EXTENSIONS.get(model_type, [])
|
||||
for ext in extensions:
|
||||
if any(glob(f"{app.MODELS_DIR}/{model_type}/**/*{ext}", recursive=True)):
|
||||
return True
|
||||
for model_dir in get_model_dirs(model_type):
|
||||
for ext in extensions:
|
||||
if any(glob(f"{model_dir}/**/*{ext}", recursive=True)):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def make_model_folders():
|
||||
for model_type in KNOWN_MODEL_TYPES:
|
||||
model_dir_path = os.path.join(app.MODELS_DIR, model_type)
|
||||
model_dir_path = get_model_dirs(model_type)[0]
|
||||
|
||||
os.makedirs(model_dir_path, exist_ok=True)
|
||||
try:
|
||||
os.makedirs(model_dir_path, exist_ok=True)
|
||||
except Exception as e:
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
Console().print(
|
||||
Panel(
|
||||
"\n"
|
||||
+ f"Error while creating the models directory: '{model_dir_path}'\n"
|
||||
+ f"Error: {e}\n\n"
|
||||
+ f"[white]Check the 'models_dir:' line in the file '{os.path.join(app.ROOT_DIR, 'config.yaml')}'.[/white]\n",
|
||||
title="Fatal Error starting Easy Diffusion",
|
||||
style="bold yellow on red",
|
||||
)
|
||||
)
|
||||
input("Press Enter to terminate...")
|
||||
exit(1)
|
||||
|
||||
help_file_name = f"Place your {model_type} model files here.txt"
|
||||
help_file_contents = f'Supported extensions: {" or ".join(MODEL_EXTENSIONS.get(model_type))}'
|
||||
@ -294,27 +347,32 @@ def is_malicious_model(file_path):
|
||||
|
||||
|
||||
def getModels(scan_for_malicious: bool = True):
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
backend.refresh_models()
|
||||
|
||||
models = {
|
||||
"options": {
|
||||
"stable-diffusion": [{"sd-v1-4": "SD 1.4"}],
|
||||
"stable-diffusion": [],
|
||||
"vae": [],
|
||||
"hypernetwork": [],
|
||||
"lora": [],
|
||||
"codeformer": [{"codeformer": "CodeFormer"}],
|
||||
"embeddings": [],
|
||||
"controlnet": [
|
||||
{"control_v11p_sd15_canny": "Canny (*)"},
|
||||
{"control_v11p_sd15_openpose": "OpenPose (*)"},
|
||||
{"control_v11p_sd15_normalbae": "Normal BAE (*)"},
|
||||
{"control_v11f1p_sd15_depth": "Depth (*)"},
|
||||
{"control_v11p_sd15_scribble": "Scribble"},
|
||||
{"control_v11p_sd15_softedge": "Soft Edge"},
|
||||
{"control_v11p_sd15_inpaint": "Inpaint"},
|
||||
{"control_v11p_sd15_lineart": "Line Art"},
|
||||
{"control_v11p_sd15s2_lineart_anime": "Line Art Anime"},
|
||||
{"control_v11p_sd15_mlsd": "Straight Lines"},
|
||||
{"control_v11p_sd15_seg": "Segment"},
|
||||
{"control_v11e_sd15_shuffle": "Shuffle"},
|
||||
# {"control_v11p_sd15_canny": "Canny (*)"},
|
||||
# {"control_v11p_sd15_openpose": "OpenPose (*)"},
|
||||
# {"control_v11p_sd15_normalbae": "Normal BAE (*)"},
|
||||
# {"control_v11f1p_sd15_depth": "Depth (*)"},
|
||||
# {"control_v11p_sd15_scribble": "Scribble"},
|
||||
# {"control_v11p_sd15_softedge": "Soft Edge"},
|
||||
# {"control_v11p_sd15_inpaint": "Inpaint"},
|
||||
# {"control_v11p_sd15_lineart": "Line Art"},
|
||||
# {"control_v11p_sd15s2_lineart_anime": "Line Art Anime"},
|
||||
# {"control_v11p_sd15_mlsd": "Straight Lines"},
|
||||
# {"control_v11p_sd15_seg": "Segment"},
|
||||
# {"control_v11e_sd15_shuffle": "Shuffle"},
|
||||
# {"control_v11f1e_sd15_tile": "Tile"},
|
||||
],
|
||||
},
|
||||
}
|
||||
@ -324,9 +382,14 @@ def getModels(scan_for_malicious: bool = True):
|
||||
class MaliciousModelException(Exception):
|
||||
"Raised when picklescan reports a problem with a model"
|
||||
|
||||
def scan_directory(directory, suffixes, directoriesFirst: bool = True, default_entries=[]):
|
||||
tree = list(default_entries)
|
||||
def scan_directory(directory, suffixes, directoriesFirst: bool = True, default_entries=[], nameFilter=None):
|
||||
nonlocal models_scanned
|
||||
|
||||
tree = list(default_entries)
|
||||
|
||||
if not os.path.exists(directory):
|
||||
return tree
|
||||
|
||||
for entry in sorted(
|
||||
os.scandir(directory),
|
||||
key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower()),
|
||||
@ -345,7 +408,13 @@ def getModels(scan_for_malicious: bool = True):
|
||||
raise MaliciousModelException(entry.path)
|
||||
if scan_for_malicious:
|
||||
known_models[entry.path] = mtime
|
||||
|
||||
model_id = entry.name[: -len(matching_suffix)]
|
||||
if callable(nameFilter):
|
||||
model_id = nameFilter(model_id)
|
||||
if model_id is None:
|
||||
continue
|
||||
|
||||
model_exists = False
|
||||
for m in tree: # allows default "named" models, like CodeFormer and known ControlNet models
|
||||
if (isinstance(m, str) and model_id == m) or (isinstance(m, dict) and model_id in m):
|
||||
@ -353,26 +422,30 @@ def getModels(scan_for_malicious: bool = True):
|
||||
break
|
||||
if not model_exists:
|
||||
tree.append(model_id)
|
||||
|
||||
elif entry.is_dir():
|
||||
scan = scan_directory(entry.path, suffixes, directoriesFirst=False)
|
||||
scan = scan_directory(entry.path, suffixes, directoriesFirst=False, nameFilter=nameFilter)
|
||||
|
||||
if len(scan) != 0:
|
||||
tree.append((entry.name, scan))
|
||||
return tree
|
||||
|
||||
def listModels(model_type):
|
||||
def listModels(model_type, nameFilter=None):
|
||||
nonlocal models_scanned
|
||||
|
||||
model_extensions = MODEL_EXTENSIONS.get(model_type, [])
|
||||
models_dir = os.path.join(app.MODELS_DIR, model_type)
|
||||
if not os.path.exists(models_dir):
|
||||
os.makedirs(models_dir)
|
||||
models_dirs = get_model_dirs(model_type)
|
||||
if not os.path.exists(models_dirs[0]):
|
||||
os.makedirs(models_dirs[0])
|
||||
|
||||
try:
|
||||
default_tree = models["options"].get(model_type, [])
|
||||
models["options"][model_type] = scan_directory(models_dir, model_extensions, default_entries=default_tree)
|
||||
except MaliciousModelException as e:
|
||||
models["scan-error"] = str(e)
|
||||
for model_dir in models_dirs:
|
||||
try:
|
||||
default_tree = models["options"].get(model_type, [])
|
||||
models["options"][model_type] = scan_directory(
|
||||
model_dir, model_extensions, default_entries=default_tree, nameFilter=nameFilter
|
||||
)
|
||||
except MaliciousModelException as e:
|
||||
models["scan-error"] = str(e)
|
||||
|
||||
if scan_for_malicious:
|
||||
log.info(f"[green]Scanning all model folders for models...[/]")
|
||||
@ -380,12 +453,29 @@ def getModels(scan_for_malicious: bool = True):
|
||||
listModels(model_type="stable-diffusion")
|
||||
listModels(model_type="vae")
|
||||
listModels(model_type="hypernetwork")
|
||||
listModels(model_type="gfpgan")
|
||||
listModels(model_type="gfpgan", nameFilter=lambda x: (x if "gfpgan" in x.lower() else None))
|
||||
listModels(model_type="lora")
|
||||
listModels(model_type="embeddings")
|
||||
listModels(model_type="embeddings", nameFilter=get_embedding_token)
|
||||
listModels(model_type="controlnet")
|
||||
|
||||
if scan_for_malicious and models_scanned > 0:
|
||||
log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")
|
||||
|
||||
return models
|
||||
|
||||
|
||||
def get_model_dirs(model_type: str, base_dir=None):
|
||||
"Returns the possible model directory paths for the given model type. Mainly used for WebUI compatibility"
|
||||
|
||||
if base_dir is None:
|
||||
base_dir = app.MODELS_DIR
|
||||
|
||||
dirs = [os.path.join(base_dir, model_type)]
|
||||
|
||||
if model_type in ALTERNATE_FOLDER_NAMES:
|
||||
alt_dir = ALTERNATE_FOLDER_NAMES[model_type]
|
||||
alt_dir = os.path.join(base_dir, alt_dir)
|
||||
if os.path.exists(alt_dir) and os.path.isdir(alt_dir):
|
||||
dirs.append(alt_dir)
|
||||
|
||||
return dirs
|
||||
|
@ -3,8 +3,6 @@ import os
|
||||
import platform
|
||||
from importlib.metadata import version as pkg_version
|
||||
|
||||
from sdkit.utils import log
|
||||
|
||||
from easydiffusion import app
|
||||
|
||||
# future home of scripts/check_modules.py
|
||||
@ -12,9 +10,9 @@ from easydiffusion import app
|
||||
manifest = {
|
||||
"tensorrt": {
|
||||
"install": [
|
||||
"nvidia-cudnn --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com",
|
||||
"tensorrt-libs --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com",
|
||||
"tensorrt --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com",
|
||||
"wheel",
|
||||
"nvidia-cudnn-cu11==8.9.4.25",
|
||||
"tensorrt==9.0.0.post11.dev1 --pre --extra-index-url=https://pypi.nvidia.com --trusted-host pypi.nvidia.com",
|
||||
],
|
||||
"uninstall": ["tensorrt"],
|
||||
# TODO also uninstall tensorrt-libs and nvidia-cudnn, but do it upon restarting (avoid 'file in use' error)
|
||||
@ -25,7 +23,7 @@ installing = []
|
||||
# remove this once TRT releases on pypi
|
||||
if platform.system() == "Windows":
|
||||
trt_dir = os.path.join(app.ROOT_DIR, "tensorrt")
|
||||
if os.path.exists(trt_dir):
|
||||
if os.path.exists(trt_dir) and os.path.isdir(trt_dir) and len(os.listdir(trt_dir)) > 0:
|
||||
files = os.listdir(trt_dir)
|
||||
|
||||
packages = manifest["tensorrt"]["install"]
|
||||
@ -50,6 +48,8 @@ def is_installed(module_name) -> bool:
|
||||
|
||||
|
||||
def install(module_name):
|
||||
from easydiffusion.utils import log
|
||||
|
||||
if is_installed(module_name):
|
||||
log.info(f"{module_name} has already been installed!")
|
||||
return
|
||||
@ -61,6 +61,10 @@ def install(module_name):
|
||||
raise RuntimeError(f"Can't install unknown package: {module_name}!")
|
||||
|
||||
commands = manifest[module_name]["install"]
|
||||
if module_name == "tensorrt":
|
||||
commands += [
|
||||
"protobuf==3.20.3 polygraphy==0.47.1 onnx==1.14.0 --extra-index-url=https://pypi.ngc.nvidia.com --trusted-host pypi.ngc.nvidia.com"
|
||||
]
|
||||
commands = [f"python -m pip install --upgrade {cmd}" for cmd in commands]
|
||||
|
||||
installing.append(module_name)
|
||||
@ -75,6 +79,8 @@ def install(module_name):
|
||||
|
||||
|
||||
def uninstall(module_name):
|
||||
from easydiffusion.utils import log
|
||||
|
||||
if not is_installed(module_name):
|
||||
log.info(f"{module_name} hasn't been installed!")
|
||||
return
|
||||
|
@ -1,4 +1,5 @@
|
||||
"""
|
||||
(OUTDATED DOC)
|
||||
A runtime that runs on a specific device (in a thread).
|
||||
|
||||
It can run various tasks like image generation, image filtering, model merge etc by using that thread-local context.
|
||||
@ -6,44 +7,35 @@ It can run various tasks like image generation, image filtering, model merge etc
|
||||
This creates an `sdkit.Context` that's bound to the device specified while calling the `init()` function.
|
||||
"""
|
||||
|
||||
from easydiffusion import device_manager
|
||||
from easydiffusion.utils import log
|
||||
from sdkit import Context
|
||||
from sdkit.utils import get_device_usage
|
||||
|
||||
context = Context() # thread-local
|
||||
"""
|
||||
runtime data (bound locally to this thread), for e.g. device, references to loaded models, optimization flags etc
|
||||
"""
|
||||
context = None
|
||||
|
||||
|
||||
def init(device):
|
||||
"""
|
||||
Initializes the fields that will be bound to this runtime's context, and sets the current torch device
|
||||
"""
|
||||
|
||||
global context
|
||||
|
||||
from easydiffusion import device_manager
|
||||
from easydiffusion.backend_manager import backend
|
||||
from easydiffusion.app import getConfig
|
||||
|
||||
context = backend.create_context()
|
||||
|
||||
context.stop_processing = False
|
||||
context.temp_images = {}
|
||||
context.partial_x_samples = None
|
||||
context.model_load_errors = {}
|
||||
context.enable_codeformer = True
|
||||
|
||||
from easydiffusion import app
|
||||
|
||||
app_config = app.getConfig()
|
||||
context.test_diffusers = (
|
||||
app_config.get("test_diffusers", False) and app_config.get("update_branch", "main") != "main"
|
||||
)
|
||||
|
||||
log.info("Device usage during initialization:")
|
||||
get_device_usage(device, log_info=True, process_usage_only=False)
|
||||
|
||||
device_manager.device_init(context, device)
|
||||
|
||||
|
||||
def set_vram_optimizations(context: Context):
|
||||
from easydiffusion import app
|
||||
def set_vram_optimizations(context):
|
||||
from easydiffusion.app import getConfig
|
||||
|
||||
config = app.getConfig()
|
||||
config = getConfig()
|
||||
vram_usage_level = config.get("vram_usage_level", "balanced")
|
||||
|
||||
if vram_usage_level != context.vram_usage_level:
|
||||
|
@ -2,6 +2,7 @@
|
||||
Notes:
|
||||
    async endpoints always run on the main thread. Non-async endpoints run on the thread pool.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import mimetypes
|
||||
import os
|
||||
@ -15,9 +16,12 @@ from easydiffusion.types import (
|
||||
FilterImageRequest,
|
||||
MergeRequest,
|
||||
TaskData,
|
||||
RenderTaskData,
|
||||
ModelsData,
|
||||
OutputFormatData,
|
||||
SaveToDiskData,
|
||||
convert_legacy_render_req_to_new,
|
||||
convert_legacy_controlnet_filter_name,
|
||||
)
|
||||
from easydiffusion.utils import log
|
||||
from fastapi import FastAPI, HTTPException
|
||||
@ -36,6 +40,7 @@ NOCACHE_HEADERS = {
|
||||
"Pragma": "no-cache",
|
||||
"Expires": "0",
|
||||
}
|
||||
PROTECTED_CONFIG_KEYS = ("block_nsfw",) # can't change these via the HTTP API
|
||||
|
||||
|
||||
class NoCacheStaticFiles(StaticFiles):
|
||||
@ -63,7 +68,10 @@ class SetAppConfigRequest(BaseModel, extra=Extra.allow):
|
||||
ui_open_browser_on_start: bool = None
|
||||
listen_to_network: bool = None
|
||||
listen_port: int = None
|
||||
test_diffusers: bool = False
|
||||
use_v3_engine: bool = True
|
||||
backend: str = "ed_diffusers"
|
||||
models_dir: str = None
|
||||
vram_usage_level: str = "balanced"
|
||||
|
||||
|
||||
def init():
|
||||
@ -139,6 +147,10 @@ def init():
|
||||
def modify_package(package_name: str, req: dict):
|
||||
return modify_package_internal(package_name, req)
|
||||
|
||||
@server_api.get("/sha256/{obj_path:path}")
|
||||
def get_sha256(obj_path: str):
|
||||
return get_sha256_internal(obj_path)
|
||||
|
||||
@server_api.get("/")
|
||||
def read_root():
|
||||
return FileResponse(os.path.join(app.SD_UI_DIR, "index.html"), headers=NOCACHE_HEADERS)
|
||||
@ -147,6 +159,12 @@ def init():
|
||||
def shutdown_event(): # Signal render thread to close on shutdown
|
||||
task_manager.current_state_error = SystemExit("Application shutting down.")
|
||||
|
||||
@server_api.on_event("startup")
|
||||
def start_event():
|
||||
from easydiffusion.app import open_browser
|
||||
|
||||
open_browser()
|
||||
|
||||
|
||||
# API implementations
|
||||
def set_app_config_internal(req: SetAppConfigRequest):
|
||||
@ -168,10 +186,13 @@ def set_app_config_internal(req: SetAppConfigRequest):
|
||||
config["net"] = {}
|
||||
config["net"]["listen_port"] = int(req.listen_port)
|
||||
|
||||
config["test_diffusers"] = req.test_diffusers
|
||||
config["use_v3_engine"] = req.backend == "ed_diffusers"
|
||||
config["backend"] = req.backend
|
||||
config["models_dir"] = req.models_dir
|
||||
config["vram_usage_level"] = req.vram_usage_level
|
||||
|
||||
for property, property_value in req.dict().items():
|
||||
if property_value is not None and property not in req.__fields__:
|
||||
if property_value is not None and property not in req.__fields__ and property not in PROTECTED_CONFIG_KEYS:
|
||||
config[property] = property_value
|
||||
|
||||
try:
|
||||
@ -200,8 +221,15 @@ def read_web_data_internal(key: str = None, **kwargs):
|
||||
if not key: # /get without parameters, stable-diffusion easter egg.
|
||||
raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot
|
||||
elif key == "app_config":
|
||||
return JSONResponse(app.getConfig(), headers=NOCACHE_HEADERS)
|
||||
config = app.getConfig()
|
||||
|
||||
if "models_dir" not in config:
|
||||
config["models_dir"] = app.MODELS_DIR
|
||||
|
||||
return JSONResponse(config, headers=NOCACHE_HEADERS)
|
||||
elif key == "system_info":
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
config = app.getConfig()
|
||||
|
||||
output_dir = config.get("force_save_path", os.path.join(os.path.expanduser("~"), app.OUTPUT_DIRNAME))
|
||||
@ -211,6 +239,8 @@ def read_web_data_internal(key: str = None, **kwargs):
|
||||
"hosts": app.getIPConfig(),
|
||||
"default_output_dir": output_dir,
|
||||
"enforce_output_dir": ("force_save_path" in config),
|
||||
"enforce_output_metadata": ("force_save_metadata" in config),
|
||||
"backend_url": backend.get_url(),
|
||||
}
|
||||
system_info["devices"]["config"] = config.get("render_devices", "auto")
|
||||
return JSONResponse(system_info, headers=NOCACHE_HEADERS)
|
||||
@ -257,14 +287,15 @@ def render_internal(req: dict):
|
||||
|
||||
# separate out the request data into rendering and task-specific data
|
||||
render_req: GenerateImageRequest = GenerateImageRequest.parse_obj(req)
|
||||
task_data: TaskData = TaskData.parse_obj(req)
|
||||
task_data: RenderTaskData = RenderTaskData.parse_obj(req)
|
||||
models_data: ModelsData = ModelsData.parse_obj(req)
|
||||
output_format: OutputFormatData = OutputFormatData.parse_obj(req)
|
||||
save_data: SaveToDiskData = SaveToDiskData.parse_obj(req)
|
||||
|
||||
# Overwrite user specified save path
|
||||
config = app.getConfig()
|
||||
if "force_save_path" in config:
|
||||
task_data.save_to_disk_path = config["force_save_path"]
|
||||
save_data.save_to_disk_path = config["force_save_path"]
|
||||
|
||||
render_req.init_image_mask = req.get("mask") # hack: will rename this in the HTTP API in a future revision
|
||||
|
||||
@ -276,7 +307,7 @@ def render_internal(req: dict):
|
||||
)
|
||||
|
||||
# enqueue the task
|
||||
task = RenderTask(render_req, task_data, models_data, output_format)
|
||||
task = RenderTask(render_req, task_data, models_data, output_format, save_data)
|
||||
return enqueue_task(task)
|
||||
except HTTPException as e:
|
||||
raise e
|
||||
@ -287,13 +318,23 @@ def render_internal(req: dict):
|
||||
|
||||
def filter_internal(req: dict):
|
||||
try:
|
||||
session_id = req.get("session_id", "session")
|
||||
filter_req: FilterImageRequest = FilterImageRequest.parse_obj(req)
|
||||
task_data: TaskData = TaskData.parse_obj(req)
|
||||
models_data: ModelsData = ModelsData.parse_obj(req)
|
||||
output_format: OutputFormatData = OutputFormatData.parse_obj(req)
|
||||
save_data: SaveToDiskData = SaveToDiskData.parse_obj(req)
|
||||
|
||||
filter_req.filter = convert_legacy_controlnet_filter_name(filter_req.filter)
|
||||
|
||||
for model_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
|
||||
if models_data.model_paths.get(model_name):
|
||||
if model_name not in filter_req.filter_params:
|
||||
filter_req.filter_params[model_name] = {}
|
||||
|
||||
filter_req.filter_params[model_name]["upscaler"] = models_data.model_paths[model_name]
|
||||
|
||||
# enqueue the task
|
||||
task = FilterTask(filter_req, session_id, models_data, output_format)
|
||||
task = FilterTask(filter_req, task_data, models_data, output_format, save_data)
|
||||
return enqueue_task(task)
|
||||
except HTTPException as e:
|
||||
raise e
|
||||
@ -325,15 +366,13 @@ def model_merge_internal(req: dict):
|
||||
|
||||
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
|
||||
|
||||
sd_model_dir = model_manager.get_model_dir("stable-diffusion")[0]
|
||||
|
||||
merge_models(
|
||||
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
|
||||
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
|
||||
mergeReq.ratio,
|
||||
os.path.join(
|
||||
app.MODELS_DIR,
|
||||
"stable-diffusion",
|
||||
filename_regex.sub("_", mergeReq.out_path),
|
||||
),
|
||||
os.path.join(sd_model_dir, filename_regex.sub("_", mergeReq.out_path)),
|
||||
mergeReq.use_fp16,
|
||||
)
|
||||
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
|
||||
@ -451,3 +490,25 @@ def modify_package_internal(package_name: str, req: dict):
|
||||
log.error(str(e))
|
||||
log.error(traceback.format_exc())
|
||||
return HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
def get_sha256_internal(obj_path):
|
||||
from easydiffusion.utils import sha256sum
|
||||
|
||||
path = obj_path.split("/")
|
||||
type = path.pop(0)
|
||||
|
||||
try:
|
||||
model_path = model_manager.resolve_model_to_use("/".join(path), type)
|
||||
except Exception as e:
|
||||
log.error(str(e))
|
||||
log.error(traceback.format_exc())
|
||||
|
||||
return HTTPException(status_code=404)
|
||||
try:
|
||||
digest = sha256sum(model_path)
|
||||
return {"digest": digest}
|
||||
except Exception as e:
|
||||
log.error(str(e))
|
||||
log.error(traceback.format_exc())
|
||||
return HTTPException(status_code=500, detail=str(e))
|
||||
|
@@ -4,6 +4,7 @@ Notes:
    Use weak_thread_data to store all other data using weak keys.
    This will allow for garbage collection after the thread dies.
"""

import json
import traceback

@@ -19,7 +20,6 @@ import torch
from easydiffusion import device_manager
from easydiffusion.tasks import Task
from easydiffusion.utils import log
from sdkit.utils import gc

THREAD_NAME_PREFIX = ""
ERR_LOCK_FAILED = " failed to acquire lock within timeout."

@@ -233,6 +233,8 @@ def thread_render(device):
    global current_state, current_state_error

    from easydiffusion import model_manager, runtime
    from easydiffusion.backend_manager import backend
    from requests import ConnectionError

    try:
        runtime.init(device)

@@ -244,8 +246,17 @@ def thread_render(device):
        }

        current_state = ServerStates.LoadingModel
        model_manager.load_default_models(runtime.context)

        while True:
            try:
                if backend.ping(timeout=1):
                    break

                time.sleep(1)
            except (TimeoutError, ConnectionError):
                time.sleep(1)

        model_manager.load_default_models(runtime.context)
        current_state = ServerStates.Online
    except Exception as e:
        log.error(traceback.format_exc())

@@ -291,7 +302,6 @@ def thread_render(device):
                task.buffer_queue.put(json.dumps(task.response))
                log.error(traceback.format_exc())
            finally:
                gc(runtime.context)
                task.lock.release()

            keep_task_alive(task)
@ -1,12 +1,24 @@
|
||||
import os
|
||||
import json
|
||||
import pprint
|
||||
import time
|
||||
|
||||
from sdkit.filter import apply_filters
|
||||
from sdkit.models import load_model
|
||||
from sdkit.utils import img_to_base64_str, log
|
||||
from numpy import base_repr
|
||||
|
||||
from sdkit.utils import img_to_base64_str, log, save_images, base64_str_to_img
|
||||
|
||||
from easydiffusion import model_manager, runtime
|
||||
from easydiffusion.types import FilterImageRequest, FilterImageResponse, ModelsData, OutputFormatData
|
||||
from easydiffusion.types import (
|
||||
FilterImageRequest,
|
||||
FilterImageResponse,
|
||||
ModelsData,
|
||||
OutputFormatData,
|
||||
SaveToDiskData,
|
||||
TaskData,
|
||||
GenerateImageRequest,
|
||||
)
|
||||
from easydiffusion.utils import filter_nsfw
|
||||
from easydiffusion.utils.save_utils import format_folder_name
|
||||
|
||||
from .task import Task
|
||||
|
||||
@ -15,17 +27,28 @@ class FilterTask(Task):
|
||||
"For applying filters to input images"
|
||||
|
||||
def __init__(
|
||||
self, req: FilterImageRequest, session_id: str, models_data: ModelsData, output_format: OutputFormatData
|
||||
self,
|
||||
req: FilterImageRequest,
|
||||
task_data: TaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
super().__init__(session_id)
|
||||
super().__init__(task_data.session_id)
|
||||
|
||||
task_data.request_id = self.id
|
||||
|
||||
self.request = req
|
||||
self.task_data = task_data
|
||||
self.models_data = models_data
|
||||
self.output_format = output_format
|
||||
self.save_data = save_data
|
||||
|
||||
# convert to multi-filter format, if necessary
|
||||
if isinstance(req.filter, str):
|
||||
req.filter_params = {req.filter: req.filter_params}
|
||||
if req.filter not in req.filter_params:
|
||||
req.filter_params = {req.filter: req.filter_params}
|
||||
|
||||
req.filter = [req.filter]
|
||||
|
||||
if not isinstance(req.image, list):
|
||||
@ -34,77 +57,73 @@ class FilterTask(Task):
|
||||
def run(self):
|
||||
"Runs the image filtering task on the assigned thread"
|
||||
|
||||
from easydiffusion import app
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
context = runtime.context
|
||||
|
||||
model_manager.resolve_model_paths(self.models_data)
|
||||
model_manager.reload_models_if_necessary(context, self.models_data)
|
||||
model_manager.fail_if_models_did_not_load(context)
|
||||
|
||||
print_task_info(self.request, self.models_data, self.output_format)
|
||||
print_task_info(self.request, self.models_data, self.output_format, self.save_data)
|
||||
|
||||
images = filter_images(context, self.request.image, self.request.filter, self.request.filter_params)
|
||||
has_nsfw_filter = "nsfw_filter" in self.request.filter
|
||||
|
||||
output_format = self.output_format
|
||||
images = [
|
||||
img_to_base64_str(
|
||||
img, output_format.output_format, output_format.output_quality, output_format.output_lossless
|
||||
|
||||
backend.set_options(
|
||||
context,
|
||||
output_format=output_format.output_format,
|
||||
output_quality=output_format.output_quality,
|
||||
output_lossless=output_format.output_lossless,
|
||||
)
|
||||
|
||||
images = backend.filter_images(
|
||||
context, self.request.image, self.request.filter, self.request.filter_params, input_type="base64"
|
||||
)
|
||||
|
||||
if has_nsfw_filter:
|
||||
images = filter_nsfw(images)
|
||||
|
||||
if self.save_data.save_to_disk_path is not None:
|
||||
app_config = app.getConfig()
|
||||
folder_format = app_config.get("folder_format", "$id")
|
||||
|
||||
dummy_req = GenerateImageRequest()
|
||||
img_id = base_repr(int(time.time() * 10000), 36)[-7:] # Base 36 conversion, 0-9, A-Z
|
||||
|
||||
save_dir_path = os.path.join(
|
||||
self.save_data.save_to_disk_path, format_folder_name(folder_format, dummy_req, self.task_data)
|
||||
)
|
||||
images_pil = [base64_str_to_img(img) for img in images]
|
||||
save_images(
|
||||
images_pil,
|
||||
save_dir_path,
|
||||
file_name=img_id,
|
||||
output_format=output_format.output_format,
|
||||
output_quality=output_format.output_quality,
|
||||
output_lossless=output_format.output_lossless,
|
||||
)
|
||||
for img in images
|
||||
]
|
||||
|
||||
res = FilterImageResponse(self.request, self.models_data, images=images)
|
||||
res = res.json()
|
||||
self.buffer_queue.put(json.dumps(res))
|
||||
|
||||
log.info("Filter task completed")
|
||||
|
||||
self.response = res
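A small sketch of the file-id scheme used when saving filtered images to disk: the current time (in 0.1 ms units) rendered in base 36, keeping the last 7 characters. The printed value is illustrative.

import time
from numpy import base_repr

img_id = base_repr(int(time.time() * 10000), 36)[-7:]  # e.g. "1A2B3CD" (illustrative)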
|
||||
|
||||
|
||||
def filter_images(context, images, filters, filter_params={}):
|
||||
filters = filters if isinstance(filters, list) else [filters]
|
||||
|
||||
for filter_name in filters:
|
||||
params = filter_params.get(filter_name, {})
|
||||
|
||||
previous_state = before_filter(context, filter_name, params)
|
||||
|
||||
try:
|
||||
images = apply_filters(context, filter_name, images, **params)
|
||||
finally:
|
||||
after_filter(context, filter_name, params, previous_state)
|
||||
|
||||
return images
|
||||
|
||||
|
||||
def before_filter(context, filter_name, filter_params):
|
||||
if filter_name == "codeformer":
|
||||
from easydiffusion.model_manager import DEFAULT_MODELS, resolve_model_to_use
|
||||
|
||||
default_realesrgan = DEFAULT_MODELS["realesrgan"][0]["file_name"]
|
||||
prev_realesrgan_path = None
|
||||
|
||||
upscale_faces = filter_params.get("upscale_faces", False)
|
||||
if upscale_faces and default_realesrgan not in context.model_paths["realesrgan"]:
|
||||
prev_realesrgan_path = context.model_paths.get("realesrgan")
|
||||
context.model_paths["realesrgan"] = resolve_model_to_use(default_realesrgan, "realesrgan")
|
||||
load_model(context, "realesrgan")
|
||||
|
||||
return prev_realesrgan_path
|
||||
|
||||
|
||||
def after_filter(context, filter_name, filter_params, previous_state):
|
||||
if filter_name == "codeformer":
|
||||
prev_realesrgan_path = previous_state
|
||||
if prev_realesrgan_path:
|
||||
context.model_paths["realesrgan"] = prev_realesrgan_path
|
||||
load_model(context, "realesrgan")
|
||||
|
||||
|
||||
def print_task_info(req: FilterImageRequest, models_data: ModelsData, output_format: OutputFormatData):
|
||||
def print_task_info(
|
||||
req: FilterImageRequest, models_data: ModelsData, output_format: OutputFormatData, save_data: SaveToDiskData
|
||||
):
|
||||
req_str = pprint.pformat({"filter": req.filter, "filter_params": req.filter_params}).replace("[", "\[")
|
||||
models_data = pprint.pformat(models_data.dict()).replace("[", "\[")
|
||||
output_format = pprint.pformat(output_format.dict()).replace("[", "\[")
|
||||
save_data = pprint.pformat(save_data.dict()).replace("[", "\[")
|
||||
|
||||
log.info(f"request: {req_str}")
|
||||
log.info(f"models data: {models_data}")
|
||||
log.info(f"output format: {output_format}")
|
||||
log.info(f"save data: {save_data}")
|
||||
|
@ -2,47 +2,59 @@ import json
|
||||
import pprint
|
||||
import queue
|
||||
import time
|
||||
from PIL import Image
|
||||
|
||||
from easydiffusion import model_manager, runtime
|
||||
from easydiffusion.types import GenerateImageRequest, ModelsData, OutputFormatData
|
||||
from easydiffusion.types import GenerateImageRequest, ModelsData, OutputFormatData, SaveToDiskData
|
||||
from easydiffusion.types import Image as ResponseImage
|
||||
from easydiffusion.types import GenerateImageResponse, TaskData, UserInitiatedStop
|
||||
from easydiffusion.utils import get_printable_request, log, save_images_to_disk
|
||||
from sdkit.generate import generate_images
|
||||
from easydiffusion.types import GenerateImageResponse, RenderTaskData
|
||||
from easydiffusion.utils import get_printable_request, log, save_images_to_disk, filter_nsfw
|
||||
from sdkit.utils import (
|
||||
diffusers_latent_samples_to_images,
|
||||
gc,
|
||||
img_to_base64_str,
|
||||
base64_str_to_img,
|
||||
img_to_buffer,
|
||||
latent_samples_to_images,
|
||||
resize_img,
|
||||
get_image,
|
||||
log,
|
||||
)
|
||||
|
||||
from .task import Task
|
||||
from .filter_images import filter_images
|
||||
|
||||
|
||||
class RenderTask(Task):
|
||||
"For image generation"
|
||||
|
||||
def __init__(
|
||||
self, req: GenerateImageRequest, task_data: TaskData, models_data: ModelsData, output_format: OutputFormatData
|
||||
self,
|
||||
req: GenerateImageRequest,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
super().__init__(task_data.session_id)
|
||||
|
||||
task_data.request_id = self.id
|
||||
self.render_request: GenerateImageRequest = req # Initial Request
|
||||
self.task_data: TaskData = task_data
|
||||
|
||||
self.render_request = req # Initial Request
|
||||
self.task_data = task_data
|
||||
self.models_data = models_data
|
||||
self.output_format = output_format
|
||||
self.save_data = save_data
|
||||
|
||||
self.temp_images: list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
|
||||
|
||||
def run(self):
|
||||
"Runs the image generation task on the assigned thread"
|
||||
|
||||
from easydiffusion import task_manager
|
||||
from easydiffusion import task_manager, app
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
context = runtime.context
|
||||
config = app.getConfig()
|
||||
|
||||
if config.get("block_nsfw", False): # override if set on the server
|
||||
self.task_data.block_nsfw = True
|
||||
|
||||
def step_callback():
|
||||
task_manager.keep_task_alive(self)
|
||||
@ -51,7 +63,7 @@ class RenderTask(Task):
|
||||
if isinstance(task_manager.current_state_error, (SystemExit, StopAsyncIteration)) or isinstance(
|
||||
self.error, StopAsyncIteration
|
||||
):
|
||||
context.stop_processing = True
|
||||
backend.stop_rendering(context)
|
||||
if isinstance(task_manager.current_state_error, StopAsyncIteration):
|
||||
self.error = task_manager.current_state_error
|
||||
task_manager.current_state_error = None
|
||||
@ -61,11 +73,7 @@ class RenderTask(Task):
|
||||
model_manager.resolve_model_paths(self.models_data)
|
||||
|
||||
models_to_force_reload = []
|
||||
if (
|
||||
runtime.set_vram_optimizations(context)
|
||||
or self.has_param_changed(context, "clip_skip")
|
||||
or self.trt_needs_reload(context)
|
||||
):
|
||||
if runtime.set_vram_optimizations(context) or self.has_param_changed(context, "clip_skip"):
|
||||
models_to_force_reload.append("stable-diffusion")
|
||||
|
||||
model_manager.reload_models_if_necessary(context, self.models_data, models_to_force_reload)
|
||||
@ -78,13 +86,15 @@ class RenderTask(Task):
|
||||
self.task_data,
|
||||
self.models_data,
|
||||
self.output_format,
|
||||
self.save_data,
|
||||
self.buffer_queue,
|
||||
self.temp_images,
|
||||
step_callback,
|
||||
self,
|
||||
)
|
||||
|
||||
def has_param_changed(self, context, param_name):
|
||||
if not context.test_diffusers:
|
||||
if not getattr(context, "test_diffusers", False):
|
||||
return False
|
||||
if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
|
||||
return True
|
||||
@ -93,49 +103,36 @@ class RenderTask(Task):
|
||||
new_val = self.models_data.model_params.get("stable-diffusion", {}).get(param_name, False)
|
||||
return model["params"].get(param_name) != new_val
|
||||
|
||||
def trt_needs_reload(self, context):
|
||||
if not context.test_diffusers:
|
||||
return False
|
||||
if "stable-diffusion" not in context.models or "params" not in context.models["stable-diffusion"]:
|
||||
return True
|
||||
|
||||
model = context.models["stable-diffusion"]
|
||||
|
||||
# curr_convert_to_trt = model["params"].get("convert_to_tensorrt")
|
||||
new_convert_to_trt = self.models_data.model_params.get("stable-diffusion", {}).get("convert_to_tensorrt", False)
|
||||
|
||||
pipe = model["default"]
|
||||
is_trt_loaded = hasattr(pipe.unet, "_allocate_trt_buffers") or hasattr(
|
||||
pipe.unet, "_allocate_trt_buffers_backup"
|
||||
)
|
||||
if new_convert_to_trt and not is_trt_loaded:
|
||||
return True
|
||||
|
||||
curr_build_config = model["params"].get("trt_build_config")
|
||||
new_build_config = self.models_data.model_params.get("stable-diffusion", {}).get("trt_build_config", {})
|
||||
|
||||
return new_convert_to_trt and curr_build_config != new_build_config
|
||||
|
||||
|
||||
def make_images(
|
||||
context,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
data_queue: queue.Queue,
|
||||
task_temp_images: list,
|
||||
step_callback,
|
||||
task,
|
||||
):
|
||||
context.stop_processing = False
|
||||
print_task_info(req, task_data, models_data, output_format)
|
||||
print_task_info(req, task_data, models_data, output_format, save_data)
|
||||
|
||||
images, seeds = make_images_internal(
|
||||
context, req, task_data, models_data, output_format, data_queue, task_temp_images, step_callback
|
||||
context,
|
||||
req,
|
||||
task_data,
|
||||
models_data,
|
||||
output_format,
|
||||
save_data,
|
||||
data_queue,
|
||||
task_temp_images,
|
||||
step_callback,
|
||||
task,
|
||||
)
|
||||
|
||||
res = GenerateImageResponse(
|
||||
req, task_data, models_data, output_format, images=construct_response(images, seeds, output_format)
|
||||
req, task_data, models_data, output_format, save_data, images=construct_response(images, seeds, output_format)
|
||||
)
|
||||
res = res.json()
|
||||
data_queue.put(json.dumps(res))
|
||||
@ -145,48 +142,72 @@ def make_images(
|
||||
|
||||
|
||||
def print_task_info(
|
||||
req: GenerateImageRequest, task_data: TaskData, models_data: ModelsData, output_format: OutputFormatData
|
||||
req: GenerateImageRequest,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
req_str = pprint.pformat(get_printable_request(req, task_data, output_format)).replace("[", "\[")
|
||||
req_str = pprint.pformat(get_printable_request(req, task_data, models_data, output_format, save_data)).replace(
|
||||
"[", "\["
|
||||
)
|
||||
task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
|
||||
models_data = pprint.pformat(models_data.dict()).replace("[", "\[")
|
||||
output_format = pprint.pformat(output_format.dict()).replace("[", "\[")
|
||||
save_data = pprint.pformat(save_data.dict()).replace("[", "\[")
|
||||
|
||||
log.info(f"request: {req_str}")
|
||||
log.info(f"task data: {task_str}")
|
||||
# log.info(f"models data: {models_data}")
|
||||
log.info(f"models data: {models_data}")
|
||||
log.info(f"output format: {output_format}")
|
||||
log.info(f"save data: {save_data}")
|
||||
|
||||
|
||||
def make_images_internal(
|
||||
context,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
data_queue: queue.Queue,
|
||||
task_temp_images: list,
|
||||
step_callback,
|
||||
task,
|
||||
):
|
||||
images, user_stopped = generate_images_internal(
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
# prep the nsfw_filter
|
||||
if task_data.block_nsfw:
|
||||
filter_nsfw([Image.new("RGB", (1, 1))]) # hack - ensures that the model is available
|
||||
|
||||
images = generate_images_internal(
|
||||
context,
|
||||
req,
|
||||
task_data,
|
||||
models_data,
|
||||
output_format,
|
||||
data_queue,
|
||||
task_temp_images,
|
||||
step_callback,
|
||||
task_data.stream_image_progress,
|
||||
task_data.stream_image_progress_interval,
|
||||
)
|
||||
|
||||
gc(context)
|
||||
user_stopped = isinstance(task.error, StopAsyncIteration)
|
||||
|
||||
filters, filter_params = task_data.filters, task_data.filter_params
|
||||
filtered_images = filter_images(context, images, filters, filter_params) if not user_stopped else images
|
||||
if len(filters) > 0 and not user_stopped:
|
||||
filtered_images = backend.filter_images(context, images, filters, filter_params, input_type="base64")
|
||||
else:
|
||||
filtered_images = images
|
||||
|
||||
if task_data.save_to_disk_path is not None:
|
||||
save_images_to_disk(images, filtered_images, req, task_data, output_format)
|
||||
if task_data.block_nsfw:
|
||||
filtered_images = filter_nsfw(filtered_images)
|
||||
|
||||
if save_data.save_to_disk_path is not None:
|
||||
images_pil = [base64_str_to_img(img) for img in images]
|
||||
filtered_images_pil = [base64_str_to_img(img) for img in filtered_images]
|
||||
save_images_to_disk(images_pil, filtered_images_pil, req, task_data, models_data, output_format, save_data)
|
||||
|
||||
seeds = [*range(req.seed, req.seed + len(images))]
|
||||
if task_data.show_only_filtered_image or filtered_images is images:
|
||||
@ -198,143 +219,92 @@ def make_images_internal(
|
||||
def generate_images_internal(
|
||||
context,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
data_queue: queue.Queue,
|
||||
task_temp_images: list,
|
||||
step_callback,
|
||||
stream_image_progress: bool,
|
||||
stream_image_progress_interval: int,
|
||||
):
|
||||
context.temp_images.clear()
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
callback = make_step_callback(
|
||||
callback = make_step_callback(context, req, task_data, data_queue, task_temp_images, step_callback)
|
||||
|
||||
req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8
|
||||
|
||||
if req.control_image and task_data.control_filter_to_apply:
|
||||
req.controlnet_filter = task_data.control_filter_to_apply
|
||||
|
||||
if req.init_image is not None and int(req.num_inference_steps * req.prompt_strength) == 0:
|
||||
req.prompt_strength = 1 / req.num_inference_steps if req.num_inference_steps > 0 else 1
|
||||
|
||||
backend.set_options(
|
||||
context,
|
||||
req,
|
||||
task_data,
|
||||
data_queue,
|
||||
task_temp_images,
|
||||
step_callback,
|
||||
stream_image_progress,
|
||||
stream_image_progress_interval,
|
||||
output_format=output_format.output_format,
|
||||
output_quality=output_format.output_quality,
|
||||
output_lossless=output_format.output_lossless,
|
||||
vae_tiling=task_data.enable_vae_tiling,
|
||||
stream_image_progress=stream_image_progress,
|
||||
stream_image_progress_interval=stream_image_progress_interval,
|
||||
clip_skip=2 if task_data.clip_skip else 1,
|
||||
)
|
||||
|
||||
try:
|
||||
if req.init_image is not None and not context.test_diffusers:
|
||||
req.sampler_name = "ddim"
|
||||
images = backend.generate_images(context, callback=callback, output_type="base64", **req.dict())
|
||||
|
||||
req.width, req.height = map(lambda x: x - x % 8, (req.width, req.height)) # clamp to 8
|
||||
|
||||
if req.control_image and task_data.control_filter_to_apply:
|
||||
req.control_image = filter_images(context, req.control_image, task_data.control_filter_to_apply)[0]
|
||||
|
||||
if context.test_diffusers:
|
||||
pipe = context.models["stable-diffusion"]["default"]
|
||||
if hasattr(pipe.unet, "_allocate_trt_buffers_backup"):
|
||||
setattr(pipe.unet, "_allocate_trt_buffers", pipe.unet._allocate_trt_buffers_backup)
|
||||
delattr(pipe.unet, "_allocate_trt_buffers_backup")
|
||||
|
||||
if hasattr(pipe.unet, "_allocate_trt_buffers"):
|
||||
convert_to_trt = models_data.model_params["stable-diffusion"].get("convert_to_tensorrt", False)
|
||||
if convert_to_trt:
|
||||
pipe.unet.forward = pipe.unet._trt_forward
|
||||
# pipe.vae.decoder.forward = pipe.vae.decoder._trt_forward
|
||||
log.info(f"Setting unet.forward to TensorRT")
|
||||
else:
|
||||
log.info(f"Not using TensorRT for unet.forward")
|
||||
pipe.unet.forward = pipe.unet._non_trt_forward
|
||||
# pipe.vae.decoder.forward = pipe.vae.decoder._non_trt_forward
|
||||
setattr(pipe.unet, "_allocate_trt_buffers_backup", pipe.unet._allocate_trt_buffers)
|
||||
delattr(pipe.unet, "_allocate_trt_buffers")
|
||||
|
||||
images = generate_images(context, callback=callback, **req.dict())
|
||||
user_stopped = False
|
||||
except UserInitiatedStop:
|
||||
images = []
|
||||
user_stopped = True
|
||||
if context.partial_x_samples is not None:
|
||||
if context.test_diffusers:
|
||||
images = diffusers_latent_samples_to_images(context, context.partial_x_samples)
|
||||
else:
|
||||
images = latent_samples_to_images(context, context.partial_x_samples)
|
||||
finally:
|
||||
if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
|
||||
if not context.test_diffusers:
|
||||
del context.partial_x_samples
|
||||
context.partial_x_samples = None
|
||||
|
||||
return images, user_stopped
|
||||
return images
|
||||
|
||||
|
||||
def construct_response(images: list, seeds: list, output_format: OutputFormatData):
|
||||
return [
|
||||
ResponseImage(
|
||||
data=img_to_base64_str(
|
||||
img,
|
||||
output_format.output_format,
|
||||
output_format.output_quality,
|
||||
output_format.output_lossless,
|
||||
),
|
||||
seed=seed,
|
||||
)
|
||||
for img, seed in zip(images, seeds)
|
||||
]
|
||||
return [ResponseImage(data=img, seed=seed) for img, seed in zip(images, seeds)]
|
||||
|
||||
|
||||
def make_step_callback(
|
||||
context,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
data_queue: queue.Queue,
|
||||
task_temp_images: list,
|
||||
step_callback,
|
||||
stream_image_progress: bool,
|
||||
stream_image_progress_interval: int,
|
||||
):
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
n_steps = req.num_inference_steps if req.init_image is None else int(req.num_inference_steps * req.prompt_strength)
|
||||
last_callback_time = -1
|
||||
|
||||
def update_temp_img(x_samples, task_temp_images: list):
|
||||
def update_temp_img(images, task_temp_images: list):
|
||||
partial_images = []
|
||||
|
||||
if context.test_diffusers:
|
||||
images = diffusers_latent_samples_to_images(context, x_samples)
|
||||
else:
|
||||
images = latent_samples_to_images(context, x_samples)
|
||||
if images is None:
|
||||
return []
|
||||
|
||||
if task_data.block_nsfw:
|
||||
images = filter_images(context, images, "nsfw_checker")
|
||||
images = filter_nsfw(images, print_log=False)
|
||||
|
||||
for i, img in enumerate(images):
|
||||
img = img.convert("RGB")
|
||||
img = resize_img(img, req.width, req.height)
|
||||
buf = img_to_buffer(img, output_format="JPEG")
|
||||
|
||||
context.temp_images[f"{task_data.request_id}/{i}"] = buf
|
||||
task_temp_images[i] = buf
|
||||
partial_images.append({"path": f"/image/tmp/{task_data.request_id}/{i}"})
|
||||
del images
|
||||
return partial_images
|
||||
|
||||
def on_image_step(x_samples, i, *args):
|
||||
def on_image_step(images, i, *args):
|
||||
nonlocal last_callback_time
|
||||
|
||||
if context.test_diffusers:
|
||||
context.partial_x_samples = (x_samples, args[0])
|
||||
else:
|
||||
context.partial_x_samples = x_samples
|
||||
|
||||
step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
|
||||
last_callback_time = time.time()
|
||||
|
||||
progress = {"step": i, "step_time": step_time, "total_steps": n_steps}
|
||||
|
||||
if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
|
||||
progress["output"] = update_temp_img(context.partial_x_samples, task_temp_images)
|
||||
if images is not None:
|
||||
progress["output"] = update_temp_img(images, task_temp_images)
|
||||
|
||||
data_queue.put(json.dumps(progress))
|
||||
|
||||
step_callback()
|
||||
|
||||
if context.stop_processing:
|
||||
raise UserInitiatedStop("User requested that we stop processing")
|
||||
|
||||
return on_image_step
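For reference, a sketch of the progress payload that on_image_step() pushes onto the data queue. The numbers are illustrative, and "output" appears only when a partial image was produced for this step.

progress = {
    "step": 12,
    "step_time": 0.84,      # seconds since the previous callback
    "total_steps": 25,
    "output": [{"path": "/image/tmp/<request_id>/0"}],
}
data_queue.put(json.dumps(progress))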
|
||||
|
@ -14,19 +14,22 @@ class GenerateImageRequest(BaseModel):
|
||||
num_outputs: int = 1
|
||||
num_inference_steps: int = 50
|
||||
guidance_scale: float = 7.5
|
||||
distilled_guidance_scale: float = 3.5
|
||||
|
||||
init_image: Any = None
|
||||
init_image_mask: Any = None
|
||||
control_image: Any = None
|
||||
control_alpha: Union[float, List[float]] = None
|
||||
controlnet_filter: str = None
|
||||
prompt_strength: float = 0.8
|
||||
preserve_init_image_color_profile = False
|
||||
strict_mask_border = False
|
||||
preserve_init_image_color_profile: bool = False
|
||||
strict_mask_border: bool = False
|
||||
|
||||
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
||||
scheduler_name: str = None
|
||||
hypernetwork_strength: float = 0
|
||||
lora_alpha: Union[float, List[float]] = 0
|
||||
tiling: str = "none" # "none", "x", "y", "xy"
|
||||
tiling: str = None # None, "x", "y", "xy"
|
||||
|
||||
|
||||
class FilterImageRequest(BaseModel):
|
||||
@ -58,10 +61,17 @@ class OutputFormatData(BaseModel):
|
||||
output_lossless: bool = False
|
||||
|
||||
|
||||
class SaveToDiskData(BaseModel):
|
||||
save_to_disk_path: str = None
|
||||
metadata_output_format: str = "txt" # or "json"
|
||||
|
||||
|
||||
class TaskData(BaseModel):
|
||||
request_id: str = None
|
||||
session_id: str = "session"
|
||||
save_to_disk_path: str = None
|
||||
|
||||
|
||||
class RenderTaskData(TaskData):
|
||||
vram_usage_level: str = "balanced" # or "low" or "medium"
|
||||
|
||||
use_face_correction: Union[str, List[str]] = None # or "GFPGANv1.3"
|
||||
@ -73,13 +83,14 @@ class TaskData(BaseModel):
|
||||
use_hypernetwork_model: Union[str, List[str]] = None
|
||||
use_lora_model: Union[str, List[str]] = None
|
||||
use_controlnet_model: Union[str, List[str]] = None
|
||||
use_embeddings_model: Union[str, List[str]] = None
|
||||
filters: List[str] = []
|
||||
filter_params: Dict[str, Dict[str, Any]] = {}
|
||||
control_filter_to_apply: Union[str, List[str]] = None
|
||||
enable_vae_tiling: bool = True
|
||||
|
||||
show_only_filtered_image: bool = False
|
||||
block_nsfw: bool = False
|
||||
metadata_output_format: str = "txt" # or "json"
|
||||
stream_image_progress: bool = False
|
||||
stream_image_progress_interval: int = 5
|
||||
clip_skip: bool = False
|
||||
@ -92,7 +103,7 @@ class MergeRequest(BaseModel):
|
||||
model1: str = None
|
||||
ratio: float = None
|
||||
out_path: str = "mix"
|
||||
use_fp16 = True
|
||||
use_fp16: bool = True
|
||||
|
||||
|
||||
class Image:
|
||||
@ -125,12 +136,14 @@ class GenerateImageResponse:
|
||||
task_data: TaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
images: list,
|
||||
):
|
||||
self.render_request = render_request
|
||||
self.task_data = task_data
|
||||
self.models_data = models_data
|
||||
self.output_format = output_format
|
||||
self.save_data = save_data
|
||||
self.images = images
|
||||
|
||||
def json(self):
|
||||
@ -140,6 +153,7 @@ class GenerateImageResponse:
|
||||
|
||||
task_data = self.task_data.dict()
|
||||
task_data.update(self.output_format.dict())
|
||||
task_data.update(self.save_data.dict())
|
||||
|
||||
res = {
|
||||
"status": "succeeded",
|
||||
@ -200,23 +214,21 @@ def convert_legacy_render_req_to_new(old_req: dict):
|
||||
model_paths["hypernetwork"] = old_req.get("use_hypernetwork_model")
|
||||
model_paths["lora"] = old_req.get("use_lora_model")
|
||||
model_paths["controlnet"] = old_req.get("use_controlnet_model")
|
||||
model_paths["embeddings"] = old_req.get("use_embeddings_model")
|
||||
|
||||
model_paths["gfpgan"] = old_req.get("use_face_correction", "")
|
||||
model_paths["gfpgan"] = model_paths["gfpgan"] if "gfpgan" in model_paths["gfpgan"].lower() else None
|
||||
## ensure that the model name is in the model path
|
||||
for model_name in ("gfpgan", "codeformer"):
|
||||
model_paths[model_name] = old_req.get("use_face_correction", "")
|
||||
model_paths[model_name] = model_paths[model_name] if model_name in model_paths[model_name].lower() else None
|
||||
|
||||
model_paths["codeformer"] = old_req.get("use_face_correction", "")
|
||||
model_paths["codeformer"] = model_paths["codeformer"] if "codeformer" in model_paths["codeformer"].lower() else None
|
||||
for model_name in ("realesrgan", "latent_upscaler", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
|
||||
model_paths[model_name] = old_req.get("use_upscale", "")
|
||||
model_paths[model_name] = model_paths[model_name] if model_name in model_paths[model_name].lower() else None
|
||||
|
||||
model_paths["realesrgan"] = old_req.get("use_upscale", "")
|
||||
model_paths["realesrgan"] = model_paths["realesrgan"] if "realesrgan" in model_paths["realesrgan"].lower() else None
|
||||
|
||||
model_paths["latent_upscaler"] = old_req.get("use_upscale", "")
|
||||
model_paths["latent_upscaler"] = (
|
||||
model_paths["latent_upscaler"] if "latent_upscaler" in model_paths["latent_upscaler"].lower() else None
|
||||
)
|
||||
if "control_filter_to_apply" in old_req:
|
||||
filter_model = old_req["control_filter_to_apply"]
|
||||
model_paths[filter_model] = filter_model
|
||||
old_req["control_filter_to_apply"] = convert_legacy_controlnet_filter_name(old_req["control_filter_to_apply"])
|
||||
|
||||
if old_req.get("block_nsfw"):
|
||||
model_paths["nsfw_checker"] = "nsfw_checker"
|
||||
@ -232,8 +244,12 @@ def convert_legacy_render_req_to_new(old_req: dict):
|
||||
}
|
||||
|
||||
# move the filter params
|
||||
if model_paths["realesrgan"]:
|
||||
filter_params["realesrgan"] = {"scale": int(old_req.get("upscale_amount", 4))}
|
||||
for model_name in ("realesrgan", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
|
||||
if model_paths[model_name]:
|
||||
filter_params[model_name] = {
|
||||
"upscaler": model_paths[model_name],
|
||||
"scale": int(old_req.get("upscale_amount", 4)),
|
||||
}
|
||||
if model_paths["latent_upscaler"]:
|
||||
filter_params["latent_upscaler"] = {
|
||||
"prompt": old_req["prompt"],
|
||||
@ -252,14 +268,31 @@ def convert_legacy_render_req_to_new(old_req: dict):
|
||||
if old_req.get("block_nsfw"):
|
||||
filters.append("nsfw_checker")
|
||||
|
||||
if model_paths["codeformer"]:
|
||||
filters.append("codeformer")
|
||||
elif model_paths["gfpgan"]:
|
||||
filters.append("gfpgan")
|
||||
for model_name in ("gfpgan", "codeformer"):
|
||||
if model_paths[model_name]:
|
||||
filters.append(model_name)
|
||||
break
|
||||
|
||||
if model_paths["realesrgan"]:
|
||||
filters.append("realesrgan")
|
||||
elif model_paths["latent_upscaler"]:
|
||||
filters.append("latent_upscaler")
|
||||
for model_name in ("realesrgan", "latent_upscaler", "esrgan_4x", "lanczos", "nearest", "scunet", "swinir"):
|
||||
if model_paths[model_name]:
|
||||
filters.append(model_name)
|
||||
break
|
||||
|
||||
return new_req
|
||||
|
||||
|
||||
def convert_legacy_controlnet_filter_name(filter):
|
||||
from easydiffusion.backend_manager import backend
|
||||
|
||||
if filter is None:
|
||||
return None
|
||||
|
||||
controlnet_filter_names = backend.list_controlnet_filters()
|
||||
|
||||
def apply(f):
|
||||
return f"controlnet_{f}" if f in controlnet_filter_names else f
|
||||
|
||||
if isinstance(filter, list):
|
||||
return [apply(f) for f in filter]
|
||||
|
||||
return apply(filter)
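Usage sketch, assuming "canny" is one of the names reported by backend.list_controlnet_filters():

convert_legacy_controlnet_filter_name("canny")              # -> "controlnet_canny"
convert_legacy_controlnet_filter_name(["canny", "gfpgan"])  # -> ["controlnet_canny", "gfpgan"]
convert_legacy_controlnet_filter_name(None)                 # -> None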
|
||||
|
@@ -1,4 +1,5 @@
import logging
import hashlib

log = logging.getLogger("easydiffusion")

@@ -6,3 +7,16 @@ from .save_utils import (
    save_images_to_disk,
    get_printable_request,
)
from .nsfw_checker import filter_nsfw


def sha256sum(filename):
    sha256 = hashlib.sha256()
    with open(filename, "rb") as f:
        while True:
            data = f.read(8192)  # Read in chunks of 8192 bytes
            if not data:
                break
            sha256.update(data)

    return sha256.hexdigest()
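A short usage sketch for the helper above; the file path is a placeholder.

digest = sha256sum("models/stable-diffusion/sd-v1-5.safetensors")
print(digest)  # 64-character hex string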
ui/easydiffusion/utils/nsfw_checker.py (new file, 80 lines)
@@ -0,0 +1,80 @@
# possibly move this to sdkit in the future
import os

# mirror of https://huggingface.co/AdamCodd/vit-base-nsfw-detector/blob/main/onnx/model_quantized.onnx
NSFW_MODEL_URL = (
    "https://github.com/easydiffusion/sdkit-test-data/releases/download/assets/vit-base-nsfw-detector-quantized.onnx"
)
MODEL_HASH_QUICK = "220123559305b1b07b7a0894c3471e34dccd090d71cdf337dd8012f9e40d6c28"

nsfw_check_model = None


def filter_nsfw(images, blur_radius: float = 75, print_log=True):
    global nsfw_check_model

    from easydiffusion.model_manager import get_model_dirs
    from sdkit.utils import base64_str_to_img, img_to_base64_str, download_file, log, hash_file_quick

    import onnxruntime as ort
    from PIL import ImageFilter
    import numpy as np

    if nsfw_check_model is None:
        model_dir = get_model_dirs("nsfw-checker")[0]
        model_path = os.path.join(model_dir, "vit-base-nsfw-detector-quantized.onnx")

        os.makedirs(model_dir, exist_ok=True)

        if not os.path.exists(model_path) or hash_file_quick(model_path) != MODEL_HASH_QUICK:
            download_file(NSFW_MODEL_URL, model_path)

        nsfw_check_model = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])

    # Preprocess the input image
    def preprocess_image(img):
        img = img.convert("RGB")

        # config based on https://huggingface.co/AdamCodd/vit-base-nsfw-detector/blob/main/onnx/preprocessor_config.json
        # Resize the image
        img = img.resize((384, 384))

        # Normalize the image
        img = np.array(img) / 255.0  # Scale pixel values to [0, 1]
        mean = np.array([0.5, 0.5, 0.5])
        std = np.array([0.5, 0.5, 0.5])
        img = (img - mean) / std

        # Transpose to match input shape (batch_size, channels, height, width)
        img = np.transpose(img, (2, 0, 1)).astype(np.float32)

        # Add batch dimension
        img = np.expand_dims(img, axis=0)

        return img

    # Run inference
    input_name = nsfw_check_model.get_inputs()[0].name
    output_name = nsfw_check_model.get_outputs()[0].name

    if print_log:
        log.info("Running NSFW checker (onnx)")

    results = []
    for img in images:
        is_base64 = isinstance(img, str)

        input_img = base64_str_to_img(img) if is_base64 else img

        result = nsfw_check_model.run([output_name], {input_name: preprocess_image(input_img)})
        is_nsfw = [np.argmax(arr) == 1 for arr in result][0]

        if is_nsfw:
            output_img = input_img.filter(ImageFilter.GaussianBlur(blur_radius))
            output_img = img_to_base64_str(output_img) if is_base64 else output_img
        else:
            output_img = img

        results.append(output_img)

    return results
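A hedged usage sketch: filter_nsfw() accepts PIL images or base64 strings and returns the same types, blurring any image the ONNX classifier flags as NSFW. The file path is a placeholder.

from PIL import Image
from easydiffusion.utils import filter_nsfw

imgs = [Image.open("example.png")]
safe_imgs = filter_nsfw(imgs, blur_radius=75)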
@ -7,9 +7,17 @@ from datetime import datetime
|
||||
from functools import reduce
|
||||
|
||||
from easydiffusion import app
|
||||
from easydiffusion.types import GenerateImageRequest, TaskData, OutputFormatData
|
||||
from easydiffusion.types import (
|
||||
GenerateImageRequest,
|
||||
TaskData,
|
||||
RenderTaskData,
|
||||
OutputFormatData,
|
||||
SaveToDiskData,
|
||||
ModelsData,
|
||||
)
|
||||
from numpy import base_repr
|
||||
from sdkit.utils import save_dicts, save_images
|
||||
from sdkit.models.model_loader.embeddings import get_embedding_token
|
||||
|
||||
filename_regex = re.compile("[^a-zA-Z0-9._-]")
|
||||
img_number_regex = re.compile("([0-9]{5,})")
|
||||
@ -23,18 +31,21 @@ TASK_TEXT_MAPPING = {
|
||||
"clip_skip": "Clip Skip",
|
||||
"use_controlnet_model": "ControlNet model",
|
||||
"control_filter_to_apply": "ControlNet Filter",
|
||||
"control_alpha": "ControlNet Strength",
|
||||
"use_vae_model": "VAE model",
|
||||
"sampler_name": "Sampler",
|
||||
"scheduler_name": "Scheduler",
|
||||
"width": "Width",
|
||||
"height": "Height",
|
||||
"num_inference_steps": "Steps",
|
||||
"guidance_scale": "Guidance Scale",
|
||||
"distilled_guidance_scale": "Distilled Guidance",
|
||||
"prompt_strength": "Prompt Strength",
|
||||
"use_lora_model": "LoRA model",
|
||||
"lora_alpha": "LoRA Strength",
|
||||
"use_hypernetwork_model": "Hypernetwork model",
|
||||
"hypernetwork_strength": "Hypernetwork Strength",
|
||||
"use_embedding_models": "Embedding models",
|
||||
"use_embeddings_model": "Embedding models",
|
||||
"tiling": "Seamless Tiling",
|
||||
"use_face_correction": "Use Face Correction",
|
||||
"use_upscale": "Use Upscaling",
|
||||
@ -94,7 +105,7 @@ def format_folder_name(format: str, req: GenerateImageRequest, task_data: TaskDa
|
||||
def format_file_name(
|
||||
format: str,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
now: float,
|
||||
batch_file_number: int,
|
||||
folder_img_number: ImageNumber,
|
||||
@ -117,13 +128,19 @@ def format_file_name(
|
||||
|
||||
|
||||
def save_images_to_disk(
|
||||
images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData
|
||||
images: list,
|
||||
filtered_images: list,
|
||||
req: GenerateImageRequest,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
now = time.time()
|
||||
app_config = app.getConfig()
|
||||
folder_format = app_config.get("folder_format", "$id")
|
||||
save_dir_path = os.path.join(task_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
|
||||
metadata_entries = get_metadata_entries_for_request(req, task_data, output_format)
|
||||
save_dir_path = os.path.join(save_data.save_to_disk_path, format_folder_name(folder_format, req, task_data))
|
||||
metadata_entries = get_metadata_entries_for_request(req, task_data, models_data, output_format, save_data)
|
||||
file_number = calculate_img_number(save_dir_path, task_data)
|
||||
make_filename = make_filename_callback(
|
||||
app_config.get("filename_format", "$p_$tsb64"),
|
||||
@ -142,8 +159,8 @@ def save_images_to_disk(
|
||||
output_quality=output_format.output_quality,
|
||||
output_lossless=output_format.output_lossless,
|
||||
)
|
||||
if task_data.metadata_output_format:
|
||||
for metadata_output_format in task_data.metadata_output_format.split(","):
|
||||
if save_data.metadata_output_format:
|
||||
for metadata_output_format in save_data.metadata_output_format.split(","):
|
||||
if metadata_output_format.lower() in ["json", "txt", "embed"]:
|
||||
save_dicts(
|
||||
metadata_entries,
|
||||
@ -178,8 +195,8 @@ def save_images_to_disk(
|
||||
output_quality=output_format.output_quality,
|
||||
output_lossless=output_format.output_lossless,
|
||||
)
|
||||
if task_data.metadata_output_format:
|
||||
for metadata_output_format in task_data.metadata_output_format.split(","):
|
||||
if save_data.metadata_output_format:
|
||||
for metadata_output_format in save_data.metadata_output_format.split(","):
|
||||
if metadata_output_format.lower() in ["json", "txt", "embed"]:
|
||||
save_dicts(
|
||||
metadata_entries,
|
||||
@ -190,11 +207,17 @@ def save_images_to_disk(
|
||||
)
|
||||
|
||||
|
||||
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData):
|
||||
metadata = get_printable_request(req, task_data, output_format)
|
||||
def get_metadata_entries_for_request(
|
||||
req: GenerateImageRequest,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
metadata = get_printable_request(req, task_data, models_data, output_format, save_data)
|
||||
|
||||
# if text, format it in the text format expected by the UI
|
||||
is_txt_format = task_data.metadata_output_format and "txt" in task_data.metadata_output_format.lower().split(",")
|
||||
is_txt_format = save_data.metadata_output_format and "txt" in save_data.metadata_output_format.lower().split(",")
|
||||
if is_txt_format:
|
||||
|
||||
def format_value(value):
|
||||
@ -213,13 +236,20 @@ def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskD
|
||||
return entries
|
||||
|
||||
|
||||
def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output_format: OutputFormatData):
|
||||
def get_printable_request(
|
||||
req: GenerateImageRequest,
|
||||
task_data: RenderTaskData,
|
||||
models_data: ModelsData,
|
||||
output_format: OutputFormatData,
|
||||
save_data: SaveToDiskData,
|
||||
):
|
||||
req_metadata = req.dict()
|
||||
task_data_metadata = task_data.dict()
|
||||
task_data_metadata.update(output_format.dict())
|
||||
task_data_metadata.update(save_data.dict())
|
||||
|
||||
app_config = app.getConfig()
|
||||
using_diffusers = app_config.get("test_diffusers", False)
|
||||
using_diffusers = app_config.get("backend", "ed_diffusers") in ("ed_diffusers", "webui")
|
||||
|
||||
# Save the metadata in the order defined in TASK_TEXT_MAPPING
|
||||
metadata = {}
|
||||
@ -228,28 +258,12 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
|
||||
metadata[key] = req_metadata[key]
|
||||
elif key in task_data_metadata:
|
||||
metadata[key] = task_data_metadata[key]
|
||||
elif key == "use_embedding_models" and using_diffusers:
|
||||
embeddings_extensions = {".pt", ".bin", ".safetensors"}
|
||||
|
||||
def scan_directory(directory_path: str):
|
||||
used_embeddings = []
|
||||
for entry in os.scandir(directory_path):
|
||||
if entry.is_file():
|
||||
entry_extension = os.path.splitext(entry.name)[1]
|
||||
if entry_extension not in embeddings_extensions:
|
||||
continue
|
||||
if key == "use_embeddings_model" and task_data_metadata[key] and using_diffusers:
|
||||
embeddings_used = models_data.model_paths["embeddings"]
|
||||
embeddings_used = embeddings_used if isinstance(embeddings_used, list) else [embeddings_used]
|
||||
|
||||
embedding_name_regex = regex.compile(
|
||||
r"(^|[\s,])" + regex.escape(os.path.splitext(entry.name)[0]) + r"([+-]*$|[\s,]|[+-]+[\s,])"
|
||||
)
|
||||
if embedding_name_regex.search(req.prompt) or embedding_name_regex.search(req.negative_prompt):
|
||||
used_embeddings.append(entry.path)
|
||||
elif entry.is_dir():
|
||||
used_embeddings.extend(scan_directory(entry.path))
|
||||
return used_embeddings
|
||||
|
||||
used_embeddings = scan_directory(os.path.join(app.MODELS_DIR, "embeddings"))
|
||||
metadata["use_embedding_models"] = used_embeddings if len(used_embeddings) > 0 else None
|
||||
metadata["use_embeddings_model"] = embeddings_used if len(embeddings_used) > 0 else None
|
||||
|
||||
# Clean up the metadata
|
||||
if req.init_image is None and "prompt_strength" in metadata:
|
||||
@ -265,9 +279,22 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
|
||||
if task_data.use_controlnet_model is None and "control_filter_to_apply" in metadata:
|
||||
del metadata["control_filter_to_apply"]
|
||||
|
||||
if not using_diffusers:
|
||||
if using_diffusers:
|
||||
for key in (x for x in ["use_hypernetwork_model", "hypernetwork_strength"] if x in metadata):
|
||||
del metadata[key]
|
||||
else:
|
||||
for key in (
|
||||
x for x in ["use_lora_model", "lora_alpha", "clip_skip", "tiling", "latent_upscaler_steps", "use_controlnet_model", "control_filter_to_apply"] if x in metadata
|
||||
x
|
||||
for x in [
|
||||
"use_lora_model",
|
||||
"lora_alpha",
|
||||
"clip_skip",
|
||||
"tiling",
|
||||
"latent_upscaler_steps",
|
||||
"use_controlnet_model",
|
||||
"control_filter_to_apply",
|
||||
]
|
||||
if x in metadata
|
||||
):
|
||||
del metadata[key]
|
||||
|
||||
@ -277,7 +304,7 @@ def get_printable_request(req: GenerateImageRequest, task_data: TaskData, output
|
||||
def make_filename_callback(
|
||||
filename_format: str,
|
||||
req: GenerateImageRequest,
|
||||
task_data: TaskData,
|
||||
task_data: RenderTaskData,
|
||||
folder_img_number: int,
|
||||
suffix=None,
|
||||
now=None,
|
||||
@ -294,7 +321,7 @@ def make_filename_callback(
|
||||
return make_filename
|
||||
|
||||
|
||||
def _calculate_img_number(save_dir_path: str, task_data: TaskData):
|
||||
def _calculate_img_number(save_dir_path: str, task_data: RenderTaskData):
|
||||
def get_highest_img_number(accumulator: int, file: os.DirEntry) -> int:
|
||||
if not file.is_file:
|
||||
return accumulator
|
||||
@ -338,5 +365,5 @@ def _calculate_img_number(save_dir_path: str, task_data: TaskData):
|
||||
_calculate_img_number.session_img_numbers = {}
|
||||
|
||||
|
||||
def calculate_img_number(save_dir_path: str, task_data: TaskData):
|
||||
def calculate_img_number(save_dir_path: str, task_data: RenderTaskData):
|
||||
return ImageNumber(lambda: _calculate_img_number(save_dir_path, task_data))
|
||||
|
ui/index.html (331 changes)
@@ -18,12 +18,15 @@
|
||||
<link rel="stylesheet" href="/media/css/image-modal.css">
|
||||
<link rel="stylesheet" href="/media/css/plugins.css">
|
||||
<link rel="stylesheet" href="/media/css/animations.css">
|
||||
<link rel="stylesheet" href="/media/css/croppr.css" rel="stylesheet"/>
|
||||
<link rel="manifest" href="/media/manifest.webmanifest">
|
||||
<script src="/media/js/jquery-3.6.1.min.js"></script>
|
||||
<script src="/media/js/jquery-confirm.min.js"></script>
|
||||
<script src="/media/js/jszip.min.js"></script>
|
||||
<script src="/media/js/FileSaver.min.js"></script>
|
||||
<script src="/media/js/marked.min.js"></script>
|
||||
<script src="/media/js/croppr.js"></script>
|
||||
<script src="/media/js/exif-reader.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="container">
|
||||
@ -32,7 +35,13 @@
|
||||
<h1>
|
||||
<img id="logo_img" src="/media/images/icon-512x512.png" >
|
||||
Easy Diffusion
|
||||
<small><span id="version">v2.5.48</span> <span id="updateBranchLabel"></span></small>
|
||||
<small>
|
||||
<span id="version">
|
||||
<span class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">v3.0.10</span>
|
||||
<span class="gated-feature" data-feature-keys="backend_webui">v3.5.0</span>
|
||||
</span> <span id="updateBranchLabel"></span>
|
||||
<div id="engine-logo" class="gated-feature" data-feature-keys="backend_webui">(Powered by <a id="backend-url" href="https://github.com/lllyasviel/stable-diffusion-webui-forge" target="_blank">Stable Diffusion WebUI Forge</a>)</div>
|
||||
</small>
|
||||
</h1>
|
||||
</div>
|
||||
<div id="server-status">
|
||||
@ -59,11 +68,18 @@
|
||||
<div id="editor-inputs-prompt" class="row">
|
||||
<div id="prompt-toolbar" class="split-toolbar">
|
||||
<div id="prompt-toolbar-left" class="toolbar-left">
|
||||
<label for="prompt"><b>Enter Prompt</b></label> <small>or</small> <button id="promptsFromFileBtn" class="tertiaryButton smallButton">Load from a file</button>
|
||||
<label for="prompt"><b>Enter Prompt</b>
|
||||
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">
|
||||
You can type your prompts in the below textbox or load them from a file. You can also
|
||||
reload tasks from metadata embedded in PNG, WEBP and JPEG images (enable embedding from the Settings).
|
||||
</span></i>
|
||||
</label>
|
||||
<small>or</small>
|
||||
<button id="promptsFromFileBtn" class="tertiaryButton smallButton">Load from a file</button>
|
||||
</div>
|
||||
<div id="prompt-toolbar-right" class="toolbar-right">
|
||||
<button id="image-modifier-dropdown" class="tertiaryButton smallButton">+ Image Modifiers</button>
|
||||
<button id="embeddings-button" class="tertiaryButton smallButton displayNone">+ Embedding</button>
|
||||
<button id="embeddings-button" class="tertiaryButton smallButton gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">+ Embedding</button>
|
||||
</div>
|
||||
</div>
|
||||
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
|
||||
@ -73,7 +89,7 @@
|
||||
<a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">Click to learn more about Negative Prompts</span></i></a>
|
||||
<small>(optional)</small>
|
||||
</label>
|
||||
<button id="negative-embeddings-button" class="tertiaryButton smallButton displayNone">+ Embedding</button>
|
||||
<button id="negative-embeddings-button" class="tertiaryButton smallButton gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">+ Negative Embedding</button>
|
||||
<div class="collapsible-content">
|
||||
<textarea id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)"></textarea>
|
||||
</div>
|
||||
@ -81,6 +97,11 @@
|
||||
|
||||
<div id="editor-inputs-init-image" class="row">
|
||||
<label for="init_image">Initial Image (img2img) <small>(optional)</small> </label>
|
||||
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">
|
||||
Add img2img source image using the Browse button, via drag & drop from external file or browser image (incl.
|
||||
rendered image) or by pasting an image from the clipboard using Ctrl+V.<br /><br />
|
||||
You may also reload the metadata embedded in a PNG, WEBP or JPEG image (enable embedding from the Settings).
|
||||
</span></i>
|
||||
|
||||
<div id="init_image_preview_container" class="image_preview_container">
|
||||
<div id="init_image_wrapper" class="preview_image_wrapper">
|
||||
@ -140,8 +161,13 @@
|
||||
<div id="editor-settings-entries" class="collapsible-content">
|
||||
<div><table>
|
||||
<tr><b class="settings-subheader">Image Settings</b></tr>
|
||||
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
|
||||
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label id="num_outputs_parallel_label" for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
|
||||
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
|
||||
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td>
|
||||
<td><input id="num_outputs_total" name="num_outputs_total" value="1" type="number" value="1" min="1" step="1" onkeypres"="preventNonNumericalInput(event)" inputmode="numeric">
|
||||
<label><small>(total)</small></label>
|
||||
<input id="num_outputs_parallel" name="num_outputs_parallel" value="1" type="number" value="1" min="1" step="1" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
|
||||
<label id="num_outputs_parallel_label" for="num_outputs_parallel"><small>(in parallel)</small></label></td>
|
||||
</tr>
|
||||
<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td class="model-input">
|
||||
<input id="stable_diffusion_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||
<button id="reload-models" class="secondaryButton reloadModels"><i class='fa-solid fa-rotate'></i></button>
|
||||
@ -154,14 +180,14 @@
|
||||
<!-- <label><small>Takes up to 20 mins the first time</small></label> -->
|
||||
</td>
|
||||
</tr>
|
||||
<tr class="pl-5 displayNone" id="clip_skip_config">
|
||||
<tr class="pl-5 gated-feature" id="clip_skip_config" data-feature-keys="backend_ed_diffusers backend_webui">
|
||||
<td><label for="clip_skip">Clip Skip:</label></td>
|
||||
<td class="diffusers-restart-needed">
|
||||
<input id="clip_skip" name="clip_skip" type="checkbox">
|
||||
<a href="https://github.com/easydiffusion/easydiffusion/wiki/Clip-Skip" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about Clip Skip</span></i></a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr id="controlnet_model_container" class="pl-5">
|
||||
<tr id="controlnet_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">
|
||||
<td><label for="controlnet_model">ControlNet Image:</label></td>
|
||||
<td class="diffusers-restart-needed">
|
||||
<div id="control_image_wrapper" class="preview_image_wrapper">
|
||||
@ -181,40 +207,94 @@
|
||||
<option value="openpose_faceonly">OpenPose face-only</option>
|
||||
<option value="openpose_hand">OpenPose hand</option>
|
||||
<option value="openpose_full">OpenPose full</option>
|
||||
<option value="animal_openpose" class="gated-feature" data-feature-keys="backend_webui">animal_openpose</option>
|
||||
<option value="densepose_parula (black bg & blue torso)" class="gated-feature" data-feature-keys="backend_webui">densepose_parula (black bg & blue torso)</option>
|
||||
<option value="densepose (pruple bg & purple torso)" class="gated-feature" data-feature-keys="backend_webui">densepose (pruple bg & purple torso)</option>
|
||||
<option value="dw_openpose_full" class="gated-feature" data-feature-keys="backend_webui">dw_openpose_full</option>
|
||||
<option value="mediapipe_face" class="gated-feature" data-feature-keys="backend_webui">mediapipe_face</option>
|
||||
<option value="instant_id_face_keypoints" class="gated-feature" data-feature-keys="backend_webui">instant_id_face_keypoints</option>
|
||||
<option value="InsightFace+CLIP-H (IPAdapter)" class="gated-feature" data-feature-keys="backend_webui">InsightFace+CLIP-H (IPAdapter)</option>
|
||||
<option value="InsightFace (InstantID)" class="gated-feature" data-feature-keys="backend_webui">InsightFace (InstantID)</option>
|
||||
</optgroup>
|
||||
<optgroup label="Outline">
|
||||
<option value="canny">Canny (*)</option>
|
||||
<option value="mlsd">Straight lines</option>
|
||||
<option value="scribble_hed">Scribble hed (*)</option>
|
||||
<option value="scribble_hedsafe">Scribble hedsafe</option>
|
||||
<option value="scribble_hedsafe" class="gated-feature" data-feature-keys="backend_diffusers">Scribble hedsafe</option>
|
||||
<option value="scribble_pidinet">Scribble pidinet</option>
|
||||
<option value="scribble_pidsafe">Scribble pidsafe</option>
|
||||
<option value="scribble_pidsafe" class="gated-feature" data-feature-keys="backend_diffusers">Scribble pidsafe</option>
|
||||
<option value="scribble_xdog" class="gated-feature" data-feature-keys="backend_webui">scribble_xdog</option>
|
||||
<option value="softedge_hed">Softedge hed</option>
|
||||
<option value="softedge_hedsafe">Softedge hedsafe</option>
|
||||
<option value="softedge_pidinet">Softedge pidinet</option>
|
||||
<option value="softedge_pidsafe">Softedge pidsafe</option>
|
||||
<option value="softedge_teed" class="gated-feature" data-feature-keys="backend_webui">softedge_teed</option>
|
||||
</optgroup>
|
||||
<optgroup label="Depth">
|
||||
<option value="normal_bae">Normal bae (*)</option>
|
||||
<option value="depth_midas">Depth midas</option>
|
||||
<option value="normal_midas" class="gated-feature" data-feature-keys="backend_webui">normal_midas</option>
|
||||
<option value="depth_zoe">Depth zoe</option>
|
||||
<option value="depth_leres">Depth leres</option>
|
||||
<option value="depth_leres++">Depth leres++</option>
|
||||
<option value="depth_anything_v2" class="gated-feature" data-feature-keys="backend_webui">depth_anything_v2</option>
|
||||
<option value="depth_anything" class="gated-feature" data-feature-keys="backend_webui">depth_anything</option>
|
||||
<option value="depth_hand_refiner" class="gated-feature" data-feature-keys="backend_webui">depth_hand_refiner</option>
|
||||
<option value="depth_marigold" class="gated-feature" data-feature-keys="backend_webui">depth_marigold</option>
|
||||
</optgroup>
|
||||
<optgroup label="Line art">
|
||||
<option value="lineart_coarse">Lineart coarse</option>
|
||||
<option value="lineart_realistic">Lineart realistic</option>
|
||||
<option value="lineart_anime">Lineart anime</option>
|
||||
<option value="lineart_standard (from white bg & black line)" class="gated-feature" data-feature-keys="backend_webui">lineart_standard (from white bg & black line)</option>
|
||||
<option value="lineart_anime_denoise" class="gated-feature" data-feature-keys="backend_webui">lineart_anime_denoise</option>
|
||||
</optgroup>
|
||||
<optgroup label="Reference" class="gated-feature" data-feature-keys="backend_webui">
|
||||
<option value="reference_adain">reference_adain</option>
|
||||
<option value="reference_only">reference_only</option>
|
||||
<option value="reference_adain+attn">reference_adain+attn</option>
|
||||
</optgroup>
|
||||
<optgroup label="Tile" class="gated-feature" data-feature-keys="backend_webui">
|
||||
<option value="tile_colorfix">tile_colorfix</option>
|
||||
<option value="tile_resample">tile_resample</option>
|
||||
<option value="tile_colorfix+sharp">tile_colorfix+sharp</option>
|
||||
</optgroup>
|
||||
<optgroup label="CLIP (IPAdapter)" class="gated-feature" data-feature-keys="backend_webui">
|
||||
<option value="CLIP-ViT-H (IPAdapter)">CLIP-ViT-H (IPAdapter)</option>
|
||||
<option value="CLIP-G (Revision)">CLIP-G (Revision)</option>
|
||||
<option value="CLIP-G (Revision ignore prompt)">CLIP-G (Revision ignore prompt)</option>
|
||||
<option value="CLIP-ViT-bigG (IPAdapter)">CLIP-ViT-bigG (IPAdapter)</option>
|
||||
<option value="InsightFace+CLIP-H (IPAdapter)">InsightFace+CLIP-H (IPAdapter)</option>
|
||||
</optgroup>
|
||||
<optgroup label="Inpaint" class="gated-feature" data-feature-keys="backend_webui">
|
||||
<option value="inpaint_only">inpaint_only</option>
|
||||
<option value="inpaint_only+lama">inpaint_only+lama</option>
|
||||
<option value="inpaint_global_harmonious">inpaint_global_harmonious</option>
|
||||
</optgroup>
|
||||
<optgroup label="Segment" class="gated-feature" data-feature-keys="backend_webui">
|
||||
<option value="seg_ufade20k">seg_ufade20k</option>
|
||||
<option value="seg_ofade20k">seg_ofade20k</option>
|
||||
<option value="seg_anime_face">seg_anime_face</option>
|
||||
<option value="seg_ofcoco">seg_ofcoco</option>
|
||||
</optgroup>
|
||||
<optgroup label="Misc">
|
||||
<option value="shuffle">Shuffle</option>
|
||||
<option value="segment">Segment</option>
|
||||
<option value="segment" class="gated-feature" data-feature-keys="backend_diffusers">Segment</option>
|
||||
<option value="invert (from white bg & black line)" class="gated-feature" data-feature-keys="backend_webui">invert (from white bg & black line)</option>
|
||||
<option value="threshold" class="gated-feature" data-feature-keys="backend_webui">threshold</option>
|
||||
<option value="t2ia_sketch_pidi" class="gated-feature" data-feature-keys="backend_webui">t2ia_sketch_pidi</option>
|
||||
<option value="t2ia_color_grid" class="gated-feature" data-feature-keys="backend_webui">t2ia_color_grid</option>
|
||||
<option value="recolor_intensity" class="gated-feature" data-feature-keys="backend_webui">recolor_intensity</option>
|
||||
<option value="recolor_luminance" class="gated-feature" data-feature-keys="backend_webui">recolor_luminance</option>
|
||||
<option value="blur_gaussian" class="gated-feature" data-feature-keys="backend_webui">blur_gaussian</option>
|
||||
</optgroup>
|
||||
</select>
|
||||
<br/>
|
||||
<label for="controlnet_model"><small>Model:</small></label> <input id="controlnet_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||
<!-- <br/>
|
||||
<label><small>Will download the necessary models, the first time.</small></label> -->
|
||||
<br/>
|
||||
<label><small>Will download the necessary models the first time.</small></label>
|
||||
<label for="controlnet_alpha_slider"><small>Strength:</small></label> <input id="controlnet_alpha_slider" name="controlnet_alpha_slider" class="editor-slider" value="10" type="range" min="0" max="10"> <input id="controlnet_alpha" name="controlnet_alpha" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal">
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
@ -226,28 +306,60 @@
|
||||
<select id="sampler_name" name="sampler_name">
|
||||
<option value="plms">PLMS</option>
|
||||
<option value="ddim">DDIM</option>
|
||||
<option value="ddim_cfgpp" class="gated-feature" data-feature-keys="backend_webui">DDIM CFG++</option>
|
||||
<option value="heun">Heun</option>
|
||||
<option value="euler">Euler</option>
|
||||
<option value="euler_a" selected>Euler Ancestral</option>
|
||||
<option value="dpm2">DPM2</option>
|
||||
<option value="dpm2_a">DPM2 Ancestral</option>
|
||||
<option value="dpm_fast" class="gated-feature" data-feature-keys="backend_webui">DPM Fast</option>
|
||||
<option value="dpm_adaptive" class="gated-feature" data-feature-keys="backend_ed_classic backend_webui">DPM Adaptive</option>
|
||||
<option value="lms">LMS</option>
|
||||
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
|
||||
<option value="dpm_solver_stability" class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">DPM Solver (Stability AI)</option>
|
||||
<option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
|
||||
<option value="dpmpp_2m">DPM++ 2m (Karras)</option>
|
||||
<option value="dpmpp_2m_sde" class="diffusers-only">DPM++ 2m SDE (Karras)</option>
|
||||
<option value="dpmpp_2m_sde" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DPM++ 2m SDE</option>
|
||||
<option value="dpmpp_2m_sde_heun" class="gated-feature" data-feature-keys="backend_webui">DPM++ 2m SDE Heun</option>
|
||||
<option value="dpmpp_3m_sde" class="gated-feature" data-feature-keys="backend_webui">DPM++ 3M SDE</option>
|
||||
<option value="dpmpp_sde">DPM++ SDE (Karras)</option>
|
||||
<option value="dpm_adaptive" class="k_diffusion-only">DPM Adaptive (Karras)</option>
|
||||
<option value="ddpm" class="diffusers-only">DDPM</option>
|
||||
<option value="deis" class="diffusers-only">DEIS</option>
|
||||
<option value="unipc_snr" class="k_diffusion-only">UniPC SNR</option>
|
||||
<option value="unipc_tu">UniPC TU</option>
|
||||
<option value="unipc_snr_2" class="k_diffusion-only">UniPC SNR 2</option>
|
||||
<option value="unipc_tu_2" class="k_diffusion-only">UniPC TU 2</option>
|
||||
<option value="unipc_tq" class="k_diffusion-only">UniPC TQ</option>
|
||||
<option value="restart" class="gated-feature" data-feature-keys="backend_webui">Restart</option>
|
||||
<option value="heun_pp2" class="gated-feature" data-feature-keys="backend_webui">Heun PP2</option>
|
||||
<option value="ipndm" class="gated-feature" data-feature-keys="backend_webui">IPNDM</option>
|
||||
<option value="ipndm_v" class="gated-feature" data-feature-keys="backend_webui">IPNDM_V</option>
|
||||
<option value="ddpm" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DDPM</option>
|
||||
<option value="deis" class="gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">DEIS</option>
|
||||
<option value="lcm" class="gated-feature" data-feature-keys="backend_webui">LCM</option>
|
||||
<option value="forge_flux_realistic" class="gated-feature" data-feature-keys="backend_webui">[Forge] Flux Realistic</option>
|
||||
<option value="forge_flux_realistic_slow" class="gated-feature" data-feature-keys="backend_webui">[Forge] Flux Realistic (Slow)</option>
|
||||
<option value="unipc_snr" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC SNR</option>
|
||||
<option value="unipc_tu" class="gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">UniPC TU</option>
|
||||
<option value="unipc_snr_2" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC SNR 2</option>
|
||||
<option value="unipc_tu_2" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC TU 2</option>
|
||||
<option value="unipc_tq" class="gated-feature" data-feature-keys="backend_ed_classic">UniPC TQ</option>
|
||||
</select>
|
||||
<a href="https://github.com/easydiffusion/easydiffusion/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
|
||||
</td></tr>
|
||||
<tr class="pl-5 warning-label displayNone" id="fluxSamplerWarning"><td></td><td>Please avoid 'Euler Ancestral' with Flux!</td></tr>
|
||||
<tr id="schedulerSelection" class="pl-5 gated-feature" data-feature-keys="backend_webui"><td><label for="scheduler_name">Scheduler:</label></td><td>
|
||||
<select id="scheduler_name" name="scheduler_name">
|
||||
<option value="automatic">Automatic</option>
|
||||
<option value="uniform">Uniform</option>
|
||||
<option value="karras">Karras</option>
|
||||
<option value="exponential">Exponential</option>
|
||||
<option value="polyexponential">Polyexponential</option>
|
||||
<option value="sgm_uniform">SGM Uniform</option>
|
||||
<option value="kl_optimal">KL Optimal</option>
|
||||
<option value="align_your_steps">Align Your Steps</option>
|
||||
<option value="simple" selected>Simple</option>
|
||||
<option value="normal">Normal</option>
|
||||
<option value="ddim">DDIM</option>
|
||||
<option value="beta">Beta</option>
|
||||
<option value="turbo">Turbo</option>
|
||||
<option value="align_your_steps_GITS">Align Your Steps GITS</option>
|
||||
<option value="align_your_steps_11">Align Your Steps 11</option>
|
||||
<option value="align_your_steps_32">Align Your Steps 32</option>
|
||||
</select>
|
||||
</td></tr>
|
||||
<tr class="pl-5"><td><label>Image Size: </label></td><td id="image-size-options">
|
||||
<select id="width" name="width" value="512">
|
||||
<option value="128">128</option>
|
||||
@ -270,8 +382,10 @@
|
||||
<option value="1792">1792</option>
|
||||
<option value="2048">2048</option>
|
||||
</select>
|
||||
<label for="width"><small>(width)</small></label>
|
||||
<span id="swap-width-height" class="clickable smallButton" style="margin-left: 2px; margin-right:2px;"><i class="fa-solid fa-right-left"><span class="simple-tooltip top-left"> Swap width and height </span></i></span>
|
||||
<label id="widthLabel" for="width"><small><span>(width)</span></small></label>
|
||||
<div class="tooltip-container">
|
||||
<span id="swap-width-height" class="clickable smallButton" style="margin-left: 2px; margin-right:2px;"><i class="fa-solid fa-right-left"><span class="simple-tooltip top-left"> Swap width and height </span></i></span>
|
||||
</div>
|
||||
<select id="height" name="height" value="512">
|
||||
<option value="128">128</option>
|
||||
<option value="192">192</option>
|
||||
@ -293,45 +407,56 @@
|
||||
<option value="1792">1792</option>
|
||||
<option value="2048">2048</option>
|
||||
</select>
|
||||
<label for="height"><small>(height)</small></label>
|
||||
<label id="heightLabel" for="height"><small><span>(height)</span></small></label>
|
||||
<div id="recent-resolutions-container">
|
||||
<span id="recent-resolutions-button" class="clickable"><i class="fa-solid fa-sliders"><span class="simple-tooltip top-left"> Advanced sizes </span></i></span>
|
||||
<div id="recent-resolutions-popup" class="displayNone">
|
||||
<small>Custom size:</small><br>
|
||||
<input id="custom-width" name="custom-width" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)">
|
||||
<input id="custom-width" name="custom-width" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
|
||||
×
|
||||
<input id="custom-height" name="custom-height" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)"><br>
|
||||
<input id="custom-height" name="custom-height" type="number" min="128" value="512" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"><br>
|
||||
<small>Resize:</small><br>
|
||||
<input id="resize-slider" name="resize-slider" class="editor-slider" value="1" type="range" min="0.4" max="2" step="0.005" style="width:100%;"><br>
|
||||
<div id="enlarge-buttons"><button data-factor="0.5" class="tertiaryButton smallButton">×0.5</button> <button data-factor="1.2" class="tertiaryButton smallButton">×1.2</button> <button data-factor="1.5" class="tertiaryButton smallButton">×1.5</button> <button data-factor="2" class="tertiaryButton smallButton">×2</button> <button data-factor="3" class="tertiaryButton smallButton">×3</button></div>
|
||||
|
||||
<small>Enlarge:</small><br>
|
||||
<div id="enlarge-buttons"><button id="enlarge15" class="tertiaryButton smallButton">×1.5</button> <button id="enlarge2" class="tertiaryButton smallButton">×2</button> <button id="enlarge3" class="tertiaryButton smallButton">×3</button></div>
|
||||
<div class="two-column">
|
||||
<div class="left-column">
|
||||
<small>Recently used:</small><br>
|
||||
<div id="recent-resolution-list">
|
||||
</div>
|
||||
</div>
|
||||
<div class="right-column">
|
||||
<small>Common sizes:</small><br>
|
||||
<div id="common-resolution-list">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<small>Recently used:</small><br>
|
||||
<div id="recent-resolution-list">
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="small_image_warning" class="displayNone">Small image sizes can cause bad image quality</div>
|
||||
<div id="small_image_warning" class="displayNone warning-label">Small image sizes can cause bad image quality</div>
|
||||
</td></tr>
|
||||
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
|
||||
<tr id="lora_model_container" class="pl-5">
|
||||
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" type="number" min="1" step="1" style="width: 42pt" value="25" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"></td></tr>
|
||||
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
|
||||
<tr class="pl-5 displayNone warning-label" id="guidanceWarning"><td></td><td id="guidanceWarningText"></td></tr>
|
||||
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"><br/></td></tr>
|
||||
<tr id="distilled_guidance_scale_container" class="pl-5 displayNone"><td><label for="distilled_guidance_scale_slider">Distilled Guidance:</label></td><td> <input id="distilled_guidance_scale_slider" name="distilled_guidance_scale_slider" class="editor-slider" value="35" type="range" min="11" max="500"> <input id="distilled_guidance_scale" name="distilled_guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
|
||||
<tr id="lora_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers backend_webui">
|
||||
<td>
|
||||
<label for="lora_model">LoRA:</label>
|
||||
</td>
|
||||
<td class="diffusers-restart-needed">
|
||||
<div class="model_entries"></div>
|
||||
<button class="add_model_entry"><i class="fa-solid fa-plus"></i> add another LoRA</button>
|
||||
<div id="lora_model" data-path=""></div>
|
||||
</td>
|
||||
</tr>
|
||||
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
|
||||
<tr id="hypernetwork_model_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic"><td><label for="hypernetwork_model">Hypernetwork:</label></td><td>
|
||||
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||
</td></tr>
|
||||
<tr id="hypernetwork_strength_container" class="pl-5">
|
||||
<tr id="hypernetwork_strength_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic">
|
||||
<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
|
||||
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
|
||||
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"><br/></td>
|
||||
</tr>
|
||||
<tr id="tiling_container" class="pl-5">
|
||||
<tr id="tiling_container" class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers">
|
||||
<td><label for="tiling">Seamless Tiling:</label></td>
|
||||
<td class="diffusers-restart-needed">
|
||||
<select id="tiling" name="tiling">
|
||||
@ -354,8 +479,15 @@
|
||||
</span>
|
||||
</td></tr>
|
||||
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
|
||||
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
|
||||
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="numeric">
|
||||
</td></tr>
|
||||
<tr class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers">
|
||||
<td><label for="tiling">Enable VAE Tiling:</label></td>
|
||||
<td class="diffusers-restart-needed">
|
||||
<input id="enable_vae_tiling" name="enable_vae_tiling" type="checkbox" checked>
|
||||
<label><small>Optimizes memory for larger images</small></label>
|
||||
</td>
|
||||
</tr>
|
||||
</table></div>
|
||||
|
||||
<div><ul>
|
||||
@ -364,8 +496,8 @@
|
||||
<li class="pl-5" id="use_face_correction_container">
|
||||
<input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div>
|
||||
<table id="codeformer_settings" class="displayNone sub-settings">
|
||||
<tr class="pl-5"><td><label for="codeformer_fidelity_slider">Strength:</label></td><td><input id="codeformer_fidelity_slider" name="codeformer_fidelity_slider" class="editor-slider" value="5" type="range" min="0" max="10"> <input id="codeformer_fidelity" name="codeformer_fidelity" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||
<tr class="pl-5"><td><label for="codeformer_upscale_faces">Upscale Faces:</label></td><td><input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox" checked> <label><small>(improves the resolution of faces)</small></label></td></tr>
|
||||
<tr class="pl-5"><td><label for="codeformer_fidelity_slider">Strength:</label></td><td><input id="codeformer_fidelity_slider" name="codeformer_fidelity_slider" class="editor-slider" value="5" type="range" min="0" max="10"> <input id="codeformer_fidelity" name="codeformer_fidelity" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="decimal"></td></tr>
|
||||
<tr class="pl-5 gated-feature" data-feature-keys="backend_ed_diffusers"><td><label for="codeformer_upscale_faces">Upscale Faces:</label></td><td><input id="codeformer_upscale_faces" name="codeformer_upscale_faces" type="checkbox" checked> <label><small>(improves the resolution of faces)</small></label></td></tr>
|
||||
</table>
|
||||
</li>
|
||||
<li class="pl-5">
|
||||
@ -378,10 +510,16 @@
|
||||
<select id="upscale_model" name="upscale_model">
|
||||
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
|
||||
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
|
||||
<option value="latent_upscaler">Latent Upscaler 2x</option>
|
||||
<option value="ESRGAN_4x" class="pl-5 gated-feature" data-feature-keys="backend_webui">ESRGAN_4x</option>
|
||||
<option value="Lanczos" class="pl-5 gated-feature" data-feature-keys="backend_webui">Lanczos</option>
|
||||
<option value="Nearest" class="pl-5 gated-feature" data-feature-keys="backend_webui">Nearest</option>
|
||||
<option value="ScuNET" class="pl-5 gated-feature" data-feature-keys="backend_webui">ScuNET GAN</option>
|
||||
<option value="ScuNET PSNR" class="pl-5 gated-feature" data-feature-keys="backend_webui">ScuNET PSNR</option>
|
||||
<option value="SwinIR_4x" class="pl-5 gated-feature" data-feature-keys="backend_webui">SwinIR 4x</option>
|
||||
<option value="latent_upscaler" class="pl-5 gated-feature" data-feature-keys="backend_ed_classic backend_ed_diffusers">Latent Upscaler 2x</option>
|
||||
</select>
|
||||
<table id="latent_upscaler_settings" class="displayNone sub-settings">
|
||||
<tr class="pl-5"><td><label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td><input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
||||
<tr class="pl-5"><td><label for="latent_upscaler_steps_slider">Upscaling Steps:</label></td><td><input id="latent_upscaler_steps_slider" name="latent_upscaler_steps_slider" class="editor-slider" value="10" type="range" min="1" max="50"> <input id="latent_upscaler_steps" name="latent_upscaler_steps" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" inputmode="numeric"></td></tr>
|
||||
</table>
|
||||
</li>
|
||||
<li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
|
||||
@ -389,7 +527,7 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<label><small><b>Note:</b> The Image Modifiers section has moved to the <code>+ Image Modifiers</code> button at the top, just below the Prompt textbox.</small></label>
|
||||
<label><small><b>Note:</b> The Image Modifiers section has moved to the <code>+ Image Modifiers</code> button at the top, just above the Prompt textbox.</small></label>
|
||||
</div>
|
||||
|
||||
<div id="preview" class="col-free">
|
||||
@ -403,7 +541,7 @@
|
||||
<div id="preview-content">
|
||||
<div id="preview-tools" class="displayNone">
|
||||
<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can icon"></i> Clear All</button>
|
||||
<button class="tertiaryButton" id="show-download-popup"><i class="fa-solid fa-download"></i> Download images</button>
|
||||
<button class="tertiaryButton" id="show-download-popup"><i class="fa-solid fa-download"></i><span> Download images</span></button>
|
||||
<div class="display-settings">
|
||||
<button id="undo" class="displayNone primaryButton">
|
||||
Undo <i class="fa-solid fa-rotate-left icon"></i>
|
||||
@ -426,12 +564,15 @@
|
||||
<div class="dropdown-content">
|
||||
<div class="dropdown-item">
|
||||
<input id="thumbnail_size" name="thumbnail_size" class="editor-slider" type="range" value="70" min="5" max="200" oninput="sliderUpdate(event)">
|
||||
<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)"> %
|
||||
<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)" inputmode="numeric"> %
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearfix" style="clear: both;"></div>
|
||||
</div>
|
||||
<div id="supportBanner" class="displayNone">
|
||||
If you found this project useful and want to help keep it alive, please consider <a href="https://ko-fi.com/easydiffusion" target="_blank">buying me a coffee</a> to help cover the cost of development and maintenance! Thanks for your support!
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@ -476,28 +617,44 @@
|
||||
<div class="float-container">
|
||||
<div class="float-child">
|
||||
<h1>Help</h1>
|
||||
<ul id="help-links">
|
||||
<li><span class="help-section">Using the software</span>
|
||||
<div id="help-links">
|
||||
<h4><span class="help-section"><b>Basics</b></span></h4>
|
||||
<ul>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/How-To-Use" target="_blank"><i class="fa-solid fa-book fa-fw"></i> How to use</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Overview" target="_blank"><i class="fa-solid fa-list fa-fw"></i> UI Overview</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-Prompts" target="_blank"><i class="fa-solid fa-pen-to-square fa-fw"></i> Writing prompts</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Inpainting" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Inpainting</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Run-on-Multiple-GPUs" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Run on Multiple GPUs</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/How-To-Use" target="_blank">How to use</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Writing-Prompts" target="_blank">Writing prompts</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Image-Modifiers" target="_blank">Image Modifiers</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Inpainting" target="_blank">Inpainting</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Samplers" target="_blank">Samplers</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Overview" target="_blank">Summary of every UI option</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" target="_blank">Common error messages (and solutions)</a></li>
|
||||
</ul>
|
||||
|
||||
<li><span class="help-section">Installation</span>
|
||||
<h4><span class="help-section"><b>Intermediate</b></span></h4>
|
||||
<ul>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Troubleshooting" target="_blank"><i class="fa-solid fa-circle-question fa-fw"></i> Troubleshooting</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Models" target="_blank">Custom Models</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Prompt-Syntax" target="_blank">Prompt Syntax (weights, emphasis etc)</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins" target="_blank">UI Plugins</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Embeddings" target="_blank">Embeddings</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/LoRA" target="_blank">LoRA</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/SDXL" target="_blank">SDXL</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/ControlNet" target="_blank">ControlNet</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Seamless-Tiling" target="_blank">Seamless Tiling</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/xFormers" target="_blank">xFormers</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/The-beta-channel" target="_blank">The beta channel</a></li>
|
||||
</ul>
|
||||
|
||||
<li><span class="help-section">Downloadable Content</span>
|
||||
<h4><span class="help-section"><b>Advanced topics</b></span></h4>
|
||||
<ul>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-images fa-fw"></i> Custom Models</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/UI-Plugins" target="_blank"><i class="fa-solid fa-puzzle-piece fa-fw"></i> UI Plugins</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-hand-sparkles fa-fw"></i> VAE Variational Auto Encoder</a>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Run-on-Multiple-GPUs" target="_blank">Run on Multiple GPUs</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Model-Merging" target="_blank">Model Merging</a></li>
|
||||
<li> <a href="https://github.com/easydiffusion/easydiffusion/wiki/Custom-Modifiers" target="_blank">Custom Modifiers</a></li>
|
||||
</ul>
|
||||
</ul>
|
||||
|
||||
<h4><span class="help-section"><b>Misc</b></span></h4>
|
||||
<ul>
|
||||
<li> <a href="https://theally.notion.site/The-Definitive-Stable-Diffusion-Glossary-1d1e6d15059c41e6a6b4306b4ecd9df9" target="_blank">Glossary of Stable Diffusion related terms</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="float-child">
|
||||
@ -669,6 +826,8 @@
|
||||
<span>
|
||||
</div>
|
||||
<div id="embeddings-dialog-header-right">
|
||||
<button id="add-embeddings-thumb" class="tertiaryButton smallButton" style="background-color: var(--background-color4);"><i class="fa-solid fa-folder-plus"></i> Add thumbnail</button>
|
||||
<input id="add-embeddings-thumb-input" name="add-embeddings-thumb-input" type="file" class="displayNone">
|
||||
<i id="embeddings-dialog-close-button" class="fa-solid fa-xmark fa-lg"></i>
|
||||
</div>
|
||||
</div>
|
||||
@ -678,7 +837,16 @@
|
||||
<span class="embeddings-action-text">Expand Categories</span>
|
||||
</button>
|
||||
<i class="fa-solid fa-magnifying-glass"></i>
|
||||
<input id="embeddings-search-box" type="text" spellcheck="false" autocomplete="off" placeholder="Search...">
|
||||
<input id="embeddings-search-box" type="text" spellcheck="false" autocomplete="off" placeholder="Search..." inputmode="search">
|
||||
<label for="embedding-card-size-selector"><small>Thumbnail Size:</small></label>
|
||||
<select id="embedding-card-size-selector" name="embedding-card-size-selector">
|
||||
<option value="-2">0</option>
|
||||
<option value="-1" selected>1</option>
|
||||
<option value="0">2</option>
|
||||
<option value="1">3</option>
|
||||
<option value="2">4</option>
|
||||
<option value="3">5</option>
|
||||
</select>
|
||||
<span style="float:right;"><label>Mode:</label> <select id="embeddings-mode"><option value="insert">Insert at cursor position</option><option value="append">Append at the end</option></select>
|
||||
</div>
|
||||
<div id="embeddings-list">
|
||||
@ -686,6 +854,34 @@
|
||||
</div>
|
||||
</dialog>
|
||||
|
||||
<dialog id="use-as-thumb-dialog">
|
||||
<div id="use-as-thumb-dialog-header" class="dialog-header">
|
||||
<div id="use-as-thumb-dialog-header-left" class="dialog-header-left">
|
||||
<h4>Use as thumbnail</h4>
|
||||
<span>Use a picture as the thumbnail for embeddings, LoRAs, etc.</span>
|
||||
</div>
|
||||
<div id="use-as-thumb-dialog-header-right">
|
||||
<i id="use-as-thumb-dialog-close-button" class="fa-solid fa-xmark fa-lg"></i>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<div class="use-as-thumb-grid">
|
||||
<div class="use-as-thumb-preview">
|
||||
<div id="use-as-thumb-img-container"><img id="use-as-thumb-image" src="/media/images/noimg.png" width="512" height="512"></div>
|
||||
</div>
|
||||
<div class="use-as-thumb-select">
|
||||
<label for="use-as-thumb-select">Use the thumbnail for:</label><br>
|
||||
<select id="use-as-thumb-select" size="16" multiple>
|
||||
</select>
|
||||
</div>
|
||||
<div class="use-as-thumb-buttons">
|
||||
<button class="tertiaryButton" id="use-as-thumb-save">Save thumbnail</button>
|
||||
<button class="tertiaryButton" id="use-as-thumb-cancel">Cancel</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</dialog>
|
||||
|
||||
<div id="image-editor" class="popup image-editor-popup">
|
||||
<div>
|
||||
<i class="close-button fa-solid fa-xmark"></i>
|
||||
@ -721,13 +917,14 @@
|
||||
<div id="footer-spacer"></div>
|
||||
<div id="footer">
|
||||
<div class="line-separator"> </div>
|
||||
<p>If you found this project useful and want to help keep it alive, please <a href="https://ko-fi.com/easydiffusion" target="_blank"><img src="/media/images/kofi.png" id="coffeeButton"></a> to help cover the cost of development and maintenance! Thank you for your support!</p>
|
||||
<p>Please feel free to join the <a href="https://discord.com/invite/u9yhsFmEkB" target="_blank">discord community</a> or <a href="https://github.com/easydiffusion/easydiffusion/issues" target="_blank">file an issue</a> if you have any problems or suggestions in using this interface.</p>
|
||||
<div id="footer-legal">
|
||||
<p><b>Disclaimer:</b> The authors of this project are not responsible for any content generated using this interface.</p>
|
||||
<p>The license of this software forbids you from sharing any content that violates any laws, causes harm to a person, discloses personal information with the intent to harm, <br/>spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read <a href="https://github.com/easydiffusion/easydiffusion/blob/main/LICENSE" target="_blank">the license</a>.</p>
|
||||
<p>By using this software, you consent to the terms and conditions of the license.</p>
|
||||
</div>
|
||||
<input id="test_diffusers" type="checkbox" style="display: none" checked /> <!-- for backwards compatibility -->
|
||||
<input id="use_v3_engine" type="checkbox" style="display: none" checked /> <!-- for backwards compatibility -->
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
@ -739,6 +936,8 @@
|
||||
<script src="media/js/auto-save.js"></script>
|
||||
|
||||
<script src="media/js/searchable-models.js"></script>
|
||||
<script src="media/js/multi-model-selector.js"></script>
|
||||
<script src="media/js/task-manager.js"></script>
|
||||
<script src="media/js/main.js"></script>
|
||||
<script src="media/js/plugins.js"></script>
|
||||
<script src="media/js/themes.js"></script>
|
||||
|
@ -1,4 +1,4 @@
|
||||
from easydiffusion import model_manager, app, server
|
||||
from easydiffusion import model_manager, app, server, bucket_manager
|
||||
from easydiffusion.server import server_api # required for uvicorn
|
||||
|
||||
app.init()
|
||||
@ -8,6 +8,4 @@ server.init()
|
||||
# Init the app
|
||||
model_manager.init()
|
||||
app.init_render_threads()
|
||||
|
||||
# start the browser ui
|
||||
app.open_browser()
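# Editor's note (assumption): bucket_manager appears to provide the server-side "bucket" file store
# used by the UI (e.g. for thumbnails); it is initialized last, once the server and browser are up.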
|
||||
bucket_manager.init()
|
||||
|
@ -79,6 +79,7 @@
|
||||
}
|
||||
|
||||
.parameters-table .fa-fire,
|
||||
.parameters-table .fa-bolt {
|
||||
.parameters-table .fa-bolt,
|
||||
.parameters-table .fa-robot {
|
||||
color: #F7630C;
|
||||
}
|
||||
|
ui/media/css/croppr.css (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
.croppr-container * {
|
||||
user-select: none;
|
||||
-moz-user-select: none;
|
||||
-webkit-user-select: none;
|
||||
-ms-user-select: none;
|
||||
box-sizing: border-box;
|
||||
-webkit-box-sizing: border-box;
|
||||
-moz-box-sizing: border-box;
|
||||
}
|
||||
|
||||
.croppr-container img {
|
||||
vertical-align: middle;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
.croppr {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.croppr-overlay {
|
||||
background: rgba(0,0,0,0.5);
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
z-index: 1;
|
||||
cursor: crosshair;
|
||||
}
|
||||
|
||||
.croppr-region {
|
||||
border: 1px dashed rgba(0, 0, 0, 0.5);
|
||||
position: absolute;
|
||||
z-index: 3;
|
||||
cursor: move;
|
||||
top: 0;
|
||||
}
|
||||
|
||||
.croppr-imageClipped {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
z-index: 2;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.croppr-handle {
|
||||
border: 1px solid black;
|
||||
background-color: white;
|
||||
width: 10px;
|
||||
height: 10px;
|
||||
position: absolute;
|
||||
z-index: 4;
|
||||
top: 0;
|
||||
}
|
@ -229,4 +229,27 @@
|
||||
}
|
||||
.inpainter .load_mask {
|
||||
display: flex;
|
||||
}
|
||||
}
|
||||
|
||||
.editor-canvas-overlay {
|
||||
cursor: none;
|
||||
}
|
||||
|
||||
.image-brush-preview {
|
||||
position: fixed;
|
||||
background: black;
|
||||
opacity: 0.3;
|
||||
    border-radius: 50%;
|
||||
cursor: none;
|
||||
pointer-events: none;
|
||||
transform: translate(-50%, -50%);
|
||||
}
|
||||
|
||||
.editor-options-container > * > *:not(.active):not(.button) {
|
||||
border: 1px dotted slategray;
|
||||
}
|
||||
|
||||
.image_editor_opacity .editor-options-container > * > *:not(.active):not(.button) {
|
||||
border: 1px dotted slategray;
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,16 @@ code {
|
||||
width: 32px;
|
||||
height: 32px;
|
||||
transform: translateY(4px);
|
||||
cursor: pointer;
|
||||
}
|
||||
#engine-logo {
|
||||
font-size: 8pt;
|
||||
padding-left: 10pt;
|
||||
color: var(--small-label-color);
|
||||
}
|
||||
#engine-logo a {
|
||||
text-decoration: none;
|
||||
/* color: var(--small-label-color); */
|
||||
}
|
||||
#prompt {
|
||||
width: 100%;
|
||||
@ -476,6 +486,7 @@ dialog {
|
||||
background: var(--background-color2);
|
||||
color: var(--text-color);
|
||||
border-radius: 6px;
|
||||
box-shadow: 0px 0px 30px black;
|
||||
border: 2px solid rgb(255 255 255 / 10%);
|
||||
padding: 0px;
|
||||
}
|
||||
@ -539,7 +550,7 @@ div.img-preview img {
|
||||
position: relative;
|
||||
background: var(--background-color4);
|
||||
display: flex;
|
||||
padding: 12px 0 0;
|
||||
padding: 6px 0 0;
|
||||
}
|
||||
.tab .icon {
|
||||
padding-right: 4pt;
|
||||
@ -607,11 +618,18 @@ div.img-preview img {
|
||||
margin: auto;
|
||||
padding: 0px;
|
||||
}
|
||||
#help-links ul {
|
||||
list-style-type: disc;
|
||||
padding-left: 12pt;
|
||||
}
|
||||
#help-links li {
|
||||
padding-bottom: 12pt;
|
||||
padding-bottom: 6pt;
|
||||
display: block;
|
||||
font-size: 10pt;
|
||||
}
|
||||
#help-links ul li {
|
||||
display: list-item;
|
||||
}
|
||||
#help-links li .fa-fw {
|
||||
padding-right: 2pt;
|
||||
}
|
||||
@ -648,6 +666,15 @@ div.img-preview img {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.gated-feature {
|
||||
display: none;
|
||||
}
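/* Editor's note (assumption): .gated-feature elements are hidden by default and are expected to be
   shown again by the UI's JavaScript when the active backend matches one of the element's
   data-feature-keys values (e.g. backend_ed_classic, backend_ed_diffusers, backend_webui). */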
|
||||
|
||||
.warning-label {
|
||||
font-size: smaller;
|
||||
color: var(--status-orange);
|
||||
}
|
||||
|
||||
.display-settings {
|
||||
float: right;
|
||||
position: relative;
|
||||
@ -1088,7 +1115,7 @@ input::file-selector-button {
|
||||
.tab-content-inner {
|
||||
margin: 0px;
|
||||
}
|
||||
.tab {
|
||||
#top-nav .tab {
|
||||
font-size: 0;
|
||||
}
|
||||
.tab .icon {
|
||||
@ -1114,6 +1141,9 @@ input::file-selector-button {
|
||||
#preview-tools button .icon {
|
||||
font-size: 12pt;
|
||||
}
|
||||
#show-download-popup .fa-solid {
|
||||
font-size: 12pt;
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 500px) {
|
||||
@ -1202,6 +1232,12 @@ input::file-selector-button {
|
||||
visibility: visible;
|
||||
}
|
||||
}
|
||||
|
||||
.tooltip-container {
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.simple-tooltip.right {
|
||||
right: 0px;
|
||||
top: 50%;
|
||||
@ -1418,6 +1454,10 @@ div.task-fs-initimage {
|
||||
display: none;
|
||||
position: absolute;
|
||||
}
|
||||
div.task-fs-initimage img {
|
||||
max-height: 70vH;
|
||||
max-width: 70vW;
|
||||
}
|
||||
div.task-initimg:hover div.task-fs-initimage {
|
||||
display: block;
|
||||
position: absolute;
|
||||
@ -1433,9 +1473,8 @@ div.top-right {
|
||||
right: 8px;
|
||||
}
|
||||
|
||||
#small_image_warning {
|
||||
font-size: smaller;
|
||||
color: var(--status-orange);
|
||||
.task-fs-initimage .top-right button {
|
||||
margin-top: 6px;
|
||||
}
|
||||
|
||||
button#save-system-settings-btn {
|
||||
@ -1460,6 +1499,9 @@ button#save-system-settings-btn {
|
||||
    cursor: pointer;
|
||||
}
|
||||
|
||||
.validation-failed {
|
||||
border: solid 2px red;
|
||||
}
|
||||
/* SCROLLBARS */
|
||||
:root {
|
||||
--scrollbar-width: 14px;
|
||||
@ -1650,6 +1692,35 @@ body.wait-pause {
|
||||
}
|
||||
}
|
||||
|
||||
.spinner-container {
|
||||
width: 80px;
|
||||
height: 100px;
|
||||
margin: 100px auto;
|
||||
margin-top: 30vH;
|
||||
}
|
||||
|
||||
.spinner-block {
|
||||
position: relative;
|
||||
box-sizing: border-box;
|
||||
float: left;
|
||||
margin: 0 10px 10px 0;
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border-radius: 3px;
|
||||
background: var(--accent-color);
|
||||
}
|
||||
|
||||
.spinner-block:nth-child(4n+1) { animation: spinner-wave 2s ease .0s infinite; }
|
||||
.spinner-block:nth-child(4n+2) { animation: spinner-wave 2s ease .2s infinite; }
|
||||
.spinner-block:nth-child(4n+3) { animation: spinner-wave 2s ease .4s infinite; }
|
||||
.spinner-block:nth-child(4n+4) { animation: spinner-wave 2s ease .6s infinite; margin-right: 0; }
|
||||
|
||||
@keyframes spinner-wave {
|
||||
0% { top: 0; opacity: 1; }
|
||||
50% { top: 30px; opacity: .2; }
|
||||
100% { top: 0; opacity: 1; }
|
||||
}
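/* Editor's note (assumption): the markup driven by the spinner styles above is presumably a
   <div class="spinner-container"> holding a series of <div class="spinner-block"></div> children,
   with the nth-child rules staggering the animation delay across each group of four blocks. */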
|
||||
|
||||
#embeddings-dialog {
|
||||
overflow: clip;
|
||||
}
|
||||
@ -1664,6 +1735,12 @@ body.wait-pause {
|
||||
overflow-y: scroll;
|
||||
}
|
||||
|
||||
@media screen and (max-width: 1400px) {
|
||||
#embeddings-list {
|
||||
width: 80vW;
|
||||
}
|
||||
}
|
||||
|
||||
#embeddings-list button {
|
||||
margin: 2px;
|
||||
color: var(--button-color);
|
||||
@ -1741,6 +1818,32 @@ body.wait-pause {
|
||||
float: right;
|
||||
}
|
||||
|
||||
.use-as-thumb-grid {
    display: grid;
|
||||
grid-template-columns: 1fr auto;
|
||||
grid-template-rows: 1fr auto;
|
||||
gap: 8px 8px;
|
||||
grid-auto-flow: row;
|
||||
grid-template-areas:
|
||||
"uat-preview uat-select"
|
||||
"uat-preview uat-buttons";
|
||||
}
|
||||
|
||||
.use-as-thumb-preview {
|
||||
justify-self: center;
|
||||
align-self: center;
|
||||
grid-area: uat-preview;
|
||||
}
|
||||
|
||||
.use-as-thumb-select {
|
||||
grid-area: uat-select;
|
||||
}
|
||||
|
||||
.use-as-thumb-buttons {
|
||||
justify-self: center;
|
||||
grid-area: uat-buttons;
|
||||
}
|
||||
|
||||
|
||||
.diffusers-disabled-on-startup .diffusers-restart-needed {
|
||||
font-size: 0;
|
||||
}
|
||||
@ -1778,6 +1881,10 @@ div#recent-resolutions-popup small {
|
||||
opacity: 0.7;
|
||||
}
|
||||
|
||||
div#common-resolution-list button {
|
||||
background: var(--background-color1);
|
||||
}
|
||||
|
||||
td#image-size-options small {
|
||||
margin-right: 0px !important;
|
||||
}
|
||||
@ -1794,6 +1901,27 @@ div#enlarge-buttons {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.two-column {
    display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
grid-template-rows: 1fr;
|
||||
gap: 0px 0.5em;
|
||||
grid-auto-flow: row;
|
||||
grid-template-areas:
|
||||
"left-column right-column";
|
||||
}
|
||||
|
||||
.left-column {
|
||||
justify-self: center;
|
||||
align-self: center;
|
||||
grid-area: left-column;
|
||||
}
|
||||
|
||||
.right-column {
|
||||
justify-self: center;
|
||||
align-self: center;
|
||||
grid-area: right-column;
|
||||
}
|
||||
|
||||
.clickable {
|
||||
cursor: pointer;
|
||||
}
|
||||
@ -1828,7 +1956,100 @@ div#enlarge-buttons {
|
||||
width: 77%;
|
||||
}
|
||||
|
||||
.drop-area {
|
||||
width: 45%;
|
||||
height: 50px;
|
||||
border: 2px dashed #ccc;
|
||||
text-align: center;
|
||||
line-height: 50px;
|
||||
font-size: small;
|
||||
color: #ccc;
|
||||
border-radius: 10px;
|
||||
display: none;
|
||||
margin: 12px 10px;
|
||||
}
|
||||
|
||||
#num_outputs_total {
|
||||
width: 42pt;
|
||||
}
|
||||
#num_outputs_parallel {
|
||||
width: 42pt;
|
||||
}
|
||||
.model_entry .model_weight {
|
||||
width: 50pt;
|
||||
}
|
||||
|
||||
/* hack for fixing Image Modifier Improvements plugin */
|
||||
#imageTagPopupContainer {
|
||||
position: absolute;
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 400px) {
|
||||
.editor-slider {
|
||||
width: 40%;
|
||||
}
|
||||
input::-webkit-outer-spin-button,
|
||||
input::-webkit-inner-spin-button {
|
||||
-webkit-appearance: none;
|
||||
margin: 0;
|
||||
}
|
||||
input[type=number] {
|
||||
-moz-appearance: textfield;
|
||||
/* Firefox */
|
||||
}
|
||||
#num_outputs_total {
|
||||
width: 27pt;
|
||||
}
|
||||
#num_outputs_parallel {
|
||||
width: 27pt;
|
||||
margin-left: -4pt;
|
||||
}
|
||||
.model_entry .model_weight {
|
||||
width: 30pt;
|
||||
}
|
||||
#width {
|
||||
width: 50pt;
|
||||
}
|
||||
#height {
|
||||
width: 50pt;
|
||||
}
|
||||
}
|
||||
@media screen and (max-width: 460px) {
|
||||
#widthLabel small span {
|
||||
display: none;
|
||||
}
|
||||
#widthLabel small:after {
|
||||
content: "(w)";
|
||||
}
|
||||
#heightLabel small span {
|
||||
display: none;
|
||||
}
|
||||
#heightLabel small:after {
|
||||
content: "(h)";
|
||||
}
|
||||
#prompt-toolbar-right {
|
||||
text-align: right;
|
||||
}
|
||||
#editor-settings label {
|
||||
font-size: 9pt;
|
||||
}
|
||||
#editor-settings .model-filter {
|
||||
width: 56%;
|
||||
}
|
||||
#vae_model {
|
||||
width: 65% !important;
|
||||
}
|
||||
.model_entry .model_name {
|
||||
width: 60% !important;
|
||||
}
|
||||
}
|
||||
|
||||
#supportBanner {
|
||||
font-size: 9pt;
|
||||
padding: 5pt;
|
||||
border: 1px solid var(--background-color2);
|
||||
margin-bottom: 5pt;
|
||||
border-radius: 4pt;
|
||||
padding-top: 6pt;
|
||||
color: var(--small-label-color);
|
||||
}
|
||||
|
ui/media/images/noimg.png (new binary file, 1.3 KiB; not shown)
@ -15,14 +15,14 @@ const SETTINGS_IDS_LIST = [
|
||||
"stable_diffusion_model",
|
||||
"clip_skip",
|
||||
"vae_model",
|
||||
"hypernetwork_model",
|
||||
"sampler_name",
|
||||
"scheduler_name",
|
||||
"width",
|
||||
"height",
|
||||
"num_inference_steps",
|
||||
"guidance_scale",
|
||||
"distilled_guidance_scale",
|
||||
"prompt_strength",
|
||||
"hypernetwork_strength",
|
||||
"tiling",
|
||||
"output_format",
|
||||
"output_quality",
|
||||
@ -45,6 +45,7 @@ const SETTINGS_IDS_LIST = [
|
||||
"sound_toggle",
|
||||
"vram_usage_level",
|
||||
"confirm_dangerous_actions",
|
||||
"profileName",
|
||||
"metadata_output_format",
|
||||
"auto_save_settings",
|
||||
"apply_color_correction",
|
||||
@ -54,10 +55,20 @@ const SETTINGS_IDS_LIST = [
|
||||
"zip_toggle",
|
||||
"tree_toggle",
|
||||
"json_toggle",
|
||||
"extract_lora_from_prompt",
|
||||
"embedding-card-size-selector",
|
||||
"lora_model",
|
||||
"enable_vae_tiling",
|
||||
"controlnet_alpha",
|
||||
]
|
||||
|
||||
const IGNORE_BY_DEFAULT = ["prompt"]
|
||||
|
||||
if (!testDiffusers.checked) {
|
||||
SETTINGS_IDS_LIST.push("hypernetwork_model")
|
||||
SETTINGS_IDS_LIST.push("hypernetwork_strength")
|
||||
}
|
||||
|
||||
const SETTINGS_SECTIONS = [
|
||||
// gets the "keys" property filled in with an ordered list of settings in this section via initSettings
|
||||
{ id: "editor-inputs", name: "Prompt" },
|
||||
|
ui/media/js/croppr.js (new executable file, 1189 lines; diff not shown: too large)
@ -131,6 +131,15 @@ const TASK_MAPPING = {
|
||||
readUI: () => parseFloat(guidanceScaleField.value),
|
||||
parse: (val) => parseFloat(val),
|
||||
},
|
||||
distilled_guidance_scale: {
|
||||
name: "Distilled Guidance",
|
||||
setUI: (distilled_guidance_scale) => {
|
||||
distilledGuidanceScaleField.value = distilled_guidance_scale
|
||||
updateDistilledGuidanceScaleSlider()
|
||||
},
|
||||
readUI: () => parseFloat(distilledGuidanceScaleField.value),
|
||||
parse: (val) => parseFloat(val),
|
||||
},
|
||||
prompt_strength: {
|
||||
name: "Prompt Strength",
|
||||
setUI: (prompt_strength) => {
|
||||
@ -242,6 +251,14 @@ const TASK_MAPPING = {
|
||||
readUI: () => samplerField.value,
|
||||
parse: (val) => val,
|
||||
},
|
||||
scheduler_name: {
|
||||
name: "Scheduler",
|
||||
setUI: (scheduler_name) => {
|
||||
schedulerField.value = scheduler_name
|
||||
},
|
||||
readUI: () => schedulerField.value,
|
||||
parse: (val) => val,
|
||||
},
|
||||
use_stable_diffusion_model: {
|
||||
name: "Stable Diffusion model",
|
||||
setUI: (use_stable_diffusion_model) => {
|
||||
@ -268,7 +285,11 @@ const TASK_MAPPING = {
|
||||
tiling: {
|
||||
name: "Tiling",
|
||||
setUI: (val) => {
|
||||
tilingField.value = val
|
||||
if (val === null || val === "None") {
|
||||
tilingField.value = "none"
|
||||
} else {
|
||||
tilingField.value = val
|
||||
}
|
||||
},
|
||||
readUI: () => tilingField.value,
|
||||
parse: (val) => val,
|
||||
@ -289,42 +310,67 @@ const TASK_MAPPING = {
|
||||
readUI: () => vaeModelField.value,
|
||||
parse: (val) => val,
|
||||
},
|
||||
use_controlnet_model: {
|
||||
name: "ControlNet model",
|
||||
setUI: (use_controlnet_model) => {
|
||||
controlnetModelField.value = getModelPath(use_controlnet_model, [".pth", ".safetensors"])
|
||||
},
|
||||
readUI: () => controlnetModelField.value,
|
||||
parse: (val) => val,
|
||||
},
|
||||
control_filter_to_apply: {
|
||||
name: "ControlNet Filter",
|
||||
setUI: (control_filter_to_apply) => {
|
||||
controlImageFilterField.value = control_filter_to_apply
|
||||
},
|
||||
readUI: () => controlImageFilterField.value,
|
||||
parse: (val) => val,
|
||||
},
|
||||
control_alpha: {
|
||||
name: "ControlNet Strength",
|
||||
setUI: (control_alpha) => {
|
||||
control_alpha = control_alpha || 1.0
|
||||
controlAlphaField.value = control_alpha
|
||||
updateControlAlphaSlider()
|
||||
},
|
||||
readUI: () => parseFloat(controlAlphaField.value),
|
||||
parse: (val) => val === null ? 1.0 : parseFloat(val),
|
||||
},
|
||||
use_lora_model: {
|
||||
name: "LoRA model",
|
||||
setUI: (use_lora_model) => {
|
||||
// create rows
|
||||
for (let i = loraModels.length; i < use_lora_model.length; i++) {
|
||||
createLoraEntry()
|
||||
}
|
||||
|
||||
use_lora_model.forEach((model_name, i) => {
|
||||
let field = loraModels[i][0]
|
||||
const oldVal = field.value
|
||||
|
||||
if (model_name !== "") {
|
||||
model_name = getModelPath(model_name, [".ckpt", ".safetensors"])
|
||||
model_name = model_name !== "" ? model_name : oldVal
|
||||
let modelPaths = []
|
||||
use_lora_model = use_lora_model === null ? "" : use_lora_model
|
||||
use_lora_model = Array.isArray(use_lora_model) ? use_lora_model : [use_lora_model]
|
||||
use_lora_model.forEach((m) => {
|
||||
if (m.includes("models\\lora\\")) {
|
||||
m = m.split("models\\lora\\")[1]
|
||||
} else if (m.includes("models\\\\lora\\\\")) {
|
||||
m = m.split("models\\\\lora\\\\")[1]
|
||||
} else if (m.includes("models/lora/")) {
|
||||
m = m.split("models/lora/")[1]
|
||||
}
|
||||
field.value = model_name
|
||||
m = m.replaceAll("\\\\", "/")
|
||||
m = getModelPath(m, [".ckpt", ".safetensors"])
|
||||
modelPaths.push(m)
|
||||
})
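            // Editor's illustration (hypothetical file name): a value like "models/lora/my_lora.safetensors"
            // has its models/lora/ prefix stripped above, and getModelPath() is assumed to drop the
            // .ckpt/.safetensors extension, leaving "my_lora" as the stored model name.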
|
||||
|
||||
// clear the remaining entries
|
||||
let container = document.querySelector("#lora_model_container .model_entries")
|
||||
for (let i = use_lora_model.length; i < loraModels.length; i++) {
|
||||
let modelEntry = loraModels[i][2]
|
||||
container.removeChild(modelEntry)
|
||||
}
|
||||
|
||||
loraModels.splice(use_lora_model.length)
|
||||
loraModelField.modelNames = modelPaths
|
||||
},
|
||||
readUI: () => {
|
||||
let values = loraModels.map((e) => e[0].value)
|
||||
values = values.filter((e) => e.trim() !== "")
|
||||
values = values.length > 0 ? values : "None"
|
||||
return values
|
||||
return loraModelField.modelNames
|
||||
},
|
||||
parse: (val) => {
|
||||
val = !val || val === "None" ? "" : val
|
||||
if (typeof val === "string" && val.includes(",")) {
|
||||
val = val.split(",")
|
||||
val = val.map((v) => v.trim())
|
||||
val = val.map((v) => v.replaceAll("\\", "\\\\"))
|
||||
val = val.map((v) => v.replaceAll('"', ""))
|
||||
val = val.map((v) => v.replaceAll("'", ""))
|
||||
val = val.map((v) => '"' + v + '"')
|
||||
val = "[" + val + "]"
|
||||
val = JSON.parse(val)
|
||||
}
|
||||
val = Array.isArray(val) ? val : [val]
|
||||
return val
|
||||
},
|
||||
@ -332,31 +378,17 @@ const TASK_MAPPING = {
|
||||
lora_alpha: {
|
||||
name: "LoRA Strength",
|
||||
setUI: (lora_alpha) => {
|
||||
for (let i = loraModels.length; i < lora_alpha.length; i++) {
|
||||
createLoraEntry()
|
||||
}
|
||||
|
||||
lora_alpha.forEach((model_strength, i) => {
|
||||
let field = loraModels[i][1]
|
||||
field.value = model_strength
|
||||
})
|
||||
|
||||
// clear the remaining entries
|
||||
let container = document.querySelector("#lora_model_container .model_entries")
|
||||
for (let i = lora_alpha.length; i < loraModels.length; i++) {
|
||||
let modelEntry = loraModels[i][2]
|
||||
container.removeChild(modelEntry)
|
||||
}
|
||||
|
||||
loraModels.splice(lora_alpha.length)
|
||||
lora_alpha = Array.isArray(lora_alpha) ? lora_alpha : [lora_alpha]
|
||||
loraModelField.modelWeights = lora_alpha
|
||||
},
|
||||
readUI: () => {
|
||||
let models = loraModels.filter((e) => e[0].value.trim() !== "")
|
||||
let values = models.map((e) => e[1].value)
|
||||
values = values.length > 0 ? values : 0
|
||||
return values
|
||||
return loraModelField.modelWeights
|
||||
},
|
||||
parse: (val) => {
|
||||
if (typeof val === "string" && val.includes(",")) {
|
||||
val = "[" + val.replaceAll("'", '"') + "]"
|
||||
val = JSON.parse(val)
|
||||
}
|
||||
val = Array.isArray(val) ? val : [val]
|
||||
val = val.map((e) => parseFloat(e))
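            // e.g. (editor's illustration) a metadata value of "0.5, 0.8" parses to [0.5, 0.8], and a single value such as 0.5 becomes [0.5]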
|
||||
return val
|
||||
@ -472,11 +504,8 @@ function restoreTaskToUI(task, fieldsToSkip) {
|
||||
}
|
||||
|
||||
if (!("use_lora_model" in task.reqBody)) {
|
||||
loraModels.forEach((e) => {
|
||||
e[0].value = ""
|
||||
e[1].value = 0
|
||||
e[0].dispatchEvent(new Event("change"))
|
||||
})
|
||||
loraModelField.modelNames = []
|
||||
loraModelField.modelWeights = []
|
||||
}
|
||||
|
||||
// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)
|
||||
@ -519,10 +548,28 @@ function restoreTaskToUI(task, fieldsToSkip) {
|
||||
)
|
||||
initImagePreview.src = task.reqBody.init_image
|
||||
}
|
||||
|
||||
// hide/show controlnet picture as needed
|
||||
if (IMAGE_REGEX.test(controlImagePreview.src) && task.reqBody.control_image == undefined) {
|
||||
// hide source image
|
||||
controlImageClearBtn.dispatchEvent(new Event("click"))
|
||||
} else if (task.reqBody.control_image !== undefined) {
|
||||
// listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
|
||||
controlImagePreview.src = task.reqBody.control_image
|
||||
}
|
||||
|
||||
if ("use_controlnet_model" in task.reqBody && task.reqBody.use_controlnet_model && !("control_alpha" in task.reqBody)) {
|
||||
controlAlphaField.value = 1.0
|
||||
updateControlAlphaSlider()
|
||||
}
|
||||
}
|
||||
function readUI() {
|
||||
const reqBody = {}
|
||||
for (const key in TASK_MAPPING) {
|
||||
if (testDiffusers.checked && (key === "use_hypernetwork_model" || key === "hypernetwork_strength")) {
|
||||
continue
|
||||
}
|
||||
|
||||
reqBody[key] = TASK_MAPPING[key].readUI()
|
||||
}
|
||||
return {
|
||||
@@ -560,15 +607,23 @@ const TASK_TEXT_MAPPING = {
|
||||
seed: "Seed",
|
||||
num_inference_steps: "Steps",
|
||||
guidance_scale: "Guidance Scale",
|
||||
distilled_guidance_scale: "Distilled Guidance",
|
||||
prompt_strength: "Prompt Strength",
|
||||
use_face_correction: "Use Face Correction",
|
||||
use_upscale: "Use Upscaling",
|
||||
upscale_amount: "Upscale By",
|
||||
sampler_name: "Sampler",
|
||||
scheduler_name: "Scheduler",
|
||||
negative_prompt: "Negative Prompt",
|
||||
use_stable_diffusion_model: "Stable Diffusion model",
|
||||
use_hypernetwork_model: "Hypernetwork model",
|
||||
hypernetwork_strength: "Hypernetwork Strength",
|
||||
use_lora_model: "LoRA model",
|
||||
lora_alpha: "LoRA Strength",
|
||||
use_controlnet_model: "ControlNet model",
|
||||
control_filter_to_apply: "ControlNet Filter",
|
||||
control_alpha: "ControlNet Strength",
|
||||
tiling: "Seamless Tiling",
|
||||
}
|
||||
function parseTaskFromText(str) {
|
||||
const taskReqBody = {}
|
||||
2  ui/media/js/exif-reader.js  Normal file
File diff suppressed because one or more lines are too long
1461  ui/media/js/main.js
File diff suppressed because it is too large
256  ui/media/js/multi-model-selector.js  Normal file
@@ -0,0 +1,256 @@
/**
|
||||
* A component consisting of multiple model dropdowns, along with a "weight" field per model.
|
||||
*
|
||||
* Behaves like a single input element, giving an object in response to the .value field.
|
||||
*
|
||||
* Inspired by the design of the ModelDropdown component (searchable-models.js).
|
||||
*/
|
||||
|
||||
class MultiModelSelector {
|
||||
root
|
||||
modelType
|
||||
modelNameFriendly
|
||||
defaultWeight
|
||||
weightStep
|
||||
|
||||
modelContainer
|
||||
addNewButton
|
||||
|
||||
counter = 0
|
||||
|
||||
/* MIMIC A REGULAR INPUT FIELD */
|
||||
get id() {
|
||||
return this.root.id
|
||||
}
|
||||
get parentElement() {
|
||||
return this.root.parentElement
|
||||
}
|
||||
get parentNode() {
|
||||
return this.root.parentNode
|
||||
}
|
||||
get value() {
|
||||
return { modelNames: this.modelNames, modelWeights: this.modelWeights }
|
||||
}
|
||||
set value(modelData) {
|
||||
if (typeof modelData !== "object") {
|
||||
throw new Error("Multi-model selector expects an object containing modelNames and modelWeights as keys!")
|
||||
}
|
||||
if (!("modelNames" in modelData) || !("modelWeights" in modelData)) {
|
||||
throw new Error("modelNames or modelWeights not present in the data passed to the multi-model selector")
|
||||
}
|
||||
|
||||
let newModelNames = modelData["modelNames"]
|
||||
let newModelWeights = modelData["modelWeights"]
|
||||
if (newModelNames.length !== newModelWeights.length) {
|
||||
throw new Error("Need to pass an equal number of modelNames and modelWeights!")
|
||||
}
|
||||
|
||||
// update weight first, name second.
|
||||
// for some unholy reason this order matters for dispatch chains
|
||||
// the root of all this unholiness is because searchable-models automatically dispatches an update event
|
||||
// as soon as the value is updated via JS, which is against the DOM pattern of not dispatching an event automatically
|
||||
// unless the caller explicitly dispatches the event.
|
||||
this.modelWeights = newModelWeights
|
||||
this.modelNames = newModelNames
|
||||
}
|
||||
get disabled() {
|
||||
return false
|
||||
}
|
||||
set disabled(state) {
|
||||
// do nothing
|
||||
}
|
||||
getModelElements(ignoreEmpty = false) {
|
||||
let entries = this.root.querySelectorAll(".model_entry")
|
||||
entries = [...entries]
|
||||
let elements = entries.map((e) => {
|
||||
let modelName = e.querySelector(".model_name").field
|
||||
let modelWeight = e.querySelector(".model_weight")
|
||||
if (ignoreEmpty && modelName.value.trim() === "") {
|
||||
return null
|
||||
}
|
||||
|
||||
return { name: modelName, weight: modelWeight }
|
||||
})
|
||||
elements = elements.filter((e) => e !== null)
|
||||
return elements
|
||||
}
|
||||
addEventListener(type, listener, options) {
|
||||
// do nothing
|
||||
}
|
||||
dispatchEvent(event) {
|
||||
// do nothing
|
||||
}
|
||||
appendChild(option) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
// remember 'this' - http://blog.niftysnippets.org/2008/04/you-must-remember-this.html
|
||||
bind(f, obj) {
|
||||
return function() {
|
||||
return f.apply(obj, arguments)
|
||||
}
|
||||
}
|
||||
|
||||
constructor(root, modelType, modelNameFriendly = undefined, defaultWeight = 0.5, weightStep = 0.02) {
|
||||
this.root = root
|
||||
this.modelType = modelType
|
||||
this.modelNameFriendly = modelNameFriendly || modelType
|
||||
this.defaultWeight = defaultWeight
|
||||
this.weightStep = weightStep
|
||||
|
||||
let self = this
|
||||
document.addEventListener("refreshModels", function() {
|
||||
setTimeout(self.bind(self.populateModels, self), 1)
|
||||
})
|
||||
|
||||
this.createStructure()
|
||||
this.populateModels()
|
||||
}
|
||||
|
||||
createStructure() {
|
||||
this.modelContainer = document.createElement("div")
|
||||
this.modelContainer.className = "model_entries"
|
||||
this.root.appendChild(this.modelContainer)
|
||||
|
||||
this.addNewButton = document.createElement("button")
|
||||
this.addNewButton.className = "add_model_entry"
|
||||
this.addNewButton.innerHTML = '<i class="fa-solid fa-plus"></i> add another ' + this.modelNameFriendly
|
||||
this.addNewButton.addEventListener("click", this.bind(this.addModelEntry, this))
|
||||
this.root.appendChild(this.addNewButton)
|
||||
}
|
||||
|
||||
populateModels() {
|
||||
if (this.root.dataset.path === "") {
|
||||
if (this.length === 0) {
|
||||
this.addModelEntry() // create a single blank entry
|
||||
}
|
||||
} else {
|
||||
this.value = JSON.parse(this.root.dataset.path)
|
||||
}
|
||||
}
|
||||
|
||||
addModelEntry() {
|
||||
let idx = this.counter++
|
||||
let currLength = this.length
|
||||
|
||||
const modelElement = document.createElement("div")
|
||||
modelElement.className = "model_entry"
|
||||
modelElement.innerHTML = `
|
||||
<input id="${this.modelType}_${idx}" class="model_name model-filter" type="text" spellcheck="false" autocomplete="off" data-path="" />
|
||||
<input class="model_weight" type="number" step="${this.weightStep}" value="${this.defaultWeight}" pattern="^-?[0-9]*\.?[0-9]*$" onkeypress="preventNonNumericalInput(event)">
|
||||
`
|
||||
this.modelContainer.appendChild(modelElement)
|
||||
|
||||
let modelNameEl = modelElement.querySelector(".model_name")
|
||||
modelNameEl.field = new ModelDropdown(modelNameEl, this.modelType, "None")
|
||||
let modelWeightEl = modelElement.querySelector(".model_weight")
|
||||
|
||||
let self = this
|
||||
|
||||
function makeUpdateEvent(type) {
|
||||
return function(e) {
|
||||
e.stopPropagation()
|
||||
|
||||
let modelData = self.value
|
||||
self.root.dataset.path = JSON.stringify(modelData)
|
||||
|
||||
self.root.dispatchEvent(new Event(type))
|
||||
}
|
||||
}
|
||||
|
||||
modelNameEl.addEventListener("change", makeUpdateEvent("change"))
|
||||
modelNameEl.addEventListener("input", makeUpdateEvent("input"))
|
||||
modelWeightEl.addEventListener("change", makeUpdateEvent("change"))
|
||||
modelWeightEl.addEventListener("input", makeUpdateEvent("input"))
|
||||
|
||||
let removeBtn = document.createElement("button")
|
||||
removeBtn.className = "remove_model_btn"
|
||||
removeBtn.setAttribute("title", "Remove model")
|
||||
removeBtn.innerHTML = '<i class="fa-solid fa-minus"></i>'
|
||||
|
||||
if (currLength === 0) {
|
||||
removeBtn.classList.add("displayNone")
|
||||
}
|
||||
|
||||
removeBtn.addEventListener(
|
||||
"click",
|
||||
this.bind(function(e) {
|
||||
this.modelContainer.removeChild(modelElement)
|
||||
|
||||
makeUpdateEvent("change")(e)
|
||||
}, this)
|
||||
)
|
||||
|
||||
modelElement.appendChild(removeBtn)
|
||||
}
|
||||
|
||||
removeModelEntry() {
|
||||
if (this.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
let lastEntry = this.modelContainer.lastElementChild
|
||||
lastEntry.remove()
|
||||
}
|
||||
|
||||
get length() {
|
||||
return this.getModelElements().length
|
||||
}
|
||||
|
||||
get modelNames() {
|
||||
return this.getModelElements(true).map((e) => e.name.value)
|
||||
}
|
||||
|
||||
set modelNames(newModelNames) {
|
||||
this.resizeEntryList(newModelNames.length)
|
||||
|
||||
if (newModelNames.length === 0) {
|
||||
this.getModelElements()[0].name.value = ""
|
||||
}
|
||||
|
||||
// assign to the corresponding elements
|
||||
let currElements = this.getModelElements()
|
||||
for (let i = 0; i < newModelNames.length; i++) {
|
||||
let curr = currElements[i]
|
||||
|
||||
curr.name.value = newModelNames[i]
|
||||
}
|
||||
}
|
||||
|
||||
get modelWeights() {
|
||||
return this.getModelElements(true).map((e) => e.weight.value)
|
||||
}
|
||||
|
||||
set modelWeights(newModelWeights) {
|
||||
this.resizeEntryList(newModelWeights.length)
|
||||
|
||||
if (newModelWeights.length === 0) {
|
||||
this.getModelElements()[0].weight.value = this.defaultWeight
|
||||
}
|
||||
|
||||
// assign to the corresponding elements
|
||||
let currElements = this.getModelElements()
|
||||
for (let i = 0; i < newModelWeights.length; i++) {
|
||||
let curr = currElements[i]
|
||||
|
||||
curr.weight.value = newModelWeights[i]
|
||||
}
|
||||
}
|
||||
|
||||
resizeEntryList(newLength) {
|
||||
if (newLength === 0) {
|
||||
newLength = 1
|
||||
}
|
||||
|
||||
let currLength = this.length
|
||||
if (currLength < newLength) {
|
||||
for (let i = currLength; i < newLength; i++) {
|
||||
this.addModelEntry()
|
||||
}
|
||||
} else {
|
||||
for (let i = newLength; i < currLength; i++) {
|
||||
this.removeModelEntry()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
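// Usage sketch (illustrative only, not part of the diff). Assumes searchable-models.js
// (ModelDropdown) is loaded and that the page contains:
//   <div id="lora_model_container" data-path=""></div>
// The model names below are placeholders for LoRA files that exist in the models list.
//
// let loraSelector = new MultiModelSelector(
//     document.querySelector("#lora_model_container"), // root element
//     "lora",   // modelType, used by each entry's ModelDropdown
//     "LoRA",   // friendly name shown on the "add another" button
//     0.5,      // default weight
//     0.02      // weight step
// )
//
// // Behaves like a single input: read or assign an object of parallel arrays.
// loraSelector.value = { modelNames: ["some-lora", "another-lora"], modelWeights: [0.5, 0.3] }
// console.log(loraSelector.value.modelNames)   // ["some-lora", "another-lora"]
// console.log(loraSelector.value.modelWeights) // ["0.5", "0.3"] (number inputs return strings)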
|
@@ -97,6 +97,17 @@ var PARAMETERS = [
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
id: "models_dir",
|
||||
type: ParameterType.custom,
|
||||
icon: "fa-folder-tree",
|
||||
label: "Models Folder",
|
||||
note: "Path to the 'models' folder. Please save and restart Easy Diffusion after changing this.",
|
||||
saveInAppConfig: true,
|
||||
render: (parameter) => {
|
||||
return `<input id="${parameter.id}" name="${parameter.id}" size="30">`
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "block_nsfw",
|
||||
type: ParameterType.checkbox,
|
||||
@@ -121,6 +132,15 @@ var PARAMETERS = [
|
||||
icon: "fa-arrow-down-short-wide",
|
||||
default: false,
|
||||
},
|
||||
{
|
||||
id: "extract_lora_from_prompt",
|
||||
type: ParameterType.checkbox,
|
||||
label: "Extract LoRA tags from the prompt",
|
||||
note:
|
||||
"Automatically extract lora tags like <lora:name:0.4> from the prompt, and apply the correct LoRA (if present)",
|
||||
icon: "fa-code",
|
||||
default: true,
|
||||
},
|
||||
{
|
||||
id: "ui_open_browser_on_start",
|
||||
type: ParameterType.checkbox,
|
||||
@@ -141,6 +161,7 @@ var PARAMETERS = [
|
||||
"<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
|
||||
icon: "fa-forward",
|
||||
default: "balanced",
|
||||
saveInAppConfig: true,
|
||||
options: [
|
||||
{ value: "balanced", label: "Balanced" },
|
||||
{ value: "high", label: "High" },
|
||||
@@ -185,6 +206,17 @@ var PARAMETERS = [
|
||||
icon: "fa-check-double",
|
||||
default: true,
|
||||
},
|
||||
{
|
||||
id: "profileName",
|
||||
type: ParameterType.custom,
|
||||
label: "Profile Name",
|
||||
note:
|
||||
"Name of the profile for model manager settings, e.g. thumbnails for embeddings. Use this to have different settings for different users.",
|
||||
render: (parameter) => {
|
||||
return `<input id="${parameter.id}" name="${parameter.id}" value="default" size="12">`
|
||||
},
|
||||
icon: "fa-user-gear",
|
||||
},
|
||||
{
|
||||
id: "listen_to_network",
|
||||
type: ParameterType.checkbox,
|
||||
@@ -218,14 +250,19 @@ var PARAMETERS = [
|
||||
default: false,
|
||||
},
|
||||
{
|
||||
id: "test_diffusers",
|
||||
type: ParameterType.checkbox,
|
||||
label: "Test Diffusers",
|
||||
id: "backend",
|
||||
type: ParameterType.select,
|
||||
label: "Engine to use",
|
||||
note:
|
||||
"<b>Experimental! Can have bugs!</b> Use upcoming features (like LoRA) in our new engine. Please press Save, then restart the program after changing this.",
|
||||
icon: "fa-bolt",
|
||||
default: false,
|
||||
"Use our new v3.5 engine (Forge), with additional features like Flux, SD3, Lycoris and lots more! Please press Save, then restart the program after changing this.",
|
||||
icon: "fa-robot",
|
||||
saveInAppConfig: true,
|
||||
default: "ed_diffusers",
|
||||
options: [
|
||||
{ value: "webui", label: "v3.5 (latest)" },
|
||||
{ value: "ed_diffusers", label: "v3.0" },
|
||||
{ value: "ed_classic", label: "v2.0" },
|
||||
],
|
||||
},
|
||||
{
|
||||
id: "cloudflare",
|
||||
@@ -400,7 +437,10 @@ let listenPortField = document.querySelector("#listen_port")
|
||||
let useBetaChannelField = document.querySelector("#use_beta_channel")
|
||||
let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
|
||||
let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")
|
||||
let testDiffusers = document.querySelector("#test_diffusers")
|
||||
let testDiffusers = document.querySelector("#use_v3_engine")
|
||||
let backendEngine = document.querySelector("#backend")
|
||||
let profileNameField = document.querySelector("#profileName")
|
||||
let modelsDirField = document.querySelector("#models_dir")
|
||||
|
||||
let saveSettingsBtn = document.querySelector("#save-system-settings-btn")
|
||||
|
||||
@@ -421,6 +461,23 @@ async function changeAppConfig(configDelta) {
|
||||
}
|
||||
}
|
||||
|
||||
function getDefaultDisplay(element) {
|
||||
const tag = element.tagName.toLowerCase();
|
||||
const defaultDisplays = {
|
||||
div: 'block',
|
||||
span: 'inline',
|
||||
p: 'block',
|
||||
tr: 'table-row',
|
||||
table: 'table',
|
||||
li: 'list-item',
|
||||
ul: 'block',
|
||||
ol: 'block',
|
||||
button: 'inline',
|
||||
// Add more if needed
|
||||
};
|
||||
return defaultDisplays[tag] || 'block'; // Default to 'block' if not listed
|
||||
}
|
||||
|
||||
async function getAppConfig() {
|
||||
try {
|
||||
let res = await fetch("/get/app_config")
|
||||
@@ -432,8 +489,6 @@ async function getAppConfig() {
|
||||
if (config.update_branch === "beta") {
|
||||
useBetaChannelField.checked = true
|
||||
document.querySelector("#updateBranchLabel").innerText = "(beta)"
|
||||
} else {
|
||||
getParameterSettingsEntry("test_diffusers").classList.add("displayNone")
|
||||
}
|
||||
if (config.ui && config.ui.open_browser_on_start === false) {
|
||||
uiOpenBrowserOnStartField.checked = false
|
||||
@@ -444,12 +499,19 @@ async function getAppConfig() {
|
||||
if (config.net && config.net.listen_port !== undefined) {
|
||||
listenPortField.value = config.net.listen_port
|
||||
}
|
||||
modelsDirField.value = config.models_dir
|
||||
|
||||
const testDiffusersEnabled = config.test_diffusers && config.update_branch !== "main"
|
||||
let testDiffusersEnabled = true
|
||||
if (config.backend === "ed_classic") {
|
||||
testDiffusersEnabled = false
|
||||
}
|
||||
testDiffusers.checked = testDiffusersEnabled
|
||||
backendEngine.value = config.backend
|
||||
document.querySelector("#test_diffusers").checked = testDiffusers.checked // don't break plugins
|
||||
document.querySelector("#use_v3_engine").checked = testDiffusers.checked // don't break plugins
|
||||
|
||||
if (config.config_on_startup) {
|
||||
if (config.config_on_startup?.test_diffusers && config.update_branch !== "main") {
|
||||
if (config.config_on_startup?.backend !== "ed_classic") {
|
||||
document.body.classList.add("diffusers-enabled-on-startup")
|
||||
document.body.classList.remove("diffusers-disabled-on-startup")
|
||||
} else {
|
||||
@@ -458,33 +520,29 @@ async function getAppConfig() {
|
||||
}
|
||||
}
|
||||
|
||||
if (!testDiffusersEnabled) {
|
||||
document.querySelector("#lora_model_container").style.display = "none"
|
||||
document.querySelector("#tiling_container").style.display = "none"
|
||||
document.querySelector("#controlnet_model_container").style.display = "none"
|
||||
document.querySelector("#hypernetwork_strength_container").style.display = ""
|
||||
|
||||
document.querySelectorAll("#sampler_name option.diffusers-only").forEach((option) => {
|
||||
option.style.display = "none"
|
||||
})
|
||||
if (config.backend === "ed_classic") {
|
||||
IMAGE_STEP_SIZE = 64
|
||||
customWidthField.step = IMAGE_STEP_SIZE
|
||||
customHeightField.step = IMAGE_STEP_SIZE
|
||||
} else {
|
||||
document.querySelector("#lora_model_container").style.display = ""
|
||||
document.querySelector("#tiling_container").style.display = ""
|
||||
document.querySelector("#controlnet_model_container").style.display = ""
|
||||
document.querySelector("#hypernetwork_strength_container").style.display = "none"
|
||||
|
||||
document.querySelectorAll("#sampler_name option.k_diffusion-only").forEach((option) => {
|
||||
option.style.display = "none"
|
||||
})
|
||||
document.querySelector("#clip_skip_config").classList.remove("displayNone")
|
||||
document.querySelector("#embeddings-button").classList.remove("displayNone")
|
||||
document.querySelector("#negative-embeddings-button").classList.remove("displayNone")
|
||||
IMAGE_STEP_SIZE = 8
|
||||
customWidthField.step = IMAGE_STEP_SIZE
|
||||
customHeightField.step = IMAGE_STEP_SIZE
|
||||
}
|
||||
|
||||
customWidthField.step = IMAGE_STEP_SIZE
|
||||
customHeightField.step = IMAGE_STEP_SIZE
|
||||
|
||||
const currentBackendKey = "backend_" + config.backend
|
||||
|
||||
document.querySelectorAll('.gated-feature').forEach((element) => {
|
||||
const featureKeys = element.getAttribute('data-feature-keys').split(' ')
|
||||
|
||||
if (featureKeys.includes(currentBackendKey)) {
|
||||
element.style.display = getDefaultDisplay(element)
|
||||
} else {
|
||||
element.style.display = 'none'
|
||||
}
|
||||
});
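// Illustrative example (not part of the diff): markup gated this way would look like
//   <div class="gated-feature" data-feature-keys="backend_webui">Forge-only controls</div>
//   <tr class="gated-feature" data-feature-keys="backend_webui backend_ed_diffusers">...</tr>
// With config.backend === "webui", currentBackendKey is "backend_webui", so both elements above
// are shown (getDefaultDisplay() picks "block" / "table-row"), and any .gated-feature element
// whose data-feature-keys does not include that key is hidden.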
|
||||
|
||||
if (config.force_save_metadata) {
|
||||
metadataOutputFormatField.value = config.force_save_metadata
|
||||
}
|
||||
|
||||
console.log("get config status response", config)
|
||||
@@ -698,12 +756,20 @@ async function getSystemInfo() {
|
||||
force = res["enforce_output_dir"]
|
||||
if (force == true) {
|
||||
saveToDiskField.checked = true
|
||||
metadataOutputFormatField.disabled = false
|
||||
metadataOutputFormatField.disabled = res["enforce_output_metadata"]
|
||||
diskPathField.disabled = true
|
||||
}
|
||||
saveToDiskField.disabled = force
|
||||
diskPathField.disabled = force
|
||||
} else {
|
||||
diskPathField.disabled = !saveToDiskField.checked
|
||||
metadataOutputFormatField.disabled = !saveToDiskField.checked
|
||||
}
|
||||
setDiskPath(res["default_output_dir"], force)
|
||||
|
||||
// backend info
|
||||
if (res["backend_url"]) {
|
||||
document.querySelector("#backend-url").setAttribute("href", res["backend_url"])
|
||||
}
|
||||
} catch (e) {
|
||||
console.log("error fetching devices", e)
|
||||
}
|
||||
@@ -794,11 +860,3 @@ navigator.permissions.query({ name: "clipboard-write" }).then(function(result) {
|
||||
})
|
||||
|
||||
document.addEventListener("system_info_update", (e) => setDeviceInfo(e.detail))
|
||||
|
||||
useBetaChannelField.addEventListener("change", (e) => {
|
||||
if (e.target.checked) {
|
||||
getParameterSettingsEntry("test_diffusers").classList.remove("displayNone")
|
||||
} else {
|
||||
getParameterSettingsEntry("test_diffusers").classList.add("displayNone")
|
||||
}
|
||||
})
|
||||
|
@@ -118,13 +118,16 @@ class ModelDropdown {
|
||||
)
|
||||
}
|
||||
|
||||
saveCurrentSelection(elem, value, path) {
|
||||
saveCurrentSelection(elem, value, path, dispatchEvent = true) {
|
||||
this.currentSelection.elem = elem
|
||||
this.currentSelection.value = value
|
||||
this.currentSelection.path = path
|
||||
this.modelFilter.dataset.path = path
|
||||
this.modelFilter.value = value
|
||||
this.modelFilter.dispatchEvent(new Event("change"))
|
||||
|
||||
if (dispatchEvent) {
|
||||
this.modelFilter.dispatchEvent(new Event("change"))
|
||||
}
|
||||
}
|
||||
|
||||
processClick(e) {
|
||||
@@ -348,13 +351,13 @@ class ModelDropdown {
|
||||
}
|
||||
}
|
||||
|
||||
selectEntry(path) {
|
||||
selectEntry(path, dispatchEvent = true) {
|
||||
if (path !== undefined) {
|
||||
const entries = this.modelElements
|
||||
|
||||
for (const elem of entries) {
|
||||
if (elem.dataset.path == path) {
|
||||
this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
|
||||
this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path, dispatchEvent)
|
||||
this.highlightedModelEntry = elem
|
||||
elem.scrollIntoView({ block: "nearest" })
|
||||
break
|
||||
@@ -529,7 +532,7 @@ class ModelDropdown {
|
||||
rootModelList.style.minWidth = modelFilterStyle.width
|
||||
})
|
||||
|
||||
this.selectEntry(this.activeModel)
|
||||
this.selectEntry(this.activeModel, false)
|
||||
}
|
||||
|
||||
/**
|
||||
|
409  ui/media/js/task-manager.js  Normal file
@@ -0,0 +1,409 @@
const htmlTaskMap = new WeakMap()
|
||||
|
||||
const pauseBtn = document.querySelector("#pause")
|
||||
const resumeBtn = document.querySelector("#resume")
|
||||
const processOrder = document.querySelector("#process_order_toggle")
|
||||
|
||||
let pauseClient = false
|
||||
|
||||
async function onIdle() {
|
||||
const serverCapacity = SD.serverCapacity
|
||||
if (pauseClient === true) {
|
||||
await resumeClient()
|
||||
}
|
||||
|
||||
for (const taskEntry of getUncompletedTaskEntries()) {
|
||||
if (SD.activeTasks.size >= serverCapacity) {
|
||||
break
|
||||
}
|
||||
const task = htmlTaskMap.get(taskEntry)
|
||||
if (!task) {
|
||||
const taskStatusLabel = taskEntry.querySelector(".taskStatusLabel")
|
||||
taskStatusLabel.style.display = "none"
|
||||
continue
|
||||
}
|
||||
await onTaskStart(task)
|
||||
}
|
||||
}
|
||||
|
||||
function getUncompletedTaskEntries() {
|
||||
const taskEntries = Array.from(document.querySelectorAll("#preview .imageTaskContainer .taskStatusLabel"))
|
||||
.filter((taskLabel) => taskLabel.style.display !== "none")
|
||||
.map(function(taskLabel) {
|
||||
let imageTaskContainer = taskLabel.parentNode
|
||||
while (!imageTaskContainer.classList.contains("imageTaskContainer") && imageTaskContainer.parentNode) {
|
||||
imageTaskContainer = imageTaskContainer.parentNode
|
||||
}
|
||||
return imageTaskContainer
|
||||
})
|
||||
if (!processOrder.checked) {
|
||||
taskEntries.reverse()
|
||||
}
|
||||
return taskEntries
|
||||
}
|
||||
|
||||
async function onTaskStart(task) {
|
||||
if (!task.isProcessing || task.batchesDone >= task.batchCount) {
|
||||
return
|
||||
}
|
||||
|
||||
if (typeof task.startTime !== "number") {
|
||||
task.startTime = Date.now()
|
||||
}
|
||||
if (!("instances" in task)) {
|
||||
task["instances"] = []
|
||||
}
|
||||
|
||||
task["stopTask"].innerHTML = '<i class="fa-solid fa-circle-stop"></i> Stop'
|
||||
task["taskStatusLabel"].innerText = "Starting"
|
||||
task["taskStatusLabel"].classList.add("waitingTaskLabel")
|
||||
|
||||
if (task.previewTaskReq !== undefined) {
|
||||
let controlImagePreview = task.taskConfig.querySelector(".controlnet-img-preview > img")
|
||||
try {
|
||||
let result = await SD.filter(task.previewTaskReq)
|
||||
|
||||
controlImagePreview.src = result.output[0]
|
||||
let controlImageLargePreview = task.taskConfig.querySelector(
|
||||
".controlnet-img-preview .task-fs-initimage img"
|
||||
)
|
||||
controlImageLargePreview.src = controlImagePreview.src
|
||||
} catch (error) {
|
||||
console.log("filter error", error)
|
||||
}
|
||||
|
||||
delete task.previewTaskReq
|
||||
}
|
||||
|
||||
let newTaskReqBody = task.reqBody
|
||||
if (task.batchCount > 1) {
|
||||
// Each output render batch needs its own task reqBody instance to avoid altering the other runs after they are completed.
|
||||
newTaskReqBody = Object.assign({}, task.reqBody)
|
||||
if (task.batchesDone == task.batchCount - 1) {
|
||||
// Last batch of the task
|
||||
// If the number of parallel jobs is not a factor of the total number of images, the last batch must create fewer than "parallel jobs count" images
|
||||
// E.g. with numOutputsTotal = 6 and num_outputs = 5, the last batch shall only generate 1 image.
|
||||
newTaskReqBody.num_outputs = task.numOutputsTotal - task.reqBody.num_outputs * (task.batchCount - 1)
|
||||
}
|
||||
}
|
||||
|
||||
const startSeed = task.seed || newTaskReqBody.seed
|
||||
const genSeeds = Boolean(
|
||||
typeof newTaskReqBody.seed !== "number" || (newTaskReqBody.seed === task.seed && task.numOutputsTotal > 1)
|
||||
)
|
||||
if (genSeeds) {
|
||||
newTaskReqBody.seed = parseInt(startSeed) + task.batchesDone * task.reqBody.num_outputs
|
||||
}
|
||||
|
||||
const outputContainer = document.createElement("div")
|
||||
outputContainer.className = "img-batch"
|
||||
task.outputContainer.insertBefore(outputContainer, task.outputContainer.firstChild)
|
||||
|
||||
const eventInfo = { reqBody: newTaskReqBody }
|
||||
const callbacksPromises = PLUGINS["TASK_CREATE"].map((hook) => {
|
||||
if (typeof hook !== "function") {
|
||||
console.error("The provided TASK_CREATE hook is not a function. Hook: %o", hook)
|
||||
return Promise.reject(new Error("hook is not a function."))
|
||||
}
|
||||
try {
|
||||
return Promise.resolve(hook.call(task, eventInfo))
|
||||
} catch (err) {
|
||||
console.error(err)
|
||||
return Promise.reject(err)
|
||||
}
|
||||
})
|
||||
await Promise.allSettled(callbacksPromises)
|
||||
let instance = eventInfo.instance
|
||||
if (!instance) {
|
||||
const factory = PLUGINS.OUTPUTS_FORMATS.get(eventInfo.reqBody?.output_format || newTaskReqBody.output_format)
|
||||
if (factory) {
|
||||
instance = await Promise.resolve(factory(eventInfo.reqBody || newTaskReqBody))
|
||||
}
|
||||
if (!instance) {
|
||||
console.error(
|
||||
`${factory ? "Factory " + String(factory) : "No factory defined"} for output format ${eventInfo.reqBody
|
||||
?.output_format || newTaskReqBody.output_format}. Instance is ${instance ||
|
||||
"undefined"}. Using default renderer.`
|
||||
)
|
||||
instance = new SD.RenderTask(eventInfo.reqBody || newTaskReqBody)
|
||||
}
|
||||
}
|
||||
|
||||
task["instances"].push(instance)
|
||||
task.batchesDone++
|
||||
|
||||
document.dispatchEvent(new CustomEvent("before_task_start", { detail: { task: task } }))
|
||||
|
||||
instance.enqueue(getTaskUpdater(task, newTaskReqBody, outputContainer)).then(
|
||||
(renderResult) => {
|
||||
onRenderTaskCompleted(task, newTaskReqBody, instance, outputContainer, renderResult)
|
||||
},
|
||||
(reason) => {
|
||||
onTaskErrorHandler(task, newTaskReqBody, instance, reason)
|
||||
}
|
||||
)
|
||||
|
||||
document.dispatchEvent(new CustomEvent("after_task_start", { detail: { task: task } }))
|
||||
}
|
||||
|
||||
function getTaskUpdater(task, reqBody, outputContainer) {
|
||||
const outputMsg = task["outputMsg"]
|
||||
const progressBar = task["progressBar"]
|
||||
const progressBarInner = progressBar.querySelector("div")
|
||||
|
||||
const batchCount = task.batchCount
|
||||
let lastStatus = undefined
|
||||
return async function(event) {
|
||||
if (this.status !== lastStatus) {
|
||||
lastStatus = this.status
|
||||
switch (this.status) {
|
||||
case SD.TaskStatus.pending:
|
||||
task["taskStatusLabel"].innerText = "Pending"
|
||||
task["taskStatusLabel"].classList.add("waitingTaskLabel")
|
||||
break
|
||||
case SD.TaskStatus.waiting:
|
||||
task["taskStatusLabel"].innerText = "Waiting"
|
||||
task["taskStatusLabel"].classList.add("waitingTaskLabel")
|
||||
task["taskStatusLabel"].classList.remove("activeTaskLabel")
|
||||
break
|
||||
case SD.TaskStatus.processing:
|
||||
case SD.TaskStatus.completed:
|
||||
task["taskStatusLabel"].innerText = "Processing"
|
||||
task["taskStatusLabel"].classList.add("activeTaskLabel")
|
||||
task["taskStatusLabel"].classList.remove("waitingTaskLabel")
|
||||
break
|
||||
case SD.TaskStatus.stopped:
|
||||
break
|
||||
case SD.TaskStatus.failed:
|
||||
if (!SD.isServerAvailable()) {
|
||||
logError(
|
||||
"Stable Diffusion is still starting up, please wait. If this goes on beyond a few minutes, Stable Diffusion has probably crashed. Please check the error message in the command-line window.",
|
||||
event,
|
||||
outputMsg
|
||||
)
|
||||
} else if (typeof event?.response === "object") {
|
||||
let msg = "Stable Diffusion had an error reading the response:<br/><pre>"
|
||||
if (this.exception) {
|
||||
msg += `Error: ${this.exception.message}<br/>`
|
||||
}
|
||||
try {
|
||||
// 'Response': body stream already read
|
||||
msg += "Read: " + (await event.response.text())
|
||||
} catch (e) {
|
||||
msg += "Unexpected end of stream. "
|
||||
}
|
||||
const bufferString = event.reader.bufferedString
|
||||
if (bufferString) {
|
||||
msg += "Buffered data: " + bufferString
|
||||
}
|
||||
msg += "</pre>"
|
||||
logError(msg, event, outputMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if ("update" in event) {
|
||||
const stepUpdate = event.update
|
||||
if (!("step" in stepUpdate)) {
|
||||
return
|
||||
}
|
||||
// task.instances can be a mix of different tasks with uneven number of steps (Render Vs Filter Tasks)
|
||||
const instancesWithProgressUpdates = task.instances.filter((instance) => instance.step !== undefined)
|
||||
const overallStepCount =
|
||||
instancesWithProgressUpdates.reduce(
|
||||
(sum, instance) =>
|
||||
sum +
|
||||
(instance.isPending
|
||||
? Math.max(0, instance.step || stepUpdate.step) /
|
||||
(instance.total_steps || stepUpdate.total_steps)
|
||||
: 1),
|
||||
0 // Initial value
|
||||
) * stepUpdate.total_steps // Scale to current number of steps.
|
||||
const totalSteps = instancesWithProgressUpdates.reduce(
|
||||
(sum, instance) => sum + (instance.total_steps || stepUpdate.total_steps),
|
||||
stepUpdate.total_steps * (batchCount - task.batchesDone) // Initial value at (unstarted task count * Nbr of steps)
|
||||
)
|
||||
const percent = Math.min(100, 100 * (overallStepCount / totalSteps)).toFixed(0)
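// Worked example (illustrative): with batchCount = 2, batchesDone = 1 and a single pending
// instance at step 10 of 25, overallStepCount = (10 / 25) * 25 = 10 and
// totalSteps = 25 + 25 * (2 - 1) = 50, so the progress label shows 20%.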
|
||||
|
||||
const timeTaken = stepUpdate.step_time // sec
|
||||
const stepsRemaining = Math.max(0, totalSteps - overallStepCount)
|
||||
const timeRemaining = timeTaken < 0 ? "" : millisecondsToStr(stepsRemaining * timeTaken * 1000)
|
||||
outputMsg.innerHTML = `Batch ${task.batchesDone} of ${batchCount}. Generating image(s): ${percent}%. Time remaining (approx): ${timeRemaining}`
|
||||
outputMsg.style.display = "block"
|
||||
progressBarInner.style.width = `${percent}%`
|
||||
|
||||
if (stepUpdate.output) {
|
||||
document.dispatchEvent(
|
||||
new CustomEvent("on_task_step", {
|
||||
detail: {
|
||||
task: task,
|
||||
reqBody: reqBody,
|
||||
stepUpdate: stepUpdate,
|
||||
outputContainer: outputContainer,
|
||||
},
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function onRenderTaskCompleted(task, reqBody, instance, outputContainer, stepUpdate) {
|
||||
if (typeof stepUpdate === "object") {
|
||||
if (stepUpdate.status === "succeeded") {
|
||||
document.dispatchEvent(
|
||||
new CustomEvent("on_render_task_success", {
|
||||
detail: {
|
||||
task: task,
|
||||
reqBody: reqBody,
|
||||
stepUpdate: stepUpdate,
|
||||
outputContainer: outputContainer,
|
||||
},
|
||||
})
|
||||
)
|
||||
} else {
|
||||
task.isProcessing = false
|
||||
document.dispatchEvent(
|
||||
new CustomEvent("on_render_task_fail", {
|
||||
detail: {
|
||||
task: task,
|
||||
reqBody: reqBody,
|
||||
stepUpdate: stepUpdate,
|
||||
outputContainer: outputContainer,
|
||||
},
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
if (task.isProcessing && task.batchesDone < task.batchCount) {
|
||||
task["taskStatusLabel"].innerText = "Pending"
|
||||
task["taskStatusLabel"].classList.add("waitingTaskLabel")
|
||||
task["taskStatusLabel"].classList.remove("activeTaskLabel")
|
||||
return
|
||||
}
|
||||
if ("instances" in task && task.instances.some((ins) => ins != instance && ins.isPending)) {
|
||||
return
|
||||
}
|
||||
|
||||
task.isProcessing = false
|
||||
task["stopTask"].innerHTML = '<i class="fa-solid fa-trash-can"></i> Remove'
|
||||
task["taskStatusLabel"].style.display = "none"
|
||||
|
||||
let time = millisecondsToStr(Date.now() - task.startTime)
|
||||
|
||||
if (task.batchesDone == task.batchCount) {
|
||||
if (!task.outputMsg.innerText.toLowerCase().includes("error")) {
|
||||
task.outputMsg.innerText = `Processed ${task.numOutputsTotal} images in ${time}`
|
||||
}
|
||||
task.progressBar.style.height = "0px"
|
||||
task.progressBar.style.border = "0px solid var(--background-color3)"
|
||||
task.progressBar.classList.remove("active")
|
||||
// setStatus("request", "done", "success")
|
||||
} else {
|
||||
task.outputMsg.innerText += `. Task ended after ${time}`
|
||||
}
|
||||
|
||||
// if (randomSeedField.checked) { // we already update this before the task starts
|
||||
// seedField.value = task.seed
|
||||
// }
|
||||
|
||||
if (SD.activeTasks.size > 0) {
|
||||
return
|
||||
}
|
||||
const uncompletedTasks = getUncompletedTaskEntries()
|
||||
if (uncompletedTasks && uncompletedTasks.length > 0) {
|
||||
return
|
||||
}
|
||||
|
||||
if (pauseClient) {
|
||||
resumeBtn.click()
|
||||
}
|
||||
|
||||
document.dispatchEvent(
|
||||
new CustomEvent("on_all_tasks_complete", {
|
||||
detail: {},
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
function resumeClient() {
|
||||
if (pauseClient) {
|
||||
document.body.classList.remove("wait-pause")
|
||||
document.body.classList.add("pause")
|
||||
}
|
||||
return new Promise((resolve) => {
|
||||
let playbuttonclick = function() {
|
||||
resumeBtn.removeEventListener("click", playbuttonclick)
|
||||
resolve("resolved")
|
||||
}
|
||||
resumeBtn.addEventListener("click", playbuttonclick)
|
||||
})
|
||||
}
|
||||
|
||||
function abortTask(task) {
|
||||
if (!task.isProcessing) {
|
||||
return false
|
||||
}
|
||||
task.isProcessing = false
|
||||
task.progressBar.classList.remove("active")
|
||||
task["taskStatusLabel"].style.display = "none"
|
||||
task["stopTask"].innerHTML = '<i class="fa-solid fa-trash-can"></i> Remove'
|
||||
if (!task.instances?.some((r) => r.isPending)) {
|
||||
return
|
||||
}
|
||||
task.instances.forEach((instance) => {
|
||||
try {
|
||||
instance.abort()
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async function stopAllTasks() {
|
||||
getUncompletedTaskEntries().forEach((taskEntry) => {
|
||||
const taskStatusLabel = taskEntry.querySelector(".taskStatusLabel")
|
||||
if (taskStatusLabel) {
|
||||
taskStatusLabel.style.display = "none"
|
||||
}
|
||||
const task = htmlTaskMap.get(taskEntry)
|
||||
if (!task) {
|
||||
return
|
||||
}
|
||||
abortTask(task)
|
||||
})
|
||||
}
|
||||
|
||||
function onTaskErrorHandler(task, reqBody, instance, reason) {
|
||||
if (!task.isProcessing) {
|
||||
return
|
||||
}
|
||||
console.log("Render request %o, Instance: %o, Error: %s", reqBody, instance, reason)
|
||||
abortTask(task)
|
||||
const outputMsg = task["outputMsg"]
|
||||
logError(
|
||||
"Stable Diffusion had an error. Please check the logs in the command-line window. <br/><br/>" +
|
||||
reason +
|
||||
"<br/><pre>" +
|
||||
reason.stack +
|
||||
"</pre>",
|
||||
task,
|
||||
outputMsg
|
||||
)
|
||||
// setStatus("request", "error", "error")
|
||||
}
|
||||
|
||||
pauseBtn.addEventListener("click", function() {
|
||||
pauseClient = true
|
||||
pauseBtn.style.display = "none"
|
||||
resumeBtn.style.display = "inline"
|
||||
document.body.classList.add("wait-pause")
|
||||
})
|
||||
|
||||
resumeBtn.addEventListener("click", function() {
|
||||
pauseClient = false
|
||||
resumeBtn.style.display = "none"
|
||||
pauseBtn.style.display = "inline"
|
||||
document.body.classList.remove("pause")
|
||||
document.body.classList.remove("wait-pause")
|
||||
})
|
@@ -1097,6 +1097,48 @@ async function deleteKeys(keyToDelete) {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {String} dataUrl Data URL of the source image
* @param {Integer} x Top left X-coordinate of the crop area
* @param {Integer} y Top left Y-coordinate of the crop area
* @param {Integer} width Width of the crop area
* @param {Integer} height Height of the crop area
* @return {Promise<String>} Resolves to the cropped image as a PNG data URL
|
||||
*/
|
||||
function cropImageDataUrl(dataUrl, x, y, width, height) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const image = new Image()
|
||||
image.src = dataUrl
|
||||
|
||||
image.onload = () => {
|
||||
const canvas = document.createElement('canvas')
|
||||
canvas.width = width
|
||||
canvas.height = height
|
||||
|
||||
const ctx = canvas.getContext('2d')
|
||||
ctx.drawImage(image, x, y, width, height, 0, 0, width, height)
|
||||
|
||||
const croppedDataUrl = canvas.toDataURL('image/png')
|
||||
resolve(croppedDataUrl)
|
||||
}
|
||||
|
||||
image.onerror = (error) => {
|
||||
reject(error)
|
||||
}
|
||||
})
|
||||
}
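// Usage sketch (illustrative, not part of the diff): crop the top-left 512x512 region of an
// image and use the result as the new source. Assumes `someDataUrl` holds a valid data: URL.
//
//   cropImageDataUrl(someDataUrl, 0, 0, 512, 512)
//       .then((cropped) => { initImagePreview.src = cropped })
//       .catch((err) => console.error("crop failed", err))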
|
||||
|
||||
/**
|
||||
* @param {String} HTML representing a single element
|
||||
* @return {Element}
|
||||
*/
|
||||
function htmlToElement(html) {
|
||||
var template = document.createElement('template');
|
||||
html = html.trim(); // Never return a text node of whitespace as the result
|
||||
template.innerHTML = html;
|
||||
return template.content.firstChild;
|
||||
}
|
||||
|
||||
function modalDialogCloseOnBackdropClick(dialog) {
|
||||
dialog.addEventListener('mousedown', function (event) {
|
||||
// Firefox creates an event with clientX|Y = 0|0 when choosing an <option>.
|
||||
@@ -1156,4 +1198,37 @@ function makeDialogDraggable(element) {
|
||||
})() )
|
||||
}
|
||||
|
||||
function logMsg(msg, level, outputMsg) {
|
||||
if (outputMsg.hasChildNodes()) {
|
||||
outputMsg.appendChild(document.createElement("br"))
|
||||
}
|
||||
if (level === "error") {
|
||||
outputMsg.innerHTML += '<span style="color: red">Error: ' + msg + "</span>"
|
||||
} else if (level === "warn") {
|
||||
outputMsg.innerHTML += '<span style="color: orange">Warning: ' + msg + "</span>"
|
||||
} else {
|
||||
outputMsg.innerText += msg
|
||||
}
|
||||
console.log(level, msg)
|
||||
}
|
||||
|
||||
function logError(msg, res, outputMsg) {
|
||||
logMsg(msg, "error", outputMsg)
|
||||
|
||||
console.log("request error", res)
|
||||
console.trace()
|
||||
// setStatus("request", "error", "error")
|
||||
}
|
||||
|
||||
function playSound() {
|
||||
const audio = new Audio("/media/ding.mp3")
|
||||
audio.volume = 0.2
|
||||
var promise = audio.play()
|
||||
if (promise !== undefined) {
|
||||
promise
|
||||
.then((_) => {})
|
||||
.catch((error) => {
|
||||
console.warn("browser blocked autoplay")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@@ -8,6 +8,11 @@
|
||||
"use strict"
|
||||
|
||||
promptField.addEventListener('input', function(e) {
|
||||
let loraExtractSetting = document.getElementById("extract_lora_from_prompt")
|
||||
if (!loraExtractSetting.checked) {
|
||||
return
|
||||
}
|
||||
|
||||
const { LoRA, prompt } = extractLoraTags(e.target.value);
|
||||
//console.log('e.target: ' + JSON.stringify(LoRA));
|
||||
|
||||
@@ -20,44 +25,17 @@
|
||||
}
|
||||
|
||||
if (LoRA !== null && LoRA.length > 0 && testDiffusers?.checked) {
|
||||
for (let i = 0; i < LoRA.length; i++) {
|
||||
//if (loraModelField.value !== LoRA[0].lora_model) {
|
||||
// Set the new LoRA value
|
||||
//console.log("Loading info");
|
||||
//console.log(LoRA[0].lora_model_0);
|
||||
//console.log(JSON.stringify(LoRa));
|
||||
|
||||
let lora = `lora_model_${i}`;
|
||||
let alpha = `lora_alpha_${i}`;
|
||||
let loramodel = document.getElementById(lora);
|
||||
let alphavalue = document.getElementById(alpha);
|
||||
loramodel.setAttribute("data-path", LoRA[i].lora_model_0);
|
||||
loramodel.value = LoRA[i].lora_model_0;
|
||||
alphavalue.value = LoRA[i].lora_alpha_0;
|
||||
if (i != LoRA.length - 1)
|
||||
createLoraEntry();
|
||||
}
|
||||
//loraAlphaSlider.value = loraAlphaField.value * 100;
|
||||
//TBD.value = LoRA[0].blockweights; // block weights not supported by ED at this time
|
||||
//}
|
||||
showToast("Prompt successfully processed", LoRA[0].lora_model_0);
|
||||
//console.log('LoRa: ' + LoRA[0].lora_model_0);
|
||||
//showToast("Prompt successfully processed", lora_model_0.value);
|
||||
let modelNames = LoRA.map(e => e.lora_model_0)
|
||||
let modelWeights = LoRA.map(e => e.lora_alpha_0)
|
||||
loraModelField.value = {modelNames: modelNames, modelWeights: modelWeights}
|
||||
|
||||
showToast("Prompt successfully processed")
|
||||
|
||||
}
|
||||
|
||||
//promptField.dispatchEvent(new Event('change'));
|
||||
});
|
||||
|
||||
function isModelAvailable(array, searchString) {
|
||||
const foundItem = array.find(function(item) {
|
||||
item = item.toString().toLowerCase();
|
||||
return item === searchString.toLowerCase()
|
||||
});
|
||||
|
||||
return foundItem || "";
|
||||
}
|
||||
|
||||
// extract LoRA tags from strings
|
||||
function extractLoraTags(prompt) {
|
||||
// Define the regular expression for the tags
|
||||
@@ -68,11 +46,13 @@
|
||||
|
||||
// Iterate over the string, finding matches
|
||||
for (const match of prompt.matchAll(regex)) {
|
||||
const modelFileName = isModelAvailable(modelsCache.options.lora, match[1].trim())
|
||||
if (modelFileName !== "") {
|
||||
const modelFileName = match[1].trim()
|
||||
const loraPathes = getAllModelPathes("lora", modelFileName)
|
||||
if (loraPathes.length > 0) {
|
||||
const loraPath = loraPathes[0]
|
||||
// Initialize an object to hold a match
|
||||
let loraTag = {
|
||||
lora_model_0: modelFileName,
|
||||
lora_model_0: loraPath,
|
||||
}
|
||||
//console.log("Model:" + modelFileName);
|
||||
|
||||
|
@@ -1,454 +0,0 @@
|
||||
;(function() {
|
||||
"use strict"
|
||||
|
||||
///////////////////// Function section
|
||||
function smoothstep(x) {
|
||||
return x * x * (3 - 2 * x)
|
||||
}
|
||||
|
||||
function smootherstep(x) {
|
||||
return x * x * x * (x * (x * 6 - 15) + 10)
|
||||
}
|
||||
|
||||
function smootheststep(x) {
|
||||
let y = -20 * Math.pow(x, 7)
|
||||
y += 70 * Math.pow(x, 6)
|
||||
y -= 84 * Math.pow(x, 5)
|
||||
y += 35 * Math.pow(x, 4)
|
||||
return y
|
||||
}
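// Reference (added note, not in the original diff): these are the standard smoothstep-family
// easing curves — smoothstep(x) = 3x^2 - 2x^3, smootherstep(x) = 6x^5 - 15x^4 + 10x^3 and
// smootheststep(x) = 35x^4 - 84x^5 + 70x^6 - 20x^7. All map [0, 1] onto [0, 1] with zero slope
// at both ends, which is why they are offered as interpolation options for the merge ratio.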
|
||||
function getCurrentTime() {
|
||||
const now = new Date()
|
||||
let hours = now.getHours()
|
||||
let minutes = now.getMinutes()
|
||||
let seconds = now.getSeconds()
|
||||
|
||||
hours = hours < 10 ? `0${hours}` : hours
|
||||
minutes = minutes < 10 ? `0${minutes}` : minutes
|
||||
seconds = seconds < 10 ? `0${seconds}` : seconds
|
||||
|
||||
return `${hours}:${minutes}:${seconds}`
|
||||
}
|
||||
|
||||
function addLogMessage(message) {
|
||||
const logContainer = document.getElementById("merge-log")
|
||||
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`
|
||||
|
||||
// Scroll to the bottom of the log
|
||||
logContainer.scrollTop = logContainer.scrollHeight
|
||||
|
||||
document.querySelector("#merge-log-container").style.display = "block"
|
||||
}
|
||||
|
||||
function addLogSeparator() {
|
||||
const logContainer = document.getElementById("merge-log")
|
||||
logContainer.innerHTML += "<hr>"
|
||||
|
||||
logContainer.scrollTop = logContainer.scrollHeight
|
||||
}
|
||||
|
||||
function drawDiagram(fn) {
|
||||
const SIZE = 300
|
||||
const canvas = document.getElementById("merge-canvas")
|
||||
canvas.height = canvas.width = SIZE
|
||||
const ctx = canvas.getContext("2d")
|
||||
|
||||
// Draw coordinate system
|
||||
ctx.scale(1, -1)
|
||||
ctx.translate(0, -canvas.height)
|
||||
ctx.lineWidth = 1
|
||||
ctx.beginPath()
|
||||
|
||||
ctx.strokeStyle = "white"
|
||||
ctx.moveTo(0, 0)
|
||||
ctx.lineTo(0, SIZE)
|
||||
ctx.lineTo(SIZE, SIZE)
|
||||
ctx.lineTo(SIZE, 0)
|
||||
ctx.lineTo(0, 0)
|
||||
ctx.lineTo(SIZE, SIZE)
|
||||
ctx.stroke()
|
||||
ctx.beginPath()
|
||||
ctx.setLineDash([1, 2])
|
||||
const n = SIZE / 10
|
||||
for (let i = n; i < SIZE; i += n) {
|
||||
ctx.moveTo(0, i)
|
||||
ctx.lineTo(SIZE, i)
|
||||
ctx.moveTo(i, 0)
|
||||
ctx.lineTo(i, SIZE)
|
||||
}
|
||||
ctx.stroke()
|
||||
ctx.beginPath()
|
||||
ctx.setLineDash([])
|
||||
ctx.beginPath()
|
||||
ctx.strokeStyle = "black"
|
||||
ctx.lineWidth = 3
|
||||
// Plot function
|
||||
const numSamples = 20
|
||||
for (let i = 0; i <= numSamples; i++) {
|
||||
const x = i / numSamples
|
||||
const y = fn(x)
|
||||
|
||||
const canvasX = x * SIZE
|
||||
const canvasY = y * SIZE
|
||||
|
||||
if (i === 0) {
|
||||
ctx.moveTo(canvasX, canvasY)
|
||||
} else {
|
||||
ctx.lineTo(canvasX, canvasY)
|
||||
}
|
||||
}
|
||||
ctx.stroke()
|
||||
// Plot alpha values (yellow boxes)
|
||||
let start = parseFloat(document.querySelector("#merge-start").value)
|
||||
let step = parseFloat(document.querySelector("#merge-step").value)
|
||||
let iterations = document.querySelector("#merge-count").value >> 0
|
||||
ctx.beginPath()
|
||||
ctx.fillStyle = "yellow"
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
const alpha = (start + i * step) / 100
|
||||
const x = alpha * SIZE
|
||||
const y = fn(alpha) * SIZE
|
||||
if (x <= SIZE) {
|
||||
ctx.rect(x - 3, y - 3, 6, 6)
|
||||
ctx.fill()
|
||||
} else {
|
||||
ctx.strokeStyle = "red"
|
||||
ctx.moveTo(0, 0)
|
||||
ctx.lineTo(0, SIZE)
|
||||
ctx.lineTo(SIZE, SIZE)
|
||||
ctx.lineTo(SIZE, 0)
|
||||
ctx.lineTo(0, 0)
|
||||
ctx.lineTo(SIZE, SIZE)
|
||||
ctx.stroke()
|
||||
addLogMessage("<i>Warning: maximum ratio is ≥ 100%</i>")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function updateChart() {
|
||||
let fn = (x) => x
|
||||
switch (document.querySelector("#merge-interpolation").value) {
|
||||
case "SmoothStep":
|
||||
fn = smoothstep
|
||||
break
|
||||
case "SmootherStep":
|
||||
fn = smootherstep
|
||||
break
|
||||
case "SmoothestStep":
|
||||
fn = smootheststep
|
||||
break
|
||||
}
|
||||
drawDiagram(fn)
|
||||
}
|
||||
createTab({
|
||||
id: "merge",
|
||||
icon: "fa-code-merge",
|
||||
label: "Merge models",
|
||||
css: `
|
||||
#tab-content-merge .tab-content-inner {
|
||||
max-width: 100%;
|
||||
padding: 10pt;
|
||||
}
|
||||
.merge-container {
|
||||
margin-left: 15%;
|
||||
margin-right: 15%;
|
||||
text-align: left;
|
||||
display: inline-grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
grid-template-rows: auto auto auto;
|
||||
gap: 0px 0px;
|
||||
grid-auto-flow: row;
|
||||
grid-template-areas:
|
||||
"merge-input merge-config"
|
||||
"merge-buttons merge-buttons";
|
||||
}
|
||||
.merge-container p {
|
||||
margin-top: 3pt;
|
||||
margin-bottom: 3pt;
|
||||
}
|
||||
.merge-config .tab-content {
|
||||
background: var(--background-color1);
|
||||
border-radius: 3pt;
|
||||
}
|
||||
.merge-config .tab-content-inner {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.merge-input {
|
||||
grid-area: merge-input;
|
||||
padding-left:1em;
|
||||
}
|
||||
.merge-config {
|
||||
grid-area: merge-config;
|
||||
padding:1em;
|
||||
}
|
||||
.merge-config input {
|
||||
margin-bottom: 3px;
|
||||
}
|
||||
.merge-config select {
|
||||
margin-bottom: 3px;
|
||||
}
|
||||
.merge-buttons {
|
||||
grid-area: merge-buttons;
|
||||
padding:1em;
|
||||
text-align: center;
|
||||
}
|
||||
#merge-button {
|
||||
padding: 8px;
|
||||
width:20em;
|
||||
}
|
||||
div#merge-log {
|
||||
height:150px;
|
||||
overflow-x:hidden;
|
||||
overflow-y:scroll;
|
||||
background:var(--background-color1);
|
||||
border-radius: 3pt;
|
||||
}
|
||||
div#merge-log i {
|
||||
color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
|
||||
font-family: monospace;
|
||||
}
|
||||
.disabled {
|
||||
background: var(--background-color4);
|
||||
color: var(--text-color);
|
||||
}
|
||||
#merge-type-tabs {
|
||||
border-bottom: 1px solid black;
|
||||
}
|
||||
#merge-log-container {
|
||||
display: none;
|
||||
}
|
||||
.merge-container #merge-warning {
|
||||
color: rgb(153, 153, 153);
|
||||
}`,
|
||||
content: `
|
||||
<div class="merge-container panel-box">
|
||||
<div class="merge-input">
|
||||
<p><label for="#mergeModelA">Select Model A:</label></p>
|
||||
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||
<p><label for="#mergeModelB">Select Model B:</label></p>
|
||||
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||
<br/><br/>
|
||||
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
|
||||
<br/>
|
||||
<table>
|
||||
<tr>
|
||||
<td><label for="#merge-filename">Output file name:</label></td>
|
||||
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><label for="#merge-fp">Output precision:</label></td>
|
||||
<td><select id="merge-fp">
|
||||
<option value="fp16">fp16 (smaller file size)</option>
|
||||
<option value="fp32">fp32 (larger file size)</option>
|
||||
</select>
|
||||
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><label for="#merge-format">Output file format:</label></td>
|
||||
<td><select id="merge-format">
|
||||
<option value="safetensors">Safetensors (recommended)</option>
|
||||
<option value="ckpt">CKPT/Pickle (legacy format)</option>
|
||||
</select>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
<br/>
|
||||
<div id="merge-log-container">
|
||||
<p><label for="#merge-log">Log messages:</label></p>
|
||||
<div id="merge-log"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="merge-config">
|
||||
<div class="tab-container">
|
||||
<span id="tab-merge-opts-single" class="tab active">
|
||||
<span>Make a single file</small></span>
|
||||
</span>
|
||||
<span id="tab-merge-opts-batch" class="tab">
|
||||
<span>Make multiple variations</small></span>
|
||||
</span>
|
||||
</div>
|
||||
<div>
|
||||
<div id="tab-content-merge-opts-single" class="tab-content active">
|
||||
<div class="tab-content-inner">
|
||||
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
|
||||
<label for="#single-merge-ratio-slider">Merge ratio:</label>
|
||||
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
|
||||
<input id="single-merge-ratio" size=2 value="5">%
|
||||
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
|
||||
</div>
|
||||
</div>
|
||||
<div id="tab-content-merge-opts-batch" class="tab-content">
|
||||
<div class="tab-content-inner">
|
||||
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
|
||||
<table>
|
||||
<tr><td><label for="#merge-count">Number of variations:</label></td>
|
||||
<td> <input id="merge-count" size=2 value="5"></td>
|
||||
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
|
||||
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
|
||||
<td> <input id="merge-start" size=2 value="5">%</td>
|
||||
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
|
||||
<tr><td><label for="#merge-step">Increment each step:</label></td>
|
||||
<td> <input id="merge-step" size=2 value="10">%</td>
|
||||
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
|
||||
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
|
||||
<td> <select id="merge-interpolation">
|
||||
<option>Exact</option>
|
||||
<option>SmoothStep</option>
|
||||
<option>SmootherStep</option>
|
||||
<option>SmoothestStep</option>
|
||||
</select></td>
|
||||
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
|
||||
</table>
|
||||
<br/>
|
||||
<small>Preview of variation ratios:</small><br/>
|
||||
<canvas id="merge-canvas" width="400" height="400"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="merge-buttons">
|
||||
<button id="merge-button" class="primaryButton">Merge models</button>
|
||||
</div>
|
||||
</div>`,
|
||||
onOpen: ({ firstOpen }) => {
|
||||
if (!firstOpen) {
|
||||
return
|
||||
}
|
||||
|
||||
const tabSettingsSingle = document.querySelector("#tab-merge-opts-single")
|
||||
const tabSettingsBatch = document.querySelector("#tab-merge-opts-batch")
|
||||
linkTabContents(tabSettingsSingle)
|
||||
linkTabContents(tabSettingsBatch)
|
||||
|
||||
console.log("Activate")
|
||||
let mergeModelAField = new ModelDropdown(document.querySelector("#mergeModelA"), "stable-diffusion")
|
||||
let mergeModelBField = new ModelDropdown(document.querySelector("#mergeModelB"), "stable-diffusion")
|
||||
updateChart()
|
||||
|
||||
// slider
|
||||
const singleMergeRatioField = document.querySelector("#single-merge-ratio")
|
||||
const singleMergeRatioSlider = document.querySelector("#single-merge-ratio-slider")
|
||||
|
||||
function updateSingleMergeRatio() {
|
||||
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
|
||||
singleMergeRatioField.dispatchEvent(new Event("change"))
|
||||
}
|
||||
|
||||
function updateSingleMergeRatioSlider() {
|
||||
if (singleMergeRatioField.value < 0) {
|
||||
singleMergeRatioField.value = 0
|
||||
} else if (singleMergeRatioField.value > 100) {
|
||||
singleMergeRatioField.value = 100
|
||||
}
|
||||
|
||||
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
|
||||
singleMergeRatioSlider.dispatchEvent(new Event("change"))
|
||||
}
|
||||
|
||||
singleMergeRatioSlider.addEventListener("input", updateSingleMergeRatio)
|
||||
singleMergeRatioField.addEventListener("input", updateSingleMergeRatioSlider)
|
||||
updateSingleMergeRatio()
|
||||
|
||||
document.querySelector(".merge-config").addEventListener("change", updateChart)
|
||||
|
||||
document.querySelector("#merge-button").addEventListener("click", async function(e) {
|
||||
// Build request template
|
||||
let model0 = mergeModelAField.value
|
||||
let model1 = mergeModelBField.value
|
||||
let request = { model0: model0, model1: model1 }
|
||||
request["use_fp16"] = document.querySelector("#merge-fp").value == "fp16"
|
||||
let iterations = document.querySelector("#merge-count").value >> 0
|
||||
let start = parseFloat(document.querySelector("#merge-start").value)
|
||||
let step = parseFloat(document.querySelector("#merge-step").value)
|
||||
|
||||
if (isTabActive(tabSettingsSingle)) {
|
||||
start = parseFloat(singleMergeRatioField.value)
|
||||
step = 0
|
||||
iterations = 1
|
||||
addLogMessage(`merge ratio = ${start}%`)
|
||||
} else {
|
||||
addLogMessage(`start = ${start}%`)
|
||||
addLogMessage(`step = ${step}%`)
|
||||
}
|
||||
|
||||
if (start + (iterations - 1) * step >= 100) {
|
||||
addLogMessage("<i>Aborting: maximum ratio is ≥ 100%</i>")
|
||||
addLogMessage("Reduce the number of variations or the step size")
|
||||
addLogSeparator()
|
||||
document.querySelector("#merge-count").focus()
|
||||
return
|
||||
}
|
||||
|
||||
if (document.querySelector("#merge-filename").value == "") {
|
||||
addLogMessage("<i>Aborting: No output file name specified</i>")
|
||||
addLogSeparator()
|
||||
document.querySelector("#merge-filename").focus()
|
||||
return
|
||||
}
|
||||
|
||||
// Disable merge button
|
||||
e.target.disabled = true
|
||||
e.target.classList.add("disabled")
|
||||
let cursor = $("body").css("cursor")
|
||||
let label = document.querySelector("#merge-button").innerHTML
|
||||
$("body").css("cursor", "progress")
|
||||
document.querySelector("#merge-button").innerHTML = "Merging models ..."
|
||||
|
||||
addLogMessage("Merging models")
|
||||
addLogMessage("Model A: " + model0)
|
||||
addLogMessage("Model B: " + model1)
|
||||
|
||||
// Batch main loop
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
let alpha = (start + i * step) / 100
|
||||
|
||||
if (isTabActive(tabSettingsBatch)) {
|
||||
switch (document.querySelector("#merge-interpolation").value) {
|
||||
case "SmoothStep":
|
||||
alpha = smoothstep(alpha)
|
||||
break
|
||||
case "SmootherStep":
|
||||
alpha = smootherstep(alpha)
|
||||
break
|
||||
case "SmoothestStep":
|
||||
alpha = smootheststep(alpha)
|
||||
break
|
||||
}
|
||||
}
|
||||
addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
|
||||
|
||||
request["out_path"] = document.querySelector("#merge-filename").value
|
||||
request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
|
||||
addLogMessage(` filename: ${request["out_path"]}`)
|
||||
|
||||
// sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
|
||||
request["ratio"] = 1-alpha
|
||||
let res = await fetch("/model/merge", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(request),
|
||||
})
|
||||
const data = await res.json()
|
||||
addLogMessage(JSON.stringify(data))
|
||||
}
|
||||
addLogMessage(
|
||||
"<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder."
|
||||
)
|
||||
addLogSeparator()
|
||||
// Re-enable merge button
|
||||
$("body").css("cursor", cursor)
|
||||
document.querySelector("#merge-button").innerHTML = label
|
||||
e.target.disabled = false
|
||||
e.target.classList.remove("disabled")
|
||||
|
||||
// Update model list
|
||||
stableDiffusionModelField.innerHTML = ""
|
||||
vaeModelField.innerHTML = ""
|
||||
hypernetworkModelField.innerHTML = ""
|
||||
await getModels()
|
||||
})
|
||||
},
|
||||
})
|
||||
})()
770  ui/plugins/ui/model-tools.plugin.js  Normal file
@@ -0,0 +1,770 @@
;(function() {
    "use strict"

    let mergeCSS = `
    /*********** Main tab ***********/
    .tab-centered {
        justify-content: center;
    }

    #model-tool-tab-content {
        background-color: var(--background-color3);
    }

    #model-tool-tab-content .tab-content-inner {
        text-align: initial;
    }

    #model-tool-tab-bar .tab {
        margin-bottom: 0px;
        border-top-left-radius: var(--input-border-radius);
        background-color: var(--background-color3);
        padding: 6px 6px 0.8em 6px;
    }
    #tab-content-merge .tab-content-inner {
        max-width: 100%;
        padding: 10pt;
    }

    /*********** Merge UI ***********/
    .merge-model-container {
        margin-left: 15%;
        margin-right: 15%;
        text-align: left;
        display: inline-grid;
        grid-template-columns: 1fr 1fr;
        grid-template-rows: auto auto auto;
        gap: 0px 0px;
        grid-auto-flow: row;
        grid-template-areas:
            "merge-input merge-config"
            "merge-buttons merge-buttons";
    }
    .merge-model-container p {
        margin-top: 3pt;
        margin-bottom: 3pt;
    }
    .merge-config .tab-content {
        background: var(--background-color1);
        border-radius: 3pt;
    }
    .merge-config .tab-content-inner {
        text-align: left;
    }

    .merge-input {
        grid-area: merge-input;
        padding-left: 1em;
    }
    .merge-config {
        grid-area: merge-config;
        padding: 1em;
    }
    .merge-config input {
        margin-bottom: 3px;
    }
    .merge-config select {
        margin-bottom: 3px;
    }
    .merge-buttons {
        grid-area: merge-buttons;
        padding: 1em;
        text-align: center;
    }
    #merge-button {
        padding: 8px;
        width: 20em;
    }
    div#merge-log {
        height: 150px;
        overflow-x: hidden;
        overflow-y: scroll;
        background: var(--background-color1);
        border-radius: 3pt;
    }
    div#merge-log i {
        color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
        font-family: monospace;
    }
    .disabled {
        background: var(--background-color4);
        color: var(--text-color);
    }
    #merge-type-tabs {
        border-bottom: 1px solid black;
    }
    #merge-log-container {
        display: none;
    }
    .merge-model-container #merge-warning {
        color: var(--small-label-color);
    }

    /*********** LORA UI ***********/
    .lora-manager-grid {
        display: grid;
        gap: 0px 8px;
        grid-auto-flow: row;
    }

    @media screen and (min-width: 1501px) {
        .lora-manager-grid textarea {
            height: 350px;
        }

        .lora-manager-grid {
            grid-template-columns: auto 1fr 1fr;
            grid-template-rows: auto 1fr;
            grid-template-areas:
                "selector selector selector"
                "thumbnail keywords notes";
        }
    }

    @media screen and (min-width: 1001px) and (max-width: 1500px) {
        .lora-manager-grid textarea {
            height: 250px;
        }

        .lora-manager-grid {
            grid-template-columns: auto auto;
            grid-template-rows: auto auto auto;
            grid-template-areas:
                "selector selector"
                "thumbnail keywords"
                "thumbnail notes";
        }
    }

    @media screen and (max-width: 1000px) {
        .lora-manager-grid textarea {
            height: 200px;
        }

        .lora-manager-grid {
            grid-template-columns: auto;
            grid-template-rows: auto auto auto auto;
            grid-template-areas:
                "selector"
                "keywords"
                "thumbnail"
                "notes";
        }
    }

    .lora-manager-grid-selector {
        grid-area: selector;
        justify-self: start;
    }

    .lora-manager-grid-thumbnail {
        grid-area: thumbnail;
        justify-self: center;
    }

    .lora-manager-grid-keywords {
        grid-area: keywords;
    }

    .lora-manager-grid-notes {
        grid-area: notes;
    }

    .lora-manager-grid p {
        margin-bottom: 2px;
    }
    `

    let mergeUI = `
    <div class="merge-model-container panel-box">
        <div class="merge-input">
            <p><label for="#mergeModelA">Select Model A:</label></p>
            <input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
            <p><label for="#mergeModelB">Select Model B:</label></p>
            <input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
            <br/><br/>
            <p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
            <br/>
            <table>
                <tr>
                    <td><label for="#merge-filename">Output file name:</label></td>
                    <td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
                </tr>
                <tr>
                    <td><label for="#merge-fp">Output precision:</label></td>
                    <td><select id="merge-fp">
                        <option value="fp16">fp16 (smaller file size)</option>
                        <option value="fp32">fp32 (larger file size)</option>
                    </select>
                    <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
                    </td>
                </tr>
                <tr>
                    <td><label for="#merge-format">Output file format:</label></td>
                    <td><select id="merge-format">
                        <option value="safetensors">Safetensors (recommended)</option>
                        <option value="ckpt">CKPT/Pickle (legacy format)</option>
                    </select>
                    </td>
                </tr>
            </table>
            <br/>
            <div id="merge-log-container">
                <p><label for="#merge-log">Log messages:</label></p>
                <div id="merge-log"></div>
            </div>
        </div>
        <div class="merge-config">
            <div class="tab-container">
                <span id="tab-merge-opts-single" class="tab active">
                    <span>Make a single file</span>
                </span>
                <span id="tab-merge-opts-batch" class="tab">
                    <span>Make multiple variations</span>
                </span>
            </div>
            <div>
                <div id="tab-content-merge-opts-single" class="tab-content active">
                    <div class="tab-content-inner">
                        <small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
                        <label for="#single-merge-ratio-slider">Merge ratio:</label>
                        <input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
                        <input id="single-merge-ratio" size=2 value="5">%
                        <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
                    </div>
                </div>
                <div id="tab-content-merge-opts-batch" class="tab-content">
                    <div class="tab-content-inner">
                        <small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
                        <table>
                            <tr><td><label for="#merge-count">Number of variations:</label></td>
                                <td> <input id="merge-count" size=2 value="5"></td>
                                <td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
                            <tr><td><label for="#merge-start">Starting merge ratio:</label></td>
                                <td> <input id="merge-start" size=2 value="5">%</td>
                                <td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
                            <tr><td><label for="#merge-step">Increment each step:</label></td>
                                <td> <input id="merge-step" size=2 value="10">%</td>
                                <td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
                            <tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
                                <td> <select id="merge-interpolation">
                                    <option>Exact</option>
                                    <option>SmoothStep</option>
                                    <option>SmootherStep</option>
                                    <option>SmoothestStep</option>
                                </select></td>
                                <td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
                        </table>
                        <br/>
                        <small>Preview of variation ratios:</small><br/>
                        <canvas id="merge-canvas" width="400" height="400"></canvas>
                    </div>
                </div>
            </div>
        </div>
        <div class="merge-buttons">
            <button id="merge-button" class="primaryButton">Merge models</button>
        </div>
    </div>`

    let loraUI = `
    <div class="panel-box lora-manager-grid">
        <div class="lora-manager-grid-selector">
            <label for="#loraModel">Select Lora:</label>
            <input id="loraModel" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
        </div>
        <div class="lora-manager-grid-thumbnail">
            <p style="height:2em;">Thumbnail:</p>
            <div style="position:relative; height:256px; width:256px;background-color:#222;border-radius:1em;margin-bottom:1em;">
                <i id="lora-manager-image-placeholder" class="fa-regular fa-image" style="font-size:500%;color:#555;position:absolute; top: 50%; left: 50%; transform: translate(-50%,-50%);"></i>
                <img id="lora-manager-image" class="displayNone" style="border-radius:6px;max-height:256px;max-width:256px;"/>
            </div>
            <div style="text-align:center;">
                <button class="tertiaryButton" id="lora-manager-upload-button"><i class="fa-solid fa-upload"></i> Upload new thumbnail</button>
                <input id="lora-manager-upload-input" name="lora-manager-upload-input" type="file" class="displayNone">
                <!-- button class="tertiaryButton"><i class="fa-solid fa-trash-can"></i> Remove</button -->
            </div>
        </div>
        <div class="lora-manager-grid-keywords">
            <p style="height:2em;">Keywords:
                <span style="float:right;margin-bottom:4px;"><button id="lora-keyword-from-civitai" class="tertiaryButton smallButton">Import from Civitai</button></span></p>
            <textarea style="width:100%;resize:vertical;" id="lora-manager-keywords" placeholder="Put LORA specific keywords here..."></textarea>
            <p style="color:var(--small-label-color);">
                <b>LORA model keywords</b> can be used via the <code>+ Embeddings</code> button. They get added to the embedding
                keyword menu when the LORA has been selected in the image settings.
            </p>
        </div>
        <div class="lora-manager-grid-notes">
            <p style="height:2em;">Notes:</p>
            <textarea style="width:100%;resize:vertical;" id="lora-manager-notes" placeholder="Place for things you want to remember..."></textarea>
            <p id="civitai-section" class="displayNone">
                <b>Civitai model page:</b>
                <a id="civitai-model-page" target="_blank"></a>
            </p>
        </div>
    </div>`

    let tabHTML = `
    <div id="model-tool-tab-bar" class="tab-container tab-centered">
        <span id="tab-model-loraUI" class="tab active">
            <span><i class="fa-solid fa-key"></i> Lora Keywords</span>
        </span>
        <span id="tab-model-mergeUI" class="tab">
            <span><i class="fa-solid fa-code-merge"></i> Merge Models</span>
        </span>
    </div>
    <div id="model-tool-tab-content" class="panel-box">
        <div id="tab-content-model-loraUI" class="tab-content active">
            <div class="tab-content-inner">
                ${loraUI}
            </div>
        </div>

        <div id="tab-content-model-mergeUI" class="tab-content">
            <div class="tab-content-inner">
                ${mergeUI}
            </div>
        </div>
    </div>`

    ///////////////////// Function section
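    // The three interpolation options below are the standard "smoothstep" family of
    // easing curves on [0, 1]:
    //   smoothstep(x)    = 3x^2 - 2x^3
    //   smootherstep(x)  = 6x^5 - 15x^4 + 10x^3
    //   smootheststep(x) = -20x^7 + 70x^6 - 84x^5 + 35x^4
    // Each maps 0 -> 0 and 1 -> 1 with vanishing derivative(s) at both ends, so batch
    // merge ratios are eased away from the extremes compared to the "Exact" (linear) option.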
    function smoothstep(x) {
        return x * x * (3 - 2 * x)
    }

    function smootherstep(x) {
        return x * x * x * (x * (x * 6 - 15) + 10)
    }

    function smootheststep(x) {
        let y = -20 * Math.pow(x, 7)
        y += 70 * Math.pow(x, 6)
        y -= 84 * Math.pow(x, 5)
        y += 35 * Math.pow(x, 4)
        return y
    }
    function getCurrentTime() {
        const now = new Date()
        let hours = now.getHours()
        let minutes = now.getMinutes()
        let seconds = now.getSeconds()

        hours = hours < 10 ? `0${hours}` : hours
        minutes = minutes < 10 ? `0${minutes}` : minutes
        seconds = seconds < 10 ? `0${seconds}` : seconds

        return `${hours}:${minutes}:${seconds}`
    }

    function addLogMessage(message) {
        const logContainer = document.getElementById("merge-log")
        logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`

        // Scroll to the bottom of the log
        logContainer.scrollTop = logContainer.scrollHeight

        document.querySelector("#merge-log-container").style.display = "block"
    }

    function addLogSeparator() {
        const logContainer = document.getElementById("merge-log")
        logContainer.innerHTML += "<hr>"

        logContainer.scrollTop = logContainer.scrollHeight
    }

    function drawDiagram(fn) {
        const SIZE = 300
        const canvas = document.getElementById("merge-canvas")
        canvas.height = canvas.width = SIZE
        const ctx = canvas.getContext("2d")

        // Draw coordinate system
        ctx.scale(1, -1)
        ctx.translate(0, -canvas.height)
        ctx.lineWidth = 1
        ctx.beginPath()

        ctx.strokeStyle = "white"
        ctx.moveTo(0, 0)
        ctx.lineTo(0, SIZE)
        ctx.lineTo(SIZE, SIZE)
        ctx.lineTo(SIZE, 0)
        ctx.lineTo(0, 0)
        ctx.lineTo(SIZE, SIZE)
        ctx.stroke()
        ctx.beginPath()
        ctx.setLineDash([1, 2])
        const n = SIZE / 10
        for (let i = n; i < SIZE; i += n) {
            ctx.moveTo(0, i)
            ctx.lineTo(SIZE, i)
            ctx.moveTo(i, 0)
            ctx.lineTo(i, SIZE)
        }
        ctx.stroke()
        ctx.beginPath()
        ctx.setLineDash([])
        ctx.beginPath()
        ctx.strokeStyle = "black"
        ctx.lineWidth = 3
        // Plot function
        const numSamples = 20
        for (let i = 0; i <= numSamples; i++) {
            const x = i / numSamples
            const y = fn(x)

            const canvasX = x * SIZE
            const canvasY = y * SIZE

            if (i === 0) {
                ctx.moveTo(canvasX, canvasY)
            } else {
                ctx.lineTo(canvasX, canvasY)
            }
        }
        ctx.stroke()
        // Plot alpha values (yellow boxes)
        let start = parseFloat(document.querySelector("#merge-start").value)
        let step = parseFloat(document.querySelector("#merge-step").value)
        let iterations = document.querySelector("#merge-count").value >> 0
        ctx.beginPath()
        ctx.fillStyle = "yellow"
        for (let i = 0; i < iterations; i++) {
            const alpha = (start + i * step) / 100
            const x = alpha * SIZE
            const y = fn(alpha) * SIZE
            if (x <= SIZE) {
                ctx.rect(x - 3, y - 3, 6, 6)
                ctx.fill()
            } else {
                ctx.strokeStyle = "red"
                ctx.moveTo(0, 0)
                ctx.lineTo(0, SIZE)
                ctx.lineTo(SIZE, SIZE)
                ctx.lineTo(SIZE, 0)
                ctx.lineTo(0, 0)
                ctx.lineTo(SIZE, SIZE)
                ctx.stroke()
                addLogMessage("<i>Warning: maximum ratio is ≥ 100%</i>")
            }
        }
    }

    function updateChart() {
        let fn = (x) => x
        switch (document.querySelector("#merge-interpolation").value) {
            case "SmoothStep":
                fn = smoothstep
                break
            case "SmootherStep":
                fn = smootherstep
                break
            case "SmoothestStep":
                fn = smootheststep
                break
        }
        drawDiagram(fn)
    }

    function initMergeUI() {
        const tabSettingsSingle = document.querySelector("#tab-merge-opts-single")
        const tabSettingsBatch = document.querySelector("#tab-merge-opts-batch")
        linkTabContents(tabSettingsSingle)
        linkTabContents(tabSettingsBatch)

        let mergeModelAField = new ModelDropdown(document.querySelector("#mergeModelA"), "stable-diffusion")
        let mergeModelBField = new ModelDropdown(document.querySelector("#mergeModelB"), "stable-diffusion")
        updateChart()

        // slider
        const singleMergeRatioField = document.querySelector("#single-merge-ratio")
        const singleMergeRatioSlider = document.querySelector("#single-merge-ratio-slider")

        function updateSingleMergeRatio() {
            singleMergeRatioField.value = singleMergeRatioSlider.value / 10
            singleMergeRatioField.dispatchEvent(new Event("change"))
        }

        function updateSingleMergeRatioSlider() {
            if (singleMergeRatioField.value < 0) {
                singleMergeRatioField.value = 0
            } else if (singleMergeRatioField.value > 100) {
                singleMergeRatioField.value = 100
            }

            singleMergeRatioSlider.value = singleMergeRatioField.value * 10
            singleMergeRatioSlider.dispatchEvent(new Event("change"))
        }

        singleMergeRatioSlider.addEventListener("input", updateSingleMergeRatio)
        singleMergeRatioField.addEventListener("input", updateSingleMergeRatioSlider)
        updateSingleMergeRatio()

        document.querySelector(".merge-config").addEventListener("change", updateChart)

        document.querySelector("#merge-button").addEventListener("click", async function(e) {
            // Build request template
            let model0 = mergeModelAField.value
            let model1 = mergeModelBField.value
            let request = { model0: model0, model1: model1 }
            request["use_fp16"] = document.querySelector("#merge-fp").value == "fp16"
            let iterations = document.querySelector("#merge-count").value >> 0
            let start = parseFloat(document.querySelector("#merge-start").value)
            let step = parseFloat(document.querySelector("#merge-step").value)

            if (isTabActive(tabSettingsSingle)) {
                start = parseFloat(singleMergeRatioField.value)
                step = 0
                iterations = 1
                addLogMessage(`merge ratio = ${start}%`)
            } else {
                addLogMessage(`start = ${start}%`)
                addLogMessage(`step = ${step}%`)
            }

            if (start + (iterations - 1) * step >= 100) {
                addLogMessage("<i>Aborting: maximum ratio is ≥ 100%</i>")
                addLogMessage("Reduce the number of variations or the step size")
                addLogSeparator()
                document.querySelector("#merge-count").focus()
                return
            }

            if (document.querySelector("#merge-filename").value == "") {
                addLogMessage("<i>Aborting: No output file name specified</i>")
                addLogSeparator()
                document.querySelector("#merge-filename").focus()
                return
            }

            // Disable merge button
            e.target.disabled = true
            e.target.classList.add("disabled")
            let cursor = $("body").css("cursor")
            let label = document.querySelector("#merge-button").innerHTML
            $("body").css("cursor", "progress")
            document.querySelector("#merge-button").innerHTML = "Merging models ..."

            addLogMessage("Merging models")
            addLogMessage("Model A: " + model0)
            addLogMessage("Model B: " + model1)

            // Batch main loop
            for (let i = 0; i < iterations; i++) {
                let alpha = (start + i * step) / 100

                if (isTabActive(tabSettingsBatch)) {
                    switch (document.querySelector("#merge-interpolation").value) {
                        case "SmoothStep":
                            alpha = smoothstep(alpha)
                            break
                        case "SmootherStep":
                            alpha = smootherstep(alpha)
                            break
                        case "SmoothestStep":
                            alpha = smootheststep(alpha)
                            break
                    }
                }
                addLogMessage(`merging batch job ${i + 1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)

                request["out_path"] = document.querySelector("#merge-filename").value
                request["out_path"] += "-" + alpha.toFixed(5) + "." + document.querySelector("#merge-format").value
                addLogMessage(` filename: ${request["out_path"]}`)

                // sdkit documentation: "ratio - the ratio of the second model. 1 means only the second model will be used."
                request["ratio"] = 1 - alpha
                let res = await fetch("/model/merge", {
                    method: "POST",
                    headers: { "Content-Type": "application/json" },
                    body: JSON.stringify(request),
                })
                const data = await res.json()
                addLogMessage(JSON.stringify(data))
            }
            addLogMessage(
                "<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder."
            )
            addLogSeparator()
            // Re-enable merge button
            $("body").css("cursor", cursor)
            document.querySelector("#merge-button").innerHTML = label
            e.target.disabled = false
            e.target.classList.remove("disabled")

            // Update model list
            stableDiffusionModelField.innerHTML = ""
            vaeModelField.innerHTML = ""
            hypernetworkModelField.innerHTML = ""
            await getModels()
        })
    }

    const LoraUI = {
        modelField: undefined,
        keywordsField: undefined,
        notesField: undefined,
        civitaiImportBtn: undefined,
        civitaiSection: undefined,
        civitaiAnchor: undefined,
        image: undefined,
        imagePlaceholder: undefined,

        init() {
            LoraUI.modelField = new ModelDropdown(document.querySelector("#loraModel"), "lora", "None")
            LoraUI.keywordsField = document.querySelector("#lora-manager-keywords")
            LoraUI.notesField = document.querySelector("#lora-manager-notes")
            LoraUI.civitaiImportBtn = document.querySelector("#lora-keyword-from-civitai")
            LoraUI.civitaiSection = document.querySelector("#civitai-section")
            LoraUI.civitaiAnchor = document.querySelector("#civitai-model-page")
            LoraUI.image = document.querySelector("#lora-manager-image")
            LoraUI.imagePlaceholder = document.querySelector("#lora-manager-image-placeholder")
            LoraUI.uploadBtn = document.querySelector("#lora-manager-upload-button")
            LoraUI.uploadInput = document.querySelector("#lora-manager-upload-input")

            LoraUI.modelField.addEventListener("change", LoraUI.updateFields)
            LoraUI.keywordsField.addEventListener("focusout", LoraUI.saveInfos)
            LoraUI.notesField.addEventListener("focusout", LoraUI.saveInfos)
            LoraUI.civitaiImportBtn.addEventListener("click", LoraUI.importFromCivitai)

            LoraUI.uploadBtn.addEventListener("click", (e) => LoraUI.uploadInput.click())
            LoraUI.uploadInput.addEventListener("change", LoraUI.uploadLoraThumb)

            document.addEventListener("saveThumb", LoraUI.updateFields)

            LoraUI.updateFields()
        },

        uploadLoraThumb(e) {
            console.log(e)
            if (LoraUI.uploadInput.files.length === 0) {
                return
            }

            let reader = new FileReader()
            let file = LoraUI.uploadInput.files[0]

            reader.addEventListener("load", (event) => {
                let img = document.createElement("img")
                img.src = reader.result
                onUseAsThumbnailClick(
                    {
                        use_lora_model: LoraUI.modelField.value,
                    },
                    img
                )
            })

            if (file) {
                reader.readAsDataURL(file)
            }
        },

        updateFields() {
            document.getElementById("civitai-section").classList.add("displayNone")
            Bucket.retrieve(`modelinfo/lora/${LoraUI.modelField.value}`)
                .then((info) => {
                    if (info == null) {
                        LoraUI.keywordsField.value = ""
                        LoraUI.notesField.value = ""
                        LoraUI.hideCivitaiLink()
                    } else {
                        LoraUI.keywordsField.value = info.keywords.join("\n")
                        LoraUI.notesField.value = info.notes
                        if ("civitai" in info && info["civitai"] != null) {
                            LoraUI.showCivitaiLink(info.civitai)
                        } else {
                            LoraUI.hideCivitaiLink()
                        }
                    }
                })
            Bucket.getImageAsDataURL(`${profileNameField.value}/lora/${LoraUI.modelField.value}.png`)
                .then((data) => {
                    LoraUI.image.src = data
                    LoraUI.image.classList.remove("displayNone")
                    LoraUI.imagePlaceholder.classList.add("displayNone")
                })
                .catch((error) => {
                    LoraUI.image.classList.add("displayNone")
                    LoraUI.imagePlaceholder.classList.remove("displayNone")
                })
        },

        saveInfos() {
            let info = {
                keywords: LoraUI.keywordsField.value
                    .split("\n")
                    .filter((x) => (x != "")),
                notes: LoraUI.notesField.value,
                civitai: LoraUI.civitaiSection.checkVisibility() ? LoraUI.civitaiAnchor.href : null,
            }
            Bucket.store(`modelinfo/lora/${LoraUI.modelField.value}`, info)
        },

        importFromCivitai() {
            document.body.style["cursor"] = "progress"
            fetch("/sha256/lora/" + LoraUI.modelField.value)
                .then((result) => result.json())
                .then((json) => fetch("https://civitai.com/api/v1/model-versions/by-hash/" + json.digest))
                .then((result) => result.json())
                .then((json) => {
                    document.body.style["cursor"] = "default"
                    if (json == null) {
                        return
                    }
                    if ("trainedWords" in json) {
                        LoraUI.keywordsField.value = json["trainedWords"].join("\n")
                    } else {
                        showToast("No keyword info found.")
                    }
                    if ("modelId" in json) {
                        LoraUI.showCivitaiLink("https://civitai.com/models/" + json.modelId)
                    } else {
                        LoraUI.hideCivitaiLink()
                    }

                    LoraUI.saveInfos()
                })
        },

        showCivitaiLink(href) {
            LoraUI.civitaiSection.classList.remove("displayNone")
            LoraUI.civitaiAnchor.href = href
            LoraUI.civitaiAnchor.innerHTML = LoraUI.civitaiAnchor.href
        },

        hideCivitaiLink() {
            LoraUI.civitaiSection.classList.add("displayNone")
        }
    }

    createTab({
        id: "merge",
        icon: "fa-toolbox",
        label: "Model tools",
        css: mergeCSS,
        content: tabHTML,
        onOpen: ({ firstOpen }) => {
            if (!firstOpen) {
                return
            }
            initMergeUI()
            LoraUI.init()
            const tabMergeUI = document.querySelector("#tab-model-mergeUI")
            const tabLoraUI = document.querySelector("#tab-model-loraUI")
            linkTabContents(tabMergeUI)
            linkTabContents(tabLoraUI)
        },
    })
})()

async function getLoraKeywords(model) {
    return Bucket.retrieve(`modelinfo/lora/${model}`)
        .then((info) => (info ? info.keywords : []))
}
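// Usage sketch (hypothetical caller): other UI code can look up the keywords stored for a
// LORA by name, for example:
//   getLoraKeywords("some-lora-name").then((keywords) => console.log(keywords))
// "some-lora-name" is a placeholder for whatever the LORA model dropdown currently holds.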