Compare commits
896 Commits
*(Commit listing omitted: 896 commits, from e706fae648 through 82f14b087a; the author, date, and message columns were not captured.)*
3rd-PARTY-LICENSES — new file (+27 lines):
jquery-confirm
==============
https://craftpip.github.io/jquery-confirm/

jquery-confirm is licensed under the MIT license:

The MIT License (MIT)

Copyright (c) 2019 Boniface Pereira

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
CHANGES.md — new file (+134 lines):
# What's new?

## v2.5
### Major Changes
- **Nearly twice as fast** - significantly faster speed of image generation. We're now pretty close to automatic1111's speed. Code contributions are welcome to make our project even faster: https://github.com/easydiffusion/sdkit/#is-it-fast
- **Full support for Stable Diffusion 2.1 (including CPU)** - supports loading v1.4 or v2.0 or v2.1 models seamlessly. No need to enable "Test SD2", and no need to add `sd2_` to your SD 2.0 model file names. Works on CPU as well.
- **Memory optimized Stable Diffusion 2.1** - you can now use Stable Diffusion 2.1 models, with the same low VRAM optimizations that we've always had for SD 1.4. Please note, the SD 2.0 and 2.1 models require more GPU and System RAM, as compared to the SD 1.4 and 1.5 models.
- **11 new samplers!** - explore the new samplers, some of which can generate great images in less than 10 inference steps! We've added the Karras and UniPC samplers.
- **Model Merging** - You can now merge two models (`.ckpt` or `.safetensors`) and output `.ckpt` or `.safetensors` models, optionally in `fp16` precision. Details: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
- **Fast loading/unloading of VAEs** - No longer needs to reload the entire Stable Diffusion model each time you change the VAE
- **Database of known models** - automatically picks the right configuration for known models. E.g. we automatically detect and apply "v" parameterization (required for some SD 2.0 models), and "fp32" attention precision (required for some SD 2.1 models).
- **Color correction for img2img** - an option to preserve the color profile (histogram) of the initial image. This is especially useful if you're getting red-tinted images after inpainting/masking.
- **Three GPU Memory Usage Settings** - `High` (fastest, maximum VRAM usage), `Balanced` (default - almost as fast, significantly lower VRAM usage), `Low` (slowest, very low VRAM usage). The `Low` setting is applied automatically for GPUs with less than 4 GB of VRAM.
- **Find models in sub-folders** - This allows you to organize your models into sub-folders inside `models/stable-diffusion`, instead of keeping them all in a single folder.
- **Save metadata as JSON** - You can now save the metadata files as either text or json files (choose in the Settings tab).
- **Major rewrite of the code** - Most of the codebase has been reorganized and rewritten, to make it more manageable and easier for new developers to contribute features. We've separated our core engine into a new project called `sdkit`, which allows anyone to easily integrate Stable Diffusion (and related modules like GFPGAN etc) into their programming projects (via a simple `pip install sdkit`): https://github.com/easydiffusion/sdkit/
- **Name change** - Last, and probably the least, the UI is now called "Easy Diffusion". It indicates the focus of this project - an easy way for people to play with Stable Diffusion.

Our focus remains on an easy installation experience and an easy user interface, while still staying quite powerful in terms of features and speed.

### Detailed changelog
* 2.5.22 - 28 Feb 2023 - Minor styling changes to UI buttons, and the models dropdown.
* 2.5.22 - 28 Feb 2023 - Lots of UI-related bug fixes. Thanks @patriceac.
* 2.5.21 - 22 Feb 2023 - An option to control the size of the image thumbnails. You can use the `Display options` in the top-right corner to change this. Thanks @JeLuf.
* 2.5.20 - 20 Feb 2023 - Support saving images in WEBP format (which consumes less disk space, with similar quality). Thanks @ogmaresca.
* 2.5.20 - 18 Feb 2023 - A setting to block NSFW images from being generated. You can enable this setting in the Settings tab.
* 2.5.19 - 17 Feb 2023 - Initial support for server-side plugins. Currently supports overriding the `get_cond_and_uncond()` function.
* 2.5.18 - 17 Feb 2023 - 5 new samplers! UniPC samplers, some of which produce images in less than 15 steps. Thanks @Schorny.
* 2.5.16 - 13 Feb 2023 - Searchable dropdown for models. This is useful if you have a LOT of models. You can type part of the model name, to auto-search through your models. Thanks @patriceac for the feature, and @AssassinJN for help in UI tweaks!
* 2.5.16 - 13 Feb 2023 - Lots of fixes and improvements to the installer. First round of changes to add Mac support. Thanks @JeLuf.
* 2.5.16 - 13 Feb 2023 - UI bug fixes for the inpainter editor. Thanks @patriceac.
* 2.5.16 - 13 Feb 2023 - Fix broken task reorder. Thanks @JeLuf.
* 2.5.16 - 13 Feb 2023 - Remove a task if all the images inside it have been removed. Thanks @AssassinJN.
* 2.5.16 - 10 Feb 2023 - Embed metadata into the JPG/PNG images, if selected in the "Settings" tab (under "Metadata format"). Thanks @patriceac.
* 2.5.16 - 10 Feb 2023 - Sort models alphabetically in the models dropdown. Thanks @ogmaresca.
* 2.5.16 - 10 Feb 2023 - Support multiple GFPGAN models. Download new GFPGAN models into the `models/gfpgan` folder, and refresh the UI to use them. Thanks @JeLuf.
* 2.5.16 - 10 Feb 2023 - Allow a server to enforce a fixed directory path to save images. This is useful if the server is exposed to a lot of users. This can be set in the `config.json` file as `force_save_path: "/path/to/fixed/save/dir"`. E.g. `force_save_path: "D:/user_images"`. Thanks @JeLuf. (See the `config.json` sketch after this changelog.)
* 2.5.16 - 10 Feb 2023 - The "Make Images" button now shows the correct number of images it'll create when using operators like `{}` or `|`. For e.g. if the prompt is `Photo of a {woman, man}`, then the button will say `Make 2 Images`. Thanks @JeLuf.
* 2.5.16 - 10 Feb 2023 - A bunch of UI-related bug fixes. Thanks @patriceac.
* 2.5.15 - 8 Feb 2023 - Allow using 'balanced' VRAM usage mode on GPUs with 4 GB or less of VRAM. This mode used to be called 'Turbo' in the previous version.
* 2.5.14 - 8 Feb 2023 - Fix broken auto-save settings. We renamed `sampler` to `sampler_name`, which caused old settings to fail.
* 2.5.14 - 6 Feb 2023 - Simplify the UI for merging models, and some other minor UI tweaks. Better error reporting if a model fails to load.
* 2.5.14 - 3 Feb 2023 - Fix the 'Make Similar Images' button, which was producing incorrect images (they weren't very similar).
* 2.5.13 - 1 Feb 2023 - Fix the remaining GPU memory leaks, including a better (more comprehensive) fix for the change in 2.5.12 (27 Jan).
* 2.5.12 - 27 Jan 2023 - Fix a memory leak, which made the UI unresponsive after an out-of-memory error. The allocated memory is now freed up after an error.
* 2.5.11 - 25 Jan 2023 - UI for Merging Models. Thanks @JeLuf. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Model-Merging
* 2.5.10 - 24 Jan 2023 - Reduce the VRAM usage for img2img in 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Fix a bug where img2img would produce poorer-quality images for the same settings, as compared to version 2.4 of this UI.
* 2.5.9 - 23 Jan 2023 - Reduce the VRAM usage for 'balanced' mode (without reducing the rendering speed), to make it similar to v2.4 of the UI.
* 2.5.8 - 17 Jan 2023 - Fix a bug where 'Low' VRAM usage would consume a LOT of VRAM (on higher-end GPUs). Also fixed a bug that caused out-of-memory errors on SD 2.1-768 models, on the 'high' VRAM usage setting.
* 2.5.7 - 16 Jan 2023 - Fix a bug where VAE files ending with .vae.pt weren't getting displayed. Thanks Madrang, rbertus2000 and JeLuf.
* 2.5.6 - 10 Jan 2023 - `Fill` tool for the Image Editor, to allow filling areas with color (or the entire image). And some bug fixes to the Image Editor. Thanks @mdiller.
* 2.5.6 - 10 Jan 2023 - Find Stable Diffusion models in sub-folders inside `models/stable-diffusion`. This allows you to organize your models into sub-folders, instead of keeping them all in a single folder. Thanks @JeLuf.
* 2.5.5 - 9 Jan 2023 - Lots of bug fixes. Thanks @patriceac and @JeLuf.
* 2.5.4 - 29 Dec 2022 - Press the Esc key on the keyboard to close the Image Editor. Thanks @patriceac.
* 2.5.4 - 29 Dec 2022 - Lots of bug fixes in the UI. Thanks @patriceac.
* 2.5.4 - 28 Dec 2022 - Full support for running tasks in parallel on multiple GPUs. Warning: 'Euler Ancestral', 'DPM2 Ancestral' and 'DPM++ 2s Ancestral' may produce slight variations in the image (if run in parallel), so we recommend using the other samplers.
* 2.5.3 - 27 Dec 2022 - Fix broken drag-and-drop for text metadata files (as well as paste from the clipboard).
* 2.5.3 - 27 Dec 2022 - Allow upscaling by 2x as well as 4x.
* 2.5.3 - 27 Dec 2022 - Fix broken renders on a second GPU.
* 2.5.3 - 26 Dec 2022 - Add a `Remove` button on each image. Thanks @JeLuf.
* 2.5.2 - 26 Dec 2022 - Fix broken inpainting if using non-square target images.
* 2.5.2 - 26 Dec 2022 - Fix a bug where an incorrect model config would get used for some SD 2.1 models.
* 2.5.2 - 26 Dec 2022 - Slight performance and memory improvement while rendering using SD 2.1 models.
* 2.5.1 - 25 Dec 2022 - Allow custom config yaml files for models. You can put a config file (`.yaml`) next to the model file, with the same name as the model. For e.g. if you put `robo-diffusion-v2-base.yaml` next to `robo-diffusion-v2-base.ckpt`, it'll automatically use that config file.
* 2.5.1 - 25 Dec 2022 - Fix broken rendering for SD 2.1-768 models. Fix broken rendering of SD 2.0 safetensor models.
* 2.5.0 - 25 Dec 2022 - Major new release! Nearly twice as fast, Full support for SD 2.1 (including low GPU RAM optimizations), 6 new samplers, Model Merging, Fast loading/unloading of VAEs, Database of known models, Color correction for img2img, Three GPU Memory Usage Settings, Save metadata as JSON, Major rewrite of the code, Name change.

## v2.4
### Major Changes
- **Allow reordering the task queue** (by dragging and dropping tasks). Thanks @madrang
- **Automatic scanning for malicious model files** - using `picklescan`, and support for the `safetensor` model format. Thanks @JeLuf
- **Image Editor** - for drawing simple images for guiding the AI. Thanks @mdiller
- **Use pre-trained hypernetworks** - for improving the quality of images. Thanks @C0bra5
- **Support for custom VAE models**. You can place your VAE files in the `models/vae` folder, and refresh the browser page to use them. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder
- **Experimental support for multiple GPUs!** It should work automatically. Just open one browser tab per GPU, and spread your tasks across your GPUs. For e.g. open our UI in two browser tabs if you have two GPUs. You can customize which GPUs it should use in the "Settings" tab, otherwise let it automatically pick the best GPUs. Thanks @madrang. More info: https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs
- **Cleaner UI design** - Show settings and help in new tabs, instead of dropdown popups (which were buggy). Thanks @mdiller
- **Progress bar.** Thanks @mdiller
- **Custom Image Modifiers** - You can now save your custom image modifiers! Your saved modifiers can include special characters like `{}, (), [], |`
- Drag and Drop **text files generated from previously saved images**, and copy settings to clipboard. Thanks @madrang
- Paste settings from clipboard. Thanks @JeLuf
- Bug fixes to reduce the chances of tasks crashing during long multi-hour runs (Chrome can put long-running background tabs to sleep). Thanks @JeLuf and @madrang
- **Improved documentation.** Thanks @JeLuf and @jsuelwald
- Improved the codebase for dealing with system settings and UI settings. Thanks @mdiller
- Help instructions next to some settings, and in the tab
- Show system info in the settings tab
- Keyboard shortcut: Ctrl+Enter to start a task
- Configuration to prevent the browser from opening on startup
- Lots of minor bug fixes
- A `What's New?` tab in the UI
- Ask for a confirmation before clearing the results pane or stopping a render task. The dialog can be skipped by holding down the shift key while clicking on the button.
- Show the network addresses of the server in the system settings dialog
- Support loading models in the safetensor format, for improved safety

### Detailed changelog
* 2.4.24 - 9 Jan 2023 - Urgent fix for failures on old/long-term-support browsers. Thanks @JeLuf.
* 2.4.23/22 - 29 Dec 2022 - Allow rolling back from the upcoming v2.5 change (in beta).
* 2.4.21 - 23 Dec 2022 - Speed up image creation, by removing a delay (regression) of 4-5 seconds between clicking the `Make Image` button and calling the server.
* 2.4.20 - 22 Dec 2022 - `Pause All` button to pause all the pending tasks. Thanks @JeLuf
* 2.4.20 - 22 Dec 2022 - `Undo`/`Redo` buttons in the image editor. Thanks @JeLuf
* 2.4.20 - 22 Dec 2022 - Drag handle to reorder the tasks. This fixed a bug where the metadata was no longer selectable (for copying). Thanks @JeLuf
* 2.4.19 - 17 Dec 2022 - Add Undo/Redo buttons in the Image Editor. Thanks @JeLuf
* 2.4.19 - 10 Dec 2022 - Show init img in task list
* 2.4.19 - 7 Dec 2022 - Use pre-trained hypernetworks while generating images. Thanks @C0bra5
* 2.4.19 - 6 Dec 2022 - Allow processing new tasks first. Thanks @madrang
* 2.4.19 - 6 Dec 2022 - Allow reordering the task queue (by dragging tasks). Thanks @madrang
* 2.4.19 - 6 Dec 2022 - Re-organize the code, to make it easier to write user plugins. Thanks @madrang
* 2.4.18 - 5 Dec 2022 - Make JPEG output quality user-controllable. Thanks @JeLuf
* 2.4.18 - 5 Dec 2022 - Support loading models in the safetensor format, for improved safety. Thanks @JeLuf
* 2.4.18 - 1 Dec 2022 - Image Editor, for drawing simple images for guiding the AI. Thanks @mdiller
* 2.4.18 - 1 Dec 2022 - Disable an image modifier temporarily by right-clicking it. Thanks @patriceac
* 2.4.17 - 30 Nov 2022 - Scroll to the generated image. Thanks @patriceac
* 2.4.17 - 30 Nov 2022 - Show the network addresses of the server in the system settings dialog. Thanks @JeLuf
* 2.4.17 - 30 Nov 2022 - Fix a bug where GFPGAN wouldn't work properly when multiple GPUs tried to run it at the same time. Thanks @madrang
* 2.4.17 - 30 Nov 2022 - Confirm before stopping or clearing all the tasks. Thanks @JeLuf
* 2.4.16 - 29 Nov 2022 - Bug fixes for SD 2.0 - remove the need for patching, default to the SD 1.4 model if trying to load an SD2 model in SD1.4.
* 2.4.15 - 25 Nov 2022 - Experimental support for SD 2.0. Uses lots of memory, not optimized, probably GPU-only.
* 2.4.14 - 22 Nov 2022 - Change the backend to a custom fork of Stable Diffusion
* 2.4.13 - 21 Nov 2022 - Change the modifier weight via mouse wheel, drag to reorder selected modifiers, and some more modifier-related fixes. Thanks @patriceac
* 2.4.12 - 21 Nov 2022 - Another fix for improving how long images take to generate. Reduces the time taken for an enqueued task to start processing.
* 2.4.11 - 21 Nov 2022 - Installer improvements: avoid crashing if the username contains a space or special characters, allow moving/renaming the folder after installation on Windows, whitespace fix on git apply
* 2.4.11 - 21 Nov 2022 - Validate inputs before submitting the Image request
* 2.4.11 - 19 Nov 2022 - New system settings to manage the network config (port number and whether to only listen on localhost)
* 2.4.11 - 19 Nov 2022 - Address a regression in how long images take to generate. Use the previous code for moving a model to the CPU. This improves things by a second or two per image, but we still have a regression (investigating).
* 2.4.10 - 18 Nov 2022 - Textarea for negative prompts. Thanks @JeLuf
* 2.4.10 - 18 Nov 2022 - Improved design for Settings, and rounded toggle buttons instead of checkboxes for a more modern look. Thanks @mdiller
* 2.4.9 - 18 Nov 2022 - Add Picklescan - a scanner for malicious model files. If it finds a malicious file, it will halt the web application and alert the user. Thanks @JeLuf
* 2.4.8 - 18 Nov 2022 - A `Use these settings` button to use the settings from a previously generated image task. Thanks @patriceac
* 2.4.7 - 18 Nov 2022 - Don't crash if a VAE file fails to load
* 2.4.7 - 17 Nov 2022 - Fix a bug where Face Correction (GFPGAN) would fail on cuda:N (i.e. GPUs other than cuda:0), as well as fail on CPU if the system had an incompatible GPU.
* 2.4.6 - 16 Nov 2022 - Fix a regression in VRAM usage during startup, which caused 'Out of Memory' errors when starting on GPUs with 4gb (or less) VRAM
* 2.4.5 - 16 Nov 2022 - Add checkbox for "Open browser on startup".
* 2.4.5 - 16 Nov 2022 - Add a directory for core plugins that ship with Stable Diffusion UI by default.
* 2.4.5 - 16 Nov 2022 - Add a "What's New?" tab as a core plugin, which fetches the contents of CHANGES.md from the app's release branch.
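The `force_save_path` entry above (2.5.16, 10 Feb 2023) refers to the server's `config.json` file. A minimal, hypothetical sketch of that file with only this one setting (a real `config.json` will typically contain other settings as well):

```json
{
    "force_save_path": "D:/user_images"
}
```

With a value like this, the server enforces that generated images are saved under `D:/user_images`, which is useful when the server is exposed to many users.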
Changes to an existing file (file name not captured in this view):

@@ -6,7 +6,7 @@ Thanks

 # For developers:

-If you would like to contribute to this project, there is a discord for dicussion:
+If you would like to contribute to this project, there is a discord for discussion:
 [](https://discord.com/invite/u9yhsFmEkB)

 ## Development environment for UI (frontend and server) changes

@@ -40,6 +40,7 @@ or for Windows
 `mklink /J \projects\stable-diffusion-ui-archive\ui \projects\stable-diffusion-ui-repo\ui` (link name first, source repo dir second)
 9) Run the project again (like in step 2) and ensure you can still use the UI.
 10) Congrats, now any changes you make in your repo `ui` folder are linked to this running archive of the app and can be previewed in the browser.
+11) Please update CHANGES.md in your pull requests.

 Check the `ui/frontend/build/README.md` for instructions on running and building the React code.
NSIS/README.md — new file (+1 line):

Scripts to be used with the Nullsoft Scriptable Installation System
NSIS/astro.bmp — new binary file (image, 288 KiB)
NSIS/sd.ico — new binary file (icon, 200 KiB)
NSIS/sdui.nsi — new file (+265 lines):
; Script generated by the HM NIS Edit Script Wizard.

Target x86-unicode
Unicode True
!AddPluginDir /x86-unicode "."
; HM NIS Edit Wizard helper defines
!define PRODUCT_NAME "Stable Diffusion UI"
!define PRODUCT_VERSION "Installer 2.35"
!define PRODUCT_PUBLISHER "cmdr2 and contributors"
!define PRODUCT_WEB_SITE "https://stable-diffusion-ui.github.io"
!define PRODUCT_DIR_REGKEY "Software\Microsoft\Cmdr2\App Paths\installer.exe"

; MUI 1.67 compatible ------
!include "MUI.nsh"
!include "LogicLib.nsh"
!include "nsDialogs.nsh"

Var Dialog
Var Label
Var Button

Var InstDirLen
Var LongPathsEnabled
Var AccountType

;---------------------------------------------------------------------------------------------------------
; This function returns the number of spaces in a string.
; The string is passed on the stack (using Push $STRING)
; The result is also returned on the stack and can be consumed with Pop $var
; https://nsis.sourceforge.io/Check_for_spaces_in_a_directory_path
Function CheckForSpaces
Exch $R0
Push $R1
Push $R2
Push $R3
StrCpy $R1 -1
StrCpy $R3 $R0
StrCpy $R0 0
loop:
StrCpy $R2 $R3 1 $R1
IntOp $R1 $R1 - 1
StrCmp $R2 "" done
StrCmp $R2 " " 0 loop
IntOp $R0 $R0 + 1
Goto loop
done:
Pop $R3
Pop $R2
Pop $R1
Exch $R0
FunctionEnd

;---------------------------------------------------------------------------------------------------------
; The function DirectoryLeave is called after the user chose the installation directory.
; If it calls "abort", the user is sent back to choose a different directory.
Function DirectoryLeave
; check whether the installation directory path is longer than 30 characters.
; If yes, we suggest to the user to enable long filename support
;----------------------------------------------------------------------------
StrLen $InstDirLen "$INSTDIR"

; Check whether the registry key that allows for >260 characters in a path name is set
ReadRegStr $LongPathsEnabled HKLM "SYSTEM\CurrentControlSet\Control\FileSystem" "LongPathsEnabled"

${If} $InstDirLen > 30
${AndIf} $LongPathsEnabled == "0"
; Check whether we're in the Admin group
UserInfo::GetAccountType
Pop $AccountType

${If} $AccountType == "Admin"
${AndIf} ${Cmd} `MessageBox MB_YESNO|MB_ICONQUESTION 'The path name is too long. $\n$\nYou can either enable long file name support in Windows,$\nor you can go back and choose a different path.$\n$\nFor details see: shorturl.at/auBD1$\n$\nEnable long path name support in Windows?' IDYES`
; Enable long path names
WriteRegDWORD HKLM "SYSTEM\CurrentControlSet\Control\FileSystem" "LongPathsEnabled" 1
${Else}
MessageBox MB_OK|MB_ICONEXCLAMATION "Installation path name too long. The installation path must not have more than 30 characters."
abort
${EndIf}
${EndIf}

; Check for spaces in the installation directory path.
; ----------------------------------------------------

; $R0 = CheckForSpaces( $INSTDIR )
Push $INSTDIR # Input string (install path).
Call CheckForSpaces
Pop $R0 # The function returns the number of spaces found in the input string.

; Check if any spaces exist in $INSTDIR.
${If} $R0 != 0
; Plural if more than 1 space in $INSTDIR.
; If $R0 == 1: $R1 = ""; else: $R1 = "s"
StrCmp $R0 1 0 +3
StrCpy $R1 ""
Goto +2
StrCpy $R1 "s"

; Show message box then take the user back to the Directory page.
MessageBox MB_OK|MB_ICONEXCLAMATION "Error: The Installation directory \
has $R0 space character$R1.$\nPlease choose an installation directory without space characters."
Abort
${EndIf}

; Check for NTFS filesystem. Installations on FAT fail.
; -----------------------------------------------------
StrCpy $5 $INSTDIR 3
System::Call 'Kernel32::GetVolumeInformation(t "$5",t,i ${NSIS_MAX_STRLEN},*i,*i,*i,t.r1,i ${NSIS_MAX_STRLEN})i.r0'
${If} $0 <> 0
${AndIf} $1 == "NTFS"
MessageBox mb_ok "$5 has filesystem type '$1'.$\nOnly NTFS filesystems are supported.$\nPlease choose a different drive."
Abort
${EndIf}

FunctionEnd


;---------------------------------------------------------------------------------------------------------
; Open the MS download page in a browser and enable the [Next] button
Function MSMediaFeaturepack
ExecShell "open" "https://www.microsoft.com/en-us/software-download/mediafeaturepack"

GetDlgItem $0 $HWNDPARENT 1
EnableWindow $0 1
FunctionEnd

;---------------------------------------------------------------------------------------------------------
; Install the MS Media Feature Pack, if it is missing (e.g. on Windows 10 N)
Function MediaPackDialog
!insertmacro MUI_HEADER_TEXT "Windows Media Feature Pack" "Required software module is missing"

; Skip this dialog if mf.dll is installed
${If} ${FileExists} "$WINDIR\system32\mf.dll"
Abort
${EndIf}

nsDialogs::Create 1018
Pop $Dialog

${If} $Dialog == error
Abort
${EndIf}

${NSD_CreateLabel} 0 0 100% 48u "The Windows Media Feature Pack is missing on this computer. It is required for the Stable Diffusion UI.$\nYou can continue the installation after installing the Windows Media Feature Pack."
Pop $Label

${NSD_CreateButton} 10% 49u 80% 12u "Download Media Feature Pack from Microsoft"
Pop $Button

GetFunctionAddress $0 MSMediaFeaturepack
nsDialogs::OnClick $Button $0
GetDlgItem $0 $HWNDPARENT 1
EnableWindow $0 0
nsDialogs::Show
FunctionEnd

;---------------------------------------------------------------------------------------------------------
; MUI Settings
;---------------------------------------------------------------------------------------------------------
!define MUI_ABORTWARNING
!define MUI_ICON "sd.ico"

!define MUI_WELCOMEFINISHPAGE_BITMAP "astro.bmp"

; Welcome page
!define MUI_WELCOMEPAGE_TEXT "This installer will guide you through the installation of Stable Diffusion UI.$\n$\n\
Click Next to continue."
!insertmacro MUI_PAGE_WELCOME
Page custom MediaPackDialog

; License page
!insertmacro MUI_PAGE_LICENSE "..\LICENSE"
!insertmacro MUI_PAGE_LICENSE "..\CreativeML Open RAIL-M License"
; Directory page
!define MUI_PAGE_CUSTOMFUNCTION_LEAVE "DirectoryLeave"
!insertmacro MUI_PAGE_DIRECTORY

; Instfiles page
!insertmacro MUI_PAGE_INSTFILES

; Finish page
!define MUI_FINISHPAGE_RUN "$INSTDIR\Start Stable Diffusion UI.cmd"
!insertmacro MUI_PAGE_FINISH

; Language files
!insertmacro MUI_LANGUAGE "English"
;---------------------------------------------------------------------------------------------------------
; MUI end
;---------------------------------------------------------------------------------------------------------

Name "${PRODUCT_NAME} ${PRODUCT_VERSION}"
OutFile "Install Stable Diffusion UI.exe"
InstallDir "C:\Stable-Diffusion-UI\"
InstallDirRegKey HKLM "${PRODUCT_DIR_REGKEY}" ""
ShowInstDetails show

;---------------------------------------------------------------------------------------------------------
; List of files to be installed
Section "MainSection" SEC01
SetOutPath "$INSTDIR"
File "..\CreativeML Open RAIL-M License"
File "..\How to install and run.txt"
File "..\LICENSE"
File "..\Start Stable Diffusion UI.cmd"
SetOutPath "$INSTDIR\scripts"
File "..\scripts\bootstrap.bat"
File "..\scripts\install_status.txt"
File "..\scripts\on_env_start.bat"
File "C:\windows\system32\curl.exe"
CreateDirectory "$INSTDIR\profile"
CreateDirectory "$SMPROGRAMS\Stable Diffusion UI"
CreateShortCut "$SMPROGRAMS\Stable Diffusion UI\Start Stable Diffusion UI.lnk" "$INSTDIR\Start Stable Diffusion UI.cmd"
SectionEnd

;---------------------------------------------------------------------------------------------------------
; Our installer only needs 25 KB, but once it has run, we need 25 GB
; So we need to overwrite the automatically detected space requirements.
; https://nsis.sourceforge.io/Docs/Chapter4.html#4.9.13.7
; The example in section 4.9.13.7 seems to be wrong: the number
; needs to be provided in Kilobytes.
Function .onInit
; Set required size of section 'SEC01' to 25 Gigabytes
SectionSetSize ${SEC01} 26214400


; Check system memory size. We need at least 8GB
; ----------------------------------------------------

; allocate a few bytes of memory
System::Alloc 64
Pop $1

; Retrieve HW info from the Windows Kernel
System::Call "*$1(i64)"
System::Call "Kernel32::GlobalMemoryStatusEx(i r1)"
; unpack the data into $R2 - $R10
System::Call "*$1(i.r2, i.r3, l.r4, l.r5, l.r6, l.r7, l.r8, l.r9, l.r10)"

# free up the memory
System::Free $1

; Result mapping:
; "Structure size: $2 bytes"
; "Memory load: $3%"
; "Total physical memory: $4 bytes"
; "Free physical memory: $5 bytes"
; "Total page file: $6 bytes"
; "Free page file: $7 bytes"
; "Total virtual: $8 bytes"
; "Free virtual: $9 bytes"

; Mem size in MB
System::Int64Op $4 / 1048576
Pop $4

${If} $4 < "8000"
MessageBox MB_OK|MB_ICONEXCLAMATION "Warning!$\n$\nYour system has less than 8GB of memory (RAM).$\n$\n\
You can still try to install Stable Diffusion UI,$\nbut it might have problems to start, or run$\nvery slowly."
${EndIf}

FunctionEnd


;Section -Post
; WriteRegStr HKLM "${PRODUCT_DIR_REGKEY}" "" "$INSTDIR\installer.exe"
;SectionEnd
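Two quick arithmetic checks on the numbers used in `.onInit` above: `SectionSetSize` expects kilobytes, and 25 GB × 1024 × 1024 = 26,214,400 KB, which is the value passed for `${SEC01}`; similarly, `System::Int64Op $4 / 1048576` divides the total physical memory (in bytes) by 1024², giving megabytes, so comparing against `8000` is roughly an 8 GB threshold.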
README.md — modified (151 lines changed):

@@ -1,67 +1,114 @@
|
|||||||
# Stable Diffusion UI
|
# Easy Diffusion 2.5
|
||||||
### Easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer. No dependencies or technical knowledge required. 1-click install, powerful features.
|
### The easiest way to install and use [Stable Diffusion](https://github.com/CompVis/stable-diffusion) on your own computer.
|
||||||
|
|
||||||
[](https://discord.com/invite/u9yhsFmEkB) (for support, and development discussion) | [Troubleshooting guide for common problems](Troubleshooting.md)
|
Does not require technical knowledge, does not require pre-installed software. 1-click install, powerful features, friendly community.
|
||||||
|
|
||||||
----
|
[Installation guide](#step-1-download-and-extract-the-installer) | [Troubleshooting guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting) | <sub>[](https://discord.com/invite/u9yhsFmEkB)</sub> <sup>(for support queries, and development discussions)</sup>
|
||||||
|
|
||||||
## Step 1: Download the installer
|

|
||||||
|
|
||||||
|
# Step 1: Download and extract the installer
|
||||||
|
Click the download button for your operating system:
|
||||||
|
|
||||||
<p float="left">
|
<p float="left">
|
||||||
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-win.png" width="200" /></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.15/stable-diffusion-ui-windows.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-win.png" width="200" /></a>
|
||||||
<a href="#installation"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/develop/media/download-linux.png" width="200" /></a>
|
<a href="https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.15/stable-diffusion-ui-linux.zip"><img src="https://github.com/cmdr2/stable-diffusion-ui/raw/main/media/download-linux.png" width="200" /></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
## Step 2: Run the program
|
## On Windows:
|
||||||
- On Windows: Double-click `Start Stable Diffusion UI.cmd`
|
1. Unzip/extract the folder `stable-diffusion-ui` which should be in your downloads folder, unless you changed your default downloads destination.
|
||||||
- On Linux: Run `./start.sh` in a terminal
|
2. Move the `stable-diffusion-ui` folder to your `C:` drive (or any other drive like `D:`, at the top root level). `C:\stable-diffusion-ui` or `D:\stable-diffusion-ui` as examples. This will avoid a common problem with Windows (file path length limits).
|
||||||
|
## On Linux:
|
||||||
|
1. Unzip/extract the folder `stable-diffusion-ui` which should be in your downloads folder, unless you changed your default downloads destination.
|
||||||
|
2. Open a terminal window, and navigate to the `stable-diffusion-ui` directory.
|
||||||
|
|
||||||
## Step 3: There is no step 3!
|
# Step 2: Run the program
|
||||||
It's simple to get started. You don't need to install or struggle with Python, Anaconda, Docker etc.
|
## On Windows:
|
||||||
|
Double-click `Start Stable Diffusion UI.cmd`.
|
||||||
|
If Windows SmartScreen prevents you from running the program click `More info` and then `Run anyway`.
|
||||||
|
## On Linux:
|
||||||
|
Run `./start.sh` (or `bash start.sh`) in a terminal.
|
||||||
|
|
||||||
The installer will take care of whatever is needed. A friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) will help you if you face any problems.
|
The installer will take care of whatever is needed. If you face any problems, you can join the friendly [Discord community](https://discord.com/invite/u9yhsFmEkB) and ask for assistance.
|
||||||
|
|
||||||
|
# Step 3: There is no Step 3. It's that simple!
|
||||||
|
|
||||||
|
**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.
|
||||||
|
|
||||||
----
|
----
|
||||||
|
|
||||||
# Easy for new users, powerful features for advanced users
|
# Easy for new users, powerful features for advanced users
|
||||||
### Features:
|
## Features:
|
||||||
- **No Dependencies or Technical Knowledge Required**: 1-click install for Windows 10/11 and Linux. *No dependencies*, no need for WSL or Docker or Conda or technical setup. Just download and run!
|
|
||||||
- **Clutter-free UI**: a friendly and simple UI, while providing a lot of powerful features
|
### User experience
|
||||||
- Supports "*Text to Image*" and "*Image to Image*"
|
- **Hassle-free installation**: Does not require technical knowledge, does not require pre-installed software. Just download and run!
|
||||||
- **Custom Models**: Use your own `.ckpt` file, by placing it inside the `models/stable-diffusion` folder!
|
- **Clutter-free UI**: A friendly and simple UI, while providing a lot of powerful features.
|
||||||
- **Live Preview**: See the image as the AI is drawing it
|
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish.
|
||||||
- **Task Queue**: Queue up all your ideas, without waiting for the current task to finish
|
- **Intelligent Model Detection**: Automatically figures out the YAML config file to use for the chosen model (via a models database).
|
||||||
- **In-Painting**: Specify areas of your image to paint into
|
- **Live Preview**: See the image as the AI is drawing it.
|
||||||
- **Face Correction (GFPGAN) and Upscaling (RealESRGAN)**
|
|
||||||
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
- **Image Modifiers**: A library of *modifier tags* like *"Realistic"*, *"Pencil Sketch"*, *"ArtStation"* etc. Experiment with various styles quickly.
|
||||||
- **Loopback**: Use the output image as the input image for the next img2img task
|
- **Multiple Prompts File**: Queue multiple prompts by entering one prompt per line, or by running a text file.
|
||||||
|
- **Save generated images to disk**: Save your images to your PC!
|
||||||
|
- **UI Themes**: Customize the program to your liking.
|
||||||
|
- **Searchable models dropdown**: organize your models into sub-folders, and search through them in the UI.
|
||||||
|
|
||||||
|
### Image generation
|
||||||
|
- **Supports**: "*Text to Image*" and "*Image to Image*".
|
||||||
|
- **19 Samplers**: `ddim`, `plms`, `heun`, `euler`, `euler_a`, `dpm2`, `dpm2_a`, `lms`, `dpm_solver_stability`, `dpmpp_2s_a`, `dpmpp_2m`, `dpmpp_sde`, `dpm_fast`, `dpm_adaptive`, `unipc_snr`, `unipc_tu`, `unipc_tq`, `unipc_snr_2`, `unipc_tu_2`.
|
||||||
|
- **In-Painting**: Specify areas of your image to paint into.
|
||||||
|
- **Simple Drawing Tool**: Draw basic images to guide the AI, without needing an external drawing program.
|
||||||
|
- **Face Correction (GFPGAN)**
|
||||||
|
- **Upscaling (RealESRGAN)**
|
||||||
|
- **Loopback**: Use the output image as the input image for the next img2img task.
|
||||||
- **Negative Prompt**: Specify aspects of the image to *remove*.
|
- **Negative Prompt**: Specify aspects of the image to *remove*.
|
||||||
- **Attention/Emphasis:** () in the prompt increases the model's attention to enclosed words, and [] decreases it
|
- **Attention/Emphasis**: () in the prompt increases the model's attention to enclosed words, and [] decreases it.
|
||||||
- **Weighted Prompts:** Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2`
|
- **Weighted Prompts**: Use weights for specific words in your prompt to change their importance, e.g. `red:2.4 dragon:1.2` (a combined example follows this list).
|
||||||
- **Prompt Matrix:** (in beta) Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`
|
- **Prompt Matrix**: Quickly create multiple variations of your prompt, e.g. `a photograph of an astronaut riding a horse | illustration | cinematic lighting`.
|
||||||
- **Lots of Samplers:** ddim, plms, heun, euler, euler_a, dpm2, dpm2_a, lms
|
- **1-click Upscale/Face Correction**: Upscale or correct an image after it has been generated.
|
||||||
- **Multiple Prompts File:** Queue multiple prompts by entering one prompt per line, or by running a text file
|
- **Make Similar Images**: Click to generate multiple variations of a generated image.
|
||||||
- **NSFW Setting**: A setting in the UI to control *NSFW content*
|
- **NSFW Setting**: A setting in the UI to control *NSFW content*.
|
||||||
- **JPEG/PNG output**
|
- **JPEG/PNG/WEBP output**: Multiple file formats.
|
||||||
- **Save generated images to disk**
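The prompt-syntax features above (attention with `()` and `[]`, word weights, and one prompt per line via the *Multiple Prompts File* option) can be combined into a small prompts file. A minimal illustration; the file name and the prompts themselves are only examples:

```bash
# Create an example prompts file -- one prompt per line (file name is illustrative)
cat > prompts.txt <<'EOF'
a photograph of an astronaut riding a horse, ((cinematic lighting)), [blurry]
red:2.4 dragon:1.2 flying over a medieval castle
EOF
```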
|
|
||||||
|
### Advanced features
|
||||||
|
- **Custom Models**: Use your own `.ckpt` or `.safetensors` file, by placing it inside the `models/stable-diffusion` folder! (See the example after this list.)
|
||||||
|
- **Stable Diffusion 2.1 support**
|
||||||
|
- **Merge Models**
|
||||||
|
- **Use custom VAE models**
|
||||||
|
- **Use pre-trained Hypernetworks**
|
||||||
|
- **Use custom GFPGAN models**
|
||||||
|
- **UI Plugins**: Choose from a growing list of [community-generated UI plugins](https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Plugins), or write your own plugin to add features to the project!
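To make the custom-model workflow concrete: dropping a checkpoint into the models folder is all that is needed. A small sketch; the file names are illustrative and the commands are run from inside the `stable-diffusion-ui` folder:

```bash
# Copy a custom Stable Diffusion checkpoint into the models folder (names are examples)
cp ~/Downloads/my-custom-model.safetensors models/stable-diffusion/
# Custom VAE files go into the neighbouring vae folder
cp ~/Downloads/my-custom-vae.vae.pt models/vae/
```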
|
||||||
|
|
||||||
|
### Performance and security
|
||||||
|
- **Fast**: Creates a 512x512 image with euler_a in 5 seconds, on an NVIDIA 3060 12GB.
|
||||||
|
- **Low Memory Usage**: Create 512x512 images with less than 3 GB of GPU RAM, and 768x768 images with less than 4 GB of GPU RAM!
|
||||||
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
- **Use CPU setting**: If you don't have a compatible graphics card, but still want to run it on your CPU.
|
||||||
|
- **Multi-GPU support**: Automatically spreads your tasks across multiple GPUs (if available), for faster performance!
|
||||||
|
- **Auto scan for malicious models**: Uses picklescan to prevent malicious models.
|
||||||
|
- **Safetensors support**: Support loading models in the safetensor format, for improved safety.
|
||||||
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
- **Auto-updater**: Gets you the latest improvements and bug-fixes to a rapidly evolving project.
|
||||||
- **Low Memory Usage**: Creates 512x512 images with less than 4GB of VRAM!
|
|
||||||
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
|
- **Developer Console**: A developer-mode for those who want to modify their Stable Diffusion code, and edit the conda environment.
|
||||||
|
|
||||||
### Easy for new users:
|
**(and a lot more)**
|
||||||

|
|
||||||
|
|
||||||
### Powerful features for advanced users:
|
----
|
||||||

|
|
||||||
|
|
||||||
### Live Preview
|
## Easy for new users:
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
## Powerful features for advanced users:
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
|
## Live Preview
|
||||||
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.
|
Useful for judging (and stopping) an image quickly, without waiting for it to finish rendering.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### Task Queue
|
## Task Queue
|
||||||

|

|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# System Requirements
|
# System Requirements
|
||||||
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
|
1. Windows 10/11, or Linux. Experimental support for Mac is coming soon.
|
||||||
@@ -70,23 +117,10 @@ Useful for judging (and stopping) an image quickly, without waiting for it to fi
|
|||||||
|
|
||||||
You don't need to install or struggle with Python, Anaconda, Docker, etc. The installer will take care of whatever is needed.
|
||||||
|
|
||||||
# Installation
|
----
|
||||||
1. **Download** [for Windows](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-windows.zip) or [for Linux](https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.3.5/stable-diffusion-ui-linux.zip).
|
|
||||||
|
|
||||||
2. **Extract**:
|
|
||||||
- For Windows: After unzipping the file, please move the `stable-diffusion-ui` folder to your `C:` (or any drive like D:, at the top root level), e.g. `C:\stable-diffusion-ui`. This will avoid a common problem with Windows (file path length limits).
|
|
||||||
- For Linux: After extracting the .tar.xz file, please open a terminal, and go to the `stable-diffusion-ui` directory.
|
|
||||||
|
|
||||||
3. **Run**:
|
|
||||||
- For Windows: `Start Stable Diffusion UI.cmd` by double-clicking it.
|
|
||||||
- For Linux: In the terminal, run `./start.sh` (or `bash start.sh`)
|
|
||||||
|
|
||||||
This will automatically install Stable Diffusion, set it up, and start the interface. No additional steps are needed.
|
|
||||||
|
|
||||||
**To Uninstall:** Just delete the `stable-diffusion-ui` folder to uninstall all the downloaded packages.
|
|
||||||
|
|
||||||
# How to use?
|
# How to use?
|
||||||
Please use our [guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use) to understand how to use the features in this UI.
|
Please refer to our [guide](https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use) to understand how to use the features in this UI.
|
||||||
|
|
||||||
# Bug reports and code contributions welcome
|
||||||
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
If there are any problems or suggestions, please feel free to ask on the [discord server](https://discord.com/invite/u9yhsFmEkB) or [file an issue](https://github.com/cmdr2/stable-diffusion-ui/issues).
|
||||||
@@ -102,4 +136,11 @@ If you have any code contributions in mind, please feel free to say Hi to us on
|
|||||||
# Disclaimer
|
# Disclaimer
|
||||||
The authors of this project are not responsible for any content generated using this interface.
|
The authors of this project are not responsible for any content generated using this interface.
|
||||||
|
|
||||||
The license of this software forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation, or target vulnerable groups. For the full list of restrictions please read [the license](LICENSE). You agree to these terms by using this software.
|
The license of this software forbids you from sharing any content that:
|
||||||
|
- Violates any laws.
|
||||||
|
- Produces any harm to a person or persons.
|
||||||
|
- Disseminates (spreads) any personal information that would be meant for harm.
|
||||||
|
- Spreads misinformation.
|
||||||
|
- Targets vulnerable groups.
|
||||||
|
|
||||||
|
For the full list of restrictions please read [the License](LICENSE). You agree to these terms by using this software.
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
Moved to https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting
|
|
||||||
@@ -23,12 +23,21 @@ call conda --version
|
|||||||
|
|
||||||
echo.
|
echo.
|
||||||
|
|
||||||
@rem activate the environment
|
@rem activate the legacy environment (if present) and set PYTHONPATH
|
||||||
call conda activate .\stable-diffusion\env
|
if exist "installer_files\env" (
|
||||||
|
set PYTHONPATH=%cd%\installer_files\env\lib\site-packages
|
||||||
|
)
|
||||||
|
if exist "stable-diffusion\env" (
|
||||||
|
call conda activate .\stable-diffusion\env
|
||||||
|
set PYTHONPATH=%cd%\stable-diffusion\env\lib\site-packages
|
||||||
|
)
|
||||||
|
|
||||||
call where python
|
call where python
|
||||||
call python --version
|
call python --version
|
||||||
|
|
||||||
|
echo PYTHONPATH=%PYTHONPATH%
|
||||||
|
|
||||||
|
@rem done
|
||||||
echo.
|
echo.
|
||||||
|
|
||||||
cmd /k
|
cmd /k
|
||||||
|
|||||||
@@ -1,8 +1,27 @@
|
|||||||
@echo off
|
@echo off
|
||||||
|
|
||||||
cd /d %~dp0
|
cd /d %~dp0
|
||||||
|
echo Install dir: %~dp0
|
||||||
|
|
||||||
set PATH=C:\Windows\System32;%PATH%
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
|
if exist "on_sd_start.bat" (
|
||||||
|
echo ================================================================================
|
||||||
|
echo.
|
||||||
|
echo !!!! WARNING !!!!
|
||||||
|
echo.
|
||||||
|
echo It looks like you're trying to run the installation script from a source code
|
||||||
|
echo download. This will not work.
|
||||||
|
echo.
|
||||||
|
echo Recommended: Please close this window and download the installer from
|
||||||
|
echo https://stable-diffusion-ui.github.io/docs/installation/
|
||||||
|
echo.
|
||||||
|
echo ================================================================================
|
||||||
|
echo.
|
||||||
|
pause
|
||||||
|
exit /b
|
||||||
|
)
|
||||||
|
|
||||||
@rem set legacy installer's PATH, if it exists
|
@rem set legacy installer's PATH, if it exists
|
||||||
if exist "installer" set PATH=%cd%\installer;%cd%\installer\Library\bin;%cd%\installer\Scripts;%cd%\installer\Library\usr\bin;%PATH%
|
if exist "installer" set PATH=%cd%\installer;%cd%\installer\Library\bin;%cd%\installer\Scripts;%cd%\installer\Library\usr\bin;%PATH%
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
@echo off
|
@echo off
|
||||||
|
setlocal enabledelayedexpansion
|
||||||
|
|
||||||
@rem This script will install git and conda (if not found on the PATH variable)
|
@rem This script will install git and conda (if not found on the PATH variable)
|
||||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||||
@@ -24,14 +25,14 @@ if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Librar
|
|||||||
set PACKAGES_TO_INSTALL=
|
set PACKAGES_TO_INSTALL=
|
||||||
|
|
||||||
if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
|
if not exist "%LEGACY_INSTALL_ENV_DIR%\etc\profile.d\conda.sh" (
|
||||||
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
if not exist "%INSTALL_ENV_DIR%\etc\profile.d\conda.sh" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda python=3.8.5
|
||||||
)
|
)
|
||||||
|
|
||||||
call git --version >.tmp1 2>.tmp2
|
call git --version >.tmp1 2>.tmp2
|
||||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
if "!ERRORLEVEL!" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||||
|
|
||||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
||||||
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
|
if "!ERRORLEVEL!" EQU "0" set umamba_exists=T
|
||||||
|
|
||||||
@rem (if necessary) install git and conda into a contained environment
|
@rem (if necessary) install git and conda into a contained environment
|
||||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||||
@@ -42,11 +43,11 @@ if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
|||||||
mkdir "%MAMBA_ROOT_PREFIX%"
|
mkdir "%MAMBA_ROOT_PREFIX%"
|
||||||
call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
call curl -Lk "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||||
|
|
||||||
@REM if "%ERRORLEVEL%" NEQ "0" (
|
if "!ERRORLEVEL!" NEQ "0" (
|
||||||
@REM echo "There was a problem downloading micromamba. Cannot continue."
|
echo "There was a problem downloading micromamba. Cannot continue."
|
||||||
@REM pause
|
pause
|
||||||
@REM exit /b
|
exit /b
|
||||||
@REM )
|
)
|
||||||
|
|
||||||
mkdir "%APPDATA%"
|
mkdir "%APPDATA%"
|
||||||
mkdir "%USERPROFILE%"
|
mkdir "%USERPROFILE%"
|
||||||
|
|||||||
@@ -21,9 +21,19 @@ OS_ARCH=$(uname -m)
|
|||||||
case "${OS_ARCH}" in
|
case "${OS_ARCH}" in
|
||||||
x86_64*) OS_ARCH="64";;
|
x86_64*) OS_ARCH="64";;
|
||||||
arm64*) OS_ARCH="arm64";;
|
arm64*) OS_ARCH="arm64";;
|
||||||
|
aarch64*) OS_ARCH="arm64";;
|
||||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||||
esac
|
esac
|
||||||
|
|
||||||
|
if ! which curl; then fail "'curl' not found. Please install curl."; fi
|
||||||
|
if ! which tar; then fail "'tar' not found. Please install tar."; fi
|
||||||
|
if ! which bzip2; then fail "'bzip2' not found. Please install bzip2."; fi
|
||||||
|
|
||||||
|
if pwd | grep ' '; then fail "The installation directory's path contains a space character. Conda will fail to install. Please change the directory."; fi
|
||||||
|
if [ -f /proc/cpuinfo ]; then
|
||||||
|
if ! cat /proc/cpuinfo | grep avx | uniq; then fail "Your CPU doesn't support AVX."; fi
|
||||||
|
fi
|
||||||
|
|
||||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||||
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
||||||
|
|
||||||
@@ -39,7 +49,7 @@ if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
|||||||
|
|
||||||
PACKAGES_TO_INSTALL=""
|
PACKAGES_TO_INSTALL=""
|
||||||
|
|
||||||
if [ ! -e "$LEGACY_INSTALL_ENV_DIR/etc/profile.d/conda.sh" ] && [ ! -e "$INSTALL_ENV_DIR/etc/profile.d/conda.sh" ]; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
if [ ! -e "$LEGACY_INSTALL_ENV_DIR/etc/profile.d/conda.sh" ] && [ ! -e "$INSTALL_ENV_DIR/etc/profile.d/conda.sh" ]; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda python=3.8.5"; fi
|
||||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||||
|
|
||||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||||
@@ -51,7 +61,7 @@ if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
|||||||
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
||||||
|
|
||||||
mkdir -p "$MAMBA_ROOT_PREFIX"
|
mkdir -p "$MAMBA_ROOT_PREFIX"
|
||||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"
|
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj -O bin/micromamba > "$MAMBA_ROOT_PREFIX/micromamba"
|
||||||
|
|
||||||
if [ "$?" != "0" ]; then
|
if [ "$?" != "0" ]; then
|
||||||
echo
|
echo
|
||||||
|
|||||||
scripts/check_modules.py (new file, 13 lines)
@@ -0,0 +1,13 @@
|
|||||||
|
'''
|
||||||
|
This script checks if the given modules exist
|
||||||
|
'''
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import pkgutil
|
||||||
|
|
||||||
|
modules = sys.argv[1:]
|
||||||
|
missing_modules = []
|
||||||
|
for m in modules:
|
||||||
|
if pkgutil.find_loader(m) is None:
|
||||||
|
print('module', m, 'not found')
|
||||||
|
exit(1)
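The installer scripts call this helper to decide whether a `pip install` is needed: it exits with a non-zero code as soon as a module cannot be found. A minimal sketch of that pattern (paths and module names are examples taken from the installer):

```bash
# Sketch: install torch/torchvision only if check_modules.py reports them missing
if python scripts/check_modules.py torch torchvision; then
    echo "torch and torchvision are already installed."
else
    python -m pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116
fi
```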
|
||||||
@@ -26,15 +26,26 @@ if [ "$0" == "bash" ]; then
|
|||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
# activate the environment
|
# activate the legacy environment (if present) and set PYTHONPATH
|
||||||
|
if [ -e "installer_files/env" ]; then
|
||||||
|
export PYTHONPATH="$(pwd)/installer_files/env/lib/python3.8/site-packages"
|
||||||
|
fi
|
||||||
|
if [ -e "stable-diffusion/env" ]; then
|
||||||
CONDA_BASEPATH=$(conda info --base)
|
CONDA_BASEPATH=$(conda info --base)
|
||||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||||
|
|
||||||
conda activate ./stable-diffusion/env
|
conda activate ./stable-diffusion/env
|
||||||
|
|
||||||
|
export PYTHONPATH="$(pwd)/stable-diffusion/env/lib/python3.8/site-packages"
|
||||||
|
fi
|
||||||
|
|
||||||
which python
|
which python
|
||||||
python --version
|
python --version
|
||||||
|
|
||||||
|
echo "PYTHONPATH=$PYTHONPATH"
|
||||||
|
|
||||||
|
# done
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
else
|
else
|
||||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||||
|
|||||||
@@ -28,5 +28,12 @@ EOF
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
filesize() {
|
||||||
|
case "$(uname -s)" in
|
||||||
|
Linux*) stat -c "%s" $1;;
|
||||||
|
Darwin*) stat -f "%z" $1;;
|
||||||
|
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||||
|
esac
|
||||||
|
}
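A short illustration of how this helper can be used. The path and the expected byte size mirror the 4 GB `sd-v1-4.ckpt` check performed elsewhere in the installer, but the snippet itself is only a sketch:

```bash
# Warn if the downloaded model file does not have the expected size (illustrative)
model="models/stable-diffusion/sd-v1-4.ckpt"
if [ "$(filesize "$model")" != "4265380512" ]; then
    echo "Warning: $model looks incomplete or corrupted."
fi
```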
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
@echo off
|
@echo off
|
||||||
|
|
||||||
@echo. & echo "Stable Diffusion UI - v2" & echo.
|
@echo. & echo "Easy Diffusion - v2" & echo.
|
||||||
|
|
||||||
set PATH=C:\Windows\System32;%PATH%
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
@@ -28,7 +28,7 @@ if "%update_branch%"=="" (
|
|||||||
|
|
||||||
@>nul findstr /m "sd_ui_git_cloned" scripts\install_status.txt
|
@>nul findstr /m "sd_ui_git_cloned" scripts\install_status.txt
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
@if "%ERRORLEVEL%" EQU "0" (
|
||||||
@echo "Stable Diffusion UI's git repository was already installed. Updating from %update_branch%.."
|
@echo "Easy Diffusion's git repository was already installed. Updating from %update_branch%.."
|
||||||
|
|
||||||
@cd sd-ui-files
|
@cd sd-ui-files
|
||||||
|
|
||||||
@@ -38,13 +38,13 @@ if "%update_branch%"=="" (
|
|||||||
|
|
||||||
@cd ..
|
@cd ..
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading Stable Diffusion UI.." & echo.
|
@echo. & echo "Downloading Easy Diffusion..." & echo.
|
||||||
@echo "Using the %update_branch% channel" & echo.
|
@echo "Using the %update_branch% channel" & echo.
|
||||||
|
|
||||||
@call git clone -b "%update_branch%" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files && (
|
@call git clone -b "%update_branch%" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files && (
|
||||||
@echo sd_ui_git_cloned >> scripts\install_status.txt
|
@echo sd_ui_git_cloned >> scripts\install_status.txt
|
||||||
) || (
|
) || (
|
||||||
@echo "Error downloading Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
@echo "Error downloading Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
pause
|
pause
|
||||||
@exit /b
|
@exit /b
|
||||||
)
|
)
|
||||||
@@ -53,6 +53,7 @@ if "%update_branch%"=="" (
|
|||||||
@xcopy sd-ui-files\ui ui /s /i /Y /q
|
@xcopy sd-ui-files\ui ui /s /i /Y /q
|
||||||
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
|
@copy sd-ui-files\scripts\on_sd_start.bat scripts\ /Y
|
||||||
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
||||||
|
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
|
||||||
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
|
@copy "sd-ui-files\scripts\Start Stable Diffusion UI.cmd" . /Y
|
||||||
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
|
@copy "sd-ui-files\scripts\Developer Console.cmd" . /Y
|
||||||
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
source ./scripts/functions.sh
|
source ./scripts/functions.sh
|
||||||
|
|
||||||
printf "\n\nStable Diffusion UI\n\n"
|
printf "\n\nEasy Diffusion\n\n"
|
||||||
|
|
||||||
if [ -f "scripts/config.sh" ]; then
|
if [ -f "scripts/config.sh" ]; then
|
||||||
source scripts/config.sh
|
source scripts/config.sh
|
||||||
@@ -13,7 +13,7 @@ if [ "$update_branch" == "" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -f "scripts/install_status.txt" ] && [ `grep -c sd_ui_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
if [ -f "scripts/install_status.txt" ] && [ `grep -c sd_ui_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
||||||
echo "Stable Diffusion UI's git repository was already installed. Updating from $update_branch.."
|
echo "Easy Diffusion's git repository was already installed. Updating from $update_branch.."
|
||||||
|
|
||||||
cd sd-ui-files
|
cd sd-ui-files
|
||||||
|
|
||||||
@@ -23,7 +23,7 @@ if [ -f "scripts/install_status.txt" ] && [ `grep -c sd_ui_git_cloned scripts/in
|
|||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading Stable Diffusion UI..\n\n"
|
printf "\n\nDownloading Easy Diffusion..\n\n"
|
||||||
printf "Using the $update_branch channel\n\n"
|
printf "Using the $update_branch channel\n\n"
|
||||||
|
|
||||||
if git clone -b "$update_branch" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files ; then
|
if git clone -b "$update_branch" https://github.com/cmdr2/stable-diffusion-ui.git sd-ui-files ; then
|
||||||
@@ -37,9 +37,9 @@ rm -rf ui
|
|||||||
cp -Rf sd-ui-files/ui .
|
cp -Rf sd-ui-files/ui .
|
||||||
cp sd-ui-files/scripts/on_sd_start.sh scripts/
|
cp sd-ui-files/scripts/on_sd_start.sh scripts/
|
||||||
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
||||||
|
cp sd-ui-files/scripts/check_modules.py scripts/
|
||||||
cp sd-ui-files/scripts/start.sh .
|
cp sd-ui-files/scripts/start.sh .
|
||||||
cp sd-ui-files/scripts/developer_console.sh .
|
cp sd-ui-files/scripts/developer_console.sh .
|
||||||
|
cp sd-ui-files/scripts/functions.sh scripts/
|
||||||
|
|
||||||
./scripts/on_sd_start.sh
|
exec ./scripts/on_sd_start.sh
|
||||||
|
|
||||||
read -p "Press any key to continue"
|
|
||||||
|
|||||||
@@ -1,183 +1,156 @@
|
|||||||
@echo off
|
@echo off
|
||||||
|
|
||||||
|
@REM Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
||||||
|
@REM Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
||||||
|
|
||||||
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
|
@copy sd-ui-files\scripts\on_env_start.bat scripts\ /Y
|
||||||
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
@copy sd-ui-files\scripts\bootstrap.bat scripts\ /Y
|
||||||
|
@copy sd-ui-files\scripts\check_modules.py scripts\ /Y
|
||||||
|
|
||||||
if exist "%cd%\profile" (
|
if exist "%cd%\profile" (
|
||||||
set USERPROFILE=%cd%\profile
|
set USERPROFILE=%cd%\profile
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@rem set the correct installer path (current vs legacy)
|
||||||
|
if exist "%cd%\installer_files\env" (
|
||||||
|
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||||
|
)
|
||||||
|
if exist "%cd%\stable-diffusion\env" (
|
||||||
|
set INSTALL_ENV_DIR=%cd%\stable-diffusion\env
|
||||||
|
)
|
||||||
|
|
||||||
|
@mkdir tmp
|
||||||
|
@set TMP=%cd%\tmp
|
||||||
|
@set TEMP=%cd%\tmp
|
||||||
|
|
||||||
@rem activate the installer env
|
@rem activate the installer env
|
||||||
call conda activate
|
call conda activate
|
||||||
|
@if "%ERRORLEVEL%" NEQ "0" (
|
||||||
@REM Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
@echo. & echo "Error activating conda for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
||||||
@REM Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
pause
|
||||||
|
exit /b
|
||||||
|
)
|
||||||
|
|
||||||
@REM remove the old version of the dev console script, if it's still present
|
@REM remove the old version of the dev console script, if it's still present
|
||||||
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
|
if exist "Open Developer Console.cmd" del "Open Developer Console.cmd"
|
||||||
|
|
||||||
@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
|
@call python -c "import os; import shutil; frm = 'sd-ui-files\\ui\\hotfix\\9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"
|
||||||
|
|
||||||
@>nul findstr /m "sd_git_cloned" scripts\install_status.txt
|
@rem create the stable-diffusion folder, to work with legacy installations
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
if not exist "stable-diffusion" mkdir stable-diffusion
|
||||||
@echo "Stable Diffusion's git repository was already installed. Updating.."
|
cd stable-diffusion
|
||||||
|
|
||||||
@cd stable-diffusion
|
@rem activate the old stable-diffusion env, if it exists
|
||||||
|
if exist "env" (
|
||||||
@call git reset --hard
|
call conda activate .\env
|
||||||
@call git pull
|
|
||||||
@call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
|
|
||||||
|
|
||||||
@call git apply ..\ui\sd_internal\ddim_callback.patch
|
|
||||||
@call git apply ..\ui\sd_internal\env_yaml.patch
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
) else (
|
|
||||||
@echo. & echo "Downloading Stable Diffusion.." & echo.
|
|
||||||
|
|
||||||
@call git clone https://github.com/basujindal/stable-diffusion.git && (
|
|
||||||
@echo sd_git_cloned >> scripts\install_status.txt
|
|
||||||
) || (
|
|
||||||
@echo "Error downloading Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
|
||||||
pause
|
|
||||||
@exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@cd stable-diffusion
|
|
||||||
@call git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
|
|
||||||
|
|
||||||
@call git apply ..\ui\sd_internal\ddim_callback.patch
|
|
||||||
@call git apply ..\ui\sd_internal\env_yaml.patch
|
|
||||||
|
|
||||||
@cd ..
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@cd stable-diffusion
|
@rem disable the legacy src and ldm folder (otherwise this prevents installing gfpgan and realesrgan)
|
||||||
|
if exist src rename src src-old
|
||||||
|
if exist ldm rename ldm ldm-old
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_env_created" ..\scripts\install_status.txt
|
if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
if not exist "..\models\gfpgan" mkdir "..\models\gfpgan"
|
||||||
@echo "Packages necessary for Stable Diffusion were already installed"
|
if not exist "..\models\realesrgan" mkdir "..\models\realesrgan"
|
||||||
|
if not exist "..\models\vae" mkdir "..\models\vae"
|
||||||
|
|
||||||
@call conda activate .\env
|
@rem migrate the legacy models to the correct path (if already downloaded)
|
||||||
|
if exist "sd-v1-4.ckpt" move sd-v1-4.ckpt ..\models\stable-diffusion\
|
||||||
|
if exist "custom-model.ckpt" move custom-model.ckpt ..\models\stable-diffusion\
|
||||||
|
if exist "GFPGANv1.3.pth" move GFPGANv1.3.pth ..\models\gfpgan\
|
||||||
|
if exist "RealESRGAN_x4plus.pth" move RealESRGAN_x4plus.pth ..\models\realesrgan\
|
||||||
|
if exist "RealESRGAN_x4plus_anime_6B.pth" move RealESRGAN_x4plus_anime_6B.pth ..\models\realesrgan\
|
||||||
|
|
||||||
|
if not exist "%INSTALL_ENV_DIR%\DLLs\libssl-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libssl-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
|
||||||
|
if not exist "%INSTALL_ENV_DIR%\DLLs\libcrypto-1_1-x64.dll" copy "%INSTALL_ENV_DIR%\Library\bin\libcrypto-1_1-x64.dll" "%INSTALL_ENV_DIR%\DLLs\"
|
||||||
|
|
||||||
|
@rem install torch and torchvision
|
||||||
|
call python ..\scripts\check_modules.py torch torchvision
|
||||||
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "torch and torchvision have already been installed."
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion.." & echo. & echo "***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** .." & echo.
|
echo "Installing torch and torchvision.."
|
||||||
|
|
||||||
@rmdir /s /q .\env
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
@REM prevent conda from using packages from the user's home directory, to avoid conflicts
|
call python -m pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 || (
|
||||||
@set PYTHONNOUSERSITE=1
|
echo "Error installing torch. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
|
||||||
set TMP=%cd%\tmp
|
|
||||||
set TEMP=%cd%\tmp
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call conda env create --prefix env -f environment.yaml || (
|
|
||||||
@echo. & echo "Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
|
||||||
@call conda activate .\env
|
|
||||||
|
|
||||||
@call conda install -c conda-forge -y --prefix env antlr4-python3-runtime=4.8 || (
|
|
||||||
@echo. & echo "Error installing antlr4-python3-runtime for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"') do if "%%a" NEQ "42" (
|
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo conda_sd_env_created >> ..\scripts\install_status.txt
|
|
||||||
)
|
)
|
||||||
|
|
||||||
set PATH=C:\Windows\System32;%PATH%
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_gfpgan_deps_installed" ..\scripts\install_status.txt
|
@rem install/upgrade sdkit
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
call python ..\scripts\check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan
|
||||||
@echo "Packages necessary for GFPGAN (Face Correction) were already installed"
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "sdkit is already installed."
|
||||||
|
|
||||||
|
@rem skip sdkit upgrade if in developer-mode
|
||||||
|
if not exist "..\src\sdkit" (
|
||||||
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
|
call python -m pip install --upgrade sdkit==1.0.43 -q || (
|
||||||
|
echo "Error updating sdkit"
|
||||||
|
)
|
||||||
|
)
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for GFPGAN (Face Correction).." & echo.
|
echo "Installing sdkit: https://pypi.org/project/sdkit/"
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
@REM prevent from using packages from the user's home directory, to avoid conflicts
|
||||||
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
call python -m pip install sdkit==1.0.43 || (
|
||||||
set TMP=%cd%\tmp
|
echo "Error installing sdkit. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
set TEMP=%cd%\tmp
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN || (
|
|
||||||
@echo. & echo "Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
|
||||||
@call pip install basicsr==1.4.2 || (
|
|
||||||
@echo. & echo "Error installing the basicsr package necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "from gfpgan import GFPGANer; print(42)"') do if "%%a" NEQ "42" (
|
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo conda_sd_gfpgan_deps_installed >> ..\scripts\install_status.txt
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_esrgan_deps_installed" ..\scripts\install_status.txt
|
call python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
|
||||||
@echo "Packages necessary for ESRGAN (Resolution Upscaling) were already installed"
|
@rem upgrade stable-diffusion-sdkit
|
||||||
|
call python -m pip install --upgrade stable-diffusion-sdkit==2.1.3 -q || (
|
||||||
|
echo "Error updating stable-diffusion-sdkit"
|
||||||
|
)
|
||||||
|
call python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
|
||||||
|
|
||||||
|
@rem install rich
|
||||||
|
call python ..\scripts\check_modules.py rich
|
||||||
|
if "%ERRORLEVEL%" EQU "0" (
|
||||||
|
echo "rich has already been installed."
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for ESRGAN (Resolution Upscaling).." & echo.
|
echo "Installing rich.."
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
call python -m pip install rich || (
|
||||||
set TMP=%cd%\tmp
|
echo "Error installing rich. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
set TEMP=%cd%\tmp
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan || (
|
|
||||||
@echo. & echo "Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
|
|
||||||
for /f "tokens=*" %%a in ('python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"') do if "%%a" NEQ "42" (
|
|
||||||
@echo. & echo "Dependency test failed! Error installing the packages necessary for ESRGAN (Resolution Upscaling). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
|
||||||
pause
|
|
||||||
exit /b
|
|
||||||
)
|
|
||||||
|
|
||||||
@echo conda_sd_esrgan_deps_installed >> ..\scripts\install_status.txt
|
|
||||||
)
|
)
|
||||||
|
|
||||||
@>nul findstr /m "conda_sd_ui_deps_installed" ..\scripts\install_status.txt
|
set PATH=C:\Windows\System32;%PATH%
|
||||||
|
|
||||||
|
call python ..\scripts\check_modules.py uvicorn fastapi
|
||||||
@if "%ERRORLEVEL%" EQU "0" (
|
@if "%ERRORLEVEL%" EQU "0" (
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
echo "Packages necessary for Easy Diffusion were already installed"
|
||||||
) else (
|
) else (
|
||||||
@echo. & echo "Downloading packages necessary for Stable Diffusion UI.." & echo.
|
@echo. & echo "Downloading packages necessary for Easy Diffusion..." & echo.
|
||||||
|
|
||||||
@set PYTHONNOUSERSITE=1
|
set PYTHONNOUSERSITE=1
|
||||||
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
|
|
||||||
set USERPROFILE=%cd%\profile
|
@call conda install -c conda-forge -y uvicorn fastapi || (
|
||||||
set TMP=%cd%\tmp
|
echo "Error installing the packages necessary for Easy Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
||||||
set TEMP=%cd%\tmp
|
|
||||||
|
|
||||||
set PYTHONPATH=%cd%;%cd%\env\lib\site-packages
|
|
||||||
|
|
||||||
@call conda install -c conda-forge -y --prefix env uvicorn fastapi || (
|
|
||||||
echo "Error installing the packages necessary for Stable Diffusion UI. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!"
|
|
||||||
pause
|
pause
|
||||||
exit /b
|
exit /b
|
||||||
)
|
)
|
||||||
@@ -196,37 +169,30 @@ call WHERE uvicorn > .tmp
|
|||||||
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
@echo conda_sd_ui_deps_installed >> ..\scripts\install_status.txt
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
|
||||||
|
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
|
||||||
if not exist "..\models\stable-diffusion" mkdir "..\models\stable-diffusion"
|
|
||||||
if not exist "..\models\vae" mkdir "..\models\vae"
|
|
||||||
echo. > "..\models\stable-diffusion\Put your custom ckpt files here.txt"
|
|
||||||
echo. > "..\models\vae\Put your VAE files here.txt"
|
|
||||||
|
|
||||||
@if exist "sd-v1-4.ckpt" (
|
|
||||||
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" EQU "4265380512" (
|
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
|
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 4 GB Model."
|
||||||
) else (
|
) else (
|
||||||
for %%J in ("sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
|
for %%J in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zJ" EQU "7703807346" (
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 7 GB Model."
|
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the HuggingFace 7 GB Model."
|
||||||
) else (
|
) else (
|
||||||
for %%K in ("sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
|
for %%K in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zK" EQU "7703810927" (
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the Waifu Model."
|
echo "Data files (weights) necessary for Stable Diffusion were already downloaded. Using the Waifu Model."
|
||||||
) else (
|
) else (
|
||||||
echo. & echo "The model file present at %cd%\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
|
echo. & echo "The model file present at models\stable-diffusion\sd-v1-4.ckpt is invalid. It is only %%~zK bytes in size. Re-downloading.." & echo.
|
||||||
del "sd-v1-4.ckpt"
|
del "..\models\stable-diffusion\sd-v1-4.ckpt"
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@if not exist "sd-v1-4.ckpt" (
|
@if not exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
|
||||||
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
|
@echo. & echo "Downloading data files (weights) for Stable Diffusion.." & echo.
|
||||||
|
|
||||||
@call curl -L -k https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
|
@call curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ..\models\stable-diffusion\sd-v1-4.ckpt
|
||||||
|
|
||||||
@if exist "sd-v1-4.ckpt" (
|
@if exist "..\models\stable-diffusion\sd-v1-4.ckpt" (
|
||||||
for %%I in ("sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
|
for %%I in ("..\models\stable-diffusion\sd-v1-4.ckpt") do if "%%~zI" NEQ "4265380512" (
|
||||||
echo. & echo "Error: The downloaded model file was invalid! Bytes downloaded: %%~zI" & echo.
|
echo. & echo "Error: The downloaded model file was invalid! Bytes downloaded: %%~zI" & echo.
|
||||||
echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
echo. & echo "Error downloading the data files (weights) for Stable Diffusion. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
||||||
pause
|
pause
|
||||||
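If the automatic download above fails, the same file can be fetched manually into the new models folder. An illustrative fallback, run from the root of the `stable-diffusion-ui` folder, using the same URL and target path as the script:

```bash
# Manual fallback for the Stable Diffusion 1.4 weights
mkdir -p models/stable-diffusion
curl -L https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt \
     -o models/stable-diffusion/sd-v1-4.ckpt
```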
@@ -241,22 +207,22 @@ echo. > "..\models\vae\Put your VAE files here.txt"
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
@if exist "GFPGANv1.3.pth" (
|
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
|
||||||
for %%I in ("GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
|
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" EQU "348632874" (
|
||||||
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
|
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
|
||||||
) else (
|
) else (
|
||||||
echo. & echo "The GFPGAN model file present at %cd%\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
echo. & echo "The GFPGAN model file present at models\gfpgan\GFPGANv1.3.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
||||||
del "GFPGANv1.3.pth"
|
del "..\models\gfpgan\GFPGANv1.3.pth"
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@if not exist "GFPGANv1.3.pth" (
|
@if not exist "..\models\gfpgan\GFPGANv1.3.pth" (
|
||||||
@echo. & echo "Downloading data files (weights) for GFPGAN (Face Correction).." & echo.
|
@echo. & echo "Downloading data files (weights) for GFPGAN (Face Correction).." & echo.
|
||||||
|
|
||||||
@call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > GFPGANv1.3.pth
|
@call curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ..\models\gfpgan\GFPGANv1.3.pth
|
||||||
|
|
||||||
@if exist "GFPGANv1.3.pth" (
|
@if exist "..\models\gfpgan\GFPGANv1.3.pth" (
|
||||||
for %%I in ("GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
|
for %%I in ("..\models\gfpgan\GFPGANv1.3.pth") do if "%%~zI" NEQ "348632874" (
|
||||||
echo. & echo "Error: The downloaded GFPGAN model file was invalid! Bytes downloaded: %%~zI" & echo.
|
echo. & echo "Error: The downloaded GFPGAN model file was invalid! Bytes downloaded: %%~zI" & echo.
|
||||||
echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
echo. & echo "Error downloading the data files (weights) for GFPGAN (Face Correction). Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
||||||
pause
|
pause
|
||||||
@@ -271,22 +237,22 @@ echo. > "..\models\vae\Put your VAE files here.txt"
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
@if exist "RealESRGAN_x4plus.pth" (
|
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
|
||||||
for %%I in ("RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
|
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" EQU "67040989" (
|
||||||
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
|
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
|
||||||
) else (
|
) else (
|
||||||
echo. & echo "The GFPGAN model file present at %cd%\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
||||||
del "RealESRGAN_x4plus.pth"
|
del "..\models\realesrgan\RealESRGAN_x4plus.pth"
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@if not exist "RealESRGAN_x4plus.pth" (
|
@if not exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
|
||||||
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.." & echo.
|
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.." & echo.
|
||||||
|
|
||||||
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > RealESRGAN_x4plus.pth
|
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ..\models\realesrgan\RealESRGAN_x4plus.pth
|
||||||
|
|
||||||
@if exist "RealESRGAN_x4plus.pth" (
|
@if exist "..\models\realesrgan\RealESRGAN_x4plus.pth" (
|
||||||
for %%I in ("RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
|
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus.pth") do if "%%~zI" NEQ "67040989" (
|
||||||
echo. & echo "Error: The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: %%~zI" & echo.
|
echo. & echo "Error: The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: %%~zI" & echo.
|
||||||
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
||||||
pause
|
pause
|
||||||
@@ -301,22 +267,22 @@ echo. > "..\models\vae\Put your VAE files here.txt"
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
@if exist "RealESRGAN_x4plus_anime_6B.pth" (
|
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
|
||||||
for %%I in ("RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
|
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" EQU "17938799" (
|
||||||
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
|
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
|
||||||
) else (
|
) else (
|
||||||
echo. & echo "The GFPGAN model file present at %cd%\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
echo. & echo "The RealESRGAN model file present at models\realesrgan\RealESRGAN_x4plus_anime_6B.pth is invalid. It is only %%~zI bytes in size. Re-downloading.." & echo.
|
||||||
del "RealESRGAN_x4plus_anime_6B.pth"
|
del "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth"
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@if not exist "RealESRGAN_x4plus_anime_6B.pth" (
|
@if not exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
|
||||||
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.." & echo.
|
@echo. & echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.." & echo.
|
||||||
|
|
||||||
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > RealESRGAN_x4plus_anime_6B.pth
|
@call curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth
|
||||||
|
|
||||||
@if exist "RealESRGAN_x4plus_anime_6B.pth" (
|
@if exist "..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth" (
|
||||||
for %%I in ("RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
|
for %%I in ("..\models\realesrgan\RealESRGAN_x4plus_anime_6B.pth") do if "%%~zI" NEQ "17938799" (
|
||||||
echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
|
echo. & echo "Error: The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: %%~zI" & echo.
|
||||||
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
echo. & echo "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime. Sorry about that, please try to:" & echo " 1. Run this installer again." & echo " 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" & echo " 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB" & echo " 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues" & echo "Thanks!" & echo.
|
||||||
pause
|
pause
|
||||||
@@ -359,22 +325,18 @@ echo. > "..\models\vae\Put your VAE files here.txt"
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
|
@>nul findstr /m "sd_install_complete" ..\scripts\install_status.txt
|
||||||
@if "%ERRORLEVEL%" NEQ "0" (
|
@if "%ERRORLEVEL%" NEQ "0" (
|
||||||
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
@echo sd_weights_downloaded >> ..\scripts\install_status.txt
|
||||||
@echo sd_install_complete >> ..\scripts\install_status.txt
|
@echo sd_install_complete >> ..\scripts\install_status.txt
|
||||||
)
|
)
|
||||||
|
|
||||||
@echo. & echo "Stable Diffusion is ready!" & echo.
|
@echo. & echo "Easy Diffusion installation complete! Starting the server!" & echo.
|
||||||
|
|
||||||
@set SD_DIR=%cd%
|
@set SD_DIR=%cd%
|
||||||
|
|
||||||
@cd env\lib\site-packages
|
set PYTHONPATH=%INSTALL_ENV_DIR%\lib\site-packages
|
||||||
@set PYTHONPATH=%SD_DIR%;%cd%
|
echo PYTHONPATH=%PYTHONPATH%
|
||||||
@cd ..\..\..
|
|
||||||
@echo PYTHONPATH=%PYTHONPATH%
|
|
||||||
|
|
||||||
call where python
|
call where python
|
||||||
call python --version
|
call python --version
|
||||||
@@ -383,9 +345,12 @@ call python --version
|
|||||||
@set SD_UI_PATH=%cd%\ui
|
@set SD_UI_PATH=%cd%\ui
|
||||||
@cd stable-diffusion
|
@cd stable-diffusion
|
||||||
|
|
||||||
|
@rem set any overrides
|
||||||
|
set HF_HUB_DISABLE_SYMLINKS_WARNING=true
|
||||||
|
|
||||||
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
|
@if NOT DEFINED SD_UI_BIND_PORT set SD_UI_BIND_PORT=9000
|
||||||
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
|
@if NOT DEFINED SD_UI_BIND_IP set SD_UI_BIND_IP=0.0.0.0
|
||||||
@uvicorn server:app --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP%
|
@uvicorn main:server_api --app-dir "%SD_UI_PATH%" --port %SD_UI_BIND_PORT% --host %SD_UI_BIND_IP% --log-level error
|
||||||
|
|
||||||
|
|
||||||
@pause
|
@pause
|
||||||
|
|||||||
@@ -1,9 +1,11 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
source ./scripts/functions.sh
|
cp sd-ui-files/scripts/functions.sh scripts/
|
||||||
|
|
||||||
cp sd-ui-files/scripts/on_env_start.sh scripts/
|
cp sd-ui-files/scripts/on_env_start.sh scripts/
|
||||||
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
cp sd-ui-files/scripts/bootstrap.sh scripts/
|
||||||
|
cp sd-ui-files/scripts/check_modules.py scripts/
|
||||||
|
|
||||||
|
source ./scripts/functions.sh
|
||||||
|
|
||||||
# activate the installer env
|
# activate the installer env
|
||||||
CONDA_BASEPATH=$(conda info --base)
|
CONDA_BASEPATH=$(conda info --base)
|
||||||
@@ -21,129 +23,110 @@ python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d
|
|||||||
# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
|
||||||
# Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
# Note to self: Please rewrite this in Python. For the sake of your own sanity.
|
||||||
|
|
||||||
if [ -e "scripts/install_status.txt" ] && [ `grep -c sd_git_cloned scripts/install_status.txt` -gt "0" ]; then
|
# set the correct installer path (current vs legacy)
|
||||||
echo "Stable Diffusion's git repository was already installed. Updating.."
|
if [ -e "installer_files/env" ]; then
|
||||||
|
export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||||
cd stable-diffusion
|
fi
|
||||||
|
if [ -e "stable-diffusion/env" ]; then
|
||||||
git reset --hard
|
export INSTALL_ENV_DIR="$(pwd)/stable-diffusion/env"
|
||||||
git pull
|
|
||||||
git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
|
|
||||||
|
|
||||||
git apply ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
|
|
||||||
git apply ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
else
|
|
||||||
printf "\n\nDownloading Stable Diffusion..\n\n"
|
|
||||||
|
|
||||||
if git clone https://github.com/basujindal/stable-diffusion.git ; then
|
|
||||||
echo sd_git_cloned >> scripts/install_status.txt
|
|
||||||
else
|
|
||||||
fail "git clone of basujindal/stable-diffusion.git failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd stable-diffusion
|
|
||||||
git -c advice.detachedHead=false checkout f6cfebffa752ee11a7b07497b8529d5971de916c
|
|
||||||
|
|
||||||
git apply ../ui/sd_internal/ddim_callback.patch || fail "ddim patch failed"
|
|
||||||
git apply ../ui/sd_internal/env_yaml.patch || fail "yaml patch failed"
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# create the stable-diffusion folder, to work with legacy installations
|
||||||
|
if [ ! -e "stable-diffusion" ]; then mkdir stable-diffusion; fi
|
||||||
cd stable-diffusion
|
cd stable-diffusion
|
||||||
|
|
||||||
if [ `grep -c conda_sd_env_created ../scripts/install_status.txt` -gt "0" ]; then
|
# activate the old stable-diffusion env, if it exists
|
||||||
echo "Packages necessary for Stable Diffusion were already installed"
|
if [ -e "env" ]; then
|
||||||
|
|
||||||
conda activate ./env || fail "conda activate failed"
|
conda activate ./env || fail "conda activate failed"
|
||||||
else
|
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion..\n"
|
|
||||||
printf "\n\n***** This will take some time (depending on the speed of the Internet connection) and may appear to be stuck, but please be patient ***** ..\n\n"
|
|
||||||
|
|
||||||
# prevent conda from using packages from the user's home directory, to avoid conflicts
|
|
||||||
export PYTHONNOUSERSITE=1
|
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
|
||||||
|
|
||||||
if conda env create --prefix env --force -f environment.yaml ; then
|
|
||||||
echo "Installed. Testing.."
|
|
||||||
else
|
|
||||||
fail "'conda env create' failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
conda activate ./env || fail "conda activate failed"
|
|
||||||
|
|
||||||
if conda install -c conda-forge --prefix ./env -y antlr4-python3-runtime=4.8 ; then
|
|
||||||
echo "Installed. Testing.."
|
|
||||||
else
|
|
||||||
fail "Error installing antlr4-python3-runtime"
|
|
||||||
fi
|
|
||||||
|
|
||||||
out_test=`python -c "import torch; import ldm; import transformers; import numpy; import antlr4; print(42)"`
|
|
||||||
if [ "$out_test" != "42" ]; then
|
|
||||||
fail "Dependency test failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo conda_sd_env_created >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ `grep -c conda_sd_gfpgan_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
# disable the legacy src and ldm folder (otherwise this prevents installing gfpgan and realesrgan)
|
||||||
echo "Packages necessary for GFPGAN (Face Correction) were already installed"
|
if [ -e "src" ]; then mv src src-old; fi
|
||||||
|
if [ -e "ldm" ]; then mv ldm ldm-old; fi
|
||||||
|
|
||||||
|
mkdir -p "../models/stable-diffusion"
|
||||||
|
mkdir -p "../models/gfpgan"
|
||||||
|
mkdir -p "../models/realesrgan"
|
||||||
|
mkdir -p "../models/vae"
|
||||||
|
|
||||||
|
# migrate the legacy models to the correct path (if already downloaded)
|
||||||
|
if [ -e "sd-v1-4.ckpt" ]; then mv sd-v1-4.ckpt ../models/stable-diffusion/; fi
|
||||||
|
if [ -e "custom-model.ckpt" ]; then mv custom-model.ckpt ../models/stable-diffusion/; fi
|
||||||
|
if [ -e "GFPGANv1.3.pth" ]; then mv GFPGANv1.3.pth ../models/gfpgan/; fi
|
||||||
|
if [ -e "RealESRGAN_x4plus.pth" ]; then mv RealESRGAN_x4plus.pth ../models/realesrgan/; fi
|
||||||
|
if [ -e "RealESRGAN_x4plus_anime_6B.pth" ]; then mv RealESRGAN_x4plus_anime_6B.pth ../models/realesrgan/; fi
|
||||||
|
|
||||||
|
# install torch and torchvision
|
||||||
|
if python ../scripts/check_modules.py torch torchvision; then
|
||||||
|
echo "torch and torchvision have already been installed."
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for GFPGAN (Face Correction)..\n"
|
echo "Installing torch and torchvision.."
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
if pip install -e git+https://github.com/TencentARC/GFPGAN#egg=GFPGAN ; then
|
if python -m pip install --upgrade torch torchvision --extra-index-url https://download.pytorch.org/whl/cu116 ; then
|
||||||
echo "Installed. Testing.."
|
echo "Installed."
|
||||||
else
|
else
|
||||||
fail "Error installing the packages necessary for GFPGAN (Face Correction)."
|
fail "torch install failed"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
out_test=`python -c "from gfpgan import GFPGANer; print(42)"`
|
|
||||||
if [ "$out_test" != "42" ]; then
|
|
||||||
echo "EE The dependency check has failed. This usually means that some system libraries are missing."
|
|
||||||
echo "EE On Debian/Ubuntu systems, this are often these packages: libsm6 libxext6 libxrender-dev"
|
|
||||||
echo "EE Other Linux distributions might have different package names for these libraries."
|
|
||||||
fail "GFPGAN dependency test failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo conda_sd_gfpgan_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ `grep -c conda_sd_esrgan_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
# install/upgrade sdkit
|
||||||
echo "Packages necessary for ESRGAN (Resolution Upscaling) were already installed"
|
if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
|
||||||
|
echo "sdkit is already installed."
|
||||||
|
|
||||||
|
# skip sdkit upgrade if in developer-mode
|
||||||
|
if [ ! -e "../src/sdkit" ]; then
|
||||||
|
export PYTHONNOUSERSITE=1
|
||||||
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
|
python -m pip install --upgrade sdkit==1.0.43 -q
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for ESRGAN (Resolution Upscaling)..\n"
|
echo "Installing sdkit: https://pypi.org/project/sdkit/"
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
if pip install -e git+https://github.com/xinntao/Real-ESRGAN#egg=realesrgan ; then
|
if python -m pip install sdkit==1.0.43 ; then
|
||||||
echo "Installed. Testing.."
|
echo "Installed."
|
||||||
else
|
else
|
||||||
fail "Error installing the packages necessary for ESRGAN"
|
fail "sdkit install failed"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
out_test=`python -c "from basicsr.archs.rrdbnet_arch import RRDBNet; from realesrgan import RealESRGANer; print(42)"`
|
|
||||||
if [ "$out_test" != "42" ]; then
|
|
||||||
fail "ESRGAN dependency test failed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo conda_sd_esrgan_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ `grep -c conda_sd_ui_deps_installed ../scripts/install_status.txt` -gt "0" ]; then
|
python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"
|
||||||
echo "Packages necessary for Stable Diffusion UI were already installed"
|
|
||||||
|
# upgrade stable-diffusion-sdkit
|
||||||
|
python -m pip install --upgrade stable-diffusion-sdkit==2.1.3 -q
|
||||||
|
python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"
|
||||||
|
|
||||||
|
# install rich
|
||||||
|
if python ../scripts/check_modules.py rich; then
|
||||||
|
echo "rich has already been installed."
|
||||||
else
|
else
|
||||||
printf "\n\nDownloading packages necessary for Stable Diffusion UI..\n\n"
|
echo "Installing rich.."
|
||||||
|
|
||||||
export PYTHONNOUSERSITE=1
|
export PYTHONNOUSERSITE=1
|
||||||
export PYTHONPATH="$(pwd):$(pwd)/env/lib/site-packages"
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
if conda install -c conda-forge --prefix ./env -y uvicorn fastapi ; then
|
if python -m pip install rich ; then
|
||||||
|
echo "Installed."
|
||||||
|
else
|
||||||
|
fail "Install failed for rich"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if python ../scripts/check_modules.py uvicorn fastapi ; then
|
||||||
|
echo "Packages necessary for Easy Diffusion were already installed"
|
||||||
|
else
|
||||||
|
printf "\n\nDownloading packages necessary for Easy Diffusion..\n\n"
|
||||||
|
|
||||||
|
export PYTHONNOUSERSITE=1
|
||||||
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
|
|
||||||
|
if conda install -c conda-forge -y uvicorn fastapi ; then
|
||||||
echo "Installed. Testing.."
|
echo "Installed. Testing.."
|
||||||
else
|
else
|
||||||
fail "'conda install uvicorn' failed"
|
fail "'conda install uvicorn' failed"
|
||||||
@@ -152,35 +135,26 @@ else
|
|||||||
if ! command -v uvicorn &> /dev/null; then
|
if ! command -v uvicorn &> /dev/null; then
|
||||||
fail "UI packages not found!"
|
fail "UI packages not found!"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo conda_sd_ui_deps_installed >> ../scripts/install_status.txt
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
|
||||||
|
model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
|
||||||
mkdir -p "../models/stable-diffusion"
|
|
||||||
mkdir -p "../models/vae"
|
|
||||||
echo "" > "../models/stable-diffusion/Put your custom ckpt files here.txt"
|
|
||||||
echo "" > "../models/vae/Put your VAE files here.txt"
|
|
||||||
|
|
||||||
if [ -f "sd-v1-4.ckpt" ]; then
|
|
||||||
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
|
|
||||||
|
|
||||||
if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
|
if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
|
||||||
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
|
echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
|
||||||
else
|
else
|
||||||
printf "\n\nThe model file present at $PWD/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
|
printf "\n\nThe model file present at models/stable-diffusion/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
|
||||||
rm sd-v1-4.ckpt
|
rm ../models/stable-diffusion/sd-v1-4.ckpt
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "sd-v1-4.ckpt" ]; then
|
if [ ! -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
|
||||||
echo "Downloading data files (weights) for Stable Diffusion.."
|
echo "Downloading data files (weights) for Stable Diffusion.."
|
||||||
|
|
||||||
curl -L -k https://me.cmdr2.org/stable-diffusion-ui/sd-v1-4.ckpt > sd-v1-4.ckpt
|
curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ../models/stable-diffusion/sd-v1-4.ckpt
|
||||||
|
|
||||||
if [ -f "sd-v1-4.ckpt" ]; then
|
if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
|
||||||
model_size=`find "sd-v1-4.ckpt" -printf "%s"`
|
model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
|
||||||
if [ ! "$model_size" == "4265380512" ]; then
|
if [ ! "$model_size" == "4265380512" ]; then
|
||||||
fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
|
fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
|
||||||
fi
|
fi
|
||||||
@@ -190,24 +164,24 @@ if [ ! -f "sd-v1-4.ckpt" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
if [ -f "GFPGANv1.3.pth" ]; then
|
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
|
||||||
model_size=`find "GFPGANv1.3.pth" -printf "%s"`
|
model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
|
||||||
|
|
||||||
if [ "$model_size" -eq "348632874" ]; then
|
if [ "$model_size" -eq "348632874" ]; then
|
||||||
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
|
echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
|
||||||
else
|
else
|
||||||
printf "\n\nThe model file present at $PWD/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
printf "\n\nThe model file present at models/gfpgan/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
||||||
rm GFPGANv1.3.pth
|
rm ../models/gfpgan/GFPGANv1.3.pth
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "GFPGANv1.3.pth" ]; then
|
if [ ! -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
|
||||||
echo "Downloading data files (weights) for GFPGAN (Face Correction).."
|
echo "Downloading data files (weights) for GFPGAN (Face Correction).."
|
||||||
|
|
||||||
curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > GFPGANv1.3.pth
|
curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ../models/gfpgan/GFPGANv1.3.pth
|
||||||
|
|
||||||
if [ -f "GFPGANv1.3.pth" ]; then
|
if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
|
||||||
model_size=`find "GFPGANv1.3.pth" -printf "%s"`
|
model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
|
||||||
if [ ! "$model_size" -eq "348632874" ]; then
|
if [ ! "$model_size" -eq "348632874" ]; then
|
||||||
fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
|
fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
|
||||||
fi
|
fi
|
||||||
@@ -217,24 +191,24 @@ if [ ! -f "GFPGANv1.3.pth" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
if [ -f "RealESRGAN_x4plus.pth" ]; then
|
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
|
||||||
model_size=`find "RealESRGAN_x4plus.pth" -printf "%s"`
|
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
|
||||||
|
|
||||||
if [ "$model_size" -eq "67040989" ]; then
|
if [ "$model_size" -eq "67040989" ]; then
|
||||||
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
|
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
|
||||||
else
|
else
|
||||||
printf "\n\nThe model file present at $PWD/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
||||||
rm RealESRGAN_x4plus.pth
|
rm ../models/realesrgan/RealESRGAN_x4plus.pth
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "RealESRGAN_x4plus.pth" ]; then
|
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
|
||||||
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."
|
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."
|
||||||
|
|
||||||
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > RealESRGAN_x4plus.pth
|
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ../models/realesrgan/RealESRGAN_x4plus.pth
|
||||||
|
|
||||||
if [ -f "RealESRGAN_x4plus.pth" ]; then
|
if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
|
||||||
model_size=`find "RealESRGAN_x4plus.pth" -printf "%s"`
|
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
|
||||||
if [ ! "$model_size" -eq "67040989" ]; then
|
if [ ! "$model_size" -eq "67040989" ]; then
|
||||||
fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
|
fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
|
||||||
fi
|
fi
|
||||||
@@ -244,24 +218,24 @@ if [ ! -f "RealESRGAN_x4plus.pth" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
if [ -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
|
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
|
||||||
model_size=`find "RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
|
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
|
||||||
|
|
||||||
if [ "$model_size" -eq "17938799" ]; then
|
if [ "$model_size" -eq "17938799" ]; then
|
||||||
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
|
echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
|
||||||
else
|
else
|
||||||
printf "\n\nThe model file present at $PWD/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
|
||||||
rm RealESRGAN_x4plus_anime_6B.pth
|
rm ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
|
if [ ! -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
|
||||||
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."
|
echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."
|
||||||
|
|
||||||
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > RealESRGAN_x4plus_anime_6B.pth
|
curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
|
||||||
|
|
||||||
if [ -f "RealESRGAN_x4plus_anime_6B.pth" ]; then
|
if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
|
||||||
model_size=`find "RealESRGAN_x4plus_anime_6B.pth" -printf "%s"`
|
model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
|
||||||
if [ ! "$model_size" -eq "17938799" ]; then
|
if [ ! "$model_size" -eq "17938799" ]; then
|
||||||
fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
|
fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
|
||||||
fi
|
fi
|
||||||
@@ -272,7 +246,7 @@ fi
|
|||||||
|
|
||||||
|
|
||||||
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
||||||
model_size=`find ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt -printf "%s"`
|
model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
|
||||||
|
|
||||||
if [ "$model_size" -eq "334695179" ]; then
|
if [ "$model_size" -eq "334695179" ]; then
|
||||||
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
|
echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
|
||||||
@@ -288,7 +262,7 @@ if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
|||||||
curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
|
curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
|
||||||
|
|
||||||
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
||||||
model_size=`find ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt -printf "%s"`
|
model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
|
||||||
if [ ! "$model_size" -eq "334695179" ]; then
|
if [ ! "$model_size" -eq "334695179" ]; then
|
||||||
printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
|
printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
|
||||||
printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
|
printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
|
||||||
@@ -302,16 +276,16 @@ if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
|
if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
|
||||||
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
echo sd_weights_downloaded >> ../scripts/install_status.txt
|
||||||
echo sd_install_complete >> ../scripts/install_status.txt
|
echo sd_install_complete >> ../scripts/install_status.txt
|
||||||
fi
|
fi
|
||||||
|
|
||||||
printf "\n\nStable Diffusion is ready!\n\n"
|
printf "\n\nEasy Diffusion installation complete, starting the server!\n\n"
|
||||||
|
|
||||||
SD_PATH=`pwd`
|
SD_PATH=`pwd`
|
||||||
export PYTHONPATH="$SD_PATH:$SD_PATH/env/lib/python3.8/site-packages"
|
|
||||||
|
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
|
||||||
echo "PYTHONPATH=$PYTHONPATH"
|
echo "PYTHONPATH=$PYTHONPATH"
|
||||||
|
|
||||||
which python
|
which python
|
||||||
@@ -321,6 +295,6 @@ cd ..
|
|||||||
export SD_UI_PATH=`pwd`/ui
|
export SD_UI_PATH=`pwd`/ui
|
||||||
cd stable-diffusion
|
cd stable-diffusion
|
||||||
|
|
||||||
uvicorn server:app --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0}
|
uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error
|
||||||
|
|
||||||
read -p "Press any key to continue"
|
read -p "Press any key to continue"
|
||||||
|
|||||||
@@ -1,6 +0,0 @@
|
|||||||
@call conda --version
|
|
||||||
@call git --version
|
|
||||||
|
|
||||||
cd %CONDA_PREFIX%\..\scripts
|
|
||||||
|
|
||||||
on_env_start.bat
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
conda-unpack
|
|
||||||
|
|
||||||
source $CONDA_PREFIX/etc/profile.d/conda.sh
|
|
||||||
|
|
||||||
conda --version
|
|
||||||
git --version
|
|
||||||
|
|
||||||
cd $CONDA_PREFIX/../scripts
|
|
||||||
|
|
||||||
./on_env_start.sh
|
|
||||||
@@ -2,6 +2,24 @@
|
|||||||
|
|
||||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||||
|
|
||||||
|
if [ -f "on_sd_start.bat" ]; then
|
||||||
|
echo ================================================================================
|
||||||
|
echo
|
||||||
|
echo !!!! WARNING !!!!
|
||||||
|
echo
|
||||||
|
echo It looks like you\'re trying to run the installation script from a source code
|
||||||
|
echo download. This will not work.
|
||||||
|
echo
|
||||||
|
echo Recommended: Please close this window and download the installer from
|
||||||
|
echo https://stable-diffusion-ui.github.io/docs/installation/
|
||||||
|
echo
|
||||||
|
echo ================================================================================
|
||||||
|
echo
|
||||||
|
read
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
# set legacy installer's PATH, if it exists
|
# set legacy installer's PATH, if it exists
|
||||||
if [ -e "installer" ]; then export PATH="$(pwd)/installer/bin:$PATH"; fi
|
if [ -e "installer" ]; then export PATH="$(pwd)/installer/bin:$PATH"; fi
|
||||||
|
|
||||||
@@ -19,4 +37,5 @@ which conda
|
|||||||
conda --version || exit 1
|
conda --version || exit 1
|
||||||
|
|
||||||
# Download the rest of the installer and UI
|
# Download the rest of the installer and UI
|
||||||
|
chmod +x scripts/*.sh
|
||||||
scripts/on_env_start.sh
|
scripts/on_env_start.sh
|
||||||
|
|||||||
0
ui/easydiffusion/__init__.py
Normal file
236
ui/easydiffusion/app.py
Normal file
@@ -0,0 +1,236 @@
|
|||||||
|
import os
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
import json
|
||||||
|
import traceback
|
||||||
|
import logging
|
||||||
|
import shlex
|
||||||
|
from rich.logging import RichHandler
|
||||||
|
|
||||||
|
from sdkit.utils import log as sdkit_log # hack, so we can overwrite the log config
|
||||||
|
|
||||||
|
from easydiffusion import task_manager
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
# Remove all handlers associated with the root logger object.
|
||||||
|
for handler in logging.root.handlers[:]:
|
||||||
|
logging.root.removeHandler(handler)
|
||||||
|
|
||||||
|
LOG_FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s %(threadName)s %(message)s"
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format=LOG_FORMAT,
|
||||||
|
datefmt="%X",
|
||||||
|
handlers=[RichHandler(markup=True, rich_tracebacks=False, show_time=False, show_level=False)],
|
||||||
|
)
|
||||||
|
|
||||||
|
SD_DIR = os.getcwd()
|
||||||
|
|
||||||
|
SD_UI_DIR = os.getenv("SD_UI_PATH", None)
|
||||||
|
|
||||||
|
CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "..", "scripts"))
|
||||||
|
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "models"))
|
||||||
|
|
||||||
|
USER_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "plugins"))
|
||||||
|
CORE_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "plugins"))
|
||||||
|
|
||||||
|
USER_UI_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "ui")
|
||||||
|
CORE_UI_PLUGINS_DIR = os.path.join(CORE_PLUGINS_DIR, "ui")
|
||||||
|
USER_SERVER_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "server")
|
||||||
|
UI_PLUGINS_SOURCES = ((CORE_UI_PLUGINS_DIR, "core"), (USER_UI_PLUGINS_DIR, "user"))
|
||||||
|
|
||||||
|
sys.path.append(os.path.dirname(SD_UI_DIR))
|
||||||
|
sys.path.append(USER_SERVER_PLUGINS_DIR)
|
||||||
|
|
||||||
|
OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
|
||||||
|
PRESERVE_CONFIG_VARS = ["FORCE_FULL_PRECISION"]
|
||||||
|
TASK_TTL = 15 * 60 # Discard last session's task timeout
|
||||||
|
APP_CONFIG_DEFAULTS = {
|
||||||
|
# auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
|
||||||
|
"render_devices": "auto", # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
|
||||||
|
"update_branch": "main",
|
||||||
|
"ui": {
|
||||||
|
"open_browser_on_start": True,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def init():
|
||||||
|
os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
|
||||||
|
os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)
|
||||||
|
|
||||||
|
load_server_plugins()
|
||||||
|
|
||||||
|
update_render_threads()
|
||||||
|
|
||||||
|
|
||||||
|
def getConfig(default_val=APP_CONFIG_DEFAULTS):
|
||||||
|
try:
|
||||||
|
config_json_path = os.path.join(CONFIG_DIR, "config.json")
|
||||||
|
if not os.path.exists(config_json_path):
|
||||||
|
config = default_val
|
||||||
|
else:
|
||||||
|
with open(config_json_path, "r", encoding="utf-8") as f:
|
||||||
|
config = json.load(f)
|
||||||
|
if "net" not in config:
|
||||||
|
config["net"] = {}
|
||||||
|
if os.getenv("SD_UI_BIND_PORT") is not None:
|
||||||
|
config["net"]["listen_port"] = int(os.getenv("SD_UI_BIND_PORT"))
|
||||||
|
else:
|
||||||
|
config["net"]["listen_port"] = 9000
|
||||||
|
if os.getenv("SD_UI_BIND_IP") is not None:
|
||||||
|
config["net"]["listen_to_network"] = os.getenv("SD_UI_BIND_IP") == "0.0.0.0"
|
||||||
|
else:
|
||||||
|
config["net"]["listen_to_network"] = True
|
||||||
|
return config
|
||||||
|
except Exception as e:
|
||||||
|
log.warn(traceback.format_exc())
|
||||||
|
return default_val
|
||||||
|
|
||||||
|
|
||||||
|
def setConfig(config):
|
||||||
|
try: # config.json
|
||||||
|
config_json_path = os.path.join(CONFIG_DIR, "config.json")
|
||||||
|
with open(config_json_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(config, f)
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
try: # config.bat
|
||||||
|
config_bat_path = os.path.join(CONFIG_DIR, "config.bat")
|
||||||
|
config_bat = []
|
||||||
|
|
||||||
|
if "update_branch" in config:
|
||||||
|
config_bat.append(f"@set update_branch={config['update_branch']}")
|
||||||
|
|
||||||
|
config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
|
||||||
|
bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
|
||||||
|
config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")
|
||||||
|
|
||||||
|
# Preserve these variables if they are set
|
||||||
|
for var in PRESERVE_CONFIG_VARS:
|
||||||
|
if os.getenv(var) is not None:
|
||||||
|
config_bat.append(f"@set {var}={os.getenv(var)}")
|
||||||
|
|
||||||
|
if len(config_bat) > 0:
|
||||||
|
with open(config_bat_path, "w", encoding="utf-8") as f:
|
||||||
|
f.write("\n".join(config_bat))
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
try: # config.sh
|
||||||
|
config_sh_path = os.path.join(CONFIG_DIR, "config.sh")
|
||||||
|
config_sh = ["#!/bin/bash"]
|
||||||
|
|
||||||
|
if "update_branch" in config:
|
||||||
|
config_sh.append(f"export update_branch={config['update_branch']}")
|
||||||
|
|
||||||
|
config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
|
||||||
|
bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
|
||||||
|
config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")
|
||||||
|
|
||||||
|
# Preserve these variables if they are set
|
||||||
|
for var in PRESERVE_CONFIG_VARS:
|
||||||
|
if os.getenv(var) is not None:
|
||||||
|
config_bat.append(f'export {var}="{shlex.quote(os.getenv(var))}"')
|
||||||
|
|
||||||
|
if len(config_sh) > 1:
|
||||||
|
with open(config_sh_path, "w", encoding="utf-8") as f:
|
||||||
|
f.write("\n".join(config_sh))
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
|
def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
|
||||||
|
config = getConfig()
|
||||||
|
if "model" not in config:
|
||||||
|
config["model"] = {}
|
||||||
|
|
||||||
|
config["model"]["stable-diffusion"] = ckpt_model_name
|
||||||
|
config["model"]["vae"] = vae_model_name
|
||||||
|
config["model"]["hypernetwork"] = hypernetwork_model_name
|
||||||
|
|
||||||
|
if vae_model_name is None or vae_model_name == "":
|
||||||
|
del config["model"]["vae"]
|
||||||
|
if hypernetwork_model_name is None or hypernetwork_model_name == "":
|
||||||
|
del config["model"]["hypernetwork"]
|
||||||
|
|
||||||
|
config["vram_usage_level"] = vram_usage_level
|
||||||
|
|
||||||
|
setConfig(config)
|
||||||
|
|
||||||
|
|
||||||
|
def update_render_threads():
|
||||||
|
config = getConfig()
|
||||||
|
render_devices = config.get("render_devices", "auto")
|
||||||
|
active_devices = task_manager.get_devices()["active"].keys()
|
||||||
|
|
||||||
|
log.debug(f"requesting for render_devices: {render_devices}")
|
||||||
|
task_manager.update_render_threads(render_devices, active_devices)
|
||||||
|
|
||||||
|
|
||||||
|
def getUIPlugins():
|
||||||
|
plugins = []
|
||||||
|
|
||||||
|
for plugins_dir, dir_prefix in UI_PLUGINS_SOURCES:
|
||||||
|
for file in os.listdir(plugins_dir):
|
||||||
|
if file.endswith(".plugin.js"):
|
||||||
|
plugins.append(f"/plugins/{dir_prefix}/{file}")
|
||||||
|
|
||||||
|
return plugins
|
||||||
|
|
||||||
|
|
||||||
|
def load_server_plugins():
|
||||||
|
if not os.path.exists(USER_SERVER_PLUGINS_DIR):
|
||||||
|
return
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
|
||||||
|
def load_plugin(file):
|
||||||
|
mod_path = file.replace(".py", "")
|
||||||
|
return importlib.import_module(mod_path)
|
||||||
|
|
||||||
|
def apply_plugin(file, plugin):
|
||||||
|
if hasattr(plugin, "get_cond_and_uncond"):
|
||||||
|
import sdkit.generate.image_generator
|
||||||
|
|
||||||
|
sdkit.generate.image_generator.get_cond_and_uncond = plugin.get_cond_and_uncond
|
||||||
|
log.info(f"Overridden get_cond_and_uncond with the one in the server plugin: {file}")
|
||||||
|
|
||||||
|
for file in os.listdir(USER_SERVER_PLUGINS_DIR):
|
||||||
|
file_path = os.path.join(USER_SERVER_PLUGINS_DIR, file)
|
||||||
|
if (not os.path.isdir(file_path) and not file_path.endswith("_plugin.py")) or (
|
||||||
|
os.path.isdir(file_path) and not file_path.endswith("_plugin")
|
||||||
|
):
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
log.info(f"Loading server plugin: {file}")
|
||||||
|
mod = load_plugin(file)
|
||||||
|
|
||||||
|
log.info(f"Applying server plugin: {file}")
|
||||||
|
apply_plugin(file, mod)
|
||||||
|
except:
|
||||||
|
log.warn(f"Error while loading a server plugin")
|
||||||
|
log.warn(traceback.format_exc())
|
||||||
|
|
||||||
|
|
||||||
|
def getIPConfig():
|
||||||
|
try:
|
||||||
|
ips = socket.gethostbyname_ex(socket.gethostname())
|
||||||
|
ips[2].append(ips[0])
|
||||||
|
return ips[2]
|
||||||
|
except Exception as e:
|
||||||
|
log.exception(e)
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
def open_browser():
|
||||||
|
config = getConfig()
|
||||||
|
ui = config.get("ui", {})
|
||||||
|
net = config.get("net", {"listen_port": 9000})
|
||||||
|
port = net.get("listen_port", 9000)
|
||||||
|
if ui.get("open_browser_on_start", True):
|
||||||
|
import webbrowser
|
||||||
|
|
||||||
|
webbrowser.open(f"http://localhost:{port}")
|
||||||
236
ui/easydiffusion/device_manager.py
Normal file
@@ -0,0 +1,236 @@
|
|||||||
|
import os
|
||||||
|
import torch
|
||||||
|
import traceback
|
||||||
|
import re
|
||||||
|
|
||||||
|
from easydiffusion.utils import log
|
||||||
|
|
||||||
|
"""
|
||||||
|
Set `FORCE_FULL_PRECISION` in the environment variables, or in `config.bat`/`config.sh` to set full precision (i.e. float32).
|
||||||
|
Otherwise the models will load at half-precision (i.e. float16).
|
||||||
|
|
||||||
|
Half-precision is fine most of the time. Full precision is only needed for working around GPU bugs (like NVIDIA 16xx GPUs).
|
||||||
|
"""
|
||||||
|
|
||||||
|
COMPARABLE_GPU_PERCENTILE = (
|
||||||
|
0.65 # if a GPU's free_mem is within this % of the GPU with the most free_mem, it will be picked
|
||||||
|
)
|
||||||
|
|
||||||
|
mem_free_threshold = 0
|
||||||
|
|
||||||
|
|
||||||
|
def get_device_delta(render_devices, active_devices):
|
||||||
|
"""
|
||||||
|
render_devices: 'cpu', or 'auto' or ['cuda:N'...]
|
||||||
|
active_devices: ['cpu', 'cuda:N'...]
|
||||||
|
"""
|
||||||
|
|
||||||
|
if render_devices in ("cpu", "auto"):
|
||||||
|
render_devices = [render_devices]
|
||||||
|
elif render_devices is not None:
|
||||||
|
if isinstance(render_devices, str):
|
||||||
|
render_devices = [render_devices]
|
||||||
|
if isinstance(render_devices, list) and len(render_devices) > 0:
|
||||||
|
render_devices = list(filter(lambda x: x.startswith("cuda:"), render_devices))
|
||||||
|
if len(render_devices) == 0:
|
||||||
|
raise Exception(
|
||||||
|
'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}'
|
||||||
|
)
|
||||||
|
|
||||||
|
render_devices = list(filter(lambda x: is_device_compatible(x), render_devices))
|
||||||
|
if len(render_devices) == 0:
|
||||||
|
raise Exception(
|
||||||
|
"Sorry, none of the render_devices configured in config.json are compatible with Stable Diffusion"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise Exception(
|
||||||
|
'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}'
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
render_devices = ["auto"]
|
||||||
|
|
||||||
|
if "auto" in render_devices:
|
||||||
|
render_devices = auto_pick_devices(active_devices)
|
||||||
|
if "cpu" in render_devices:
|
||||||
|
log.warn("WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!")
|
||||||
|
|
||||||
|
active_devices = set(active_devices)
|
||||||
|
render_devices = set(render_devices)
|
||||||
|
|
||||||
|
devices_to_start = render_devices - active_devices
|
||||||
|
devices_to_stop = active_devices - render_devices
|
||||||
|
|
||||||
|
return devices_to_start, devices_to_stop
|
||||||
|
|
||||||
|
|
||||||
|
def auto_pick_devices(currently_active_devices):
|
||||||
|
global mem_free_threshold
|
||||||
|
|
||||||
|
if not torch.cuda.is_available():
|
||||||
|
return ["cpu"]
|
||||||
|
|
||||||
|
device_count = torch.cuda.device_count()
|
||||||
|
if device_count == 1:
|
||||||
|
return ["cuda:0"] if is_device_compatible("cuda:0") else ["cpu"]
|
||||||
|
|
||||||
|
log.debug("Autoselecting GPU. Using most free memory.")
|
||||||
|
devices = []
|
||||||
|
for device in range(device_count):
|
||||||
|
device = f"cuda:{device}"
|
||||||
|
if not is_device_compatible(device):
|
||||||
|
continue
|
||||||
|
|
||||||
|
mem_free, mem_total = torch.cuda.mem_get_info(device)
|
||||||
|
mem_free /= float(10**9)
|
||||||
|
mem_total /= float(10**9)
|
||||||
|
device_name = torch.cuda.get_device_name(device)
|
||||||
|
log.debug(
|
||||||
|
f"{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb"
|
||||||
|
)
|
||||||
|
devices.append({"device": device, "device_name": device_name, "mem_free": mem_free})
|
||||||
|
|
||||||
|
devices.sort(key=lambda x: x["mem_free"], reverse=True)
|
||||||
|
max_mem_free = devices[0]["mem_free"]
|
||||||
|
curr_mem_free_threshold = COMPARABLE_GPU_PERCENTILE * max_mem_free
|
||||||
|
mem_free_threshold = max(curr_mem_free_threshold, mem_free_threshold)
|
||||||
|
|
||||||
|
# Auto-pick algorithm:
|
||||||
|
# 1. Pick the top 75 percentile of the GPUs, sorted by free_mem.
|
||||||
|
# 2. Also include already-running devices (GPU-only), otherwise their free_mem will
|
||||||
|
# always be very low (since their VRAM contains the model).
|
||||||
|
# These already-running devices probably aren't terrible, since they were picked in the past.
|
||||||
|
# Worst case, the user can restart the program and that'll get rid of them.
|
||||||
|
devices = list(
|
||||||
|
filter((lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices), devices)
|
||||||
|
)
|
||||||
|
devices = list(map(lambda x: x["device"], devices))
|
||||||
|
return devices
|
||||||
|
|
||||||
|
|
||||||
|
def device_init(context, device):
|
||||||
|
"""
|
||||||
|
This function assumes the 'device' has already been verified to be compatible.
|
||||||
|
`get_device_delta()` has already filtered out incompatible devices.
|
||||||
|
"""
|
||||||
|
|
||||||
|
validate_device_id(device, log_prefix="device_init")
|
||||||
|
|
||||||
|
if device == "cpu":
|
||||||
|
context.device = "cpu"
|
||||||
|
context.device_name = get_processor_name()
|
||||||
|
context.half_precision = False
|
||||||
|
log.debug(f"Render device CPU available as {context.device_name}")
|
||||||
|
return
|
||||||
|
|
||||||
|
context.device_name = torch.cuda.get_device_name(device)
|
||||||
|
context.device = device
|
||||||
|
|
||||||
|
# Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
|
||||||
|
if needs_to_force_full_precision(context):
|
||||||
|
log.warn(f"forcing full precision on this GPU, to avoid green images. GPU detected: {context.device_name}")
|
||||||
|
# Apply force_full_precision now before models are loaded.
|
||||||
|
context.half_precision = False
|
||||||
|
|
||||||
|
log.info(f'Setting {device} as active, with precision: {"half" if context.half_precision else "full"}')
|
||||||
|
torch.cuda.device(device)
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def needs_to_force_full_precision(context):
|
||||||
|
if "FORCE_FULL_PRECISION" in os.environ:
|
||||||
|
return True
|
||||||
|
|
||||||
|
device_name = context.device_name.lower()
|
||||||
|
return (
|
||||||
|
("nvidia" in device_name or "geforce" in device_name or "quadro" in device_name)
|
||||||
|
and (
|
||||||
|
" 1660" in device_name
|
||||||
|
or " 1650" in device_name
|
||||||
|
or " t400" in device_name
|
||||||
|
or " t550" in device_name
|
||||||
|
or " t600" in device_name
|
||||||
|
or " t1000" in device_name
|
||||||
|
or " t1200" in device_name
|
||||||
|
or " t2000" in device_name
|
||||||
|
)
|
||||||
|
) or ("tesla k40m" in device_name)
|
||||||
|
|
||||||
|
|
||||||
|
def get_max_vram_usage_level(device):
|
||||||
|
if device != "cpu":
|
||||||
|
_, mem_total = torch.cuda.mem_get_info(device)
|
||||||
|
mem_total /= float(10**9)
|
||||||
|
|
||||||
|
if mem_total < 4.5:
|
||||||
|
return "low"
|
||||||
|
elif mem_total < 6.5:
|
||||||
|
return "balanced"
|
||||||
|
|
||||||
|
return "high"
|
||||||
|
|
||||||
|
|
||||||
|
def validate_device_id(device, log_prefix=""):
|
||||||
|
def is_valid():
|
||||||
|
if not isinstance(device, str):
|
||||||
|
return False
|
||||||
|
if device == "cpu":
|
||||||
|
return True
|
||||||
|
if not device.startswith("cuda:") or not device[5:].isnumeric():
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not is_valid():
|
||||||
|
raise EnvironmentError(
|
||||||
|
f"{log_prefix}: device id should be 'cpu', or 'cuda:N' (where N is an integer index for the GPU). Got: {device}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def is_device_compatible(device):
|
||||||
|
"""
|
||||||
|
Returns True/False, and prints any compatibility errors
|
||||||
|
"""
|
||||||
|
# static variable "history".
|
||||||
|
is_device_compatible.history = getattr(is_device_compatible, "history", {})
|
||||||
|
try:
|
||||||
|
validate_device_id(device, log_prefix="is_device_compatible")
|
||||||
|
except:
|
||||||
|
log.error(str(e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if device == "cpu":
|
||||||
|
return True
|
||||||
|
# Memory check
|
||||||
|
try:
|
||||||
|
_, mem_total = torch.cuda.mem_get_info(device)
|
||||||
|
mem_total /= float(10**9)
|
||||||
|
if mem_total < 3.0:
|
||||||
|
if is_device_compatible.history.get(device) == None:
|
||||||
|
log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
|
||||||
|
is_device_compatible.history[device] = 1
|
||||||
|
return False
|
||||||
|
except RuntimeError as e:
|
||||||
|
log.error(str(e))
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def get_processor_name():
|
||||||
|
try:
|
||||||
|
import platform, subprocess
|
||||||
|
|
||||||
|
if platform.system() == "Windows":
|
||||||
|
return platform.processor()
|
||||||
|
elif platform.system() == "Darwin":
|
||||||
|
os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
|
||||||
|
command = "sysctl -n machdep.cpu.brand_string"
|
||||||
|
return subprocess.check_output(command).strip()
|
||||||
|
elif platform.system() == "Linux":
|
||||||
|
command = "cat /proc/cpuinfo"
|
||||||
|
all_info = subprocess.check_output(command, shell=True).decode().strip()
|
||||||
|
for line in all_info.split("\n"):
|
||||||
|
if "model name" in line:
|
||||||
|
return re.sub(".*model name.*:", "", line, 1).strip()
|
||||||
|
except:
|
||||||
|
log.error(traceback.format_exc())
|
||||||
|
return "cpu"
|
||||||
255
ui/easydiffusion/model_manager.py
Normal file
@@ -0,0 +1,255 @@
import os

from easydiffusion import app
from easydiffusion.types import TaskData
from easydiffusion.utils import log

from sdkit import Context
from sdkit.models import load_model, unload_model, scan_model

KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan"]
MODEL_EXTENSIONS = {
    "stable-diffusion": [".ckpt", ".safetensors"],
    "vae": [".vae.pt", ".ckpt", ".safetensors"],
    "hypernetwork": [".pt", ".safetensors"],
    "gfpgan": [".pth"],
    "realesrgan": [".pth"],
}
DEFAULT_MODELS = {
    "stable-diffusion": [  # needed to support the legacy installations
        "custom-model",  # only one custom model file was supported initially, creatively named 'custom-model'
        "sd-v1-4",  # Default fallback.
    ],
    "gfpgan": ["GFPGANv1.3"],
    "realesrgan": ["RealESRGAN_x4plus"],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork"]

known_models = {}


def init():
    make_model_folders()
    getModels()  # run this once, to cache the picklescan results


def load_default_models(context: Context):
    set_vram_optimizations(context)

    # init default model paths
    for model_type in MODELS_TO_LOAD_ON_START:
        context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
        try:
            load_model(context, model_type)
        except Exception as e:
            log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
            log.error(f"[red]Error: {e}[/red]")
            log.error(f"[red]Consider removing the model from the model folder.[/red]")


def unload_all(context: Context):
    for model_type in KNOWN_MODEL_TYPES:
        unload_model(context, model_type)


def resolve_model_to_use(model_name: str = None, model_type: str = None):
    model_extensions = MODEL_EXTENSIONS.get(model_type, [])
    default_models = DEFAULT_MODELS.get(model_type, [])
    config = app.getConfig()

    model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
    if not model_name:  # When None try user configured model.
        # config = getConfig()
        if "model" in config and model_type in config["model"]:
            model_name = config["model"][model_type]

    if model_name:
        # Check models directory
        models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
        for model_extension in model_extensions:
            if os.path.exists(models_dir_path + model_extension):
                return models_dir_path + model_extension
            if os.path.exists(model_name + model_extension):
                return os.path.abspath(model_name + model_extension)

    # Default locations
    if model_name in default_models:
        default_model_path = os.path.join(app.SD_DIR, model_name)
        for model_extension in model_extensions:
            if os.path.exists(default_model_path + model_extension):
                return default_model_path + model_extension

    # Can't find requested model, check the default paths.
    for default_model in default_models:
        for model_dir in model_dirs:
            default_model_path = os.path.join(model_dir, default_model)
            for model_extension in model_extensions:
                if os.path.exists(default_model_path + model_extension):
                    if model_name is not None:
                        log.warn(
                            f"Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}"
                        )
                    return default_model_path + model_extension

    return None


def reload_models_if_necessary(context: Context, task_data: TaskData):
    model_paths_in_req = {
        "stable-diffusion": task_data.use_stable_diffusion_model,
        "vae": task_data.use_vae_model,
        "hypernetwork": task_data.use_hypernetwork_model,
        "gfpgan": task_data.use_face_correction,
        "realesrgan": task_data.use_upscale,
        "nsfw_checker": True if task_data.block_nsfw else None,
    }
    models_to_reload = {
        model_type: path
        for model_type, path in model_paths_in_req.items()
        if context.model_paths.get(model_type) != path
    }

    if set_vram_optimizations(context):  # reload SD
        models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]

    for model_type, model_path_in_req in models_to_reload.items():
        context.model_paths[model_type] = model_path_in_req

        action_fn = unload_model if context.model_paths[model_type] is None else load_model
        action_fn(context, model_type, scan_model=False)  # we've scanned them already


def resolve_model_paths(task_data: TaskData):
    task_data.use_stable_diffusion_model = resolve_model_to_use(
        task_data.use_stable_diffusion_model, model_type="stable-diffusion"
    )
    task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type="vae")
    task_data.use_hypernetwork_model = resolve_model_to_use(task_data.use_hypernetwork_model, model_type="hypernetwork")

    if task_data.use_face_correction:
        task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
    if task_data.use_upscale:
        task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")


def set_vram_optimizations(context: Context):
    config = app.getConfig()
    vram_usage_level = config.get("vram_usage_level", "balanced")

    if vram_usage_level != context.vram_usage_level:
        context.vram_usage_level = vram_usage_level
        return True

    return False


def make_model_folders():
    for model_type in KNOWN_MODEL_TYPES:
        model_dir_path = os.path.join(app.MODELS_DIR, model_type)

        os.makedirs(model_dir_path, exist_ok=True)

        help_file_name = f"Place your {model_type} model files here.txt"
        help_file_contents = f'Supported extensions: {" or ".join(MODEL_EXTENSIONS.get(model_type))}'

        with open(os.path.join(model_dir_path, help_file_name), "w", encoding="utf-8") as f:
            f.write(help_file_contents)


def is_malicious_model(file_path):
    try:
        if file_path.endswith(".safetensors"):
            return False
        scan_result = scan_model(file_path)
        if scan_result.issues_count > 0 or scan_result.infected_files > 0:
            log.warn(
                ":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]"
                % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
            )
            return True
        else:
            log.debug(
                "Scan %s: [green]%d scanned, %d issue, %d infected.[/green]"
                % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
            )
            return False
    except Exception as e:
        log.error(f"error while scanning: {file_path}, error: {e}")
    return False


def getModels():
    models = {
        "active": {
            "stable-diffusion": "sd-v1-4",
            "vae": "",
            "hypernetwork": "",
        },
        "options": {
            "stable-diffusion": ["sd-v1-4"],
            "vae": [],
            "hypernetwork": [],
        },
    }

    models_scanned = 0

    class MaliciousModelException(Exception):
        "Raised when picklescan reports a problem with a model"
        pass

    def scan_directory(directory, suffixes, directoriesFirst: bool = True):
        nonlocal models_scanned
        tree = []
        for entry in sorted(
            os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower())
        ):
            if entry.is_file():
                matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
                if len(matching_suffix) == 0:
                    continue
                matching_suffix = matching_suffix[0]

                mtime = entry.stat().st_mtime
                mod_time = known_models[entry.path] if entry.path in known_models else -1
                if mod_time != mtime:
                    models_scanned += 1
                    if is_malicious_model(entry.path):
                        raise MaliciousModelException(entry.path)
                known_models[entry.path] = mtime
                tree.append(entry.name[: -len(matching_suffix)])
            elif entry.is_dir():
                scan = scan_directory(entry.path, suffixes, directoriesFirst=False)

                if len(scan) != 0:
                    tree.append((entry.name, scan))
        return tree

    def listModels(model_type):
        nonlocal models_scanned

        model_extensions = MODEL_EXTENSIONS.get(model_type, [])
        models_dir = os.path.join(app.MODELS_DIR, model_type)
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)

        try:
            models["options"][model_type] = scan_directory(models_dir, model_extensions)
        except MaliciousModelException as e:
            models["scan-error"] = e

    # custom models
    listModels(model_type="stable-diffusion")
    listModels(model_type="vae")
    listModels(model_type="hypernetwork")
    listModels(model_type="gfpgan")

    if models_scanned > 0:
        log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")

    # legacy
    custom_weight_path = os.path.join(app.SD_DIR, "custom-model.ckpt")
    if os.path.exists(custom_weight_path):
        models["options"]["stable-diffusion"].append("custom-model")

    return models
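As a usage note for the module above: resolve_model_to_use() first tries the requested name under models/<model_type>/ with each known extension, then the legacy defaults next to the installation. A hedged sketch of calling it, assuming the easydiffusion package is importable and app.MODELS_DIR / app.SD_DIR have been initialized (the model name here is made up for illustration):

from easydiffusion import model_manager

# Looks for models/stable-diffusion/my-custom-model.ckpt or .safetensors,
# then falls back to sd-v1-4 in the default locations if neither exists.
sd_path = model_manager.resolve_model_to_use("my-custom-model", model_type="stable-diffusion")

# With no name given, the user-configured model from config["model"]["vae"] is tried first.
vae_path = model_manager.resolve_model_to_use(model_type="vae")

print(sd_path, vae_path)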
ui/easydiffusion/renderer.py (Normal file, +177 lines)
@@ -0,0 +1,177 @@
import queue
import time
import json
import pprint

from easydiffusion import device_manager
from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
from easydiffusion.utils import get_printable_request, save_images_to_disk, log

from sdkit import Context
from sdkit.generate import generate_images
from sdkit.filter import apply_filters
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, gc

context = Context()  # thread-local
"""
runtime data (bound locally to this thread), for e.g. device, references to loaded models, optimization flags etc
"""


def init(device):
    """
    Initializes the fields that will be bound to this runtime's context, and sets the current torch device
    """
    context.stop_processing = False
    context.temp_images = {}
    context.partial_x_samples = None

    device_manager.device_init(context, device)


def make_images(
    req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
):
    context.stop_processing = False
    print_task_info(req, task_data)

    images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)

    res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
    res = res.json()
    data_queue.put(json.dumps(res))
    log.info("Task completed")

    return res


def print_task_info(req: GenerateImageRequest, task_data: TaskData):
    req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
    task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
    log.info(f"request: {req_str}")
    log.info(f"task data: {task_str}")


def make_images_internal(
    req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
):

    images, user_stopped = generate_images_internal(
        req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress, task_data.stream_image_progress_interval
    )
    filtered_images = filter_images(task_data, images, user_stopped)

    if task_data.save_to_disk_path is not None:
        save_images_to_disk(images, filtered_images, req, task_data)

    seeds = [*range(req.seed, req.seed + len(images))]
    if task_data.show_only_filtered_image or filtered_images is images:
        return filtered_images, seeds
    else:
        return images + filtered_images, seeds + seeds


def generate_images_internal(
    req: GenerateImageRequest,
    task_data: TaskData,
    data_queue: queue.Queue,
    task_temp_images: list,
    step_callback,
    stream_image_progress: bool,
    stream_image_progress_interval: int,
):
    context.temp_images.clear()

    callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress, stream_image_progress_interval)

    try:
        if req.init_image is not None:
            req.sampler_name = "ddim"

        images = generate_images(context, callback=callback, **req.dict())
        user_stopped = False
    except UserInitiatedStop:
        images = []
        user_stopped = True
        if context.partial_x_samples is not None:
            images = latent_samples_to_images(context, context.partial_x_samples)
    finally:
        if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
            del context.partial_x_samples
            context.partial_x_samples = None

    return images, user_stopped


def filter_images(task_data: TaskData, images: list, user_stopped):
    if user_stopped:
        return images

    filters_to_apply = []
    if task_data.block_nsfw:
        filters_to_apply.append("nsfw_checker")
    if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
        filters_to_apply.append("gfpgan")
    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
        filters_to_apply.append("realesrgan")

    if len(filters_to_apply) == 0:
        return images

    return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)


def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
    return [
        ResponseImage(
            data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
            seed=seed,
        )
        for img, seed in zip(images, seeds)
    ]


def make_step_callback(
    req: GenerateImageRequest,
    task_data: TaskData,
    data_queue: queue.Queue,
    task_temp_images: list,
    step_callback,
    stream_image_progress: bool,
    stream_image_progress_interval: int,
):
    n_steps = req.num_inference_steps if req.init_image is None else int(req.num_inference_steps * req.prompt_strength)
    last_callback_time = -1

    def update_temp_img(x_samples, task_temp_images: list):
        partial_images = []
        images = latent_samples_to_images(context, x_samples)
        for i, img in enumerate(images):
            buf = img_to_buffer(img, output_format="JPEG")

            context.temp_images[f"{task_data.request_id}/{i}"] = buf
            task_temp_images[i] = buf
            partial_images.append({"path": f"/image/tmp/{task_data.request_id}/{i}"})
        del images
        return partial_images

    def on_image_step(x_samples, i):
        nonlocal last_callback_time

        context.partial_x_samples = x_samples
        step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
        last_callback_time = time.time()

        progress = {"step": i, "step_time": step_time, "total_steps": n_steps}

        if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
            progress["output"] = update_temp_img(x_samples, task_temp_images)

        data_queue.put(json.dumps(progress))

        step_callback()

        if context.stop_processing:
            raise UserInitiatedStop("User requested that we stop processing")

    return on_image_step
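The step callback above pushes one JSON progress line per sampling step onto data_queue, and the final response from make_images() lands on the same queue. A small illustrative reader of that queue (the reading loop itself is not part of the module; only the message shapes come from the code above):

import json
import queue


def drain_progress(data_queue: queue.Queue):
    # Each queue entry is a JSON string such as
    # {"step": 12, "step_time": 0.4, "total_steps": 50} or, at the end,
    # the full response produced by make_images().
    while not data_queue.empty():
        msg = json.loads(data_queue.get())
        if "step" in msg:
            print(f"step {msg['step']}/{msg['total_steps']} ({msg['step_time']:.2f}s)")
        else:
            print("final status:", msg.get("status"))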
ui/easydiffusion/server.py (Normal file, +285 lines)
@@ -0,0 +1,285 @@
"""server.py: FastAPI SD-UI Web Host.
Notes:
    async endpoints always run on the main thread. Without the async keyword, they run on the thread pool.
"""
import os
import traceback
import datetime
from typing import List, Union

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel

from easydiffusion import app, model_manager, task_manager
from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
from easydiffusion.utils import log

log.info(f"started in {app.SD_DIR}")
log.info(f"started at {datetime.datetime.now():%x %X}")

server_api = FastAPI()

NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}


class NoCacheStaticFiles(StaticFiles):
    def is_not_modified(self, response_headers, request_headers) -> bool:
        if "content-type" in response_headers and (
            "javascript" in response_headers["content-type"] or "css" in response_headers["content-type"]
        ):
            response_headers.update(NOCACHE_HEADERS)
            return False

        return super().is_not_modified(response_headers, request_headers)


class SetAppConfigRequest(BaseModel):
    update_branch: str = None
    render_devices: Union[List[str], List[int], str, int] = None
    model_vae: str = None
    ui_open_browser_on_start: bool = None
    listen_to_network: bool = None
    listen_port: int = None


def init():
    server_api.mount("/media", NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")), name="media")

    for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
        server_api.mount(
            f"/plugins/{dir_prefix}", NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}"
        )

    @server_api.post("/app_config")
    async def set_app_config(req: SetAppConfigRequest):
        return set_app_config_internal(req)

    @server_api.get("/get/{key:path}")
    def read_web_data(key: str = None):
        return read_web_data_internal(key)

    @server_api.get("/ping")  # Get server and optionally session status.
    def ping(session_id: str = None):
        return ping_internal(session_id)

    @server_api.post("/render")
    def render(req: dict):
        return render_internal(req)

    @server_api.post("/model/merge")
    def model_merge(req: dict):
        print(req)
        return model_merge_internal(req)

    @server_api.get("/image/stream/{task_id:int}")
    def stream(task_id: int):
        return stream_internal(task_id)

    @server_api.get("/image/stop")
    def stop(task: int):
        return stop_internal(task)

    @server_api.get("/image/tmp/{task_id:int}/{img_id:int}")
    def get_image(task_id: int, img_id: int):
        return get_image_internal(task_id, img_id)

    @server_api.get("/")
    def read_root():
        return FileResponse(os.path.join(app.SD_UI_DIR, "index.html"), headers=NOCACHE_HEADERS)

    @server_api.on_event("shutdown")
    def shutdown_event():  # Signal render thread to close on shutdown
        task_manager.current_state_error = SystemExit("Application shutting down.")


# API implementations
def set_app_config_internal(req: SetAppConfigRequest):
    config = app.getConfig()
    if req.update_branch is not None:
        config["update_branch"] = req.update_branch
    if req.render_devices is not None:
        update_render_devices_in_config(config, req.render_devices)
    if req.ui_open_browser_on_start is not None:
        if "ui" not in config:
            config["ui"] = {}
        config["ui"]["open_browser_on_start"] = req.ui_open_browser_on_start
    if req.listen_to_network is not None:
        if "net" not in config:
            config["net"] = {}
        config["net"]["listen_to_network"] = bool(req.listen_to_network)
    if req.listen_port is not None:
        if "net" not in config:
            config["net"] = {}
        config["net"]["listen_port"] = int(req.listen_port)
    try:
        app.setConfig(config)

        if req.render_devices:
            app.update_render_threads()

        return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
    except Exception as e:
        log.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))


def update_render_devices_in_config(config, render_devices):
    if render_devices not in ("cpu", "auto") and not render_devices.startswith("cuda:"):
        raise HTTPException(status_code=400, detail=f"Invalid render device requested: {render_devices}")

    if render_devices.startswith("cuda:"):
        render_devices = render_devices.split(",")

    config["render_devices"] = render_devices


def read_web_data_internal(key: str = None):
    if not key:  # /get without parameters, stable-diffusion easter egg.
        raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!")  # HTTP418 I'm a teapot
    elif key == "app_config":
        return JSONResponse(app.getConfig(), headers=NOCACHE_HEADERS)
    elif key == "system_info":
        config = app.getConfig()

        output_dir = config.get("force_save_path", os.path.join(os.path.expanduser("~"), app.OUTPUT_DIRNAME))

        system_info = {
            "devices": task_manager.get_devices(),
            "hosts": app.getIPConfig(),
            "default_output_dir": output_dir,
            "enforce_output_dir": ("force_save_path" in config),
        }
        system_info["devices"]["config"] = config.get("render_devices", "auto")
        return JSONResponse(system_info, headers=NOCACHE_HEADERS)
    elif key == "models":
        return JSONResponse(model_manager.getModels(), headers=NOCACHE_HEADERS)
    elif key == "modifiers":
        return FileResponse(os.path.join(app.SD_UI_DIR, "modifiers.json"), headers=NOCACHE_HEADERS)
    elif key == "ui_plugins":
        return JSONResponse(app.getUIPlugins(), headers=NOCACHE_HEADERS)
    else:
        raise HTTPException(status_code=404, detail=f"Request for unknown {key}")  # HTTP404 Not Found


def ping_internal(session_id: str = None):
    if task_manager.is_alive() <= 0:  # Check that render threads are alive.
        if task_manager.current_state_error:
            raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
        raise HTTPException(status_code=500, detail="Render thread is dead.")
    if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration):
        raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
    # Alive
    response = {"status": str(task_manager.current_state)}
    if session_id:
        session = task_manager.get_cached_session(session_id, update_ttl=True)
        response["tasks"] = {id(t): t.status for t in session.tasks}
    response["devices"] = task_manager.get_devices()
    return JSONResponse(response, headers=NOCACHE_HEADERS)


def render_internal(req: dict):
    try:
        # separate out the request data into rendering and task-specific data
        render_req: GenerateImageRequest = GenerateImageRequest.parse_obj(req)
        task_data: TaskData = TaskData.parse_obj(req)

        # Overwrite user specified save path
        config = app.getConfig()
        if "force_save_path" in config:
            task_data.save_to_disk_path = config["force_save_path"]

        render_req.init_image_mask = req.get("mask")  # hack: will rename this in the HTTP API in a future revision

        app.save_to_config(
            task_data.use_stable_diffusion_model,
            task_data.use_vae_model,
            task_data.use_hypernetwork_model,
            task_data.vram_usage_level,
        )

        # enqueue the task
        new_task = task_manager.render(render_req, task_data)
        response = {
            "status": str(task_manager.current_state),
            "queue": len(task_manager.tasks_queue),
            "stream": f"/image/stream/{id(new_task)}",
            "task": id(new_task),
        }
        return JSONResponse(response, headers=NOCACHE_HEADERS)
    except ChildProcessError as e:  # Render thread is dead
        raise HTTPException(status_code=500, detail=f"Rendering thread has died.")  # HTTP500 Internal Server Error
    except ConnectionRefusedError as e:  # Unstarted task pending limit reached, deny queueing too many.
        raise HTTPException(status_code=503, detail=str(e))  # HTTP503 Service Unavailable
    except Exception as e:
        log.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))


def model_merge_internal(req: dict):
    try:
        from sdkit.train import merge_models
        from easydiffusion.utils.save_utils import filename_regex

        mergeReq: MergeRequest = MergeRequest.parse_obj(req)

        merge_models(
            model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
            model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
            mergeReq.ratio,
            os.path.join(app.MODELS_DIR, "stable-diffusion", filename_regex.sub("_", mergeReq.out_path)),
            mergeReq.use_fp16,
        )
        return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
    except Exception as e:
        log.error(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))


def stream_internal(task_id: int):
    # TODO Move to WebSockets ??
    task = task_manager.get_cached_task(task_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=404, detail=f"Request {task_id} not found.")  # HTTP404 NotFound
    # if (id(task) != task_id): raise HTTPException(status_code=409, detail=f'Wrong task id received. Expected:{id(task)}, Received:{task_id}')  # HTTP409 Conflict
    if task.buffer_queue.empty() and not task.lock.locked():
        if task.response:
            # log.info(f'Session {session_id} sending cached response')
            return JSONResponse(task.response, headers=NOCACHE_HEADERS)
        raise HTTPException(status_code=425, detail="Too Early, task not started yet.")  # HTTP425 Too Early
    # log.info(f'Session {session_id} opened live render stream {id(task.buffer_queue)}')
    return StreamingResponse(task.read_buffer_generator(), media_type="application/json")


def stop_internal(task: int):
    if not task:
        if (
            task_manager.current_state == task_manager.ServerStates.Online
            or task_manager.current_state == task_manager.ServerStates.Unavailable
        ):
            raise HTTPException(status_code=409, detail="Not currently running any tasks.")  # HTTP409 Conflict
        task_manager.current_state_error = StopAsyncIteration("")
        return {"OK"}
    task_id = task
    task = task_manager.get_cached_task(task_id, update_ttl=False)
    if not task:
        raise HTTPException(status_code=404, detail=f"Task {task_id} was not found.")  # HTTP404 Not Found
    if isinstance(task.error, StopAsyncIteration):
        raise HTTPException(status_code=409, detail=f"Task {task_id} is already stopped.")  # HTTP409 Conflict
    task.error = StopAsyncIteration(f"Task {task_id} stop requested.")
    return {"OK"}


def get_image_internal(task_id: int, img_id: int):
    task = task_manager.get_cached_task(task_id, update_ttl=True)
    if not task:
        raise HTTPException(status_code=410, detail=f"Task {task_id} could not be found.")  # HTTP404 NotFound
    if not task.temp_images[img_id]:
        raise HTTPException(status_code=425, detail="Too Early, task data is not available yet.")  # HTTP425 Too Early
    try:
        img_data = task.temp_images[img_id]
        img_data.seek(0)
        return StreamingResponse(img_data, media_type="image/jpeg")
    except KeyError as e:
        raise HTTPException(status_code=500, detail=str(e))
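A rough client-side sketch against the endpoints defined above. The base URL is an assumption (adjust it to your install), the request fields map onto GenerateImageRequest / TaskData from types.py, and the substring check on the stream body is a pragmatic illustration rather than an official client:

import time

import requests

BASE = "http://localhost:9000"  # assumed host/port, not taken from this diff

# POST /render returns {"status": ..., "queue": ..., "stream": "/image/stream/<id>", "task": <id>}
task = requests.post(f"{BASE}/render", json={
    "prompt": "a photo of a cat",
    "seed": 42,
    "num_inference_steps": 25,
    "session_id": "demo-session",
}).json()

# Poll the stream endpoint; it returns JSON progress segments while rendering,
# then the final response (which contains "status": "succeeded").
while True:
    r = requests.get(f"{BASE}{task['stream']}")
    if r.status_code == 200 and '"status": "succeeded"' in r.text:
        break
    time.sleep(1)

print("done, task id:", task["task"])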
ui/easydiffusion/task_manager.py (Normal file, +562 lines)
@@ -0,0 +1,562 @@
"""task_manager.py: manage task dispatching and render threads.
Notes:
    render_threads should be the only hard reference held by the manager to the threads.
    Use weak_thread_data to store all other data using weak keys.
    This will allow for garbage collection after the thread dies.
"""
import json
import traceback

TASK_TTL = 15 * 60  # seconds, Discard last session's task timeout

import torch
import queue, threading, time, weakref
from typing import Any, Hashable

from easydiffusion import device_manager
from easydiffusion.types import TaskData, GenerateImageRequest
from easydiffusion.utils import log

from sdkit.utils import gc

THREAD_NAME_PREFIX = ""
ERR_LOCK_FAILED = " failed to acquire lock within timeout."
LOCK_TIMEOUT = 15  # Maximum locking time in seconds before failing a task.
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.

DEVICE_START_TIMEOUT = 60  # seconds - Maximum time to wait for a render device to init.


class SymbolClass(type):  # Print nicely formatted Symbol names.
    def __repr__(self):
        return self.__qualname__

    def __str__(self):
        return self.__name__


class Symbol(metaclass=SymbolClass):
    pass


class ServerStates:
    class Init(Symbol):
        pass

    class LoadingModel(Symbol):
        pass

    class Online(Symbol):
        pass

    class Rendering(Symbol):
        pass

    class Unavailable(Symbol):
        pass


class RenderTask:  # Task with output queue and completion lock.
    def __init__(self, req: GenerateImageRequest, task_data: TaskData):
        task_data.request_id = id(self)
        self.render_request: GenerateImageRequest = req  # Initial Request
        self.task_data: TaskData = task_data
        self.response: Any = None  # Copy of the last response
        self.render_device = None  # Select the task affinity. (Not used to change active devices).
        self.temp_images: list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
        self.error: Exception = None
        self.lock: threading.Lock = threading.Lock()  # Locks at task start and unlocks when task is completed
        self.buffer_queue: queue.Queue = queue.Queue()  # Queue of JSON string segments

    async def read_buffer_generator(self):
        try:
            while not self.buffer_queue.empty():
                res = self.buffer_queue.get(block=False)
                self.buffer_queue.task_done()
                yield res
        except queue.Empty as e:
            yield

    @property
    def status(self):
        if self.lock.locked():
            return "running"
        if isinstance(self.error, StopAsyncIteration):
            return "stopped"
        if self.error:
            return "error"
        if not self.buffer_queue.empty():
            return "buffer"
        if self.response:
            return "completed"
        return "pending"

    @property
    def is_pending(self):
        return bool(not self.response and not self.error)


# Temporary cache to allow to query tasks results for a short time after they are completed.
class DataCache:
    def __init__(self):
        self._base = dict()
        self._lock: threading.Lock = threading.Lock()

    def _get_ttl_time(self, ttl: int) -> int:
        return int(time.time()) + ttl

    def _is_expired(self, timestamp: int) -> bool:
        return int(time.time()) >= timestamp

    def clean(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.clean" + ERR_LOCK_FAILED)
        try:
            # Create a list of expired keys to delete
            to_delete = []
            for key in self._base:
                ttl, _ = self._base[key]
                if self._is_expired(ttl):
                    to_delete.append(key)
            # Remove Items
            for key in to_delete:
                (_, val) = self._base[key]
                if isinstance(val, RenderTask):
                    log.debug(f"RenderTask {key} expired. Data removed.")
                elif isinstance(val, SessionState):
                    log.debug(f"Session {key} expired. Data removed.")
                else:
                    log.debug(f"Key {key} expired. Data removed.")
                del self._base[key]
        finally:
            self._lock.release()

    def clear(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.clear" + ERR_LOCK_FAILED)
        try:
            self._base.clear()
        finally:
            self._lock.release()

    def delete(self, key: Hashable) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.delete" + ERR_LOCK_FAILED)
        try:
            if key not in self._base:
                return False
            del self._base[key]
            return True
        finally:
            self._lock.release()

    def keep(self, key: Hashable, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.keep" + ERR_LOCK_FAILED)
        try:
            if key in self._base:
                _, value = self._base.get(key)
                self._base[key] = (self._get_ttl_time(ttl), value)
                return True
            return False
        finally:
            self._lock.release()

    def put(self, key: Hashable, value: Any, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.put" + ERR_LOCK_FAILED)
        try:
            self._base[key] = (self._get_ttl_time(ttl), value)
        except Exception as e:
            log.error(traceback.format_exc())
            return False
        else:
            return True
        finally:
            self._lock.release()

    def tryGet(self, key: Hashable) -> Any:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
            raise Exception("DataCache.tryGet" + ERR_LOCK_FAILED)
        try:
            ttl, value = self._base.get(key, (None, None))
            if ttl is not None and self._is_expired(ttl):
                log.debug(f"Session {key} expired. Discarding data.")
                del self._base[key]
                return None
            return value
        finally:
            self._lock.release()


manager_lock = threading.RLock()
render_threads = []
current_state = ServerStates.Init
current_state_error: Exception = None
tasks_queue = []
session_cache = DataCache()
task_cache = DataCache()
weak_thread_data = weakref.WeakKeyDictionary()
idle_event: threading.Event = threading.Event()


class SessionState:
    def __init__(self, id: str):
        self._id = id
        self._tasks_ids = []

    @property
    def id(self):
        return self._id

    @property
    def tasks(self):
        tasks = []
        for task_id in self._tasks_ids:
            task = task_cache.tryGet(task_id)
            if task:
                tasks.append(task)
        return tasks

    def put(self, task, ttl=TASK_TTL):
        task_id = id(task)
        self._tasks_ids.append(task_id)
        if not task_cache.put(task_id, task, ttl):
            return False
        while len(self._tasks_ids) > len(render_threads) * 2:
            self._tasks_ids.pop(0)
        return True


def thread_get_next_task():
    from easydiffusion import renderer

    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        log.warn(f"Render thread on device: {renderer.context.device} failed to acquire manager lock.")
        return None
    if len(tasks_queue) <= 0:
        manager_lock.release()
        return None
    task = None
    try:  # Select a render task.
        for queued_task in tasks_queue:
            if queued_task.render_device and renderer.context.device != queued_task.render_device:
                # Is asking for a specific render device.
                if is_alive(queued_task.render_device) > 0:
                    continue  # requested device alive, skip current one.
                else:
                    # Requested device is not active, return error to UI.
                    queued_task.error = Exception(queued_task.render_device + " is not currently active.")
                    task = queued_task
                    break
            if not queued_task.render_device and renderer.context.device == "cpu" and is_alive() > 1:
                # not asking for any specific device; the CPU wants to grab the task, but other render devices are alive.
                continue  # Skip Tasks, don't run on CPU unless there is nothing else or user asked for it.
            task = queued_task
            break
        if task is not None:
            del tasks_queue[tasks_queue.index(task)]
        return task
    finally:
        manager_lock.release()


def thread_render(device):
    global current_state, current_state_error

    from easydiffusion import renderer, model_manager

    try:
        renderer.init(device)

        weak_thread_data[threading.current_thread()] = {
            "device": renderer.context.device,
            "device_name": renderer.context.device_name,
            "alive": True,
        }

        current_state = ServerStates.LoadingModel
        model_manager.load_default_models(renderer.context)

        current_state = ServerStates.Online
    except Exception as e:
        log.error(traceback.format_exc())
        weak_thread_data[threading.current_thread()] = {"error": e, "alive": False}
        return

    while True:
        session_cache.clean()
        task_cache.clean()
        if not weak_thread_data[threading.current_thread()]["alive"]:
            log.info(f"Shutting down thread for device {renderer.context.device}")
            model_manager.unload_all(renderer.context)
            return
        if isinstance(current_state_error, SystemExit):
            current_state = ServerStates.Unavailable
            return
        task = thread_get_next_task()
        if task is None:
            idle_event.clear()
            idle_event.wait(timeout=1)
            continue
        if task.error is not None:
            log.error(task.error)
            task.response = {"status": "failed", "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        if current_state_error:
            task.error = current_state_error
            task.response = {"status": "failed", "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        log.info(f"Session {task.task_data.session_id} starting task {id(task)} on {renderer.context.device_name}")
        if not task.lock.acquire(blocking=False):
            raise Exception("Got locked task from queue.")
        try:

            def step_callback():
                global current_state_error

                if (
                    isinstance(current_state_error, SystemExit)
                    or isinstance(current_state_error, StopAsyncIteration)
                    or isinstance(task.error, StopAsyncIteration)
                ):
                    renderer.context.stop_processing = True
                    if isinstance(current_state_error, StopAsyncIteration):
                        task.error = current_state_error
                        current_state_error = None
                        log.info(f"Session {task.task_data.session_id} sent cancel signal for task {id(task)}")

            current_state = ServerStates.LoadingModel
            model_manager.resolve_model_paths(task.task_data)
            model_manager.reload_models_if_necessary(renderer.context, task.task_data)

            current_state = ServerStates.Rendering
            task.response = renderer.make_images(
                task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback
            )
            # Before looping back to the generator, mark cache as still alive.
            task_cache.keep(id(task), TASK_TTL)
            session_cache.keep(task.task_data.session_id, TASK_TTL)
        except Exception as e:
            task.error = str(e)
            task.response = {"status": "failed", "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            log.error(traceback.format_exc())
        finally:
            gc(renderer.context)
            task.lock.release()
        task_cache.keep(id(task), TASK_TTL)
        session_cache.keep(task.task_data.session_id, TASK_TTL)
        if isinstance(task.error, StopAsyncIteration):
            log.info(f"Session {task.task_data.session_id} task {id(task)} cancelled!")
        elif task.error is not None:
            log.info(f"Session {task.task_data.session_id} task {id(task)} failed!")
        else:
            log.info(
                f"Session {task.task_data.session_id} task {id(task)} completed by {renderer.context.device_name}."
            )
        current_state = ServerStates.Online


def get_cached_task(task_id: str, update_ttl: bool = False):
    # By calling keep before tryGet, the task won't be discarded if it has expired.
    if update_ttl and not task_cache.keep(task_id, TASK_TTL):
        # Failed to keep task, already gone.
        return None
    return task_cache.tryGet(task_id)


def get_cached_session(session_id: str, update_ttl: bool = False):
    if update_ttl:
        session_cache.keep(session_id, TASK_TTL)
    session = session_cache.tryGet(session_id)
    if not session:
        session = SessionState(session_id)
        session_cache.put(session_id, session, TASK_TTL)
    return session


def get_devices():
    devices = {
        "all": {},
        "active": {},
    }

    def get_device_info(device):
        if device == "cpu":
            return {"name": device_manager.get_processor_name()}

        mem_free, mem_total = torch.cuda.mem_get_info(device)
        mem_free /= float(10**9)
        mem_total /= float(10**9)

        return {
            "name": torch.cuda.get_device_name(device),
            "mem_free": mem_free,
            "mem_total": mem_total,
            "max_vram_usage_level": device_manager.get_max_vram_usage_level(device),
        }

    # list the compatible devices
    gpu_count = torch.cuda.device_count()
    for device in range(gpu_count):
        device = f"cuda:{device}"
        if not device_manager.is_device_compatible(device):
            continue

        devices["all"].update({device: get_device_info(device)})

    devices["all"].update({"cpu": get_device_info("cpu")})

    # list the activated devices
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        raise Exception("get_devices" + ERR_LOCK_FAILED)
    try:
        for rthread in render_threads:
            if not rthread.is_alive():
                continue
            weak_data = weak_thread_data.get(rthread)
            if not weak_data or not "device" in weak_data or not "device_name" in weak_data:
                continue
            device = weak_data["device"]
            devices["active"].update({device: get_device_info(device)})
    finally:
        manager_lock.release()

    return devices


def is_alive(device=None):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        raise Exception("is_alive" + ERR_LOCK_FAILED)
    nbr_alive = 0
    try:
        for rthread in render_threads:
            if device is not None:
                weak_data = weak_thread_data.get(rthread)
                if weak_data is None or not "device" in weak_data or weak_data["device"] is None:
                    continue
                thread_device = weak_data["device"]
                if thread_device != device:
                    continue
            if rthread.is_alive():
                nbr_alive += 1
        return nbr_alive
    finally:
        manager_lock.release()


def start_render_thread(device):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        raise Exception("start_render_thread" + ERR_LOCK_FAILED)
    log.info(f"Start new Rendering Thread on device: {device}")
    try:
        rthread = threading.Thread(target=thread_render, kwargs={"device": device})
        rthread.daemon = True
        rthread.name = THREAD_NAME_PREFIX + device
        rthread.start()
        render_threads.append(rthread)
    finally:
        manager_lock.release()
    timeout = DEVICE_START_TIMEOUT
    while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]:
        if rthread in weak_thread_data and "error" in weak_thread_data[rthread]:
            log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
            return False
        if timeout <= 0:
            return False
        timeout -= 1
        time.sleep(1)
    return True


def stop_render_thread(device):
    try:
        device_manager.validate_device_id(device, log_prefix="stop_render_thread")
    except:
        log.error(traceback.format_exc())
        return False

    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        raise Exception("stop_render_thread" + ERR_LOCK_FAILED)
    log.info(f"Stopping Rendering Thread on device: {device}")

    try:
        thread_to_remove = None
        for rthread in render_threads:
            weak_data = weak_thread_data.get(rthread)
            if weak_data is None or not "device" in weak_data or weak_data["device"] is None:
                continue
            thread_device = weak_data["device"]
            if thread_device == device:
                weak_data["alive"] = False
                thread_to_remove = rthread
                break
        if thread_to_remove is not None:
            render_threads.remove(rthread)
            return True
    finally:
        manager_lock.release()

    return False


def update_render_threads(render_devices, active_devices):
    devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
    log.debug(f"devices_to_start: {devices_to_start}")
    log.debug(f"devices_to_stop: {devices_to_stop}")

    for device in devices_to_stop:
        if is_alive(device) <= 0:
            log.debug(f"{device} is not alive")
            continue
        if not stop_render_thread(device):
            log.warn(f"{device} could not stop render thread")

    for device in devices_to_start:
        if is_alive(device) >= 1:
            log.debug(f"{device} already registered.")
            continue
        if not start_render_thread(device):
            log.warn(f"{device} failed to start.")

    if is_alive() <= 0:  # No running devices, probably invalid user config.
        raise EnvironmentError(
            'ERROR: No active render devices! Please verify the "render_devices" value in config.json'
        )

    log.debug(f"active devices: {get_devices()['active']}")


def shutdown_event():  # Signal render thread to close on shutdown
    global current_state_error
    current_state_error = SystemExit("Application shutting down.")


def render(render_req: GenerateImageRequest, task_data: TaskData):
    current_thread_count = is_alive()
    if current_thread_count <= 0:  # Render thread is dead
        raise ChildProcessError("Rendering thread has died.")

    # Alive, check if task in cache
    session = get_cached_session(task_data.session_id, update_ttl=True)
    pending_tasks = list(filter(lambda t: t.is_pending, session.tasks))
    if current_thread_count < len(pending_tasks):
        raise ConnectionRefusedError(
            f"Session {task_data.session_id} already has {len(pending_tasks)} pending tasks out of {current_thread_count}."
        )

    new_task = RenderTask(render_req, task_data)
    if session.put(new_task, TASK_TTL):
        # Use twice the normal timeout for adding user requests.
        # Tries to force session.put to fail before tasks_queue.put would.
        if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
            try:
                tasks_queue.append(new_task)
                idle_event.set()
                return new_task
            finally:
                manager_lock.release()
    raise RuntimeError("Failed to add task to cache.")
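The manager above keeps tasks and sessions in TTL-based DataCache instances: put() stores a value with a deadline, keep() refreshes it, tryGet() evicts on read, and the render loop calls clean() on each iteration. A toy stand-in showing that contract, without the locking of the real class:

import time


# Simplified illustration of the put/keep/tryGet contract used by task_manager;
# not the real DataCache (no locks, no clean()).
class TinyCache:
    def __init__(self):
        self._base = {}

    def put(self, key, value, ttl):
        self._base[key] = (time.time() + ttl, value)

    def keep(self, key, ttl):
        if key in self._base:
            _, value = self._base[key]
            self._base[key] = (time.time() + ttl, value)
            return True
        return False

    def tryGet(self, key):
        expiry, value = self._base.get(key, (None, None))
        if expiry is not None and time.time() >= expiry:
            del self._base[key]  # evict expired entry on read
            return None
        return value


cache = TinyCache()
cache.put("task-1", {"status": "pending"}, ttl=2)
cache.keep("task-1", ttl=2)      # refresh before reading, like get_cached_task()
print(cache.tryGet("task-1"))    # {'status': 'pending'}
time.sleep(2.1)
print(cache.tryGet("task-1"))    # None, the entry expired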
ui/easydiffusion/types.py (Normal file, +103 lines)
@@ -0,0 +1,103 @@
from pydantic import BaseModel
from typing import Any


class GenerateImageRequest(BaseModel):
    prompt: str = ""
    negative_prompt: str = ""

    seed: int = 42
    width: int = 512
    height: int = 512

    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5

    init_image: Any = None
    init_image_mask: Any = None
    prompt_strength: float = 0.8
    preserve_init_image_color_profile = False

    sampler_name: str = None  # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    hypernetwork_strength: float = 0


class TaskData(BaseModel):
    request_id: str = None
    session_id: str = "session"
    save_to_disk_path: str = None
    vram_usage_level: str = "balanced"  # or "low" or "medium"

    use_face_correction: str = None  # or "GFPGANv1.3"
    use_upscale: str = None  # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    upscale_amount: int = 4  # or 2
    use_stable_diffusion_model: str = "sd-v1-4"
    # use_stable_diffusion_config: str = "v1-inference"
    use_vae_model: str = None
    use_hypernetwork_model: str = None

    show_only_filtered_image: bool = False
    block_nsfw: bool = False
    output_format: str = "jpeg"  # or "png" or "webp"
    output_quality: int = 75
    metadata_output_format: str = "txt"  # or "json"
    stream_image_progress: bool = False
    stream_image_progress_interval: int = 5


class MergeRequest(BaseModel):
    model0: str = None
    model1: str = None
    ratio: float = None
    out_path: str = "mix"
    use_fp16 = True


class Image:
    data: str  # base64
    seed: int
    is_nsfw: bool
    path_abs: str = None

    def __init__(self, data, seed):
        self.data = data
        self.seed = seed

    def json(self):
        return {
            "data": self.data,
            "seed": self.seed,
            "path_abs": self.path_abs,
        }


class Response:
    render_request: GenerateImageRequest
    task_data: TaskData
    images: list

    def __init__(self, render_request: GenerateImageRequest, task_data: TaskData, images: list):
        self.render_request = render_request
        self.task_data = task_data
        self.images = images

    def json(self):
        del self.render_request.init_image
        del self.render_request.init_image_mask

        res = {
            "status": "succeeded",
            "render_request": self.render_request.dict(),
            "task_data": self.task_data.dict(),
            "output": [],
        }

        for image in self.images:
            res["output"].append(image.json())

        return res


class UserInitiatedStop(Exception):
    pass
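Since GenerateImageRequest and TaskData are both pydantic models, server.py parses the same request dict into each one and lets pydantic pick out the fields it knows about (extra keys are ignored by default in pydantic v1). A minimal sketch of that split, with example values only:

# Mirrors render_internal() in server.py; the payload values are illustrative.
from easydiffusion.types import GenerateImageRequest, TaskData

payload = {
    "prompt": "an astronaut riding a horse",
    "seed": 7,
    "width": 512,
    "height": 512,
    "session_id": "demo",
    "use_stable_diffusion_model": "sd-v1-4",
    "output_format": "png",
}

render_req = GenerateImageRequest.parse_obj(payload)  # picks up prompt/seed/width/height
task_data = TaskData.parse_obj(payload)               # picks up session_id/model/output settings

print(render_req.prompt, render_req.seed)
print(task_data.use_stable_diffusion_model, task_data.output_format)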
ui/easydiffusion/utils/__init__.py (Normal file, +8 lines)
@@ -0,0 +1,8 @@
import logging

log = logging.getLogger("easydiffusion")

from .save_utils import (
    save_images_to_disk,
    get_printable_request,
)
ui/easydiffusion/utils/save_utils.py (Normal file, +132 lines)
@@ -0,0 +1,132 @@
import os
import time
import base64
import re

from easydiffusion.types import TaskData, GenerateImageRequest

from sdkit.utils import save_images, save_dicts

filename_regex = re.compile("[^a-zA-Z0-9._-]")

# keep in sync with `ui/media/js/dnd.js`
TASK_TEXT_MAPPING = {
    "prompt": "Prompt",
    "width": "Width",
    "height": "Height",
    "seed": "Seed",
    "num_inference_steps": "Steps",
    "guidance_scale": "Guidance Scale",
    "prompt_strength": "Prompt Strength",
    "use_face_correction": "Use Face Correction",
    "use_upscale": "Use Upscaling",
    "upscale_amount": "Upscale By",
    "sampler_name": "Sampler",
    "negative_prompt": "Negative Prompt",
    "use_stable_diffusion_model": "Stable Diffusion model",
    "use_vae_model": "VAE model",
    "use_hypernetwork_model": "Hypernetwork model",
    "hypernetwork_strength": "Hypernetwork Strength",
}


def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
    now = time.time()
    save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub("_", task_data.session_id))
    metadata_entries = get_metadata_entries_for_request(req, task_data)
    make_filename = make_filename_callback(req, now=now)

    if task_data.show_only_filtered_image or filtered_images is images:
        save_images(
            filtered_images,
            save_dir_path,
            file_name=make_filename,
            output_format=task_data.output_format,
            output_quality=task_data.output_quality,
        )
        if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
            save_dicts(
                metadata_entries,
                save_dir_path,
                file_name=make_filename,
                output_format=task_data.metadata_output_format,
                file_format=task_data.output_format,
            )
    else:
        make_filter_filename = make_filename_callback(req, now=now, suffix="filtered")

        save_images(
            images,
            save_dir_path,
            file_name=make_filename,
            output_format=task_data.output_format,
            output_quality=task_data.output_quality,
        )
        save_images(
            filtered_images,
            save_dir_path,
            file_name=make_filter_filename,
            output_format=task_data.output_format,
            output_quality=task_data.output_quality,
        )
        if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
            save_dicts(
                metadata_entries,
                save_dir_path,
                file_name=make_filter_filename,
                output_format=task_data.metadata_output_format,
                file_format=task_data.output_format,
            )


def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
    metadata = get_printable_request(req)
    metadata.update(
        {
            "use_stable_diffusion_model": task_data.use_stable_diffusion_model,
            "use_vae_model": task_data.use_vae_model,
            "use_hypernetwork_model": task_data.use_hypernetwork_model,
            "use_face_correction": task_data.use_face_correction,
            "use_upscale": task_data.use_upscale,
        }
    )
    if metadata["use_upscale"] is not None:
        metadata["upscale_amount"] = task_data.upscale_amount
    if task_data.use_hypernetwork_model is None:
        del metadata["hypernetwork_strength"]

    # if text, format it in the text format expected by the UI
    is_txt_format = task_data.metadata_output_format.lower() == "txt"
    if is_txt_format:
        metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}

    entries = [metadata.copy() for _ in range(req.num_outputs)]
    for i, entry in enumerate(entries):
        entry["Seed" if is_txt_format else "seed"] = req.seed + i

    return entries


def get_printable_request(req: GenerateImageRequest):
    metadata = req.dict()
    del metadata["init_image"]
    del metadata["init_image_mask"]
    if req.init_image is None:
        del metadata["prompt_strength"]
    return metadata


def make_filename_callback(req: GenerateImageRequest, suffix=None, now=None):
    if now is None:
        now = time.time()

    def make_filename(i):
        img_id = base64.b64encode(int(now + i).to_bytes(8, "big")).decode()  # Generate unique ID based on time.
        img_id = img_id.translate({43: None, 47: None, 61: None})[-8:]  # Remove + / = and keep last 8 chars.
|
||||||
|
|
||||||
|
prompt_flattened = filename_regex.sub("_", req.prompt)[:50]
|
||||||
|
name = f"{prompt_flattened}_{img_id}"
|
||||||
|
name = name if suffix is None else f"{name}_{suffix}"
|
||||||
|
return name
|
||||||
|
|
||||||
|
return make_filename
|
||||||
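A quick illustration of the helpers above (a sketch, not part of the diff; the request values are assumptions based on the UI defaults):

# Illustrative sketch: filenames produced by make_filename_callback for a 2-image request.
req = GenerateImageRequest(prompt="a photograph of an astronaut riding a horse", seed=30000, num_outputs=2)  # assumed fields
make_filename = make_filename_callback(req)
make_filename(0)   # -> "a_photograph_of_an_astronaut_riding_a_horse_<8-char time-derived id>"
make_filename(1)   # same prompt slug, a different id for the second image
get_printable_request(req)  # request dict without init_image / init_image_mask (and prompt_strength when there is no init image)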
271  ui/index.html
@@ -1,31 +1,39 @@
<!DOCTYPE html>
<html>
<head>
-<title>Stable Diffusion UI</title>
+<title>Easy Diffusion</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
+<meta name="theme-color" content="#673AB6">
<link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
<link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
-<link rel="stylesheet" href="/media/css/fonts.css?v=1">
-<link rel="stylesheet" href="/media/css/themes.css?v=3">
-<link rel="stylesheet" href="/media/css/main.css?v=17">
-<link rel="stylesheet" href="/media/css/auto-save.css?v=5">
-<link rel="stylesheet" href="/media/css/modifier-thumbnails.css?v=4">
-<link rel="stylesheet" href="/media/css/fontawesome-all.min.css?v=1">
-<link rel="stylesheet" href="/media/css/drawingboard.min.css">
+<link rel="stylesheet" href="/media/css/jquery-confirm.min.css">
+<link rel="stylesheet" href="/media/css/fonts.css">
+<link rel="stylesheet" href="/media/css/themes.css">
+<link rel="stylesheet" href="/media/css/main.css">
+<link rel="stylesheet" href="/media/css/auto-save.css">
+<link rel="stylesheet" href="/media/css/modifier-thumbnails.css">
+<link rel="stylesheet" href="/media/css/fontawesome-all.min.css">
+<link rel="stylesheet" href="/media/css/image-editor.css">
+<link rel="stylesheet" href="/media/css/searchable-models.css">
+<link rel="manifest" href="/media/manifest.webmanifest">
<script src="/media/js/jquery-3.6.1.min.js"></script>
-<script src="/media/js/drawingboard.min.js"></script>
+<script src="/media/js/jquery-confirm.min.js"></script>
+<script src="/media/js/marked.min.js"></script>
</head>
<body>
<div id="container">
<div id="top-nav">
<div id="logo">
-<h1>Stable Diffusion UI <small>v2.4.6 <span id="updateBranchLabel"></span></small></h1>
+<h1>
+Easy Diffusion
+<small>v2.5.22 <span id="updateBranchLabel"></span></small>
+</h1>
</div>
<div id="server-status">
<div id="server-status-color">●</div>
<span id="server-status-msg">Stable Diffusion is starting..</span>
</div>
-<div id="tab-container">
+<div id="tab-container" class="tab-container">
<span id="tab-main" class="tab active">
<span><i class="fa fa-image icon"></i> Generate</span>
</span>
@@ -43,48 +51,63 @@
<div id="editor">
<div id="editor-inputs">
<div id="editor-inputs-prompt" class="row">
-<label for="prompt"><b>Enter Prompt</b></label> <small>or</small> <button id="promptsFromFileBtn">Load from a file</button>
+<label for="prompt"><b>Enter Prompt</b></label> <small>or</small> <button id="promptsFromFileBtn" class="tertiaryButton">Load from a file</button>
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->

<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
Negative Prompt
-<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about Negative Prompts</span></i></a>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">Click to learn more about Negative Prompts</span></i></a>
<small>(optional)</small>
</label>
<div class="collapsible-content">
-<input id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)">
+<textarea id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)"></textarea>
</div>
</div>

<div id="editor-inputs-init-image" class="row">
-<label for="init_image">Initial Image (img2img) <small>(optional)</small> </label> <input id="init_image" name="init_image" type="file" /><br/>
+<label for="init_image">Initial Image (img2img) <small>(optional)</small> </label>

<div id="init_image_preview_container" class="image_preview_container">
<div id="init_image_wrapper">
<img id="init_image_preview" src="" />
-<span id="init_image_size_box"></span>
+<span id="init_image_size_box" class="img_bottom_label"></span>
-<button class="init_image_clear image_clear_btn">X</button>
+<button class="init_image_clear image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
+</div>
+<div id="init_image_buttons">
+<div class="button">
+<i class="fa-regular fa-folder-open"></i>
+Browse
+<input id="init_image" name="init_image" type="file" />
+</div>
+<div id="init_image_button_draw" class="button">
+<i class="fa-solid fa-pencil"></i>
+Draw
+</div>
+<div id="inpaint_button_container">
+<div id="init_image_button_inpaint" class="button">
+<i class="fa-solid fa-paintbrush"></i>
+Inpaint
+</div>
+<input id="enable_mask" name="enable_mask" type="checkbox">
+</div>
+</div>
</div>

-<br/>
+<div id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></div>
-<input id="enable_mask" name="enable_mask" type="checkbox">
-<label for="enable_mask">
-In-Painting (beta)
-<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Inpainting" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about InPainting</span></i></a>
-<small>(select the area which the AI will paint into)</small>
-</label>
-<div id="inpaintingEditor"></div>
-</div>
</div>

<div id="editor-inputs-tags-container" class="row">
-<label>Image Modifiers: <small>(click an Image Modifier to remove it)</small></label>
+<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, right-click to temporarily disable it, use Ctrl+Mouse Wheel to adjust its weight</span></i></label>
<div id="editor-inputs-tags-list"></div>
</div>

-<button id="makeImage">Make Image</button>
+<button id="makeImage" class="primaryButton">Make Image</button>
+<div id="render-buttons">
<button id="stopImage" class="secondaryButton">Stop All</button>
+<button id="pause"><i class="fa-solid fa-pause"></i> Pause All</button>
+<button id="resume"><i class="fa-solid fa-play"></i> Resume</button>
+</div>
</div>

<span class="line-separator"></span>
@@ -93,7 +116,7 @@
<h4 class="collapsible">
Image Settings
<i id="reset-image-settings" class="fa-solid fa-arrow-rotate-left section-button">
-<span class="simple-tooltip left">
+<span class="simple-tooltip top-left">
Reset Image Settings
</span>
</i>
@@ -101,32 +124,44 @@
<div id="editor-settings-entries" class="collapsible-content">
<div><table>
<tr><b class="settings-subheader">Image Settings</b></tr>
-<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="30000" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
+<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
-<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td>
+<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td class="model-input">
-<select id="stable_diffusion_model" name="stable_diffusion_model">
-<!-- <option value="sd-v1-4" selected>sd-v1-4</option> -->
-</select>
-<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about custom models</span></i></a>
+<input id="stable_diffusion_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
+<button id="reload-models" class="secondaryButton reloadModels"><i class='fa-solid fa-rotate'></i></button>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about custom models</span></i></a>
</td></tr>
+<!-- <tr id="modelConfigSelection" class="pl-5"><td><label for="model_config">Model Config:</i></label></td><td>
+<select id="model_config" name="model_config">
+</select>
+</td></tr> -->
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
-<select id="vae_model" name="vae_model">
-<!-- <option value="" selected>None</option> -->
-</select>
-<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about VAEs</span></i></a>
+<input id="vae_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
</td></tr>
-<tr id="samplerSelection" class="pl-5"><td><label for="sampler">Sampler:</label></td><td>
+<tr id="samplerSelection" class="pl-5"><td><label for="sampler_name">Sampler:</label></td><td>
-<select id="sampler" name="sampler">
+<select id="sampler_name" name="sampler_name">
-<option value="plms">plms</option>
-<option value="ddim">ddim</option>
-<option value="heun">heun</option>
-<option value="euler">euler</option>
-<option value="euler_a" selected>euler_a</option>
-<option value="dpm2">dpm2</option>
-<option value="dpm2_a">dpm2_a</option>
-<option value="lms">lms</option>
+<option value="plms">PLMS</option>
+<option value="ddim">DDIM</option>
+<option value="heun">Heun</option>
+<option value="euler">Euler</option>
+<option value="euler_a" selected>Euler Ancestral</option>
+<option value="dpm2">DPM2</option>
+<option value="dpm2_a">DPM2 Ancestral</option>
+<option value="lms">LMS</option>
+<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
+<option value="dpmpp_2s_a">DPM++ 2s Ancestral</option>
+<option value="dpmpp_2m">DPM++ 2m</option>
+<option value="dpmpp_sde">DPM++ SDE</option>
+<option value="dpm_fast">DPM Fast</option>
+<option value="dpm_adaptive">DPM Adaptive</option>
+<option value="unipc_snr">UniPC SNR</option>
+<option value="unipc_tu">UniPC TU</option>
+<option value="unipc_snr_2">UniPC SNR 2</option>
+<option value="unipc_tu_2">UniPC TC 2</option>
+<option value="unipc_tq">UniPC TQ</option>
</select>
-<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">Click to learn more about samplers</span></i></a>
+<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
</td></tr>
<tr class="pl-5"><td><label>Image Size: </label></td><td>
<select id="width" name="width" value="512">
@@ -175,22 +210,38 @@
<label for="height"><small>(height)</small></label>
</td></tr>
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
-<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="10" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
+<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
-<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr></span>
+<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
+<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
+<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
+</td></tr>
+<tr id="hypernetwork_strength_container" class="pl-5">
+<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
+<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
+</tr>
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
<select id="output_format" name="output_format">
<option value="jpeg" selected>jpeg</option>
<option value="png">png</option>
+<option value="webp">webp</option>
</select>
</td></tr>
+<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
+<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
+</td></tr>
</table></div>

<div><ul>
<li><b class="settings-subheader">Render Settings</b></li>
-<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, and slower image creation)</small></label></li>
+<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
-<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes <small>(uses GFPGAN)</small></label></li>
+<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div></li>
<li class="pl-5">
-<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Upscale image by 4x with </label>
+<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
+<select id="upscale_amount" name="upscale_amount">
+<option value="2">2x</option>
+<option value="4" selected>4x</option>
+</select>
+with
<select id="upscale_model" name="upscale_model">
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
@@ -232,8 +283,34 @@
and selecting the desired modifiers.<br/><br/>
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
</div>

+<div id="preview-content">
<div id="preview-tools">
-<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can"></i> Clear All</button>
+<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can icon"></i> Clear All</button>
+<button id="save-all-images" class="tertiaryButton"><i class="fa-solid fa-download icon"></i> Download All Images</button>
+<div class="display-settings">
+<button id="auto_scroll_btn" class="tertiaryButton">
+<i class="fa-solid fa-arrows-up-to-line icon"></i>
+<input id="auto_scroll" name="auto_scroll" type="checkbox" style="display: none">
+<span class="simple-tooltip left">
+Scroll to generated image (<span class="state">OFF</span>)
+</span>
+</button>
+<button class="dropdown tertiaryButton">
+<i class="fa-solid fa-magnifying-glass-plus icon dropbtn"></i>
+<span class="simple-tooltip left">
+Image Size
+</span>
+</button>
+<div class="dropdown-content">
+<div class="dropdown-item">
+<input id="thumbnail_size" name="thumbnail_size" class="editor-slider" type="range" value="70" min="5" max="200" oninput="sliderUpdate(event)">
+<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)"> %
+</div>
+</div>
+</div>
+<div class="clearfix" style="clear: both;"></div>
+</div>
</div>
</div>
</div>
@@ -241,15 +318,24 @@
<div id="tab-content-settings" class="tab-content">
<div id="system-settings" class="tab-content-inner">
<h1>System Settings</h1>
-<table class="form-table"></table>
+<div class="parameters-table"></div>
<br/>
<button id="save-system-settings-btn" class="primaryButton">Save</button>
<br/><br/>
<div>
<h3><i class="fa fa-microchip icon"></i> System Info</h3>
-<div id="system-info"></div>
+<div id="system-info">
+<table>
+<tr><td><label>Processor:</label></td><td id="system-info-cpu" class="value"></td></tr>
+<tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
+<tr><td></td><td> </td></tr>
+<tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
+<tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
+</table>
</div>
</div>

+</div>
</div>
<div id="tab-content-about" class="tab-content">
<div class="tab-content-inner">
@@ -263,6 +349,7 @@
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Overview" target="_blank"><i class="fa-solid fa-list fa-fw"></i> UI Overview</a>
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-Prompts" target="_blank"><i class="fa-solid fa-pen-to-square fa-fw"></i> Writing prompts</a>
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Inpainting" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Inpainting</a>
+<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Run on Multiple GPUs</a>
</ul>

<li><span class="help-section">Installation</span>
@@ -313,6 +400,38 @@
</div>
</div>

+<div id="image-editor" class="popup image-editor-popup">
+<div>
+<i class="close-button fa-solid fa-xmark"></i>
+<h1>Image Editor</h1>
+<div class="flex-container">
+<div class="editor-controls-left"></div>
+<div class="editor-controls-center">
+<div></div>
+</div>
+<div class="editor-controls-right">
+<div></div>
+</div>
+</div>
+</div>
+</div>

+<div id="image-inpainter" class="popup image-editor-popup">
+<div>
+<i class="close-button fa-solid fa-xmark"></i>
+<h1>Inpainter</h1>
+<div class="flex-container">
+<div class="editor-controls-left"></div>
+<div class="editor-controls-center">
+<div></div>
+</div>
+<div class="editor-controls-right">
+<div></div>
+</div>
+</div>
+</div>
+</div>

<div id="footer-spacer"></div>
<div id="footer">
<div class="line-separator"> </div>
@@ -326,28 +445,34 @@
</div>
</div>
</body>
+<script src="media/js/utils.js"></script>
+<script src="media/js/engine.js"></script>
+<script src="media/js/parameters.js"></script>
+<script src="media/js/plugins.js"></script>

-<script src="media/js/parameters.js?v=9"></script>
-<script src="media/js/plugins.js?v=1"></script>
-<script src="media/js/utils.js?v=6"></script>
-<script src="media/js/inpainting-editor.js?v=1"></script>
-<script src="media/js/image-modifiers.js?v=6"></script>
-<script src="media/js/auto-save.js?v=8"></script>
-<script src="media/js/main.js?v=22.1"></script>
-<script src="media/js/themes.js?v=4"></script>
-<script src="media/js/dnd.js?v=9"></script>
+<script src="media/js/image-modifiers.js"></script>
+<script src="media/js/auto-save.js"></script>
+<script src="media/js/searchable-models.js"></script>
+<script src="media/js/main.js"></script>
+<script src="media/js/themes.js"></script>
+<script src="media/js/dnd.js"></script>
+<script src="media/js/image-editor.js"></script>
<script>
async function init() {
await initSettings()
await getModels()
-await getDiskPath()
await getAppConfig()
-await loadModifiers()
await loadUIPlugins()
-await getDevices()
+await loadModifiers()
+await getSystemInfo()

-setInterval(healthCheck, HEALTH_PING_INTERVAL * 1000)
-healthCheck()
+SD.init({
+events: {
+statusChange: setServerStatus
+, idle: onIdle
+}
+})

playSound()
}
10  ui/main.py  Normal file
@@ -0,0 +1,10 @@
from easydiffusion import model_manager, app, server
from easydiffusion.server import server_api # required for uvicorn

# Init the app
model_manager.init()
app.init()
server.init()

# start the browser ui
app.open_browser()
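The module above only wires things up; per the "required for uvicorn" comment, a server such as uvicorn is expected to import server_api from it. A hedged sketch of how that launch might look (module path, host and port are assumptions, not taken from this diff):

# Hypothetical invocation, mirroring the comment in main.py:
#   python -m uvicorn main:server_api --host 0.0.0.0 --port 9000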
@@ -26,23 +26,56 @@
float: left;
}

-.form-table small {
+.parameters-table {
+display: flex;
+flex-direction: column;
+gap: 1px;
+}

+.parameters-table > div {
+background: var(--background-color2);
+display: flex;
+padding: 0px 4px;
+}

+.parameters-table > div > div {
+padding: 10px;
+display: flex;
+align-items: center;
+justify-content: center;
+}

+.parameters-table small {
color: rgb(153, 153, 153);
}

-#system-settings .form-table td {
+.parameters-table > div > div:nth-child(1) {
-height: 24px;
+font-size: 20px;
+width: 45px;
}

-#system-settings .form-table td:last-child div {
+.parameters-table > div > div:nth-child(2) {
-display: flex;
+flex: 1;
-align-items: center;
+flex-direction: column;
-}
-#system-settings .form-table td:last-child div > :not([type="checkbox"]):first-child {
-margin-left: 3px;
-}

-#system-settings .form-table td:last-child div small {
-padding-left: 5px;
text-align: left;
+justify-content: center;
+align-items: start;
+gap: 4px;
+}

+.parameters-table > div > div:nth-child(3) {
+text-align: right;
+}

+.parameters-table > div:first-child {
+border-radius: 12px 12px 0px 0px;
+}

+.parameters-table > div:last-child {
+border-radius: 0px 0px 12px 12px;
+}

+.parameters-table .fa-fire {
+color: #F7630C;
}
5  ui/media/css/drawingboard.min.css  vendored
216  ui/media/css/image-editor.css  Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
.editor-controls-left {
|
||||||
|
padding-left: 32px;
|
||||||
|
text-align: left;
|
||||||
|
padding-bottom: 20px;
|
||||||
|
max-width: min-content;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-options-container {
|
||||||
|
display: flex;
|
||||||
|
row-gap: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-options-container > * {
|
||||||
|
flex: 1;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-options-container > * > * {
|
||||||
|
position: inherit;
|
||||||
|
width: 32px;
|
||||||
|
height: 32px;
|
||||||
|
border-radius: 16px;
|
||||||
|
background: var(--background-color3);
|
||||||
|
cursor: pointer;
|
||||||
|
transition: opacity 0.25s;
|
||||||
|
}
|
||||||
|
.editor-options-container > * > *:hover {
|
||||||
|
opacity: 0.75;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-options-container > * > *.active {
|
||||||
|
border: 1px solid #3584e4;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image_editor_opacity .editor-options-container > * > *:not(.active) {
|
||||||
|
border: 1px solid var(--background-color3);
|
||||||
|
}
|
||||||
|
|
||||||
|
.image_editor_color .editor-options-container {
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > * {
|
||||||
|
flex: 20%;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > * > * {
|
||||||
|
position: relative;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > * > *.active::before {
|
||||||
|
content: "\f00c";
|
||||||
|
display: var(--fa-display,inline-block);
|
||||||
|
font-style: normal;
|
||||||
|
font-variant: normal;
|
||||||
|
line-height: 1;
|
||||||
|
text-rendering: auto;
|
||||||
|
font-family: var(--fa-style-family, "Font Awesome 6 Free");
|
||||||
|
font-weight: var(--fa-style, 900);
|
||||||
|
position: absolute;
|
||||||
|
left: 50%;
|
||||||
|
top: 50%;
|
||||||
|
transform: translate(-50%, -50%) scale(125%);
|
||||||
|
color: black;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > *:first-child {
|
||||||
|
flex: 100%;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > *:first-child > * {
|
||||||
|
width: 100%;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > *:first-child > * > input {
|
||||||
|
width: 100%;
|
||||||
|
height: 100%;
|
||||||
|
opacity: 0;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > *:first-child > * > span {
|
||||||
|
position: absolute;
|
||||||
|
left: 50%;
|
||||||
|
top: 50%;
|
||||||
|
transform: translate(-50%, -50%);
|
||||||
|
opacity: 0.5;
|
||||||
|
}
|
||||||
|
.image_editor_color .editor-options-container > *:first-child > *.active > span {
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image_editor_tool .editor-options-container {
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image_editor_tool .editor-options-container > * {
|
||||||
|
padding: 2px;
|
||||||
|
flex: 50%;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-controls-center {
|
||||||
|
/* background: var(--background-color2); */
|
||||||
|
flex: 1;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-controls-center > div {
|
||||||
|
position: relative;
|
||||||
|
background: black;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-controls-center canvas {
|
||||||
|
position: absolute;
|
||||||
|
left: 0;
|
||||||
|
top: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-controls-right {
|
||||||
|
padding: 32px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
.editor-controls-right > div:last-child {
|
||||||
|
flex: 1;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
min-width: 200px;
|
||||||
|
gap: 5px;
|
||||||
|
justify-content: end;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image-editor-button {
|
||||||
|
width: 100%;
|
||||||
|
height: 32px;
|
||||||
|
border-radius: 16px;
|
||||||
|
background: var(--background-color3);
|
||||||
|
}
|
||||||
|
|
||||||
|
.editor-controls-right .image-editor-button {
|
||||||
|
margin-bottom: 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_button_inpaint .input-toggle {
|
||||||
|
position: absolute;
|
||||||
|
left: 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_button_inpaint .input-toggle input:not(:checked) ~ label {
|
||||||
|
pointer-events: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
.image-editor-popup {
|
||||||
|
--popup-margin: 16px;
|
||||||
|
--popup-padding: 24px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image-editor-popup > div {
|
||||||
|
margin: var(--popup-margin);
|
||||||
|
padding: var(--popup-padding);
|
||||||
|
min-height: calc(100vh - (2 * var(--popup-margin)));
|
||||||
|
max-width: none;
|
||||||
|
min-width: fit-content;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image-editor-popup h1 {
|
||||||
|
position: absolute;
|
||||||
|
top: 32px;
|
||||||
|
left: 50%;
|
||||||
|
transform: translateX(-50%);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@media screen and (max-width: 700px) {
|
||||||
|
.image-editor-popup > div {
|
||||||
|
margin: 0px;
|
||||||
|
padding: 0px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.image-editor-popup h1 {
|
||||||
|
position: relative;
|
||||||
|
transform: none;
|
||||||
|
left: auto;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
.image-editor-popup > div > div {
|
||||||
|
min-height: calc(100vh - (2 * var(--popup-margin)) - (2 * var(--popup-padding)));
|
||||||
|
}
|
||||||
|
|
||||||
|
.inpainter .image_editor_color {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.inpainter .editor-canvas-background {
|
||||||
|
opacity: 0.75;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_preview_container .button {
|
||||||
|
display: flex;
|
||||||
|
padding: 6px;
|
||||||
|
height: 24px;
|
||||||
|
box-shadow: 2px 2px 1px 1px #00000088;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_preview_container .button:hover {
|
||||||
|
background: var(--background-color4)
|
||||||
|
}
|
||||||
|
|
||||||
|
.image-editor-popup .button {
|
||||||
|
display: flex;
|
||||||
|
}
|
||||||
|
.image-editor-popup h4 {
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
9  ui/media/css/jquery-confirm.min.css  vendored  Normal file
@@ -22,16 +22,27 @@ a:visited {
|
|||||||
label {
|
label {
|
||||||
font-size: 10pt;
|
font-size: 10pt;
|
||||||
}
|
}
|
||||||
|
code {
|
||||||
|
background: var(--background-color4);
|
||||||
|
padding: 2px 4px;
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
#prompt {
|
#prompt {
|
||||||
width: 100%;
|
width: 100%;
|
||||||
height: 65pt;
|
height: 65pt;
|
||||||
font-size: 13px;
|
font-size: 14px;
|
||||||
margin-bottom: 6px;
|
margin-bottom: 6px;
|
||||||
margin-top: 5px;
|
margin-top: 5px;
|
||||||
display: block;
|
display: block;
|
||||||
|
border: 2px solid var(--background-color2);
|
||||||
}
|
}
|
||||||
.image_preview_container {
|
#negative_prompt {
|
||||||
margin-top: 10pt;
|
width: 100%;
|
||||||
|
height: 50pt;
|
||||||
|
font-size: 13px;
|
||||||
|
margin-bottom: 5px;
|
||||||
|
margin-top: 5px;
|
||||||
|
display: block;
|
||||||
}
|
}
|
||||||
.image_clear_btn {
|
.image_clear_btn {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
@@ -50,6 +61,11 @@ label {
|
|||||||
top: 0px;
|
top: 0px;
|
||||||
right: 0px;
|
right: 0px;
|
||||||
}
|
}
|
||||||
|
.image_clear_btn:active {
|
||||||
|
position: absolute;
|
||||||
|
top: 0px;
|
||||||
|
left: auto;
|
||||||
|
}
|
||||||
.settings-box ul {
|
.settings-box ul {
|
||||||
font-size: 9pt;
|
font-size: 9pt;
|
||||||
margin-bottom: 5px;
|
margin-bottom: 5px;
|
||||||
@@ -91,6 +107,7 @@ label {
|
|||||||
.imgContainer {
|
.imgContainer {
|
||||||
display: flex;
|
display: flex;
|
||||||
justify-content: flex-end;
|
justify-content: flex-end;
|
||||||
|
position: relative;
|
||||||
}
|
}
|
||||||
.imgItemInfo {
|
.imgItemInfo {
|
||||||
padding-bottom: 0.5em;
|
padding-bottom: 0.5em;
|
||||||
@@ -98,16 +115,35 @@ label {
|
|||||||
align-items: flex-end;
|
align-items: flex-end;
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
position: absolute;
|
position: absolute;
|
||||||
padding: 5px;
|
padding-right: 5pt;
|
||||||
|
padding-top: 6pt;
|
||||||
opacity: 0;
|
opacity: 0;
|
||||||
transition: 0.1s all;
|
transition: 0.1s all;
|
||||||
}
|
}
|
||||||
|
.imgPreviewItemClearBtn {
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
.imgContainer .img_bottom_label {
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
.imgPreviewItemClearBtn:hover {
|
||||||
|
background: rgb(177, 27, 0);
|
||||||
|
}
|
||||||
.imgContainer:hover > .imgItemInfo {
|
.imgContainer:hover > .imgItemInfo {
|
||||||
opacity: 1;
|
opacity: 1;
|
||||||
}
|
}
|
||||||
|
.imgContainer:hover > .imgPreviewItemClearBtn {
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
.imgContainer:hover > .img_bottom_label {
|
||||||
|
opacity: 60%;
|
||||||
|
}
|
||||||
.imgItemInfo * {
|
.imgItemInfo * {
|
||||||
margin-bottom: 7px;
|
margin-bottom: 7px;
|
||||||
}
|
}
|
||||||
|
.imgItem .image_clear_btn {
|
||||||
|
transform: translate(40%, -50%);
|
||||||
|
}
|
||||||
#container {
|
#container {
|
||||||
min-height: 100vh;
|
min-height: 100vh;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
@@ -123,7 +159,7 @@ label {
|
|||||||
padding: 16px;
|
padding: 16px;
|
||||||
display: flex;
|
display: flex;
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
flex: 0 0 370pt;
|
flex: 0 0 380pt;
|
||||||
}
|
}
|
||||||
#editor label {
|
#editor label {
|
||||||
font-weight: normal;
|
font-weight: normal;
|
||||||
@@ -163,7 +199,7 @@ label {
|
|||||||
flex: 0 0 70px;
|
flex: 0 0 70px;
|
||||||
background: var(--accent-color);
|
background: var(--accent-color);
|
||||||
border: var(--primary-button-border);
|
border: var(--primary-button-border);
|
||||||
color: rgb(255, 221, 255);
|
color: var(--accent-text-color);
|
||||||
width: 100%;
|
width: 100%;
|
||||||
height: 30pt;
|
height: 30pt;
|
||||||
}
|
}
|
||||||
@@ -175,15 +211,29 @@ label {
|
|||||||
background: rgb(132, 8, 0);
|
background: rgb(132, 8, 0);
|
||||||
border: 2px solid rgb(122, 29, 0);
|
border: 2px solid rgb(122, 29, 0);
|
||||||
color: rgb(255, 221, 255);
|
color: rgb(255, 221, 255);
|
||||||
width: 100%;
|
|
||||||
height: 30pt;
|
height: 30pt;
|
||||||
border-radius: 6px;
|
border-radius: 6px;
|
||||||
display: none;
|
flex-grow: 2;
|
||||||
margin-top: 2pt;
|
|
||||||
}
|
}
|
||||||
#stopImage:hover {
|
#stopImage:hover {
|
||||||
background: rgb(177, 27, 0);
|
background: rgb(177, 27, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
div#render-buttons {
|
||||||
|
gap: 3px;
|
||||||
|
margin-top: 4px;
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
button#pause {
|
||||||
|
flex-grow: 1;
|
||||||
|
background: var(--accent-color);
|
||||||
|
}
|
||||||
|
button#resume {
|
||||||
|
flex-grow: 1;
|
||||||
|
background: var(--accent-color);
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
.flex-container {
|
.flex-container {
|
||||||
display: flex;
|
display: flex;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
@@ -196,7 +246,7 @@ label {
|
|||||||
}
|
}
|
||||||
.collapsible-content {
|
.collapsible-content {
|
||||||
display: block;
|
display: block;
|
||||||
padding-left: 15px;
|
padding-left: 10px;
|
||||||
}
|
}
|
||||||
.collapsible-content h5 {
|
.collapsible-content h5 {
|
||||||
padding: 5pt 0pt;
|
padding: 5pt 0pt;
|
||||||
@@ -211,7 +261,6 @@ label {
|
|||||||
display: none !important;
|
display: none !important;
|
||||||
}
|
}
|
||||||
#editor-modifiers {
|
#editor-modifiers {
|
||||||
max-width: 600px;
|
|
||||||
overflow-y: auto;
|
overflow-y: auto;
|
||||||
overflow-x: hidden;
|
overflow-x: hidden;
|
||||||
}
|
}
|
||||||
@@ -222,6 +271,11 @@ label {
|
|||||||
img {
|
img {
|
||||||
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
}
|
}
|
||||||
|
div.img-preview img {
|
||||||
|
width:100%;
|
||||||
|
height: 100%;
|
||||||
|
max-height: 70vh;
|
||||||
|
}
|
||||||
.line-separator {
|
.line-separator {
|
||||||
background: var(--background-color3);
|
background: var(--background-color3);
|
||||||
height: 1pt;
|
height: 1pt;
|
||||||
@@ -250,39 +304,13 @@ img {
|
|||||||
}
|
}
|
||||||
.preview-prompt {
|
.preview-prompt {
|
||||||
font-size: 13pt;
|
font-size: 13pt;
|
||||||
margin-bottom: 10pt;
|
display: inline;
|
||||||
}
|
}
|
||||||
#coffeeButton {
|
#coffeeButton {
|
||||||
height: 23px;
|
height: 23px;
|
||||||
transform: translateY(25%);
|
transform: translateY(25%);
|
||||||
}
|
}
|
||||||
|
|
||||||
#inpaintingEditor {
|
|
||||||
width: 300pt;
|
|
||||||
height: 300pt;
|
|
||||||
margin-top: 5pt;
|
|
||||||
}
|
|
||||||
.drawing-board-canvas-wrapper {
|
|
||||||
background-size: 100% 100%;
|
|
||||||
}
|
|
||||||
.drawing-board-controls {
|
|
||||||
min-width: 273px;
|
|
||||||
}
|
|
||||||
.drawing-board-control > button {
|
|
||||||
background-color: #eee;
|
|
||||||
border-radius: 3pt;
|
|
||||||
}
|
|
||||||
.drawing-board-control-inner {
|
|
||||||
background-color: #eee;
|
|
||||||
border-radius: 3pt;
|
|
||||||
}
|
|
||||||
#inpaintingEditor canvas {
|
|
||||||
opacity: 0.6;
|
|
||||||
}
|
|
||||||
#enable_mask {
|
|
||||||
margin-top: 8pt;
|
|
||||||
}
|
|
||||||
|
|
||||||
#top-nav {
|
#top-nav {
|
||||||
position: relative;
|
position: relative;
|
||||||
background: var(--background-color4);
|
background: var(--background-color4);
|
||||||
@@ -380,10 +408,8 @@ img {
|
|||||||
display: none;
|
display: none;
|
||||||
position: absolute;
|
position: absolute;
|
||||||
z-index: 2;
|
z-index: 2;
|
||||||
|
width: max-content;
|
||||||
|
|
||||||
background: var(--background-color4);
|
|
||||||
border: 2px solid var(--background-color2);
|
|
||||||
border-radius: 7px;
|
|
||||||
padding: 5px;
|
padding: 5px;
|
||||||
margin-bottom: 15px;
|
margin-bottom: 15px;
|
||||||
box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
|
||||||
@@ -391,6 +417,36 @@ img {
|
|||||||
.dropdown:hover .dropdown-content {
|
.dropdown:hover .dropdown-content {
|
||||||
display: block;
|
display: block;
|
||||||
}
|
}
|
||||||
|
.dropdown:hover + .dropdown-content {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
.dropdown-content:hover {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
|
||||||
|
.display-settings {
|
||||||
|
float: right;
|
||||||
|
position: relative;
|
||||||
|
}
|
||||||
|
|
||||||
|
.display-settings .dropdown-content {
|
||||||
|
right: 0px;
|
||||||
|
top: 12pt;
|
||||||
|
}
|
||||||
|
|
||||||
|
.dropdown-item {
|
||||||
|
padding: 4px;
|
||||||
|
background: var(--background-color4);
|
||||||
|
border: 2px solid var(--background-color2);
|
||||||
|
}
|
||||||
|
|
||||||
|
.dropdown-item:first-child {
|
||||||
|
border-radius: 7px 7px 0px 0px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.dropdown-item:last-child {
|
||||||
|
border-radius: 0px 0px 7px 7px;
|
||||||
|
}
|
||||||
|
|
||||||
.imageTaskContainer {
|
.imageTaskContainer {
|
||||||
border: 1px solid var(--background-color2);
|
border: 1px solid var(--background-color2);
|
||||||
@@ -402,14 +458,34 @@ img {
|
|||||||
.imageTaskContainer > div > .collapsible-handle {
|
.imageTaskContainer > div > .collapsible-handle {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
.dropTargetBefore::before{
|
||||||
|
content: "";
|
||||||
|
border: 1px solid #fff;
|
||||||
|
margin-bottom: -2px;
|
||||||
|
display: block;
|
||||||
|
box-shadow: 0 0 5px #fff;
|
||||||
|
transform: translate(0px, -14px);
|
||||||
|
}
|
||||||
|
.dropTargetAfter::after{
|
||||||
|
content: "";
|
||||||
|
border: 1px solid #fff;
|
||||||
|
margin-bottom: -2px;
|
||||||
|
display: block;
|
||||||
|
box-shadow: 0 0 5px #fff;
|
||||||
|
transform: translate(0px, 14px);
|
||||||
|
}
|
||||||
|
.drag-handle {
|
||||||
|
margin-right: 6px;
|
||||||
|
cursor: move;
|
||||||
|
}
|
||||||
.taskStatusLabel {
|
.taskStatusLabel {
|
||||||
float: left;
|
|
||||||
font-size: 8pt;
|
font-size: 8pt;
|
||||||
background:var(--background-color2);
|
background:var(--background-color2);
|
||||||
border: 1px solid rgb(61, 62, 66);
|
border: 1px solid rgb(61, 62, 66);
|
||||||
padding: 2pt 4pt;
|
padding: 2pt 4pt;
|
||||||
border-radius: 2pt;
|
border-radius: 2pt;
|
||||||
margin-right: 5pt;
|
margin-right: 5pt;
|
||||||
|
display: inline;
|
||||||
}
|
}
|
||||||
.activeTaskLabel {
|
.activeTaskLabel {
|
||||||
background:rgb(0, 90, 30);
|
background:rgb(0, 90, 30);
|
||||||
@@ -426,6 +502,7 @@ img {
|
|||||||
background: var(--accent-color);
|
background: var(--accent-color);
|
||||||
border: var(--primary-button-border);
|
border: var(--primary-button-border);
|
||||||
color: rgb(255, 221, 255);
|
color: rgb(255, 221, 255);
|
||||||
|
padding: 3pt 6pt;
|
||||||
}
|
}
|
||||||
.secondaryButton {
|
.secondaryButton {
|
||||||
background: rgb(132, 8, 0);
|
background: rgb(132, 8, 0);
|
||||||
@@ -437,6 +514,26 @@ img {
|
|||||||
.secondaryButton:hover {
|
.secondaryButton:hover {
|
||||||
background: rgb(177, 27, 0);
|
background: rgb(177, 27, 0);
|
||||||
}
|
}
|
||||||
|
.tertiaryButton {
|
||||||
|
background: var(--tertiary-background-color);
|
||||||
|
color: var(--tertiary-color);
|
||||||
|
border: 1px solid var(--tertiary-border-color);
|
||||||
|
padding: 3pt 6pt;
|
||||||
|
border-radius: 5px;
|
||||||
|
}
|
||||||
|
.tertiaryButton:hover {
|
||||||
|
background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
|
||||||
|
color: var(--accent-text-color);
|
||||||
|
}
|
||||||
|
.tertiaryButton.pressed {
|
||||||
|
border-style: inset;
|
||||||
|
background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
|
||||||
|
color: var(--accent-text-color);
|
||||||
|
}
|
||||||
|
.useSettings {
|
||||||
|
margin-right: 6pt;
|
||||||
|
float: right;
|
||||||
|
}
|
||||||
.stopTask {
|
.stopTask {
|
||||||
float: right;
|
float: right;
|
||||||
}
|
}
|
||||||
@@ -448,6 +545,7 @@ img {
|
|||||||
font-size: 10pt;
|
font-size: 10pt;
|
||||||
color: #aaa;
|
color: #aaa;
|
||||||
margin-bottom: 5pt;
|
margin-bottom: 5pt;
|
||||||
|
margin-top: 5pt;
|
||||||
}
|
}
|
||||||
.img-batch {
|
.img-batch {
|
||||||
display: inline;
|
display: inline;
|
||||||
@@ -455,8 +553,58 @@ img {
|
|||||||
#prompt_from_file {
|
#prompt_from_file {
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#init_image_preview_container {
|
||||||
|
display: flex;
|
||||||
|
margin-top: 6px;
|
||||||
|
margin-bottom: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_preview_container:not(.has-image) #init_image_wrapper,
|
||||||
|
#init_image_preview_container:not(.has-image) #inpaint_button_container {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#init_image_buttons {
|
||||||
|
display: flex;
|
||||||
|
gap: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_preview_container.has-image #init_image_buttons {
|
||||||
|
flex-direction: column;
|
||||||
|
padding-left: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_buttons .button {
|
||||||
|
position: relative;
|
||||||
|
height: 32px;
|
||||||
|
width: 150px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_buttons .button > input {
|
||||||
|
position: absolute;
|
||||||
|
left: 0;
|
||||||
|
top: 0;
|
||||||
|
right: 0;
|
||||||
|
bottom: 0;
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#inpaint_button_container {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#init_image_wrapper {
|
||||||
|
grid-row: span 3;
|
||||||
|
position: relative;
|
||||||
|
width: fit-content;
|
||||||
|
max-height: 150px;
|
||||||
|
}
|
||||||
|
|
||||||
#init_image_preview {
|
#init_image_preview {
|
||||||
max-width: 150px;
|
|
||||||
max-height: 150px;
|
max-height: 150px;
|
||||||
height: 100%;
|
height: 100%;
|
||||||
width: 100%;
|
width: 100%;
|
||||||
@@ -464,30 +612,31 @@ img {
|
|||||||
border-radius: 6px;
|
border-radius: 6px;
|
||||||
transition: all 1s ease-in-out;
|
transition: all 1s ease-in-out;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
#init_image_preview:hover {
|
#init_image_preview:hover {
|
||||||
max-width: 500px;
|
max-width: 500px;
|
||||||
max-height: 1000px;
|
max-height: 1000px;
|
||||||
|
|
||||||
transition: all 1s 0.5s ease-in-out;
|
transition: all 1s 0.5s ease-in-out;
|
||||||
}
|
} */
|
||||||
|
|
||||||
#init_image_wrapper {
|
|
||||||
position: relative;
|
|
||||||
width: fit-content;
|
|
||||||
}
|
|
||||||
|
|
||||||
#init_image_size_box {
|
#init_image_size_box {
|
||||||
|
border-radius: 6px 0px;
|
||||||
|
}
|
||||||
|
.img_bottom_label {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
right: 0px;
|
right: 0px;
|
||||||
bottom: 3px;
|
bottom: 0px;
|
||||||
padding: 3px;
|
padding: 3px;
|
||||||
background: black;
|
background: black;
|
||||||
color: white;
|
color: white;
|
||||||
text-shadow: 0px 0px 4px black;
|
text-shadow: 0px 0px 4px black;
|
||||||
opacity: 60%;
|
opacity: 60%;
|
||||||
font-size: 12px;
|
font-size: 12px;
|
||||||
border-radius: 6px 0px;
|
}
|
||||||
|
|
||||||
|
#editor-settings {
|
||||||
|
min-width: 350px;
|
||||||
}
|
}
|
||||||
|
|
||||||
#editor-settings-entries {
|
#editor-settings-entries {
|
||||||
@@ -500,7 +649,6 @@ img {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#editor-settings-entries ul {
|
#editor-settings-entries ul {
|
||||||
margin: 0px;
|
|
||||||
padding: 0px;
|
padding: 0px;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -528,6 +676,10 @@ option {
|
|||||||
cursor: pointer;
|
cursor: pointer;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
input[type="file"] * {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
input,
|
input,
|
||||||
select,
|
select,
|
||||||
textarea {
|
textarea {
|
||||||
@@ -566,12 +718,26 @@ input[type="file"] {
|
|||||||
}
|
}
|
||||||
|
|
||||||
button,
|
button,
|
||||||
input::file-selector-button {
|
input::file-selector-button,
|
||||||
|
.button {
|
||||||
padding: 2px 4px;
|
padding: 2px 4px;
|
||||||
border-radius: 4px;
|
border-radius: var(--input-border-radius);
|
||||||
background: var(--button-color);
|
background: var(--button-color);
|
||||||
color: var(--button-text-color);
|
color: var(--button-text-color);
|
||||||
border: var(--button-border);
|
border: var(--button-border);
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.button i {
|
||||||
|
margin-right: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
button:hover,
|
||||||
|
.button:hover {
|
||||||
|
transition-duration: 0.1s;
|
||||||
|
background: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 6%));
|
||||||
}
|
}
|
||||||
|
|
||||||
input::file-selector-button {
|
input::file-selector-button {
|
||||||
@@ -579,11 +745,73 @@ input::file-selector-button {
|
|||||||
height: 19px;
|
height: 19px;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* MOBILE SUPPORT */
|
|
||||||
@media screen and (max-width: 700px) {
|
.input-toggle {
|
||||||
|
display: inline-block;
|
||||||
|
position: relative;
|
||||||
|
vertical-align: middle;
|
||||||
|
width: calc(var(--input-height) * 2);
|
||||||
|
user-select: none;
|
||||||
|
-webkit-user-select: none;
|
||||||
|
-moz-user-select: none;
|
||||||
|
-ms-user-select: none;
|
||||||
|
margin-right: 4px;
|
||||||
|
}
|
||||||
|
.input-toggle > input {
|
||||||
|
position: absolute;
|
||||||
|
opacity: 0;
|
||||||
|
pointer-events: none;
|
||||||
|
}
|
||||||
|
.input-toggle > label {
|
||||||
|
display: block;
|
||||||
|
overflow: hidden;
|
||||||
|
cursor: pointer;
|
||||||
|
height: var(--input-height);
|
||||||
|
padding: 0;
|
||||||
|
line-height: var(--input-height);
|
||||||
|
border: var(--input-border-size) solid var(--input-border-color);
|
||||||
|
border-radius: var(--input-height);
|
||||||
|
background: var(--input-background-color);
|
||||||
|
transition: background 0.2s ease-in;
|
||||||
|
}
|
||||||
|
.input-toggle > label:before {
|
||||||
|
content: "";
|
||||||
|
display: block;
|
||||||
|
width: calc(var(--input-height) - ((var(--input-border-size) + var(--input-switch-padding)) * 2));
|
||||||
|
margin: 0px;
|
||||||
|
background: var(--input-text-color);
|
||||||
|
position: absolute;
|
||||||
|
top: calc(var(--input-border-size) + var(--input-switch-padding));
|
||||||
|
bottom: calc(var(--input-border-size) + var(--input-switch-padding));
|
||||||
|
right: calc(var(--input-border-size) + var(--input-switch-padding) + var(--input-height));
|
||||||
|
border-radius: calc(var(--input-height) - ((var(--input-border-size) + var(--input-switch-padding)) * 2));
|
||||||
|
transition: all 0.2s ease-in 0s;
|
||||||
|
opacity: 0.8;
|
||||||
|
}
|
||||||
|
.input-toggle > input:checked + label {
|
||||||
|
background: var(--accent-color);
|
||||||
|
}
|
||||||
|
.input-toggle > input:checked + label:before {
|
||||||
|
right: calc(var(--input-border-size) + var(--input-switch-padding));
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
.model-filter {
|
||||||
|
width: 90%;
|
||||||
|
padding-right: 20px;
|
||||||
|
white-space: nowrap;
|
||||||
|
overflow: hidden;
|
||||||
|
text-overflow: ellipsis;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Small screens */
|
||||||
|
@media screen and (max-width: 1265px) {
|
||||||
#top-nav {
|
#top-nav {
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* MOBILE SUPPORT */
|
||||||
|
@media screen and (max-width: 700px) {
|
||||||
body {
|
body {
|
||||||
margin: 0px;
|
margin: 0px;
|
||||||
}
|
}
|
||||||
@@ -611,15 +839,12 @@ input::file-selector-button {
|
|||||||
width: 100%;
|
width: 100%;
|
||||||
object-fit: contain;
|
object-fit: contain;
|
||||||
}
|
}
|
||||||
.dropdown-content {
|
|
||||||
width: auto !important;
|
|
||||||
transform: none !important;
|
|
||||||
left: 0px;
|
|
||||||
right: 0px;
|
|
||||||
}
|
|
||||||
#editor {
|
#editor {
|
||||||
padding: 16px 8px;
|
padding: 16px 8px;
|
||||||
}
|
}
|
||||||
|
#editor-settings {
|
||||||
|
min-width: 0px;
|
||||||
|
}
|
||||||
.tab-content-inner {
|
.tab-content-inner {
|
||||||
margin: 0px;
|
margin: 0px;
|
||||||
}
|
}
|
||||||
@@ -630,7 +855,7 @@ input::file-selector-button {
|
|||||||
padding-right: 0px;
|
padding-right: 0px;
|
||||||
}
|
}
|
||||||
#server-status {
|
#server-status {
|
||||||
display: none;
|
top: 75%;
|
||||||
}
|
}
|
||||||
.popup > div {
|
.popup > div {
|
||||||
padding-left: 5px !important;
|
padding-left: 5px !important;
|
||||||
@@ -643,21 +868,30 @@ input::file-selector-button {
|
|||||||
padding: 0px !important;
|
padding: 0px !important;
|
||||||
margin: 24px !important;
|
margin: 24px !important;
|
||||||
}
|
}
|
||||||
.simple-tooltip.right {
|
.simple-tooltip {
|
||||||
right: initial;
|
display: none;
|
||||||
left: 0px;
|
|
||||||
top: 50%;
|
|
||||||
transform: translate(calc(-100% + 15%), -50%);
|
|
||||||
}
|
}
|
||||||
:hover > .simple-tooltip.right {
|
#preview-tools button {
|
||||||
transform: translate(100%, -50%);
|
font-size: 0px;
|
||||||
|
}
|
||||||
|
#preview-tools button .icon {
|
||||||
|
font-size: 12pt;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@media screen and (max-width: 500px) {
|
||||||
|
#server-status #server-status-msg {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
#server-status:hover #server-status-msg {
|
||||||
|
display: inline;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@media (min-width: 700px) {
|
@media (min-width: 700px) {
|
||||||
/* #editor {
|
/* #editor {
|
||||||
max-width: 480px;
|
max-width: 480px;
|
||||||
} */
|
}*/
|
||||||
.float-container {
|
.float-container {
|
||||||
padding: 20px;
|
padding: 20px;
|
||||||
}
|
}
|
||||||
@@ -674,6 +908,8 @@ input::file-selector-button {
|
|||||||
|
|
||||||
#promptsFromFileBtn {
|
#promptsFromFileBtn {
|
||||||
font-size: 9pt;
|
font-size: 9pt;
|
||||||
|
display: inline;
|
||||||
|
padding: 2pt;
|
||||||
}
|
}
|
||||||
|
|
||||||
.section-button {
|
.section-button {
|
||||||
@@ -714,9 +950,11 @@ input::file-selector-button {
|
|||||||
visibility: hidden;
|
visibility: hidden;
|
||||||
opacity: 0;
|
opacity: 0;
|
||||||
position: absolute;
|
position: absolute;
|
||||||
white-space: nowrap;
|
width: max-content;
|
||||||
|
max-width: 300px;
|
||||||
padding: 8px 12px;
|
padding: 8px 12px;
|
||||||
transition: 0.3s all;
|
transition: 0.3s all;
|
||||||
|
z-index: 1000;
|
||||||
|
|
||||||
pointer-events: none;
|
pointer-events: none;
|
||||||
}
|
}
|
||||||
@@ -730,7 +968,7 @@ input::file-selector-button {
|
|||||||
.simple-tooltip.right {
|
.simple-tooltip.right {
|
||||||
right: 0px;
|
right: 0px;
|
||||||
top: 50%;
|
top: 50%;
|
||||||
transform: translate(calc(100% - 15%), -50%);
|
transform: translate(100%, -50%);
|
||||||
}
|
}
|
||||||
:hover > .simple-tooltip.right {
|
:hover > .simple-tooltip.right {
|
||||||
transform: translate(100%, -50%);
|
transform: translate(100%, -50%);
|
||||||
@@ -763,6 +1001,15 @@ input::file-selector-button {
|
|||||||
transform: translate(-50%, 100%);
|
transform: translate(-50%, 100%);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.simple-tooltip.top-left {
|
||||||
|
top: 0px;
|
||||||
|
left: 0px;
|
||||||
|
transform: translate(calc(-100% + 15%), calc(-100% + 15%));
|
||||||
|
}
|
||||||
|
:hover > .simple-tooltip.top-left {
|
||||||
|
transform: translate(-80%, -100%);
|
||||||
|
}
|
||||||
|
|
||||||
/* PROGRESS BAR */
|
/* PROGRESS BAR */
|
||||||
.progress-bar {
|
.progress-bar {
|
||||||
background: var(--background-color3);
|
background: var(--background-color3);
|
||||||
@@ -771,6 +1018,7 @@ input::file-selector-button {
|
|||||||
height: 16px;
|
height: 16px;
|
||||||
position: relative;
|
position: relative;
|
||||||
transition: 0.25s 1s border, 0.25s 1s height;
|
transition: 0.25s 1s border, 0.25s 1s height;
|
||||||
|
clear: both;
|
||||||
}
|
}
|
||||||
.progress-bar > div {
|
.progress-bar > div {
|
||||||
background: var(--accent-color);
|
background: var(--accent-color);
|
||||||
@@ -848,7 +1096,7 @@ input::file-selector-button {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* TABS */
|
/* TABS */
|
||||||
#tab-container {
|
.tab-container {
|
||||||
display: flex;
|
display: flex;
|
||||||
align-items: flex-end;
|
align-items: flex-end;
|
||||||
}
|
}
|
||||||
@@ -875,8 +1123,8 @@ input::file-selector-button {
|
|||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
#tab-content-wrapper {
|
#tab-content-wrapper > * {
|
||||||
border-top: 8px solid var(--background-color1);
|
padding-top: 8px;
|
||||||
}
|
}
|
||||||
|
|
||||||
.tab-content-inner {
|
.tab-content-inner {
|
||||||
@@ -898,6 +1146,9 @@ input::file-selector-button {
|
|||||||
i.active {
|
i.active {
|
||||||
background: var(--accent-color);
|
background: var(--accent-color);
|
||||||
}
|
}
|
||||||
|
.primaryButton.active {
|
||||||
|
background: hsl(var(--accent-hue), 100%, 50%);
|
||||||
|
}
|
||||||
#system-info {
|
#system-info {
|
||||||
max-width: 800px;
|
max-width: 800px;
|
||||||
font-size: 10pt;
|
font-size: 10pt;
|
||||||
@@ -910,6 +1161,102 @@ i.active {
|
|||||||
float: right;
|
float: right;
|
||||||
font-weight: bold;
|
font-weight: bold;
|
||||||
}
|
}
|
||||||
#save-system-settings-btn {
|
|
||||||
|
button:active {
|
||||||
|
transition-duration: 0.1s;
|
||||||
|
background-color: hsl(var(--accent-hue), 100%, calc(var(--accent-lightness) + 24%));
|
||||||
|
position: relative;
|
||||||
|
top: 1px;
|
||||||
|
left: 1px;
|
||||||
|
}
|
||||||
|
|
||||||
|
div.task-initimg > img {
|
||||||
|
margin-right: 6px;
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
div.task-fs-initimage {
|
||||||
|
display: none;
|
||||||
|
position: absolute;
|
||||||
|
}
|
||||||
|
div.task-initimg:hover div.task-fs-initimage {
|
||||||
|
display: block;
|
||||||
|
position: absolute;
|
||||||
|
z-index: 9999;
|
||||||
|
box-shadow: 0 0 30px #000;
|
||||||
|
margin-top:-64px;
|
||||||
|
max-width: 75vw;
|
||||||
|
max-height: 75vh;
|
||||||
|
}
|
||||||
|
div.top-right {
|
||||||
|
position: absolute;
|
||||||
|
top: 8px;
|
||||||
|
right: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
button#save-system-settings-btn {
|
||||||
padding: 4pt 8pt;
|
padding: 4pt 8pt;
|
||||||
}
|
}
|
||||||
|
#ip-info a {
|
||||||
|
color:var(--text-color)
|
||||||
|
}
|
||||||
|
#ip-info div {
|
||||||
|
line-height: 200%;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* SCROLLBARS */
|
||||||
|
:root {
|
||||||
|
--scrollbar-width: 14px;
|
||||||
|
--scrollbar-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar {
|
||||||
|
width: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar-track {
|
||||||
|
border-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.scrollbar-editor::-webkit-scrollbar-thumb {
|
||||||
|
background: --background-color2;
|
||||||
|
border-radius: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar {
|
||||||
|
width: var(--scrollbar-width);
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-track {
|
||||||
|
box-shadow: inset 0 0 5px var(--input-border-color);
|
||||||
|
border-radius: var(--input-border-radius);
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-scrollbar-thumb {
|
||||||
|
background: var(--background-color2);
|
||||||
|
border-radius: var(--scrollbar-radius);
|
||||||
|
}
|
||||||
|
|
||||||
|
body.pause {
|
||||||
|
border: solid 12px var(--accent-color);
|
||||||
|
}
|
||||||
|
|
||||||
|
body.wait-pause {
|
||||||
|
animation: blinker 2s linear infinite;
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes blinker {
|
||||||
|
0% { border: solid 12px var(--accent-color); }
|
||||||
|
50% { border: solid 12px var(--background-color1); }
|
||||||
|
100% { border: solid 12px var(--accent-color); }
|
||||||
|
}
|
||||||
|
|
||||||
|
.jconfirm.jconfirm-modern .jconfirm-box div.jconfirm-title-c {
|
||||||
|
color: var(--button-text-color);
|
||||||
|
}
|
||||||
|
.jconfirm.jconfirm-modern .jconfirm-box {
|
||||||
|
background-color: var(--background-color1);
|
||||||
|
}
|
||||||
|
|
||||||
|
.displayNone {
|
||||||
|
display:none !important;
|
||||||
|
}
|
||||||
|
|||||||
99
ui/media/css/searchable-models.css
Normal file
@@ -0,0 +1,99 @@
|
|||||||
|
.model-list {
|
||||||
|
position: absolute;
|
||||||
|
margin-block-start: 2px;
|
||||||
|
display: none;
|
||||||
|
padding-inline-start: 0;
|
||||||
|
max-height: 200px;
|
||||||
|
overflow: auto;
|
||||||
|
background: var(--input-background-color);
|
||||||
|
border: var(--input-border-size) solid var(--input-border-color);
|
||||||
|
border-radius: var(--input-border-radius);
|
||||||
|
color: var(--input-text-color);
|
||||||
|
z-index: 1;
|
||||||
|
line-height: normal;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list ul {
|
||||||
|
padding-right: 20px;
|
||||||
|
padding-inline-start: 0;
|
||||||
|
margin-top: 3pt;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list li {
|
||||||
|
padding-top: 3px;
|
||||||
|
padding-bottom: 3px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list .icon {
|
||||||
|
padding-right: 3pt;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-result {
|
||||||
|
list-style: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-no-result {
|
||||||
|
color: var(--text-color);
|
||||||
|
list-style: none;
|
||||||
|
padding: 3px 6px 3px 6px;
|
||||||
|
font-size: 9pt;
|
||||||
|
font-style: italic;
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list li.model-folder {
|
||||||
|
color: var(--text-color);
|
||||||
|
list-style: none;
|
||||||
|
padding: 6px 6px 6px 6px;
|
||||||
|
font-size: 9pt;
|
||||||
|
font-weight: bold;
|
||||||
|
border-top: 1px solid var(--background-color1);
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list li.model-file {
|
||||||
|
color: var(--input-text-color);
|
||||||
|
list-style: none;
|
||||||
|
padding-left: 12px;
|
||||||
|
padding-right:20px;
|
||||||
|
font-size: 10pt;
|
||||||
|
font-weight: normal;
|
||||||
|
transition: none;
|
||||||
|
transition:property: none;
|
||||||
|
cursor: default;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list li.model-file.in-root-folder {
|
||||||
|
padding-left: 6px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-list li.model-file.selected {
|
||||||
|
background: grey;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-selector {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-selector-arrow {
|
||||||
|
position: absolute;
|
||||||
|
width: 17px;
|
||||||
|
margin: 5px -17px;
|
||||||
|
padding-top: 3px;
|
||||||
|
cursor: pointer;
|
||||||
|
font-size: 8pt;
|
||||||
|
transition: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.model-input {
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.reloadModels {
|
||||||
|
background: var(--background-color2);
|
||||||
|
border: none;
|
||||||
|
padding: 0px 0px;
|
||||||
|
}
|
||||||
|
|
||||||
|
#reload-models.secondaryButton:hover {
|
||||||
|
background: var(--background-color2);
|
||||||
|
}
|
||||||
@@ -1,29 +1,42 @@
|
|||||||
:root {
|
:root {
|
||||||
--background-color1: rgb(32, 33, 36); /* main parts of the page */
|
--main-hue: 222;
|
||||||
--background-color2: rgb(44, 45, 48); /* main panels */
|
--main-saturation: 4%;
|
||||||
--background-color3: rgb(47, 49, 53);
|
--value-base: 13%;
|
||||||
--background-color4: rgb(18, 18, 19); /* settings dropdowns */
|
--value-step: 5%;
|
||||||
|
--background-color1: hsl(var(--main-hue), var(--main-saturation), var(--value-base));
|
||||||
|
--background-color2: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (1 * var(--value-step))));
|
||||||
|
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (0.5 * var(--value-step))));
|
||||||
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (1.5 * var(--value-step))));
|
||||||
|
|
||||||
--accent-hue: 266;
|
--accent-hue: 267;
|
||||||
--accent-lightness: 36%;
|
--accent-lightness: 36%;
|
||||||
--accent-lightness-hover: 40%;
|
--accent-lightness-hover: 40%;
|
||||||
|
|
||||||
--text-color: #eee;
|
--text-color: #eee;
|
||||||
|
|
||||||
--input-text-color: black;
|
--input-text-color: #eee;
|
||||||
--input-background-color: #e9e9ed;
|
--input-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (0.7 * var(--value-step))));
|
||||||
--input-border-color: #8f8f9d;
|
--input-border-color: var(--background-color4);
|
||||||
|
|
||||||
--button-text-color: var(--input-text-color);
|
--button-text-color: var(--input-text-color);
|
||||||
--button-color: #e9e9ed;
|
--button-color: var(--input-background-color);
|
||||||
--button-border: 1px solid #8f8f9d;
|
--button-border: none;
|
||||||
|
|
||||||
/* other */
|
/* other */
|
||||||
--input-border-radius: 4px;
|
--input-border-radius: 4px;
|
||||||
--input-border-size: 1px;
|
--input-border-size: 1px;
|
||||||
--accent-color: hsl(var(--accent-hue), 100%, var(--accent-lightness));
|
--accent-color: hsl(var(--accent-hue), 100%, var(--accent-lightness));
|
||||||
--accent-color-hover: hsl(var(--accent-hue), 100%, var(--accent-lightness-hover));
|
--accent-color-hover: hsl(var(--accent-hue), 100%, var(--accent-lightness-hover));
|
||||||
|
--accent-text-color: rgb(255, 221, 255);
|
||||||
--primary-button-border: none;
|
--primary-button-border: none;
|
||||||
|
--input-switch-padding: 1px;
|
||||||
|
--input-height: 18px;
|
||||||
|
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (2 * var(--value-step))));
|
||||||
|
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3 * var(--value-step))));
|
||||||
|
--tertiary-color: var(--input-text-color)
|
||||||
|
|
||||||
|
/* Main theme color, hex color fallback. */
|
||||||
|
--theme-color-fallback: #673AB6;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-light {
|
.theme-light {
|
||||||
@@ -37,6 +50,13 @@
|
|||||||
--input-text-color: black;
|
--input-text-color: black;
|
||||||
--input-background-color: #f8f9fa;
|
--input-background-color: #f8f9fa;
|
||||||
--input-border-color: grey;
|
--input-border-color: grey;
|
||||||
|
|
||||||
|
--theme-color-fallback: #aaaaaa;
|
||||||
|
|
||||||
|
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (16.8 * var(--value-step))));
|
||||||
|
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (12 * var(--value-step))));
|
||||||
|
|
||||||
|
--accent-text-color: white;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-discord {
|
.theme-discord {
|
||||||
@@ -47,15 +67,16 @@
|
|||||||
|
|
||||||
--accent-hue: 235;
|
--accent-hue: 235;
|
||||||
--accent-lightness: 65%;
|
--accent-lightness: 65%;
|
||||||
--primary-button-border: none;
|
|
||||||
|
|
||||||
--button-color: var(--accent-color);
|
|
||||||
--button-border: none;
|
|
||||||
|
|
||||||
--input-text-color: #ccc;
|
|
||||||
--input-border-size: 2px;
|
--input-border-size: 2px;
|
||||||
--input-background-color: #202225;
|
--input-background-color: #202225;
|
||||||
--input-border-color: var(--input-background-color);
|
--input-border-color: var(--input-background-color);
|
||||||
|
|
||||||
|
--theme-color-fallback: #202225;
|
||||||
|
|
||||||
|
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3.5 * var(--value-step))));
|
||||||
|
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (4.5 * var(--value-step))));
|
||||||
|
--accent-text-color: white;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-cool-blue {
|
.theme-cool-blue {
|
||||||
@@ -68,16 +89,15 @@
|
|||||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||||
|
|
||||||
--accent-hue: 212;
|
|
||||||
--primary-button-border: none;
|
|
||||||
|
|
||||||
--button-color: var(--accent-color);
|
|
||||||
--button-border: none;
|
|
||||||
|
|
||||||
--input-border-size: 1px;
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
--input-text-color: #ccc;
|
|
||||||
--input-border-color: var(--background-color4);
|
--accent-hue: 212;
|
||||||
|
|
||||||
|
--theme-color-fallback: #0056b8;
|
||||||
|
|
||||||
|
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3.5 * var(--value-step))));
|
||||||
|
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (4.5 * var(--value-step))));
|
||||||
|
--accent-text-color: #f7fbff;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -91,15 +111,12 @@
|
|||||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||||
|
|
||||||
--primary-button-border: none;
|
|
||||||
|
|
||||||
--button-color: var(--accent-color);
|
|
||||||
--button-border: none;
|
|
||||||
|
|
||||||
--input-border-size: 1px;
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
--input-text-color: #ccc;
|
|
||||||
--input-border-color: var(--background-color4);
|
--theme-color-fallback: #5300b8;
|
||||||
|
|
||||||
|
--tertiary-background-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (3.5 * var(--value-step))));
|
||||||
|
--tertiary-border-color: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (4.5 * var(--value-step))));
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-super-dark {
|
.theme-super-dark {
|
||||||
@@ -112,15 +129,10 @@
|
|||||||
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (2 * var(--value-step))));
|
--background-color3: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (2 * var(--value-step))));
|
||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (1.4 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) + (1.4 * var(--value-step))));
|
||||||
|
|
||||||
--primary-button-border: none;
|
|
||||||
|
|
||||||
--button-color: var(--accent-color);
|
|
||||||
--button-border: none;
|
|
||||||
|
|
||||||
--input-border-size: 0px;
|
|
||||||
--input-background-color: var(--background-color3);
|
--input-background-color: var(--background-color3);
|
||||||
--input-text-color: #ccc;
|
--input-border-size: 0px;
|
||||||
--input-border-color: var(--background-color4);
|
|
||||||
|
--theme-color-fallback: #000000;
|
||||||
}
|
}
|
||||||
|
|
||||||
.theme-wild {
|
.theme-wild {
|
||||||
@@ -134,13 +146,38 @@
|
|||||||
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
--background-color4: hsl(var(--main-hue), var(--main-saturation), calc(var(--value-base) - (3 * var(--value-step))));
|
||||||
|
|
||||||
--accent-hue: 212;
|
--accent-hue: 212;
|
||||||
--primary-button-border: none;
|
|
||||||
|
|
||||||
--button-color: var(--accent-color);
|
|
||||||
--button-border: none;
|
|
||||||
|
|
||||||
--input-border-size: 1px;
|
--input-border-size: 1px;
|
||||||
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
--input-background-color: hsl(222, var(--main-saturation), calc(var(--value-base) - (2 * var(--value-step))));
|
||||||
--input-text-color: red;
|
--input-text-color: #FF0000;
|
||||||
--input-border-color: green;
|
--input-border-color: #005E05;
|
||||||
|
|
||||||
|
--tertiary-color: white;
|
||||||
|
--accent-text-color: #f7fbff;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
.theme-gnomie {
|
||||||
|
--background-color1: #242424;
|
||||||
|
--background-color2: #353535;
|
||||||
|
--background-color3: #494949;
|
||||||
|
--background-color4: #000000;
|
||||||
|
|
||||||
|
--accent-hue: 213;
|
||||||
|
--accent-lightness: 55%;
|
||||||
|
--accent-color: #2168bf;
|
||||||
|
|
||||||
|
--input-border-radius: 6px;
|
||||||
|
--input-text-color: #ffffff;
|
||||||
|
--input-background-color: #2a2a2a;
|
||||||
|
--input-border-size: 0px;
|
||||||
|
--input-border-color: var(--input-background-color);
|
||||||
|
|
||||||
|
--theme-color-fallback: #2168bf;
|
||||||
|
}
|
||||||
|
|
||||||
|
.theme-gnomie .panel-box {
|
||||||
|
border: none;
|
||||||
|
box-shadow: 0px 1px 2px rgba(0, 0, 0, 0.25);
|
||||||
|
border-radius: 10px;
|
||||||
}
|
}
|
||||||
BIN
ui/media/images/fa-eraser.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
4
ui/media/images/fa-eraser.svg
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 576" width="24" height="24">
|
||||||
|
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
|
||||||
|
<path style="filter: drop-shadow(0px 0px 20px white)" d="M290.7 57.4 57.4 290.7c-25 25-25 65.5 0 90.5l80 80c12 12 28.3 18.7 45.3 18.7H512c17.7 0 32-14.3 32-32s-14.3-32-32-32H387.9l130.7-130.6c25-25 25-65.5 0-90.5L381.3 57.4c-25-25-65.5-25-90.5 0zm6.7 358.6H182.6l-80-80 124.7-124.7 137.4 137.4-67.3 67.3z"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 571 B |
BIN
ui/media/images/fa-eye-dropper.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
4
ui/media/images/fa-eye-dropper.svg
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="24" height="24">
|
||||||
|
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
|
||||||
|
<path style="filter: drop-shadow(0px 0px 20px white)" d="M341.6 29.2 240.1 130.8l-9.4-9.4c-12.5-12.5-32.8-12.5-45.3 0s-12.5 32.8 0 45.3l160 160c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3l-9.4-9.4 101.5-101.6c39-39 39-102.2 0-141.1s-102.2-39-141.1 0zM55.4 323.3c-15 15-23.4 35.4-23.4 56.6v42.4L5.4 462.2c-8.5 12.7-6.8 29.6 4 40.4s27.7 12.5 40.4 4L89.7 480h42.4c21.2 0 41.6-8.4 56.6-23.4l120.7-120.7-45.3-45.3-120.7 120.7c-3 3-7.1 4.7-11.3 4.7H96v-36.1c0-4.2 1.7-8.3 4.7-11.3l120.7-120.7-45.3-45.3L55.4 323.3z"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 775 B |
4
ui/media/images/fa-fill.svg
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 576" width="24" height="24">
|
||||||
|
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
|
||||||
|
<path style="filter: drop-shadow(0px 0px 20px white)" d="M118.6 9.4c-12.5-12.5-32.7-12.5-45.2 0s-12.5 32.8 0 45.3l81.3 81.3-92.1 92.1c-37.5 37.5-37.5 98.3 0 135.8l117.5 117.5c37.5 37.5 98.3 37.5 135.8 0l190.4-190.5c28.1-28.1 28.1-73.7 0-101.8L354.9 37.7c-28.1-28.1-73.7-28.1-101.8 0l-53.1 53-81.4-81.3zM200 181.3l49.4 49.4c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L245.3 136l53.1-53.1c3.1-3.1 8.2-3.1 11.3 0l151.4 151.4c3.1 3.1 3.1 8.2 0 11.3L418.7 288H99.5c1.4-5.4 4.2-10.4 8.4-14.6l92.1-92.1z"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 763 B |
BIN
ui/media/images/fa-pencil.png
Normal file
|
After Width: | Height: | Size: 10 KiB |
4
ui/media/images/fa-pencil.svg
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" width="24" height="24">
|
||||||
|
<!--! Font Awesome Pro 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license (Commercial License) Copyright 2022 Fonticons, Inc.-->
|
||||||
|
<path style="filter: drop-shadow(0px 0px 20px white)" d="m410.3 231 11.3-11.3-33.9-33.9-62.1-62.1-33.9-33.9-11.3 11.3-22.6 22.6L58.6 322.9c-10.4 10.4-18 23.3-22.2 37.4L1 480.7c-2.5 8.4-.2 17.5 6.1 23.7s15.3 8.5 23.7 6.1l120.3-35.4c14.1-4.2 27-11.8 37.4-22.2l199.2-199.2 22.6-22.7zM160 399.4l-9.1 22.7c-4 3.1-8.5 5.4-13.3 6.9l-78.2 23 23-78.1c1.4-4.9 3.8-9.4 6.9-13.3l22.7-9.1v32c0 8.8 7.2 16 16 16h32zM362.7 18.7l-14.4 14.5-22.6 22.6-11.4 11.3 33.9 33.9 62.1 62.1 33.9 33.9 11.3-11.3 22.6-22.6 14.5-14.5c25-25 25-65.5 0-90.5l-39.3-39.4c-25-25-65.5-25-90.5 0zm-47.4 168-144 144c-6.2 6.2-16.4 6.2-22.6 0s-6.2-16.4 0-22.6l144-144c6.2-6.2 16.4-6.2 22.6 0s6.2 16.4 0 22.6z"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 934 B |
|
Before Width: | Height: | Size: 466 B After Width: | Height: | Size: 1.4 KiB |
|
Before Width: | Height: | Size: 973 B After Width: | Height: | Size: 3.2 KiB |
BIN
ui/media/images/icon-512x512.png
Normal file
|
After Width: | Height: | Size: 329 KiB |
@@ -14,17 +14,23 @@ const SETTINGS_IDS_LIST = [
|
|||||||
"num_outputs_parallel",
|
"num_outputs_parallel",
|
||||||
"stable_diffusion_model",
|
"stable_diffusion_model",
|
||||||
"vae_model",
|
"vae_model",
|
||||||
"sampler",
|
"hypernetwork_model",
|
||||||
|
"sampler_name",
|
||||||
"width",
|
"width",
|
||||||
"height",
|
"height",
|
||||||
"num_inference_steps",
|
"num_inference_steps",
|
||||||
"guidance_scale",
|
"guidance_scale",
|
||||||
"prompt_strength",
|
"prompt_strength",
|
||||||
|
"hypernetwork_strength",
|
||||||
"output_format",
|
"output_format",
|
||||||
|
"output_quality",
|
||||||
"negative_prompt",
|
"negative_prompt",
|
||||||
"stream_image_progress",
|
"stream_image_progress",
|
||||||
"use_face_correction",
|
"use_face_correction",
|
||||||
|
"gfpgan_model",
|
||||||
"use_upscale",
|
"use_upscale",
|
||||||
|
"upscale_amount",
|
||||||
|
"block_nsfw",
|
||||||
"show_only_filtered_image",
|
"show_only_filtered_image",
|
||||||
"upscale_model",
|
"upscale_model",
|
||||||
"preview-image",
|
"preview-image",
|
||||||
@@ -33,9 +39,14 @@ const SETTINGS_IDS_LIST = [
|
|||||||
"save_to_disk",
|
"save_to_disk",
|
||||||
"diskPath",
|
"diskPath",
|
||||||
"sound_toggle",
|
"sound_toggle",
|
||||||
"turbo",
|
"vram_usage_level",
|
||||||
"use_full_precision",
|
"confirm_dangerous_actions",
|
||||||
"auto_save_settings"
|
"metadata_output_format",
|
||||||
|
"auto_save_settings",
|
||||||
|
"apply_color_correction",
|
||||||
|
"process_order_toggle",
|
||||||
|
"thumbnail_size",
|
||||||
|
"auto_scroll"
|
||||||
]
|
]
|
||||||
|
|
||||||
const IGNORE_BY_DEFAULT = [
|
const IGNORE_BY_DEFAULT = [
|
||||||
@@ -55,6 +66,9 @@ async function initSettings() {
|
|||||||
if (!element) {
|
if (!element) {
|
||||||
console.error(`Missing settings element ${id}`)
|
console.error(`Missing settings element ${id}`)
|
||||||
}
|
}
|
||||||
|
if (id in SETTINGS) { // don't create it again
|
||||||
|
return
|
||||||
|
}
|
||||||
SETTINGS[id] = {
|
SETTINGS[id] = {
|
||||||
key: id,
|
key: id,
|
||||||
element: element,
|
element: element,
|
||||||
@@ -82,6 +96,9 @@ async function initSettings() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function getSetting(element) {
|
function getSetting(element) {
|
||||||
|
if (element.dataset && 'path' in element.dataset) {
|
||||||
|
return element.dataset.path
|
||||||
|
}
|
||||||
if (typeof element === "string" || element instanceof String) {
|
if (typeof element === "string" || element instanceof String) {
|
||||||
element = SETTINGS[element].element
|
element = SETTINGS[element].element
|
||||||
}
|
}
|
||||||
@@ -91,6 +108,10 @@ function getSetting(element) {
|
|||||||
return element.value
|
return element.value
|
||||||
}
|
}
|
||||||
function setSetting(element, value) {
|
function setSetting(element, value) {
|
||||||
|
if (element.dataset && 'path' in element.dataset) {
|
||||||
|
element.dataset.path = value
|
||||||
|
return // no need to dispatch any event here because the models are not loaded yet
|
||||||
|
}
|
||||||
if (typeof element === "string" || element instanceof String) {
|
if (typeof element === "string" || element instanceof String) {
|
||||||
element = SETTINGS[element].element
|
element = SETTINGS[element].element
|
||||||
}
|
}
|
||||||
@@ -124,7 +145,7 @@ function loadSettings() {
|
|||||||
var saved_settings_text = localStorage.getItem(SETTINGS_KEY)
|
var saved_settings_text = localStorage.getItem(SETTINGS_KEY)
|
||||||
if (saved_settings_text) {
|
if (saved_settings_text) {
|
||||||
var saved_settings = JSON.parse(saved_settings_text)
|
var saved_settings = JSON.parse(saved_settings_text)
|
||||||
if (saved_settings.find(s => s.key == "auto_save_settings").value == false) {
|
if (saved_settings.find(s => s.key == "auto_save_settings")?.value == false) {
|
||||||
setSetting("auto_save_settings", false)
|
setSetting("auto_save_settings", false)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -213,6 +234,7 @@ function fillSaveSettingsConfigTable() {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
prettifyInputs(saveSettingsConfigTable)
|
||||||
}
|
}
|
||||||
|
|
||||||
// configureSettingsSaveBtn
|
// configureSettingsSaveBtn
|
||||||
@@ -224,7 +246,7 @@ var autoSaveSettings = document.getElementById("auto_save_settings")
|
|||||||
var configSettingsButton = document.createElement("button")
|
var configSettingsButton = document.createElement("button")
|
||||||
configSettingsButton.textContent = "Configure"
|
configSettingsButton.textContent = "Configure"
|
||||||
configSettingsButton.style.margin = "0px 5px"
|
configSettingsButton.style.margin = "0px 5px"
|
||||||
autoSaveSettings.insertAdjacentElement("afterend", configSettingsButton)
|
autoSaveSettings.insertAdjacentElement("beforebegin", configSettingsButton)
|
||||||
autoSaveSettings.addEventListener("change", () => {
|
autoSaveSettings.addEventListener("change", () => {
|
||||||
configSettingsButton.style.display = autoSaveSettings.checked ? "block" : "none"
|
configSettingsButton.style.display = autoSaveSettings.checked ? "block" : "none"
|
||||||
})
|
})
|
||||||
@@ -251,10 +273,12 @@ function tryLoadOldSettings() {
|
|||||||
var saved_settings = JSON.parse(saved_settings_text)
|
var saved_settings = JSON.parse(saved_settings_text)
|
||||||
Object.keys(saved_settings.should_save).forEach(key => {
|
Object.keys(saved_settings.should_save).forEach(key => {
|
||||||
key = key in old_map ? old_map[key] : key
|
key = key in old_map ? old_map[key] : key
|
||||||
|
if (!(key in SETTINGS)) return
|
||||||
SETTINGS[key].ignore = !saved_settings.should_save[key]
|
SETTINGS[key].ignore = !saved_settings.should_save[key]
|
||||||
});
|
});
|
||||||
Object.keys(saved_settings.values).forEach(key => {
|
Object.keys(saved_settings.values).forEach(key => {
|
||||||
key = key in old_map ? old_map[key] : key
|
key = key in old_map ? old_map[key] : key
|
||||||
|
if (!(key in SETTINGS)) return
|
||||||
var setting = SETTINGS[key]
|
var setting = SETTINGS[key]
|
||||||
if (!setting.ignore) {
|
if (!setting.ignore) {
|
||||||
setting.value = saved_settings.values[key]
|
setting.value = saved_settings.values[key]
|
||||||
@@ -269,8 +293,6 @@ function tryLoadOldSettings() {
|
|||||||
"soundEnabled": "sound_toggle",
|
"soundEnabled": "sound_toggle",
|
||||||
"saveToDisk": "save_to_disk",
|
"saveToDisk": "save_to_disk",
|
||||||
"useCPU": "use_cpu",
|
"useCPU": "use_cpu",
|
||||||
"useFullPrecision": "use_full_precision",
|
|
||||||
"useTurboMode": "turbo",
|
|
||||||
"diskPath": "diskPath",
|
"diskPath": "diskPath",
|
||||||
"useFaceCorrection": "use_face_correction",
|
"useFaceCorrection": "use_face_correction",
|
||||||
"useUpscaling": "use_upscale",
|
"useUpscaling": "use_upscale",
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
const EXT_REGEX = /(?:\.([^.]+))?$/
|
const EXT_REGEX = /(?:\.([^.]+))?$/
|
||||||
const TEXT_EXTENSIONS = ['txt', 'json']
|
const TEXT_EXTENSIONS = ['txt', 'json']
|
||||||
const IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'tga']
|
const IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'tif', 'tga', 'webp']
|
||||||
|
|
||||||
function parseBoolean(stringValue) {
|
function parseBoolean(stringValue) {
|
||||||
if (typeof stringValue === 'boolean') {
|
if (typeof stringValue === 'boolean') {
|
||||||
@@ -25,6 +25,7 @@ function parseBoolean(stringValue) {
|
|||||||
case "no":
|
case "no":
|
||||||
case "off":
|
case "off":
|
||||||
case "0":
|
case "0":
|
||||||
|
case "none":
|
||||||
case null:
|
case null:
|
||||||
case undefined:
|
case undefined:
|
||||||
return false;
|
return false;
|
||||||
@@ -51,6 +52,20 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => negativePromptField.value,
|
readUI: () => negativePromptField.value,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
|
active_tags: { name: "Image Modifiers",
|
||||||
|
setUI: (active_tags) => {
|
||||||
|
refreshModifiersState(active_tags)
|
||||||
|
},
|
||||||
|
readUI: () => activeTags.map(x => x.name),
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
|
inactive_tags: { name: "Inactive Image Modifiers",
|
||||||
|
setUI: (inactive_tags) => {
|
||||||
|
refreshInactiveTags(inactive_tags)
|
||||||
|
},
|
||||||
|
readUI: () => activeTags.filter(tag => tag.inactive === true).map(x => x.name),
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
width: { name: 'Width',
|
width: { name: 'Width',
|
||||||
setUI: (width) => {
|
setUI: (width) => {
|
||||||
const oldVal = widthField.value
|
const oldVal = widthField.value
|
||||||
@@ -78,13 +93,14 @@ const TASK_MAPPING = {
|
|||||||
if (!seed) {
|
if (!seed) {
|
||||||
randomSeedField.checked = true
|
randomSeedField.checked = true
|
||||||
seedField.disabled = true
|
seedField.disabled = true
|
||||||
|
seedField.value = 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
randomSeedField.checked = false
|
randomSeedField.checked = false
|
||||||
seedField.disabled = false
|
seedField.disabled = false
|
||||||
seedField.value = seed
|
seedField.value = seed
|
||||||
},
|
},
|
||||||
readUI: () => (randomSeedField.checked ? Math.floor(Math.random() * 10000000) : parseInt(seedField.value)),
|
readUI: () => parseInt(seedField.value), // just return the value the user is seeing in the UI
|
||||||
parse: (val) => parseInt(val)
|
parse: (val) => parseInt(val)
|
||||||
},
|
},
|
||||||
num_inference_steps: { name: 'Steps',
|
num_inference_steps: { name: 'Steps',
|
||||||
@@ -120,29 +136,51 @@ const TASK_MAPPING = {
|
|||||||
},
|
},
|
||||||
mask: { name: 'Mask',
|
mask: { name: 'Mask',
|
||||||
setUI: (mask) => {
|
setUI: (mask) => {
|
||||||
inpaintingEditor.setImg(mask)
|
setTimeout(() => { // add a delay to insure this happens AFTER the main image loads (which reloads the inpainter)
|
||||||
|
imageInpainter.setImg(mask)
|
||||||
|
}, 250)
|
||||||
maskSetting.checked = Boolean(mask)
|
maskSetting.checked = Boolean(mask)
|
||||||
},
|
},
|
||||||
readUI: () => (maskSetting.checked ? inpaintingEditor.getImg() : undefined),
|
readUI: () => (maskSetting.checked ? imageInpainter.getImg() : undefined),
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
|
preserve_init_image_color_profile: { name: 'Preserve Color Profile',
|
||||||
|
setUI: (preserve_init_image_color_profile) => {
|
||||||
|
applyColorCorrectionField.checked = parseBoolean(preserve_init_image_color_profile)
|
||||||
|
},
|
||||||
|
readUI: () => applyColorCorrectionField.checked,
|
||||||
|
parse: (val) => parseBoolean(val)
|
||||||
|
},
|
||||||
|
|
||||||
use_face_correction: { name: 'Use Face Correction',
|
use_face_correction: { name: 'Use Face Correction',
|
||||||
setUI: (use_face_correction) => {
|
setUI: (use_face_correction) => {
|
||||||
useFaceCorrectionField.checked = parseBoolean(use_face_correction)
|
const oldVal = gfpganModelField.value
|
||||||
|
gfpganModelField.value = getModelPath(use_face_correction, ['.pth'])
|
||||||
|
if (gfpganModelField.value) { // Is a valid value for the field.
|
||||||
|
useFaceCorrectionField.checked = true
|
||||||
|
gfpganModelField.disabled = false
|
||||||
|
} else { // Not a valid value, restore the old value and disable the filter.
|
||||||
|
gfpganModelField.disabled = true
|
||||||
|
gfpganModelField.value = oldVal
|
||||||
|
useFaceCorrectionField.checked = false
|
||||||
|
}
|
||||||
|
|
||||||
|
//useFaceCorrectionField.checked = parseBoolean(use_face_correction)
|
||||||
},
|
},
|
||||||
readUI: () => useFaceCorrectionField.checked,
|
readUI: () => (useFaceCorrectionField.checked ? gfpganModelField.value : undefined),
|
||||||
parse: (val) => parseBoolean(val)
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
use_upscale: { name: 'Use Upscaling',
|
use_upscale: { name: 'Use Upscaling',
|
||||||
setUI: (use_upscale) => {
|
setUI: (use_upscale) => {
|
||||||
const oldVal = upscaleModelField.value
|
const oldVal = upscaleModelField.value
|
||||||
upscaleModelField.value = use_upscale
|
upscaleModelField.value = getModelPath(use_upscale, ['.pth'])
|
||||||
if (upscaleModelField.value) { // Is a valid value for the field.
|
if (upscaleModelField.value) { // Is a valid value for the field.
|
||||||
useUpscalingField.checked = true
|
useUpscalingField.checked = true
|
||||||
upscaleModelField.disabled = false
|
upscaleModelField.disabled = false
|
||||||
|
upscaleAmountField.disabled = false
|
||||||
} else { // Not a valid value, restore the old value and disable the filter.
|
} else { // Not a valid value, restore the old value and disable the filter.
|
||||||
upscaleModelField.disabled = true
|
upscaleModelField.disabled = true
|
||||||
|
upscaleAmountField.disabled = true
|
||||||
upscaleModelField.value = oldVal
|
upscaleModelField.value = oldVal
|
||||||
useUpscalingField.checked = false
|
useUpscalingField.checked = false
|
||||||
}
|
}
|
||||||
@@ -150,9 +188,16 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
|
readUI: () => (useUpscalingField.checked ? upscaleModelField.value : undefined),
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
sampler: { name: 'Sampler',
|
upscale_amount: { name: 'Upscale By',
|
||||||
setUI: (sampler) => {
|
setUI: (upscale_amount) => {
|
||||||
samplerField.value = sampler
|
upscaleAmountField.value = upscale_amount
|
||||||
|
},
|
||||||
|
readUI: () => upscaleAmountField.value,
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
|
sampler_name: { name: 'Sampler',
|
||||||
|
setUI: (sampler_name) => {
|
||||||
|
samplerField.value = sampler_name
|
||||||
},
|
},
|
||||||
readUI: () => samplerField.value,
|
readUI: () => samplerField.value,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
@@ -161,18 +206,7 @@ const TASK_MAPPING = {
|
|||||||
setUI: (use_stable_diffusion_model) => {
|
setUI: (use_stable_diffusion_model) => {
|
||||||
const oldVal = stableDiffusionModelField.value
|
const oldVal = stableDiffusionModelField.value
|
||||||
|
|
||||||
let pathIdx = use_stable_diffusion_model.lastIndexOf('/') // Linux, Mac paths
|
use_stable_diffusion_model = getModelPath(use_stable_diffusion_model, ['.ckpt', '.safetensors'])
|
||||||
if (pathIdx < 0) {
|
|
||||||
pathIdx = use_stable_diffusion_model.lastIndexOf('\\') // Windows paths.
|
|
||||||
}
|
|
||||||
if (pathIdx >= 0) {
|
|
||||||
use_stable_diffusion_model = use_stable_diffusion_model.slice(pathIdx + 1)
|
|
||||||
}
|
|
||||||
const modelExt = '.ckpt'
|
|
||||||
if (use_stable_diffusion_model.endsWith(modelExt)) {
|
|
||||||
use_stable_diffusion_model = use_stable_diffusion_model.slice(0, use_stable_diffusion_model.length - modelExt.length)
|
|
||||||
}
|
|
||||||
|
|
||||||
stableDiffusionModelField.value = use_stable_diffusion_model
|
stableDiffusionModelField.value = use_stable_diffusion_model
|
||||||
|
|
||||||
if (!stableDiffusionModelField.value) {
|
if (!stableDiffusionModelField.value) {
|
||||||
@@ -182,10 +216,47 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => stableDiffusionModelField.value,
|
readUI: () => stableDiffusionModelField.value,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
|
use_vae_model: { name: 'VAE model',
|
||||||
|
setUI: (use_vae_model) => {
|
||||||
|
const oldVal = vaeModelField.value
|
||||||
|
use_vae_model = (use_vae_model === undefined || use_vae_model === null || use_vae_model === 'None' ? '' : use_vae_model)
|
||||||
|
|
||||||
numOutputsParallel: { name: 'Parallel Images',
|
if (use_vae_model !== '') {
|
||||||
setUI: (numOutputsParallel) => {
|
use_vae_model = getModelPath(use_vae_model, ['.vae.pt', '.ckpt'])
|
||||||
numOutputsParallelField.value = numOutputsParallel
|
use_vae_model = use_vae_model !== '' ? use_vae_model : oldVal
|
||||||
|
}
|
||||||
|
vaeModelField.value = use_vae_model
|
||||||
|
},
|
||||||
|
readUI: () => vaeModelField.value,
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
|
use_hypernetwork_model: { name: 'Hypernetwork model',
|
||||||
|
setUI: (use_hypernetwork_model) => {
|
||||||
|
const oldVal = hypernetworkModelField.value
|
||||||
|
use_hypernetwork_model = (use_hypernetwork_model === undefined || use_hypernetwork_model === null || use_hypernetwork_model === 'None' ? '' : use_hypernetwork_model)
|
||||||
|
|
||||||
|
if (use_hypernetwork_model !== '') {
|
||||||
|
use_hypernetwork_model = getModelPath(use_hypernetwork_model, ['.pt'])
|
||||||
|
use_hypernetwork_model = use_hypernetwork_model !== '' ? use_hypernetwork_model : oldVal
|
||||||
|
}
|
||||||
|
hypernetworkModelField.value = use_hypernetwork_model
|
||||||
|
hypernetworkModelField.dispatchEvent(new Event('change'))
|
||||||
|
},
|
||||||
|
readUI: () => hypernetworkModelField.value,
|
||||||
|
parse: (val) => val
|
||||||
|
},
|
||||||
|
hypernetwork_strength: { name: 'Hypernetwork Strength',
|
||||||
|
setUI: (hypernetwork_strength) => {
|
||||||
|
hypernetworkStrengthField.value = hypernetwork_strength
|
||||||
|
updateHypernetworkStrengthSlider()
|
||||||
|
},
|
||||||
|
readUI: () => parseFloat(hypernetworkStrengthField.value),
|
||||||
|
parse: (val) => parseFloat(val)
|
||||||
|
},
|
||||||
|
|
||||||
|
num_outputs: { name: 'Parallel Images',
|
||||||
|
setUI: (num_outputs) => {
|
||||||
|
numOutputsParallelField.value = num_outputs
|
||||||
},
|
},
|
||||||
readUI: () => parseInt(numOutputsParallelField.value),
|
readUI: () => parseInt(numOutputsParallelField.value),
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
@@ -198,20 +269,6 @@ const TASK_MAPPING = {
|
|||||||
readUI: () => useCPUField.checked,
|
readUI: () => useCPUField.checked,
|
||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
},
|
},
|
||||||
turbo: { name: 'Turbo',
|
|
||||||
setUI: (turbo) => {
|
|
||||||
turboField.checked = turbo
|
|
||||||
},
|
|
||||||
readUI: () => turboField.checked,
|
|
||||||
parse: (val) => Boolean(val)
|
|
||||||
},
|
|
||||||
use_full_precision: { name: 'Use Full Precision',
|
|
||||||
setUI: (use_full_precision) => {
|
|
||||||
useFullPrecisionField.checked = use_full_precision
|
|
||||||
},
|
|
||||||
readUI: () => useFullPrecisionField.checked,
|
|
||||||
parse: (val) => Boolean(val)
|
|
||||||
},
|
|
||||||
|
|
||||||
stream_image_progress: { name: 'Stream Image Progress',
|
stream_image_progress: { name: 'Stream Image Progress',
|
||||||
setUI: (stream_image_progress) => {
|
setUI: (stream_image_progress) => {
|
||||||
@@ -243,7 +300,10 @@ const TASK_MAPPING = {
|
|||||||
parse: (val) => val
|
parse: (val) => val
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
function restoreTaskToUI(task) {
|
|
||||||
|
function restoreTaskToUI(task, fieldsToSkip) {
|
||||||
|
fieldsToSkip = fieldsToSkip || []
|
||||||
|
|
||||||
if ('numOutputsTotal' in task) {
|
if ('numOutputsTotal' in task) {
|
||||||
numOutputsTotalField.value = task.numOutputsTotal
|
numOutputsTotalField.value = task.numOutputsTotal
|
||||||
}
|
}
|
||||||
@@ -255,10 +315,53 @@ function restoreTaskToUI(task) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
for (const key in TASK_MAPPING) {
|
for (const key in TASK_MAPPING) {
|
||||||
if (key in task.reqBody) {
|
if (key in task.reqBody && !fieldsToSkip.includes(key)) {
|
||||||
TASK_MAPPING[key].setUI(task.reqBody[key])
|
TASK_MAPPING[key].setUI(task.reqBody[key])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// properly reset fields not present in the task
|
||||||
|
if (!('use_hypernetwork_model' in task.reqBody)) {
|
||||||
|
hypernetworkModelField.value = ""
|
||||||
|
hypernetworkModelField.dispatchEvent(new Event("change"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// restore the original prompt if provided (e.g. use settings), fallback to prompt as needed (e.g. copy/paste or d&d)
|
||||||
|
promptField.value = task.reqBody.original_prompt
|
||||||
|
if (!('original_prompt' in task.reqBody)) {
|
||||||
|
promptField.value = task.reqBody.prompt
|
||||||
|
}
|
||||||
|
|
||||||
|
// properly reset checkboxes
|
||||||
|
if (!('use_face_correction' in task.reqBody)) {
|
||||||
|
useFaceCorrectionField.checked = false
|
||||||
|
gfpganModelField.disabled = true
|
||||||
|
}
|
||||||
|
if (!('use_upscale' in task.reqBody)) {
|
||||||
|
useUpscalingField.checked = false
|
||||||
|
}
|
||||||
|
if (!('mask' in task.reqBody) && maskSetting.checked) {
|
||||||
|
maskSetting.checked = false
|
||||||
|
maskSetting.dispatchEvent(new Event("click"))
|
||||||
|
}
|
||||||
|
upscaleModelField.disabled = !useUpscalingField.checked
|
||||||
|
upscaleAmountField.disabled = !useUpscalingField.checked
|
||||||
|
|
||||||
|
// hide/show source picture as needed
|
||||||
|
if (IMAGE_REGEX.test(initImagePreview.src) && task.reqBody.init_image == undefined) {
|
||||||
|
// hide source image
|
||||||
|
initImageClearBtn.dispatchEvent(new Event("click"))
|
||||||
|
}
|
||||||
|
else if (task.reqBody.init_image !== undefined) {
|
||||||
|
// listen for inpainter loading event, which happens AFTER the main image loads (which reloads the inpainter)
|
||||||
|
initImagePreview.addEventListener('load', function() {
|
||||||
|
if (Boolean(task.reqBody.mask)) {
|
||||||
|
imageInpainter.setImg(task.reqBody.mask)
|
||||||
|
maskSetting.checked = true
|
||||||
|
}
|
||||||
|
}, { once: true })
|
||||||
|
initImagePreview.src = task.reqBody.init_image
|
||||||
|
}
|
||||||
}
|
}
|
||||||
function readUI() {
|
function readUI() {
|
||||||
const reqBody = {}
|
const reqBody = {}
|
||||||
@@ -271,8 +374,32 @@ function readUI() {
|
|||||||
'reqBody': reqBody
|
'reqBody': reqBody
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
function getModelPath(filename, extensions)
|
||||||
|
{
|
||||||
|
if (typeof filename !== "string") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
let pathIdx
|
||||||
|
if (filename.includes('/models/stable-diffusion/')) {
|
||||||
|
pathIdx = filename.indexOf('/models/stable-diffusion/') + 25 // Linux, Mac paths
|
||||||
|
}
|
||||||
|
else if (filename.includes('\\models\\stable-diffusion\\')) {
|
||||||
|
pathIdx = filename.indexOf('\\models\\stable-diffusion\\') + 25 // Linux, Mac paths
|
||||||
|
}
|
||||||
|
if (pathIdx >= 0) {
|
||||||
|
filename = filename.slice(pathIdx)
|
||||||
|
}
|
||||||
|
extensions.forEach(ext => {
|
||||||
|
if (filename.endsWith(ext)) {
|
||||||
|
filename = filename.slice(0, filename.length - ext.length)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return filename
|
||||||
|
}
|
||||||
|
|
||||||
const TASK_TEXT_MAPPING = {
|
const TASK_TEXT_MAPPING = {
|
||||||
|
prompt: 'Prompt',
|
||||||
width: 'Width',
|
width: 'Width',
|
||||||
height: 'Height',
|
height: 'Height',
|
||||||
seed: 'Seed',
|
seed: 'Seed',
|
||||||
@@ -281,24 +408,39 @@ const TASK_TEXT_MAPPING = {
|
|||||||
prompt_strength: 'Prompt Strength',
|
prompt_strength: 'Prompt Strength',
|
||||||
use_face_correction: 'Use Face Correction',
|
use_face_correction: 'Use Face Correction',
|
||||||
use_upscale: 'Use Upscaling',
|
use_upscale: 'Use Upscaling',
|
||||||
sampler: 'Sampler',
|
upscale_amount: 'Upscale By',
|
||||||
|
sampler_name: 'Sampler',
|
||||||
negative_prompt: 'Negative Prompt',
|
negative_prompt: 'Negative Prompt',
|
||||||
use_stable_diffusion_model: 'Stable Diffusion model'
|
use_stable_diffusion_model: 'Stable Diffusion model',
|
||||||
|
use_hypernetwork_model: 'Hypernetwork model',
|
||||||
|
hypernetwork_strength: 'Hypernetwork Strength'
|
||||||
}
|
}
|
||||||
const afterPromptRe = /^\s*Width\s*:\s*\d+\s*(?:\r\n|\r|\n)+\s*Height\s*:\s*\d+\s*(\r\n|\r|\n)+Seed\s*:\s*\d+\s*$/igm
|
|
||||||
function parseTaskFromText(str) {
|
function parseTaskFromText(str) {
|
||||||
const taskReqBody = {}
|
const taskReqBody = {}
|
||||||
|
|
||||||
|
const lines = str.split('\n')
|
||||||
|
if (lines.length === 0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Prompt
|
// Prompt
|
||||||
afterPromptRe.lastIndex = 0
|
let knownKeyOnFirstLine = false
|
||||||
const match = afterPromptRe.exec(str)
|
for (let key in TASK_TEXT_MAPPING) {
|
||||||
if (match) {
|
if (lines[0].startsWith(TASK_TEXT_MAPPING[key] + ':')) {
|
||||||
let prompt = str.slice(0, match.index)
|
knownKeyOnFirstLine = true
|
||||||
str = str.slice(prompt.length)
|
break
|
||||||
taskReqBody.prompt = prompt.trim()
|
}
|
||||||
|
}
|
||||||
|
if (!knownKeyOnFirstLine) {
|
||||||
|
taskReqBody.prompt = lines[0]
|
||||||
console.log('Prompt:', taskReqBody.prompt)
|
console.log('Prompt:', taskReqBody.prompt)
|
||||||
}
|
}
|
||||||
|
|
||||||
for (const key in TASK_TEXT_MAPPING) {
|
for (const key in TASK_TEXT_MAPPING) {
|
||||||
|
if (key in taskReqBody) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
const name = TASK_TEXT_MAPPING[key];
|
const name = TASK_TEXT_MAPPING[key];
|
||||||
let val = undefined
|
let val = undefined
|
||||||
|
|
||||||
@@ -326,29 +468,38 @@ function parseTaskFromText(str) {
|
|||||||
return task
|
return task
|
||||||
}
|
}
|
||||||
|
|
||||||
async function parseContent(text) {
    text = text.trim();
    if (text.startsWith('{') && text.endsWith('}')) {
        try {
            const task = JSON.parse(text)
            if (!('reqBody' in task)) { // support the format saved to the disk, by the UI
                task.reqBody = Object.assign({}, task)
            }
            restoreTaskToUI(task)
            return true
        } catch (e) {
            console.warn(`JSON text content couldn't be parsed.`, e)
        }
        return false
    }

    // Normal txt file.
    const task = parseTaskFromText(text)
    if (text.toLowerCase().includes('seed:') && task) { // only parse valid task content
        restoreTaskToUI(task)
        return true
    } else {
        console.warn(`Raw text content couldn't be parsed.`)
        return false
    }
}

async function readFile(file, i) {
    console.log(`Event %o reading file[${i}]:${file.name}...`)
    const fileContent = (await file.text()).trim()
    return await parseContent(fileContent)
}

function dropHandler(ev) {
    console.log('Content dropped...')
    let items = []

@@ -382,7 +533,7 @@ function dragOverHandler(ev) {
    ev.dataTransfer.dropEffect = "copy"

    let img = new Image()
    img.src = '//' + location.host + '/media/images/favicon-32x32.png'
    ev.dataTransfer.setDragImage(img, 16, 16)
}

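// Illustrative sketch, not part of this commit: parseContent() also accepts the JSON
// settings format, assuming main.js and the UI it drives are loaded. A bare settings
// object is wrapped into reqBody automatically, so both shapes below restore into the UI
// (field values are made up for the example; both calls return a Promise<boolean>).
const bareSettings = '{ "prompt": "an astronaut riding a horse", "seed": 42, "width": 512, "height": 512 }'
const wrappedSettings = '{ "reqBody": { "prompt": "an astronaut riding a horse", "seed": 42 } }'
parseContent(bareSettings)     // wrapped via task.reqBody = Object.assign({}, task)
parseContent(wrappedSettings)  // already has reqBody, used as-is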
@@ -391,49 +542,64 @@ document.addEventListener("dragover", dragOverHandler)

const TASK_REQ_NO_EXPORT = [
    "use_cpu",
    "save_to_disk_path"
]
const resetSettings = document.getElementById('reset-image-settings')

function checkReadTextClipboardPermission (result) {
    if (result.state != "granted" && result.state != "prompt") {
        return
    }
    // PASTE ICON
    const pasteIcon = document.createElement('i')
    pasteIcon.className = 'fa-solid fa-paste section-button'
    pasteIcon.innerHTML = `<span class="simple-tooltip top-left">Paste Image Settings</span>`
    pasteIcon.addEventListener('click', async (event) => {
        event.stopPropagation()
        // Add css class 'active'
        pasteIcon.classList.add('active')
        // In 350 ms remove the 'active' class
        asyncDelay(350).then(() => pasteIcon.classList.remove('active'))

        // Retrieve clipboard content and try to parse it
        const text = await navigator.clipboard.readText();
        await parseContent(text)
    })
    resetSettings.parentNode.insertBefore(pasteIcon, resetSettings)
}
navigator.permissions.query({ name: "clipboard-read" }).then(checkReadTextClipboardPermission, (reason) => console.log('clipboard-read is not available. %o', reason))

document.addEventListener('paste', async (event) => {
    if (event.target) {
        const targetTag = event.target.tagName.toLowerCase()
        // Disable when targeting input elements.
        if (targetTag === 'input' || targetTag === 'textarea') {
            return
        }
    }
    const paste = (event.clipboardData || window.clipboardData).getData('text')
    const selection = window.getSelection()
    if (selection.toString().trim().length <= 0 && await parseContent(paste)) {
        event.preventDefault()
        return
    }
})

// Adds a copy and a paste icon if the browser grants permission to write to clipboard.
function checkWriteToClipboardPermission (result) {
    if (result.state != "granted" && result.state != "prompt") {
        return
    }
    // COPY ICON
    const copyIcon = document.createElement('i')
    copyIcon.className = 'fa-solid fa-clipboard section-button'
    copyIcon.innerHTML = `<span class="simple-tooltip top-left">Copy Image Settings</span>`
    copyIcon.addEventListener('click', (event) => {
        event.stopPropagation()
        // Add css class 'active'
        copyIcon.classList.add('active')
        // In 350 ms remove the 'active' class
        asyncDelay(350).then(() => copyIcon.classList.remove('active'))
        const uiState = readUI()
        TASK_REQ_NO_EXPORT.forEach((key) => delete uiState.reqBody[key])
        if (uiState.reqBody.init_image && !IMAGE_REGEX.test(uiState.reqBody.init_image)) {
@@ -443,23 +609,7 @@ function checkWriteToClipboardPermission (result) {
        navigator.clipboard.writeText(JSON.stringify(uiState, undefined, 4))
    })
    resetSettings.parentNode.insertBefore(copyIcon, resetSettings)
}

// Determine which access we have to the clipboard. Clipboard access is only available on localhost or via TLS.
navigator.permissions.query({ name: "clipboard-write" }).then(checkWriteToClipboardPermission, (e) => {
    if (e instanceof TypeError && typeof navigator?.clipboard?.writeText === 'function') {
4     ui/media/js/drawingboard.min.js (vendored)
1311  ui/media/js/engine.js (Normal file)
837   ui/media/js/image-editor.js (Normal file)
@@ -0,0 +1,837 @@
|
|||||||
|
var editorControlsLeft = document.getElementById("image-editor-controls-left")
|
||||||
|
|
||||||
|
const IMAGE_EDITOR_MAX_SIZE = 800
|
||||||
|
|
||||||
|
const IMAGE_EDITOR_BUTTONS = [
|
||||||
|
{
|
||||||
|
name: "Cancel",
|
||||||
|
icon: "fa-regular fa-circle-xmark",
|
||||||
|
handler: editor => {
|
||||||
|
editor.hide()
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Save",
|
||||||
|
icon: "fa-solid fa-floppy-disk",
|
||||||
|
handler: editor => {
|
||||||
|
editor.saveImage()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
const defaultToolBegin = (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
ctx.beginPath()
|
||||||
|
ctx.moveTo(x, y)
|
||||||
|
}
|
||||||
|
const defaultToolMove = (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
ctx.lineTo(x, y)
|
||||||
|
if (is_overlay) {
|
||||||
|
ctx.clearRect(0, 0, editor.width, editor.height)
|
||||||
|
ctx.stroke()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const defaultToolEnd = (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
ctx.stroke()
|
||||||
|
if (is_overlay) {
|
||||||
|
ctx.clearRect(0, 0, editor.width, editor.height)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const toolDoNothing = (editor, ctx, x, y, is_overlay = false) => {}
|
||||||
|
|
||||||
|
const IMAGE_EDITOR_TOOLS = [
|
||||||
|
{
|
||||||
|
id: "draw",
|
||||||
|
name: "Draw",
|
||||||
|
icon: "fa-solid fa-pencil",
|
||||||
|
cursor: "url(/media/images/fa-pencil.svg) 0 24, pointer",
|
||||||
|
begin: defaultToolBegin,
|
||||||
|
move: defaultToolMove,
|
||||||
|
end: defaultToolEnd
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "erase",
|
||||||
|
name: "Erase",
|
||||||
|
icon: "fa-solid fa-eraser",
|
||||||
|
cursor: "url(/media/images/fa-eraser.svg) 0 14, pointer",
|
||||||
|
begin: defaultToolBegin,
|
||||||
|
move: (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
ctx.lineTo(x, y)
|
||||||
|
if (is_overlay) {
|
||||||
|
ctx.clearRect(0, 0, editor.width, editor.height)
|
||||||
|
ctx.globalCompositeOperation = "source-over"
|
||||||
|
ctx.globalAlpha = 1
|
||||||
|
ctx.filter = "none"
|
||||||
|
ctx.drawImage(editor.canvas_current, 0, 0)
|
||||||
|
editor.setBrush(editor.layers.overlay)
|
||||||
|
ctx.stroke()
|
||||||
|
editor.canvas_current.style.opacity = 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
end: (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
ctx.stroke()
|
||||||
|
if (is_overlay) {
|
||||||
|
ctx.clearRect(0, 0, editor.width, editor.height)
|
||||||
|
editor.canvas_current.style.opacity = ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
setBrush: (editor, layer) => {
|
||||||
|
layer.ctx.globalCompositeOperation = "destination-out"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "fill",
|
||||||
|
name: "Fill",
|
||||||
|
icon: "fa-solid fa-fill",
|
||||||
|
cursor: "url(/media/images/fa-fill.svg) 20 6, pointer",
|
||||||
|
begin: (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
if (!is_overlay) {
|
||||||
|
var color = hexToRgb(ctx.fillStyle)
|
||||||
|
color.a = parseInt(ctx.globalAlpha * 255) // layer.ctx.globalAlpha
|
||||||
|
flood_fill(editor, ctx, parseInt(x), parseInt(y), color)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
move: toolDoNothing,
|
||||||
|
end: toolDoNothing
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "colorpicker",
|
||||||
|
name: "Picker",
|
||||||
|
icon: "fa-solid fa-eye-dropper",
|
||||||
|
cursor: "url(/media/images/fa-eye-dropper.svg) 0 24, pointer",
|
||||||
|
begin: (editor, ctx, x, y, is_overlay = false) => {
|
||||||
|
if (!is_overlay) {
|
||||||
|
var img_rgb = editor.layers.background.ctx.getImageData(x, y, 1, 1).data
|
||||||
|
var drawn_rgb = editor.ctx_current.getImageData(x, y, 1, 1).data
|
||||||
|
var drawn_opacity = drawn_rgb[3] / 255
|
||||||
|
editor.custom_color_input.value = rgbToHex({
|
||||||
|
r: (drawn_rgb[0] * drawn_opacity) + (img_rgb[0] * (1 - drawn_opacity)),
|
||||||
|
g: (drawn_rgb[1] * drawn_opacity) + (img_rgb[1] * (1 - drawn_opacity)),
|
||||||
|
b: (drawn_rgb[2] * drawn_opacity) + (img_rgb[2] * (1 - drawn_opacity)),
|
||||||
|
})
|
||||||
|
editor.custom_color_input.dispatchEvent(new Event("change"))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
move: toolDoNothing,
|
||||||
|
end: toolDoNothing
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
const IMAGE_EDITOR_ACTIONS = [
|
||||||
|
{
|
||||||
|
id: "fill_all",
|
||||||
|
name: "Fill all",
|
||||||
|
icon: "fa-solid fa-paint-roller",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.ctx_current.globalCompositeOperation = "source-over"
|
||||||
|
editor.ctx_current.rect(0, 0, editor.width, editor.height)
|
||||||
|
editor.ctx_current.fill()
|
||||||
|
editor.setBrush()
|
||||||
|
},
|
||||||
|
trackHistory: true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "clear",
|
||||||
|
name: "Clear",
|
||||||
|
icon: "fa-solid fa-xmark",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.ctx_current.clearRect(0, 0, editor.width, editor.height)
|
||||||
|
},
|
||||||
|
trackHistory: true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "undo",
|
||||||
|
name: "Undo",
|
||||||
|
icon: "fa-solid fa-rotate-left",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.history.undo()
|
||||||
|
},
|
||||||
|
trackHistory: false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "redo",
|
||||||
|
name: "Redo",
|
||||||
|
icon: "fa-solid fa-rotate-right",
|
||||||
|
handler: (editor) => {
|
||||||
|
editor.history.redo()
|
||||||
|
},
|
||||||
|
trackHistory: false
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
var IMAGE_EDITOR_SECTIONS = [
|
||||||
|
{
|
||||||
|
name: "tool",
|
||||||
|
title: "Tool",
|
||||||
|
default: "draw",
|
||||||
|
options: Array.from(IMAGE_EDITOR_TOOLS.map(t => t.id)),
|
||||||
|
initElement: (element, option) => {
|
||||||
|
var tool_info = IMAGE_EDITOR_TOOLS.find(t => t.id == option)
|
||||||
|
element.className = "image-editor-button button"
|
||||||
|
var sub_element = document.createElement("div")
|
||||||
|
var icon = document.createElement("i")
|
||||||
|
tool_info.icon.split(" ").forEach(c => icon.classList.add(c))
|
||||||
|
sub_element.appendChild(icon)
|
||||||
|
sub_element.append(tool_info.name)
|
||||||
|
element.appendChild(sub_element)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "color",
|
||||||
|
title: "Color",
|
||||||
|
default: "#f1c232",
|
||||||
|
options: [
|
||||||
|
"custom",
|
||||||
|
"#ea9999", "#e06666", "#cc0000", "#990000", "#660000",
|
||||||
|
"#f9cb9c", "#f6b26b", "#e69138", "#b45f06", "#783f04",
|
||||||
|
"#ffe599", "#ffd966", "#f1c232", "#bf9000", "#7f6000",
|
||||||
|
"#b6d7a8", "#93c47d", "#6aa84f", "#38761d", "#274e13",
|
||||||
|
"#a4c2f4", "#6d9eeb", "#3c78d8", "#1155cc", "#1c4587",
|
||||||
|
"#b4a7d6", "#8e7cc3", "#674ea7", "#351c75", "#20124d",
|
||||||
|
"#d5a6bd", "#c27ba0", "#a64d79", "#741b47", "#4c1130",
|
||||||
|
"#ffffff", "#c0c0c0", "#838383", "#525252", "#000000",
|
||||||
|
],
|
||||||
|
initElement: (element, option) => {
|
||||||
|
if (option == "custom") {
|
||||||
|
var input = document.createElement("input")
|
||||||
|
input.type = "color"
|
||||||
|
element.appendChild(input)
|
||||||
|
var span = document.createElement("span")
|
||||||
|
span.textContent = "Custom"
|
||||||
|
span.onclick = function(e) {
|
||||||
|
input.click()
|
||||||
|
}
|
||||||
|
element.appendChild(span)
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
element.style.background = option
|
||||||
|
}
|
||||||
|
},
|
||||||
|
getCustom: editor => {
|
||||||
|
var input = editor.popup.querySelector(".image_editor_color input")
|
||||||
|
return input.value
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "brush_size",
|
||||||
|
title: "Brush Size",
|
||||||
|
default: 48,
|
||||||
|
options: [ 6, 12, 16, 24, 30, 40, 48, 64 ],
|
||||||
|
initElement: (element, option) => {
|
||||||
|
element.parentElement.style.flex = option
|
||||||
|
element.style.width = option + "px"
|
||||||
|
element.style.height = option + "px"
|
||||||
|
element.style['margin-right'] = '2px'
|
||||||
|
element.style["border-radius"] = (option / 2).toFixed() + "px"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "opacity",
|
||||||
|
title: "Opacity",
|
||||||
|
default: 0,
|
||||||
|
options: [ 0, 0.2, 0.4, 0.6, 0.8 ],
|
||||||
|
initElement: (element, option) => {
|
||||||
|
element.style.background = `repeating-conic-gradient(rgba(0, 0, 0, ${option}) 0% 25%, rgba(255, 255, 255, ${option}) 0% 50%) 50% / 10px 10px`
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "sharpness",
|
||||||
|
title: "Sharpness",
|
||||||
|
default: 0,
|
||||||
|
options: [ 0, 0.05, 0.1, 0.2, 0.3 ],
|
||||||
|
initElement: (element, option) => {
|
||||||
|
var size = 32
|
||||||
|
var blur_amount = parseInt(option * size)
|
||||||
|
var sub_element = document.createElement("div")
|
||||||
|
sub_element.style.background = `var(--background-color3)`
|
||||||
|
sub_element.style.filter = `blur(${blur_amount}px)`
|
||||||
|
sub_element.style.width = `${size - 2}px`
|
||||||
|
sub_element.style.height = `${size - 2}px`
|
||||||
|
sub_element.style['border-radius'] = `${size}px`
|
||||||
|
element.style.background = "none"
|
||||||
|
element.appendChild(sub_element)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
class EditorHistory {
|
||||||
|
constructor(editor) {
|
||||||
|
this.editor = editor
|
||||||
|
this.events = [] // stack of all events (actions/edits)
|
||||||
|
this.current_edit = null
|
||||||
|
this.rewind_index = 0 // how many events back into the history we've rewound to. (current state is just after event at index 'length - this.rewind_index - 1')
|
||||||
|
}
|
||||||
|
push(event) {
|
||||||
|
// probably add something here eventually to save state every x events
|
||||||
|
if (this.rewind_index != 0) {
|
||||||
|
this.events = this.events.slice(0, 0 - this.rewind_index)
|
||||||
|
this.rewind_index = 0
|
||||||
|
}
|
||||||
|
var snapshot_frequency = 20 // (every x edits, take a snapshot of the current drawing state, for faster rewinding)
|
||||||
|
if (this.events.length > 0 && this.events.length % snapshot_frequency == 0) {
|
||||||
|
event.snapshot = this.editor.layers.drawing.ctx.getImageData(0, 0, this.editor.width, this.editor.height)
|
||||||
|
}
|
||||||
|
this.events.push(event)
|
||||||
|
}
|
||||||
|
pushAction(action) {
|
||||||
|
this.push({
|
||||||
|
type: "action",
|
||||||
|
id: action
|
||||||
|
});
|
||||||
|
}
|
||||||
|
editBegin(x, y) {
|
||||||
|
this.current_edit = {
|
||||||
|
type: "edit",
|
||||||
|
id: this.editor.getOptionValue("tool"),
|
||||||
|
options: Object.assign({}, this.editor.options),
|
||||||
|
points: [ { x: x, y: y } ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
editMove(x, y) {
|
||||||
|
if (this.current_edit) {
|
||||||
|
this.current_edit.points.push({ x: x, y: y })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
editEnd(x, y) {
|
||||||
|
if (this.current_edit) {
|
||||||
|
this.push(this.current_edit)
|
||||||
|
this.current_edit = null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clear() {
|
||||||
|
this.events = []
|
||||||
|
}
|
||||||
|
undo() {
|
||||||
|
this.rewindTo(this.rewind_index + 1)
|
||||||
|
}
|
||||||
|
redo() {
|
||||||
|
this.rewindTo(this.rewind_index - 1)
|
||||||
|
}
|
||||||
|
rewindTo(new_rewind_index) {
|
||||||
|
if (new_rewind_index < 0 || new_rewind_index > this.events.length) {
|
||||||
|
return; // do nothing if target index is out of bounds
|
||||||
|
}
|
||||||
|
|
||||||
|
var ctx = this.editor.layers.drawing.ctx
|
||||||
|
ctx.clearRect(0, 0, this.editor.width, this.editor.height)
|
||||||
|
|
||||||
|
var target_index = this.events.length - 1 - new_rewind_index
|
||||||
|
var snapshot_index = target_index
|
||||||
|
while (snapshot_index > -1) {
|
||||||
|
if (this.events[snapshot_index].snapshot) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
snapshot_index--
|
||||||
|
}
|
||||||
|
|
||||||
|
if (snapshot_index != -1) {
|
||||||
|
ctx.putImageData(this.events[snapshot_index].snapshot, 0, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (var i = (snapshot_index + 1); i <= target_index; i++) {
|
||||||
|
var event = this.events[i]
|
||||||
|
if (event.type == "action") {
|
||||||
|
var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == event.id)
|
||||||
|
action.handler(this.editor)
|
||||||
|
}
|
||||||
|
else if (event.type == "edit") {
|
||||||
|
var tool = IMAGE_EDITOR_TOOLS.find(t => t.id == event.id)
|
||||||
|
this.editor.setBrush(this.editor.layers.drawing, event.options)
|
||||||
|
|
||||||
|
var first_point = event.points[0]
|
||||||
|
tool.begin(this.editor, ctx, first_point.x, first_point.y)
|
||||||
|
for (var point_i = 1; point_i < event.points.length; point_i++) {
|
||||||
|
tool.move(this.editor, ctx, event.points[point_i].x, event.points[point_i].y)
|
||||||
|
}
|
||||||
|
var last_point = event.points[event.points.length - 1]
|
||||||
|
tool.end(this.editor, ctx, last_point.x, last_point.y)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// re-set brush to current settings
|
||||||
|
this.editor.setBrush(this.editor.layers.drawing)
|
||||||
|
|
||||||
|
this.rewind_index = new_rewind_index
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class ImageEditor {
|
||||||
|
constructor(popup, inpainter = false) {
|
||||||
|
this.inpainter = inpainter
|
||||||
|
this.popup = popup
|
||||||
|
this.history = new EditorHistory(this)
|
||||||
|
if (inpainter) {
|
||||||
|
this.popup.classList.add("inpainter")
|
||||||
|
}
|
||||||
|
this.drawing = false
|
||||||
|
this.temp_previous_tool = null // used for the ctrl-colorpicker functionality
|
||||||
|
this.container = popup.querySelector(".editor-controls-center > div")
|
||||||
|
this.layers = {}
|
||||||
|
var layer_names = [
|
||||||
|
"background",
|
||||||
|
"drawing",
|
||||||
|
"overlay"
|
||||||
|
]
|
||||||
|
layer_names.forEach(name => {
|
||||||
|
let canvas = document.createElement("canvas")
|
||||||
|
canvas.className = `editor-canvas-${name}`
|
||||||
|
this.container.appendChild(canvas)
|
||||||
|
this.layers[name] = {
|
||||||
|
name: name,
|
||||||
|
canvas: canvas,
|
||||||
|
ctx: canvas.getContext("2d")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// add mouse handlers
|
||||||
|
this.container.addEventListener("mousedown", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("mouseup", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("mousemove", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("mouseout", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("mouseenter", this.mouseHandler.bind(this))
|
||||||
|
|
||||||
|
this.container.addEventListener("touchstart", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("touchmove", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("touchcancel", this.mouseHandler.bind(this))
|
||||||
|
this.container.addEventListener("touchend", this.mouseHandler.bind(this))
|
||||||
|
|
||||||
|
// initialize editor controls
|
||||||
|
this.options = {}
|
||||||
|
this.optionElements = {}
|
||||||
|
IMAGE_EDITOR_SECTIONS.forEach(section => {
|
||||||
|
section.id = `image_editor_${section.name}`
|
||||||
|
var sectionElement = document.createElement("div")
|
||||||
|
sectionElement.className = section.id
|
||||||
|
|
||||||
|
var title = document.createElement("h4")
|
||||||
|
title.innerText = section.title
|
||||||
|
sectionElement.appendChild(title)
|
||||||
|
|
||||||
|
var optionsContainer = document.createElement("div")
|
||||||
|
optionsContainer.classList.add("editor-options-container")
|
||||||
|
|
||||||
|
this.optionElements[section.name] = []
|
||||||
|
section.options.forEach((option, index) => {
|
||||||
|
var optionHolder = document.createElement("div")
|
||||||
|
var optionElement = document.createElement("div")
|
||||||
|
optionHolder.appendChild(optionElement)
|
||||||
|
section.initElement(optionElement, option)
|
||||||
|
optionElement.addEventListener("click", target => this.selectOption(section.name, index))
|
||||||
|
optionsContainer.appendChild(optionHolder)
|
||||||
|
this.optionElements[section.name].push(optionElement)
|
||||||
|
})
|
||||||
|
this.selectOption(section.name, section.options.indexOf(section.default))
|
||||||
|
|
||||||
|
sectionElement.appendChild(optionsContainer)
|
||||||
|
|
||||||
|
this.popup.querySelector(".editor-controls-left").appendChild(sectionElement)
|
||||||
|
})
|
||||||
|
|
||||||
|
this.custom_color_input = this.popup.querySelector(`input[type="color"]`)
|
||||||
|
this.custom_color_input.addEventListener("change", () => {
|
||||||
|
this.custom_color_input.parentElement.style.background = this.custom_color_input.value
|
||||||
|
this.selectOption("color", 0)
|
||||||
|
})
|
||||||
|
|
||||||
|
if (this.inpainter) {
|
||||||
|
this.selectOption("color", IMAGE_EDITOR_SECTIONS.find(s => s.name == "color").options.indexOf("#ffffff"))
|
||||||
|
this.selectOption("opacity", IMAGE_EDITOR_SECTIONS.find(s => s.name == "opacity").options.indexOf(0.4))
|
||||||
|
}
|
||||||
|
|
||||||
|
// initialize the right-side controls
|
||||||
|
var buttonContainer = document.createElement("div")
|
||||||
|
IMAGE_EDITOR_BUTTONS.forEach(button => {
|
||||||
|
var element = document.createElement("div")
|
||||||
|
var icon = document.createElement("i")
|
||||||
|
element.className = "image-editor-button button"
|
||||||
|
icon.className = button.icon
|
||||||
|
element.appendChild(icon)
|
||||||
|
element.append(button.name)
|
||||||
|
buttonContainer.appendChild(element)
|
||||||
|
element.addEventListener("click", event => button.handler(this))
|
||||||
|
})
|
||||||
|
var actionsContainer = document.createElement("div")
|
||||||
|
var actionsTitle = document.createElement("h4")
|
||||||
|
actionsTitle.textContent = "Actions"
|
||||||
|
actionsContainer.appendChild(actionsTitle);
|
||||||
|
IMAGE_EDITOR_ACTIONS.forEach(action => {
|
||||||
|
var element = document.createElement("div")
|
||||||
|
var icon = document.createElement("i")
|
||||||
|
element.className = "image-editor-button button"
|
||||||
|
icon.className = action.icon
|
||||||
|
element.appendChild(icon)
|
||||||
|
element.append(action.name)
|
||||||
|
actionsContainer.appendChild(element)
|
||||||
|
element.addEventListener("click", event => this.runAction(action.id))
|
||||||
|
})
|
||||||
|
this.popup.querySelector(".editor-controls-right").appendChild(actionsContainer)
|
||||||
|
this.popup.querySelector(".editor-controls-right").appendChild(buttonContainer)
|
||||||
|
|
||||||
|
this.keyHandlerBound = this.keyHandler.bind(this)
|
||||||
|
|
||||||
|
this.setSize(512, 512)
|
||||||
|
}
|
||||||
|
show() {
|
||||||
|
this.popup.classList.add("active")
|
||||||
|
document.addEventListener("keydown", this.keyHandlerBound)
|
||||||
|
document.addEventListener("keyup", this.keyHandlerBound)
|
||||||
|
}
|
||||||
|
hide() {
|
||||||
|
this.popup.classList.remove("active")
|
||||||
|
document.removeEventListener("keydown", this.keyHandlerBound)
|
||||||
|
document.removeEventListener("keyup", this.keyHandlerBound)
|
||||||
|
}
|
||||||
|
setSize(width, height) {
|
||||||
|
if (width == this.width && height == this.height) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (width > height) {
|
||||||
|
var max_size = Math.min(parseInt(window.innerWidth * 0.9), width, 768)
|
||||||
|
var multiplier = max_size / width
|
||||||
|
width = (multiplier * width).toFixed()
|
||||||
|
height = (multiplier * height).toFixed()
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
var max_size = Math.min(parseInt(window.innerHeight * 0.9), height, 768)
|
||||||
|
var multiplier = max_size / height
|
||||||
|
width = (multiplier * width).toFixed()
|
||||||
|
height = (multiplier * height).toFixed()
|
||||||
|
}
|
||||||
|
this.width = parseInt(width)
|
||||||
|
this.height = parseInt(height)
|
||||||
|
|
||||||
|
this.container.style.width = width + "px"
|
||||||
|
this.container.style.height = height + "px"
|
||||||
|
|
||||||
|
Object.values(this.layers).forEach(layer => {
|
||||||
|
layer.canvas.width = width
|
||||||
|
layer.canvas.height = height
|
||||||
|
})
|
||||||
|
|
||||||
|
if (this.inpainter) {
|
||||||
|
this.saveImage() // We've reset the size of the image so inpainting is different
|
||||||
|
}
|
||||||
|
this.setBrush()
|
||||||
|
this.history.clear()
|
||||||
|
}
|
||||||
|
get tool() {
|
||||||
|
var tool_id = this.getOptionValue("tool")
|
||||||
|
return IMAGE_EDITOR_TOOLS.find(t => t.id == tool_id);
|
||||||
|
}
|
||||||
|
loadTool() {
|
||||||
|
this.drawing = false
|
||||||
|
this.container.style.cursor = this.tool.cursor;
|
||||||
|
}
|
||||||
|
setImage(url, width, height) {
|
||||||
|
this.setSize(width, height)
|
||||||
|
this.layers.background.ctx.clearRect(0, 0, this.width, this.height)
|
||||||
|
if (!(url && this.inpainter)) {
|
||||||
|
this.layers.drawing.ctx.clearRect(0, 0, this.width, this.height)
|
||||||
|
}
|
||||||
|
if (url) {
|
||||||
|
var image = new Image()
|
||||||
|
image.onload = () => {
|
||||||
|
this.layers.background.ctx.drawImage(image, 0, 0, this.width, this.height)
|
||||||
|
}
|
||||||
|
image.src = url
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
this.layers.background.ctx.fillStyle = "#ffffff"
|
||||||
|
this.layers.background.ctx.beginPath()
|
||||||
|
this.layers.background.ctx.rect(0, 0, this.width, this.height)
|
||||||
|
this.layers.background.ctx.fill()
|
||||||
|
}
|
||||||
|
this.history.clear()
|
||||||
|
}
|
||||||
|
saveImage() {
|
||||||
|
if (!this.inpainter) {
|
||||||
|
// This is not an inpainter, so save the image as the new img2img input
|
||||||
|
this.layers.background.ctx.drawImage(this.layers.drawing.canvas, 0, 0, this.width, this.height)
|
||||||
|
var base64 = this.layers.background.canvas.toDataURL()
|
||||||
|
initImagePreview.src = base64 // this will trigger the rest of the app to use it
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// This is an inpainter, so make sure the toggle is set accordingly
|
||||||
|
var is_blank = !this.layers.drawing.ctx
|
||||||
|
.getImageData(0, 0, this.width, this.height).data
|
||||||
|
.some(channel => channel !== 0)
|
||||||
|
maskSetting.checked = !is_blank
|
||||||
|
}
|
||||||
|
this.hide()
|
||||||
|
}
|
||||||
|
getImg() { // a drop-in replacement of the drawingboard version
|
||||||
|
return this.layers.drawing.canvas.toDataURL()
|
||||||
|
}
|
||||||
|
setImg(dataUrl) { // a drop-in replacement of the drawingboard version
|
||||||
|
var image = new Image()
|
||||||
|
image.onload = () => {
|
||||||
|
var ctx = this.layers.drawing.ctx;
|
||||||
|
ctx.clearRect(0, 0, this.width, this.height)
|
||||||
|
ctx.globalCompositeOperation = "source-over"
|
||||||
|
ctx.globalAlpha = 1
|
||||||
|
ctx.filter = "none"
|
||||||
|
ctx.drawImage(image, 0, 0, this.width, this.height)
|
||||||
|
this.setBrush(this.layers.drawing)
|
||||||
|
}
|
||||||
|
image.src = dataUrl
|
||||||
|
}
|
||||||
|
runAction(action_id) {
|
||||||
|
var action = IMAGE_EDITOR_ACTIONS.find(a => a.id == action_id)
|
||||||
|
if (action.trackHistory) {
|
||||||
|
this.history.pushAction(action_id)
|
||||||
|
}
|
||||||
|
action.handler(this)
|
||||||
|
}
|
||||||
|
setBrush(layer = null, options = null) {
|
||||||
|
if (options == null) {
|
||||||
|
options = this.options
|
||||||
|
}
|
||||||
|
if (layer) {
|
||||||
|
layer.ctx.lineCap = "round"
|
||||||
|
layer.ctx.lineJoin = "round"
|
||||||
|
layer.ctx.lineWidth = options.brush_size
|
||||||
|
layer.ctx.fillStyle = options.color
|
||||||
|
layer.ctx.strokeStyle = options.color
|
||||||
|
var sharpness = parseInt(options.sharpness * options.brush_size)
|
||||||
|
layer.ctx.filter = sharpness == 0 ? `none` : `blur(${sharpness}px)`
|
||||||
|
layer.ctx.globalAlpha = (1 - options.opacity)
|
||||||
|
layer.ctx.globalCompositeOperation = "source-over"
|
||||||
|
var tool = IMAGE_EDITOR_TOOLS.find(t => t.id == options.tool)
|
||||||
|
if (tool && tool.setBrush) {
|
||||||
|
tool.setBrush(editor, layer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
Object.values([ "drawing", "overlay" ]).map(name => this.layers[name]).forEach(l => {
|
||||||
|
this.setBrush(l)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
get ctx_overlay() {
|
||||||
|
return this.layers.overlay.ctx
|
||||||
|
}
|
||||||
|
get ctx_current() { // the idea is this will help support having custom layers and editing each one
|
||||||
|
return this.layers.drawing.ctx
|
||||||
|
}
|
||||||
|
get canvas_current() {
|
||||||
|
return this.layers.drawing.canvas
|
||||||
|
}
|
||||||
|
keyHandler(event) { // handles keybinds like ctrl+z, ctrl+y
|
||||||
|
if (!this.popup.classList.contains("active")) {
|
||||||
|
document.removeEventListener("keydown", this.keyHandlerBound)
|
||||||
|
document.removeEventListener("keyup", this.keyHandlerBound)
|
||||||
|
return // this catches if something else closes the window but doesnt properly unbind the key handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// keybindings
|
||||||
|
if (event.type == "keydown") {
|
||||||
|
if ((event.key == "z" || event.key == "Z") && event.ctrlKey) {
|
||||||
|
if (!event.shiftKey) {
|
||||||
|
this.history.undo()
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
this.history.redo()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (event.key == "y" && event.ctrlKey) {
|
||||||
|
this.history.redo()
|
||||||
|
}
|
||||||
|
if (event.key === "Escape") {
|
||||||
|
this.hide()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dropper ctrl holding handler stuff
|
||||||
|
var dropper_active = this.temp_previous_tool != null;
|
||||||
|
if (dropper_active && !event.ctrlKey) {
|
||||||
|
this.selectOption("tool", IMAGE_EDITOR_TOOLS.findIndex(t => t.id == this.temp_previous_tool))
|
||||||
|
this.temp_previous_tool = null
|
||||||
|
}
|
||||||
|
else if (!dropper_active && event.ctrlKey) {
|
||||||
|
this.temp_previous_tool = this.getOptionValue("tool")
|
||||||
|
this.selectOption("tool", IMAGE_EDITOR_TOOLS.findIndex(t => t.id == "colorpicker"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
mouseHandler(event) {
|
||||||
|
var bbox = this.layers.overlay.canvas.getBoundingClientRect()
|
||||||
|
var x = (event.clientX || 0) - bbox.left
|
||||||
|
var y = (event.clientY || 0) - bbox.top
|
||||||
|
var type = event.type;
|
||||||
|
var touchmap = {
|
||||||
|
touchstart: "mousedown",
|
||||||
|
touchmove: "mousemove",
|
||||||
|
touchend: "mouseup",
|
||||||
|
touchcancel: "mouseup"
|
||||||
|
}
|
||||||
|
if (type in touchmap) {
|
||||||
|
type = touchmap[type]
|
||||||
|
if (event.touches && event.touches[0]) {
|
||||||
|
var touch = event.touches[0]
|
||||||
|
var x = (touch.clientX || 0) - bbox.left
|
||||||
|
var y = (touch.clientY || 0) - bbox.top
|
||||||
|
}
|
||||||
|
}
|
||||||
|
event.preventDefault()
|
||||||
|
// do drawing-related stuff
|
||||||
|
if (type == "mousedown" || (type == "mouseenter" && event.buttons == 1)) {
|
||||||
|
this.drawing = true
|
||||||
|
this.tool.begin(this, this.ctx_current, x, y)
|
||||||
|
this.tool.begin(this, this.ctx_overlay, x, y, true)
|
||||||
|
this.history.editBegin(x, y)
|
||||||
|
}
|
||||||
|
if (type == "mouseup" || type == "mousemove") {
|
||||||
|
if (this.drawing) {
|
||||||
|
if (x > 0 && y > 0) {
|
||||||
|
this.tool.move(this, this.ctx_current, x, y)
|
||||||
|
this.tool.move(this, this.ctx_overlay, x, y, true)
|
||||||
|
this.history.editMove(x, y)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (type == "mouseup" || type == "mouseout") {
|
||||||
|
if (this.drawing) {
|
||||||
|
this.drawing = false
|
||||||
|
this.tool.end(this, this.ctx_current, x, y)
|
||||||
|
this.tool.end(this, this.ctx_overlay, x, y, true)
|
||||||
|
this.history.editEnd(x, y)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
getOptionValue(section_name) {
|
||||||
|
var section = IMAGE_EDITOR_SECTIONS.find(s => s.name == section_name)
|
||||||
|
return this.options && section_name in this.options ? this.options[section_name] : section.default
|
||||||
|
}
|
||||||
|
selectOption(section_name, option_index) {
|
||||||
|
var section = IMAGE_EDITOR_SECTIONS.find(s => s.name == section_name)
|
||||||
|
var value = section.options[option_index]
|
||||||
|
this.options[section_name] = value == "custom" ? section.getCustom(this) : value
|
||||||
|
|
||||||
|
this.optionElements[section_name].forEach(element => element.classList.remove("active"))
|
||||||
|
this.optionElements[section_name][option_index].classList.add("active")
|
||||||
|
|
||||||
|
// change the editor
|
||||||
|
this.setBrush()
|
||||||
|
if (section.name == "tool") {
|
||||||
|
this.loadTool()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const imageEditor = new ImageEditor(document.getElementById("image-editor"))
|
||||||
|
const imageInpainter = new ImageEditor(document.getElementById("image-inpainter"), true)
|
||||||
|
|
||||||
|
imageEditor.setImage(null, 512, 512)
|
||||||
|
imageInpainter.setImage(null, 512, 512)
|
||||||
|
|
||||||
|
document.getElementById("init_image_button_draw").addEventListener("click", () => {
|
||||||
|
imageEditor.show()
|
||||||
|
})
|
||||||
|
document.getElementById("init_image_button_inpaint").addEventListener("click", () => {
|
||||||
|
imageInpainter.show()
|
||||||
|
})
|
||||||
|
|
||||||
|
img2imgUnload() // no init image when the app starts
|
||||||
|
|
||||||
|
|
||||||
|
function rgbToHex(rgb) {
|
||||||
|
function componentToHex(c) {
|
||||||
|
var hex = parseInt(c).toString(16)
|
||||||
|
return hex.length == 1 ? "0" + hex : hex
|
||||||
|
}
|
||||||
|
return "#" + componentToHex(rgb.r) + componentToHex(rgb.g) + componentToHex(rgb.b)
|
||||||
|
}
|
||||||
|
|
||||||
|
function hexToRgb(hex) {
|
||||||
|
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
|
||||||
|
return result ? {
|
||||||
|
r: parseInt(result[1], 16),
|
||||||
|
g: parseInt(result[2], 16),
|
||||||
|
b: parseInt(result[3], 16)
|
||||||
|
} : null;
|
||||||
|
}
|
||||||
|
|
||||||
|
function pixelCompare(int1, int2) {
|
||||||
|
return Math.abs(int1 - int2) < 4
|
||||||
|
}
|
||||||
|
|
||||||
|
// adapted from https://ben.akrin.com/canvas_fill/fill_04.html
|
||||||
|
function flood_fill(editor, the_canvas_context, x, y, color) {
|
||||||
|
pixel_stack = [{x:x, y:y}] ;
|
||||||
|
pixels = the_canvas_context.getImageData( 0, 0, editor.width, editor.height ) ;
|
||||||
|
var linear_cords = ( y * editor.width + x ) * 4 ;
|
||||||
|
var original_color = {r:pixels.data[linear_cords],
|
||||||
|
g:pixels.data[linear_cords+1],
|
||||||
|
b:pixels.data[linear_cords+2],
|
||||||
|
a:pixels.data[linear_cords+3]} ;
|
||||||
|
|
||||||
|
var opacity = color.a / 255;
|
||||||
|
var new_color = {
|
||||||
|
r: parseInt((color.r * opacity) + (original_color.r * (1 - opacity))),
|
||||||
|
g: parseInt((color.g * opacity) + (original_color.g * (1 - opacity))),
|
||||||
|
b: parseInt((color.b * opacity) + (original_color.b * (1 - opacity)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((pixelCompare(new_color.r, original_color.r) &&
|
||||||
|
pixelCompare(new_color.g, original_color.g) &&
|
||||||
|
pixelCompare(new_color.b, original_color.b)))
|
||||||
|
{
|
||||||
|
return; // This color is already the color we want, so do nothing
|
||||||
|
}
|
||||||
|
var max_stack_size = editor.width * editor.height;
|
||||||
|
while( pixel_stack.length > 0 && pixel_stack.length < max_stack_size ) {
|
||||||
|
new_pixel = pixel_stack.shift() ;
|
||||||
|
x = new_pixel.x ;
|
||||||
|
y = new_pixel.y ;
|
||||||
|
|
||||||
|
linear_cords = ( y * editor.width + x ) * 4 ;
|
||||||
|
while( y-->=0 &&
|
||||||
|
(pixelCompare(pixels.data[linear_cords], original_color.r) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+1], original_color.g) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+2], original_color.b))) {
|
||||||
|
linear_cords -= editor.width * 4 ;
|
||||||
|
}
|
||||||
|
linear_cords += editor.width * 4 ;
|
||||||
|
y++ ;
|
||||||
|
|
||||||
|
var reached_left = false ;
|
||||||
|
var reached_right = false ;
|
||||||
|
while( y++<editor.height &&
|
||||||
|
(pixelCompare(pixels.data[linear_cords], original_color.r) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+1], original_color.g) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+2], original_color.b))) {
|
||||||
|
pixels.data[linear_cords] = new_color.r ;
|
||||||
|
pixels.data[linear_cords+1] = new_color.g ;
|
||||||
|
pixels.data[linear_cords+2] = new_color.b ;
|
||||||
|
pixels.data[linear_cords+3] = 255 ;
|
||||||
|
|
||||||
|
if( x>0 ) {
|
||||||
|
if( pixelCompare(pixels.data[linear_cords-4], original_color.r) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords-4+1], original_color.g) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords-4+2], original_color.b)) {
|
||||||
|
if( !reached_left ) {
|
||||||
|
pixel_stack.push( {x:x-1, y:y} ) ;
|
||||||
|
reached_left = true ;
|
||||||
|
}
|
||||||
|
} else if( reached_left ) {
|
||||||
|
reached_left = false ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if( x<editor.width-1 ) {
|
||||||
|
if( pixelCompare(pixels.data[linear_cords+4], original_color.r) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+4+1], original_color.g) &&
|
||||||
|
pixelCompare(pixels.data[linear_cords+4+2], original_color.b)) {
|
||||||
|
if( !reached_right ) {
|
||||||
|
pixel_stack.push( {x:x+1,y:y} ) ;
|
||||||
|
reached_right = true ;
|
||||||
|
}
|
||||||
|
} else if( reached_right ) {
|
||||||
|
reached_right = false ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
linear_cords += editor.width * 4 ;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
the_canvas_context.putImageData( pixels, 0, 0 ) ;
|
||||||
|
}
|
||||||
@@ -16,7 +16,7 @@ const modifierThumbnailPath = 'media/modifier-thumbnails'
const activeCardClass = 'modifier-card-active'
const CUSTOM_MODIFIERS_KEY = "customModifiers"

function createModifierCard(name, previews, removeBy) {
    const modifierCard = document.createElement('div')
    modifierCard.className = 'modifier-card'
    modifierCard.innerHTML = `
@@ -44,10 +44,10 @@ function createModifierCard(name, previews) {
    }

    const maxLabelLength = 30
    const cardLabel = removeBy ? name.replace('by ', '') : name

    if(cardLabel.length <= maxLabelLength) {
        label.querySelector('p').innerText = cardLabel
    } else {
        const tooltipText = document.createElement('span')
        tooltipText.className = 'tooltip-text'
@@ -56,13 +56,14 @@ function createModifierCard(name, previews) {
        label.classList.add('tooltip')
        label.appendChild(tooltipText)

        label.querySelector('p').innerText = cardLabel.substring(0, maxLabelLength) + '...'
    }
    label.querySelector('p').dataset.fullName = name // preserve the full name

    return modifierCard
}

function createModifierGroup(modifierGroup, initiallyExpanded, removeBy) {
    const title = modifierGroup.category
    const modifiers = modifierGroup.modifiers

@@ -79,20 +80,19 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {

    modifiers.forEach(modObj => {
        const modifierName = modObj.modifier
        const modifierPreviews = modObj?.previews?.map(preview => `${IMAGE_REGEX.test(preview.image) ? preview.image : modifierThumbnailPath + '/' + preview.path}`)

        const modifierCard = createModifierCard(modifierName, modifierPreviews, removeBy)

        if(typeof modifierCard == 'object') {
            modifiersEl.appendChild(modifierCard)
            const trimmedName = trimModifiers(modifierName)

            modifierCard.addEventListener('click', () => {
                if (activeTags.map(x => trimModifiers(x.name)).includes(trimmedName)) {
                    // remove modifier from active array
                    activeTags = activeTags.filter(x => trimModifiers(x.name) != trimmedName)
                    toggleCardState(trimmedName, false)
                } else {
                    // add modifier to active array
                    activeTags.push({
@@ -101,13 +101,11 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
                        'originElement': modifierCard,
                        'previews': modifierPreviews
                    })
                    toggleCardState(trimmedName, true)
                }

                refreshTagsList()
                document.dispatchEvent(new Event('refreshImageModifiers'))
            })
        }
    })
@@ -117,6 +115,7 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
    modifiersEl.appendChild(brk)

    let e = document.createElement('div')
    e.className = 'modifier-category'
    e.appendChild(titleEl)
    e.appendChild(modifiersEl)

@@ -125,6 +124,10 @@ function createModifierGroup(modifierGroup, initiallyExpanded) {
    return e
}

function trimModifiers(tag) {
    return tag.replace(/^\(+|\)+$/g, '').replace(/^\[+|\]+$/g, '')
}

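// Illustrative sketch, not part of this commit: trimModifiers() strips the surrounding
// prompt-weight brackets from a modifier name, so weighted and unweighted variants of the
// same modifier match the same card.
trimModifiers('((sharp focus))')   // -> 'sharp focus'
trimModifiers('[[blurry]]')        // -> 'blurry'
trimModifiers('oil painting')      // -> 'oil painting' (unchanged)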
async function loadModifiers() {
    try {
        let res = await fetch('/get/modifiers')
@@ -136,7 +139,7 @@ async function loadModifiers() {
        res.reverse()

        res.forEach((modifierGroup, idx) => {
            createModifierGroup(modifierGroup, idx === res.length - 1, modifierGroup === 'Artist' ? true : false) // only remove "By " for artists
        })

        createCollapsibles(editorModifierEntries)
@@ -146,6 +149,84 @@ async function loadModifiers() {
    }

    loadCustomModifiers()
    document.dispatchEvent(new Event('loadImageModifiers'))
}

function refreshModifiersState(newTags) {
    // clear existing modifiers
    document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(modifierCard => {
        const modifierName = modifierCard.querySelector('.modifier-card-label p').dataset.fullName // pick the full modifier name
        if (activeTags.map(x => x.name).includes(modifierName)) {
            modifierCard.classList.remove(activeCardClass)
            modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
        }
    })
    activeTags = []

    // set new modifiers
    newTags.forEach(tag => {
        let found = false
        document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(modifierCard => {
            const modifierName = modifierCard.querySelector('.modifier-card-label p').dataset.fullName
            const shortModifierName = modifierCard.querySelector('.modifier-card-label p').innerText
            if (trimModifiers(tag) == trimModifiers(modifierName)) {
                // add modifier to active array
                if (!activeTags.map(x => x.name).includes(tag)) { // only add each tag once even if several custom modifier cards share the same tag
                    const imageModifierCard = modifierCard.cloneNode(true)
                    imageModifierCard.querySelector('.modifier-card-label p').innerText = shortModifierName
                    activeTags.push({
                        'name': modifierName,
                        'element': imageModifierCard,
                        'originElement': modifierCard
                    })
                }
                modifierCard.classList.add(activeCardClass)
                modifierCard.querySelector('.modifier-card-image-overlay').innerText = '-'
                found = true
            }
        })
        if (found == false) { // custom tag went missing, create one here
            let modifierCard = createModifierCard(tag, undefined, false) // create a modifier card for the missing tag, no image

            modifierCard.addEventListener('click', () => {
                if (activeTags.map(x => x.name).includes(tag)) {
                    // remove modifier from active array
                    activeTags = activeTags.filter(x => x.name != tag)
                    modifierCard.classList.remove(activeCardClass)

                    modifierCard.querySelector('.modifier-card-image-overlay').innerText = '+'
                }
                refreshTagsList()
            })

            activeTags.push({
                'name': tag,
                'element': modifierCard,
                'originElement': undefined // no origin element for missing tags
            })
        }
    })
    refreshTagsList()
}

function refreshInactiveTags(inactiveTags) {
    // update inactive tags
    if (inactiveTags !== undefined && inactiveTags.length > 0) {
        activeTags.forEach(tag => {
            if (inactiveTags.find(element => element === tag.name) !== undefined) {
                tag.inactive = true
            }
        })
    }

    // update cards
    let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
    overlays.forEach(i => {
        let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
        if (inactiveTags.find(element => element === modifierName) !== undefined) {
            i.parentElement.classList.add('modifier-toggle-inactive')
        }
    })
}

function refreshTagsList() {
@@ -165,15 +246,15 @@ function refreshTagsList() {
        editorModifierTagsList.appendChild(tag.element)

        tag.element.addEventListener('click', () => {
            let idx = activeTags.findIndex(o => { return o.name === tag.name })

            if (idx !== -1) {
                toggleCardState(activeTags[idx].name, false)

                activeTags.splice(idx, 1)
                refreshTagsList()
            }
            document.dispatchEvent(new Event('refreshImageModifiers'))
        })
    })

@@ -182,6 +263,23 @@ function refreshTagsList() {
    editorModifierTagsList.appendChild(brk)
}

function toggleCardState(modifierName, makeActive) {
    document.querySelector('#editor-modifiers').querySelectorAll('.modifier-card').forEach(card => {
        const name = card.querySelector('.modifier-card-label').innerText
        if ( trimModifiers(modifierName) == trimModifiers(name)
            || trimModifiers(modifierName) == 'by ' + trimModifiers(name)) {
            if(makeActive) {
                card.classList.add(activeCardClass)
                card.querySelector('.modifier-card-image-overlay').innerText = '-'
            }
            else {
                card.classList.remove(activeCardClass)
                card.querySelector('.modifier-card-image-overlay').innerText = '+'
            }
        }
    })
}

function changePreviewImages(val) {
    const previewImages = document.querySelectorAll('.modifier-card-image-container img')

@@ -256,31 +354,7 @@ function saveCustomModifiers() {
}

function loadCustomModifiers() {
    PLUGINS['MODIFIERS_LOAD'].forEach(fn=>fn.loader.call())
}

customModifiersTextBox.addEventListener('change', saveCustomModifiers)
|||||||
@@ -1,41 +0,0 @@
-const INPAINTING_EDITOR_SIZE = 450
-
-let inpaintingEditorContainer = document.querySelector('#inpaintingEditor')
-let inpaintingEditor = new DrawingBoard.Board('inpaintingEditor', {
-    color: "#ffffff",
-    background: false,
-    size: 30,
-    webStorage: false,
-    controls: [{'DrawingMode': {'filler': false}}, 'Size', 'Navigation']
-})
-let inpaintingEditorCanvasBackground = document.querySelector('.drawing-board-canvas-wrapper')
-
-function resizeInpaintingEditor(widthValue, heightValue) {
-    if (widthValue === heightValue) {
-        widthValue = INPAINTING_EDITOR_SIZE
-        heightValue = INPAINTING_EDITOR_SIZE
-    } else if (widthValue > heightValue) {
-        heightValue = (heightValue / widthValue) * INPAINTING_EDITOR_SIZE
-        widthValue = INPAINTING_EDITOR_SIZE
-    } else {
-        widthValue = (widthValue / heightValue) * INPAINTING_EDITOR_SIZE
-        heightValue = INPAINTING_EDITOR_SIZE
-    }
-    if (inpaintingEditor.opts.aspectRatio === (widthValue / heightValue).toFixed(3)) {
-        // Same ratio, don't reset the canvas.
-        return
-    }
-    inpaintingEditor.opts.aspectRatio = (widthValue / heightValue).toFixed(3)
-
-    inpaintingEditorContainer.style.width = widthValue + 'px'
-    inpaintingEditorContainer.style.height = heightValue + 'px'
-    inpaintingEditor.opts.enlargeYourContainer = true
-
-    inpaintingEditor.opts.size = inpaintingEditor.ctx.lineWidth
-    inpaintingEditor.resize()
-
-    inpaintingEditor.ctx.lineCap = "round"
-    inpaintingEditor.ctx.lineJoin = "round"
-    inpaintingEditor.ctx.lineWidth = inpaintingEditor.opts.size
-    inpaintingEditor.setColor(inpaintingEditor.opts.color)
-}
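For context on the removed resizeInpaintingEditor() helper: it fits the editor into INPAINTING_EDITOR_SIZE while preserving the aspect ratio. A quick worked example with illustrative inputs (not values from the diff):

    // sketch only: resizeInpaintingEditor(512, 768) with INPAINTING_EDITOR_SIZE = 450
    // width < height, so widthValue = (512 / 768) * 450 = 300 and heightValue = 450,
    // i.e. the editor container is styled to 300px x 450px before the canvas is resized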
10    ui/media/js/jquery-confirm.min.js    vendored    Normal file
1452  ui/media/js/main.js
6     ui/media/js/marked.min.js    vendored    Normal file
@@ -7,6 +7,7 @@
     checkbox: "checkbox",
     select: "select",
     select_multiple: "select_multiple",
+    slider: "slider",
     custom: "custom",
 };

@@ -28,18 +29,21 @@ var PARAMETERS = [
         type: ParameterType.select,
         label: "Theme",
         default: "theme-default",
+        note: "customize the look and feel of the ui",
         options: [ // Note: options expanded dynamically
             {
                 value: "theme-default",
                 label: "Default"
             }
-        ]
+        ],
+        icon: "fa-palette"
     },
     {
         id: "save_to_disk",
         type: ParameterType.checkbox,
         label: "Auto-Save Images",
         note: "automatically saves images to the specified location",
+        icon: "fa-download",
         default: false,
     },
     {

@@ -50,32 +54,85 @@ var PARAMETERS = [
             return `<input id="${parameter.id}" name="${parameter.id}" size="30" disabled>`
         }
     },
+    {
+        id: "metadata_output_format",
+        type: ParameterType.select,
+        label: "Metadata format",
+        note: "will be saved to disk in this format",
+        default: "txt",
+        options: [
+            {
+                value: "none",
+                label: "none"
+            },
+            {
+                value: "txt",
+                label: "txt"
+            },
+            {
+                value: "json",
+                label: "json"
+            },
+            {
+                value: "embed",
+                label: "embed"
+            }
+        ],
+    },
+    {
+        id: "block_nsfw",
+        type: ParameterType.checkbox,
+        label: "Block NSFW images",
+        note: "blurs out NSFW images",
+        icon: "fa-land-mine-on",
+        default: false,
+    },
     {
         id: "sound_toggle",
         type: ParameterType.checkbox,
         label: "Enable Sound",
         note: "plays a sound on task completion",
+        icon: "fa-volume-low",
         default: true,
     },
+    {
+        id: "process_order_toggle",
+        type: ParameterType.checkbox,
+        label: "Process newest jobs first",
+        note: "reverse the normal processing order",
+        icon: "fa-arrow-down-short-wide",
+        default: false,
+    },
     {
         id: "ui_open_browser_on_start",
         type: ParameterType.checkbox,
         label: "Open browser on startup",
         note: "starts the default browser on startup",
+        icon: "fa-window-restore",
         default: true,
     },
     {
-        id: "turbo",
-        type: ParameterType.checkbox,
-        label: "Turbo Mode",
-        default: true,
-        note: "generates images faster, but uses an additional 1 GB of GPU memory",
+        id: "vram_usage_level",
+        type: ParameterType.select,
+        label: "GPU Memory Usage",
+        note: "Faster performance requires more GPU memory (VRAM)<br/><br/>" +
+              "<b>Balanced:</b> nearly as fast as High, much lower VRAM usage<br/>" +
+              "<b>High:</b> fastest, maximum GPU memory usage</br>" +
+              "<b>Low:</b> slowest, recommended for GPUs with 3 to 4 GB memory",
+        icon: "fa-forward",
+        default: "balanced",
+        options: [
+            {value: "balanced", label: "Balanced"},
+            {value: "high", label: "High"},
+            {value: "low", label: "Low"}
+        ],
     },
     {
         id: "use_cpu",
         type: ParameterType.checkbox,
         label: "Use CPU (not GPU)",
         note: "warning: this will be *very* slow",
+        icon: "fa-microchip",
         default: false,
     },
     {

@@ -91,25 +148,46 @@ var PARAMETERS = [
         note: "to process in parallel",
         default: false,
     },
-    {
-        id: "use_full_precision",
-        type: ParameterType.checkbox,
-        label: "Use Full Precision",
-        note: "for GPU-only. warning: this will consume more VRAM",
-        default: false,
-    },
     {
         id: "auto_save_settings",
         type: ParameterType.checkbox,
         label: "Auto-Save Settings",
         note: "restores settings on browser load",
+        icon: "fa-gear",
         default: true,
     },
+    {
+        id: "confirm_dangerous_actions",
+        type: ParameterType.checkbox,
+        label: "Confirm dangerous actions",
+        note: "Actions that might lead to data loss must either be clicked with the shift key pressed, or confirmed in an 'Are you sure?' dialog",
+        icon: "fa-check-double",
+        default: true,
+    },
+    {
+        id: "listen_to_network",
+        type: ParameterType.checkbox,
+        label: "Make Stable Diffusion available on your network",
+        note: "Other devices on your network can access this web page",
+        icon: "fa-network-wired",
+        default: true,
+    },
+    {
+        id: "listen_port",
+        type: ParameterType.custom,
+        label: "Network port",
+        note: "Port that this server listens to. The '9000' part in 'http://localhost:9000'",
+        icon: "fa-anchor",
+        render: (parameter) => {
+            return `<input id="${parameter.id}" name="${parameter.id}" size="6" value="9000" onkeypress="preventNonNumericalInput(event)">`
+        }
+    },
     {
         id: "use_beta_channel",
         type: ParameterType.checkbox,
-        label: "🔥Beta channel",
+        label: "Beta channel",
         note: "Get the latest features immediately (but could be less stable). Please restart the program after changing this.",
+        icon: "fa-fire",
         default: false,
     },
 ];

@@ -122,6 +200,18 @@ function getParameterSettingsEntry(id) {
     return parameter[0].settingsEntry
 }

+function sliderUpdate(event) {
+    if (event.srcElement.id.endsWith('-input')) {
+        let slider = document.getElementById(event.srcElement.id.slice(0,-6))
+        slider.value = event.srcElement.value
+        slider.dispatchEvent(new Event("change"))
+    } else {
+        let field = document.getElementById(event.srcElement.id+'-input')
+        field.value = event.srcElement.value
+        field.dispatchEvent(new Event("change"))
+    }
+}
+
 function getParameterElement(parameter) {
     switch (parameter.type) {
         case ParameterType.checkbox:

@@ -132,6 +222,8 @@ function getParameterElement(parameter) {
             var options = (parameter.options || []).map(option => `<option value="${option.value}">${option.label}</option>`).join("")
             var multiple = (parameter.type == ParameterType.select_multiple ? 'multiple' : '')
             return `<select id="${parameter.id}" name="${parameter.id}" ${multiple}>${options}</select>`
+        case ParameterType.slider:
+            return `<input id="${parameter.id}" name="${parameter.id}" class="editor-slider" type="range" value="${parameter.default}" min="${parameter.slider_min}" max="${parameter.slider_max}" oninput="sliderUpdate(event)"> <input id="${parameter.id}-input" name="${parameter.id}-input" size="4" value="${parameter.default}" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)"> ${parameter.slider_unit}`
         case ParameterType.custom:
             return parameter.render(parameter)
         default:

@@ -140,16 +232,18 @@ function getParameterElement(parameter) {
     }
 }

-let parametersTable = document.querySelector("#system-settings table")
+let parametersTable = document.querySelector("#system-settings .parameters-table")
 /* fill in the system settings popup table */
 function initParameters() {
     PARAMETERS.forEach(parameter => {
         var element = getParameterElement(parameter)
         var note = parameter.note ? `<small>${parameter.note}</small>` : "";
-        var newrow = document.createElement('tr')
+        var icon = parameter.icon ? `<i class="fa ${parameter.icon}"></i>` : "";
+        var newrow = document.createElement('div')
         newrow.innerHTML = `
-            <td><label for="${parameter.id}">${parameter.label}</label></td>
-            <td><div>${element}${note}<div></td>`
+            <div>${icon}</div>
+            <div><label for="${parameter.id}">${parameter.label}</label>${note}</div>
+            <div>${element}</div>`
         parametersTable.appendChild(newrow)
         parameter.settingsEntry = newrow
     })

@@ -157,18 +251,22 @@ function initParameters() {

 initParameters()

-let turboField = document.querySelector('#turbo')
+let vramUsageLevelField = document.querySelector('#vram_usage_level')
 let useCPUField = document.querySelector('#use_cpu')
 let autoPickGPUsField = document.querySelector('#auto_pick_gpus')
 let useGPUsField = document.querySelector('#use_gpus')
-let useFullPrecisionField = document.querySelector('#use_full_precision')
 let saveToDiskField = document.querySelector('#save_to_disk')
 let diskPathField = document.querySelector('#diskPath')
+let metadataOutputFormatField = document.querySelector('#metadata_output_format')
+let listenToNetworkField = document.querySelector("#listen_to_network")
+let listenPortField = document.querySelector("#listen_port")
 let useBetaChannelField = document.querySelector("#use_beta_channel")
 let uiOpenBrowserOnStartField = document.querySelector("#ui_open_browser_on_start")
+let confirmDangerousActionsField = document.querySelector("#confirm_dangerous_actions")

 let saveSettingsBtn = document.querySelector('#save-system-settings-btn')


 async function changeAppConfig(configDelta) {
     try {
         let res = await fetch('/app_config', {

@@ -193,10 +291,17 @@ async function getAppConfig() {

         if (config.update_branch === 'beta') {
             useBetaChannelField.checked = true
+            document.querySelector("#updateBranchLabel").innerText = "(beta)"
         }
         if (config.ui && config.ui.open_browser_on_start === false) {
             uiOpenBrowserOnStartField.checked = false
         }
+        if (config.net && config.net.listen_to_network === false) {
+            listenToNetworkField.checked = false
+        }
+        if (config.net && config.net.listen_port !== undefined) {
+            listenPortField.value = config.net.listen_port
+        }

         console.log('get config status response', config)
     } catch (e) {

@@ -206,6 +311,7 @@ async function getAppConfig() {

 saveToDiskField.addEventListener('change', function(e) {
     diskPathField.disabled = !this.checked
+    metadataOutputFormatField.disabled = !this.checked
 })

 function getCurrentRenderDeviceSelection() {

@@ -256,31 +362,49 @@ autoPickGPUsField.addEventListener('click', function() {
     gpuSettingEntry.style.display = (this.checked ? 'none' : '')
 })

-async function getDiskPath() {
-    try {
+async function setDiskPath(defaultDiskPath, force=false) {
     var diskPath = getSetting("diskPath")
-        if (diskPath == '' || diskPath == undefined || diskPath == "undefined") {
-            let res = await fetch('/get/output_dir')
-            if (res.status === 200) {
-                res = await res.json()
-                res = res.output_dir
-
-                setSetting("diskPath", res)
-            }
-        }
-    } catch (e) {
-        console.log('error fetching output dir path', e)
+    if (force || diskPath == '' || diskPath == undefined || diskPath == "undefined") {
+        setSetting("diskPath", defaultDiskPath)
     }
 }

-async function getDevices() {
-    try {
-        let res = await fetch('/get/devices')
-        if (res.status === 200) {
-            res = await res.json()
-
-            let allDeviceIds = Object.keys(res['all']).filter(d => d !== 'cpu')
-            let activeDeviceIds = Object.keys(res['active']).filter(d => d !== 'cpu')
+function setDeviceInfo(devices) {
+    let cpu = devices.all.cpu.name
+    let allGPUs = Object.keys(devices.all).filter(d => d != 'cpu')
+    let activeGPUs = Object.keys(devices.active)
+
+    function ID_TO_TEXT(d) {
+        let info = devices.all[d]
+        if ("mem_free" in info && "mem_total" in info) {
+            return `${info.name} <small>(${d}) (${info.mem_free.toFixed(1)}Gb free / ${info.mem_total.toFixed(1)} Gb total)</small>`
+        } else {
+            return `${info.name} <small>(${d}) (no memory info)</small>`
+        }
+    }
+
+    allGPUs = allGPUs.map(ID_TO_TEXT)
+    activeGPUs = activeGPUs.map(ID_TO_TEXT)
+
+    let systemInfoEl = document.querySelector('#system-info')
+    systemInfoEl.querySelector('#system-info-cpu').innerText = cpu
+    systemInfoEl.querySelector('#system-info-gpus-all').innerHTML = allGPUs.join('</br>')
+    systemInfoEl.querySelector('#system-info-rendering-devices').innerHTML = activeGPUs.join('</br>')
+}
+
+function setHostInfo(hosts) {
+    let port = listenPortField.value
+    hosts = hosts.map(addr => `http://${addr}:${port}/`).map(url => `<div><a href="${url}">${url}</a></div>`)
+    document.querySelector('#system-info-server-hosts').innerHTML = hosts.join('')
+}
+
+async function getSystemInfo() {
+    try {
+        const res = await SD.getSystemInfo()
+        let devices = res['devices']
+
+        let allDeviceIds = Object.keys(devices['all']).filter(d => d !== 'cpu')
+        let activeDeviceIds = Object.keys(devices['active']).filter(d => d !== 'cpu')

         if (activeDeviceIds.length === 0) {
             useCPUField.checked = true

@@ -298,11 +422,11 @@ async function getDevices() {
             useCPUField.disabled = true // no compatible GPUs, so make the CPU mandatory
         }

-        autoPickGPUsField.checked = (res['config'] === 'auto')
+        autoPickGPUsField.checked = (devices['config'] === 'auto')

         useGPUsField.innerHTML = ''
         allDeviceIds.forEach(device => {
-            let deviceName = res['all'][device]['name']
+            let deviceName = devices['all'][device]['name']
             let deviceOption = `<option value="${device}">${deviceName} (${device})</option>`
             useGPUsField.insertAdjacentHTML('beforeend', deviceOption)
         })

@@ -313,18 +437,43 @@ async function getDevices() {
         } else {
             $('#use_gpus').val(activeDeviceIds)
         }
+
+        setDeviceInfo(devices)
+        setHostInfo(res['hosts'])
+        let force = false
+        if (res['enforce_output_dir'] !== undefined) {
+            force = res['enforce_output_dir']
+            if (force == true) {
+                saveToDiskField.checked = true
+                metadataOutputFormatField.disabled = false
             }
+            saveToDiskField.disabled = force
+            diskPathField.disabled = force
+        }
+        setDiskPath(res['default_output_dir'], force)
     } catch (e) {
         console.log('error fetching devices', e)
     }
 }

 saveSettingsBtn.addEventListener('click', function() {
+    if (listenPortField.value == '') {
+        alert('The network port field must not be empty.')
+        return
+    }
+    if (listenPortField.value < 1 || listenPortField.value > 65535) {
+        alert('The network port must be a number from 1 to 65535')
+        return
+    }
     let updateBranch = (useBetaChannelField.checked ? 'beta' : 'main')

     changeAppConfig({
         'render_devices': getCurrentRenderDeviceSelection(),
         'update_branch': updateBranch,
-        'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked
+        'ui_open_browser_on_start': uiOpenBrowserOnStartField.checked,
+        'listen_to_network': listenToNetworkField.checked,
+        'listen_port': listenPortField.value
     })
+    saveSettingsBtn.classList.add('active')
+    asyncDelay(300).then(() => saveSettingsBtn.classList.remove('active'))
 })

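The new ParameterType.slider renders a range input paired with a small numeric field, and sliderUpdate() mirrors changes between the two in both directions. A minimal sketch of what a slider entry in PARAMETERS might look like — the id, label, and ranges are illustrative assumptions, not values from this diff:

    // sketch only: a hypothetical slider parameter entry
    {
        id: "example_quality",       // assumed id, for illustration
        type: ParameterType.slider,
        label: "Example Quality",
        note: "drag the slider or type a value; sliderUpdate() keeps both in sync",
        default: 50,
        slider_min: 0,               // used by getParameterElement() for min=
        slider_max: 100,             // used by getParameterElement() for max=
        slider_unit: "%",            // appended after the numeric field
    },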
@@ -24,23 +24,50 @@ const PLUGINS = {
  * }
  * })
  */
-    IMAGE_INFO_BUTTONS: []
+    IMAGE_INFO_BUTTONS: [],
+    GET_PROMPTS_HOOK: [],
+    MODIFIERS_LOAD: [],
+    TASK_CREATE: [],
+    OUTPUTS_FORMATS: new ServiceContainer(
+        function png() { return (reqBody) => new SD.RenderTask(reqBody) }
+        , function jpeg() { return (reqBody) => new SD.RenderTask(reqBody) }
+        , function webp() { return (reqBody) => new SD.RenderTask(reqBody) }
+    ),
+}
+PLUGINS.OUTPUTS_FORMATS.register = function(...args) {
+    const service = ServiceContainer.prototype.register.apply(this, args)
+    if (typeof outputFormatField !== 'undefined') {
+        const newOption = document.createElement("option")
+        newOption.setAttribute("value", service.name)
+        newOption.innerText = service.name
+        outputFormatField.appendChild(newOption)
+    }
+    return service
+}
+
+function loadScript(url) {
+    const script = document.createElement('script')
+    const promiseSrc = new PromiseSource()
+    script.addEventListener('error', () => promiseSrc.reject(new Error(`Script "${url}" couldn't be loaded.`)))
+    script.addEventListener('load', () => promiseSrc.resolve(url))
+    script.src = url + '?t=' + Date.now()
+
+    console.log('loading script', url)
+    document.head.appendChild(script)
+
+    return promiseSrc.promise
 }

 async function loadUIPlugins() {
     try {
-        let res = await fetch('/get/ui_plugins')
-        if (res.status === 200) {
-            res = await res.json()
-            res.forEach(pluginPath => {
-                let script = document.createElement('script')
-                script.src = pluginPath + '?t=' + Date.now()
-
-                console.log('loading plugin', pluginPath)
-
-                document.head.appendChild(script)
-            })
+        const res = await fetch('/get/ui_plugins')
+        if (!res.ok) {
+            console.error(`Error HTTP${res.status} while loading plugins list. - ${res.statusText}`)
+            return
         }
+        const plugins = await res.json()
+        const loadingPromises = plugins.map(loadScript)
+        return await Promise.allSettled(loadingPromises)
     } catch (e) {
         console.log('error fetching plugin paths', e)
     }
 }
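PLUGINS.OUTPUTS_FORMATS is a ServiceContainer, and the overridden register() also appends the new format to the output-format dropdown when that field exists. A minimal sketch of how a plugin might register an extra format — the 'avif' name and the reuse of SD.RenderTask are assumptions for illustration, not part of this diff:

    // sketch only: registering a hypothetical extra output format
    PLUGINS.OUTPUTS_FORMATS.register(function avif() {
        // assumption: the format is still produced by a standard render task
        return (reqBody) => new SD.RenderTask(reqBody)
    })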
687   ui/media/js/searchable-models.js    Normal file
@@ -0,0 +1,687 @@
"use strict"

let modelsCache
let modelsOptions

/*
*** SEARCHABLE MODELS ***
Creates searchable dropdowns for SD, VAE, or HN models.
Also adds a reload models button (placed next to SD models, reloads everything including VAE and HN models).
More reload buttons may be added at strategic UI locations as needed.
Merely calling getModels() makes all the magic happen behind the scene to refresh the dropdowns.

HOW TO CREATE A MODEL DROPDOWN:
1) Create an input element. Make sure to add a data-path property, as this is how model dropdowns are identified in auto-save.js.
<input id="stable_diffusion_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />

2) Just declare one of these for your own dropdown (remember to change the element id, e.g. #stable_diffusion_models to your own input's id).
let stableDiffusionModelField = new ModelDropdown(document.querySelector('#stable_diffusion_model'), 'stable-diffusion')
let vaeModelField = new ModelDropdown(document.querySelector('#vae_model'), 'vae', 'None')
let hypernetworkModelField = new ModelDropdown(document.querySelector('#hypernetwork_model'), 'hypernetwork', 'None')

3) Model dropdowns will be refreshed automatically when the reload models button is invoked.
*/
class ModelDropdown
{
    modelFilter //= document.querySelector("#model-filter")
    modelFilterArrow //= document.querySelector("#model-filter-arrow")
    modelList //= document.querySelector("#model-list")
    modelResult //= document.querySelector("#model-result")
    modelNoResult //= document.querySelector("#model-no-result")

    currentSelection //= { elem: undefined, value: '', path: ''}
    highlightedModelEntry //= undefined
    activeModel //= undefined

    inputModels //= undefined
    modelKey //= undefined
    flatModelList //= []
    noneEntry //= ''
    modelFilterInitialized //= undefined

    /* MIMIC A REGULAR INPUT FIELD */
    get parentElement() {
        return this.modelFilter.parentElement
    }
    get parentNode() {
        return this.modelFilter.parentNode
    }
    get value() {
        return this.modelFilter.dataset.path
    }
    set value(path) {
        this.modelFilter.dataset.path = path
        this.selectEntry(path)
    }
    get disabled() {
        return this.modelFilter.disabled
    }
    set disabled(state) {
        this.modelFilter.disabled = state
        if (this.modelFilterArrow) {
            this.modelFilterArrow.style.color = state ? 'dimgray' : ''
        }
    }
    get modelElements() {
        return this.modelList.querySelectorAll('.model-file')
    }
    addEventListener(type, listener, options) {
        return this.modelFilter.addEventListener(type, listener, options)
    }
    dispatchEvent(event) {
        return this.modelFilter.dispatchEvent(event)
    }
    appendChild(option) {
        // do nothing
    }

    // remember 'this' - http://blog.niftysnippets.org/2008/04/you-must-remember-this.html
    bind(f, obj) {
        return function() {
            return f.apply(obj, arguments)
        }
    }

    /* SEARCHABLE INPUT */
    constructor (input, modelKey, noneEntry = '') {
        this.modelFilter = input
        this.noneEntry = noneEntry
        this.modelKey = modelKey

        if (modelsOptions !== undefined) { // reuse models from cache (only useful for plugins, which are loaded after models)
            this.inputModels = modelsOptions[this.modelKey]
            this.populateModels()
        }
        document.addEventListener("refreshModels", this.bind(function(e) {
            // reload the models
            this.inputModels = modelsOptions[this.modelKey]
            this.populateModels()
        }, this))
    }

    saveCurrentSelection(elem, value, path) {
        this.currentSelection.elem = elem
        this.currentSelection.value = value
        this.currentSelection.path = path
        this.modelFilter.dataset.path = path
        this.modelFilter.value = value
        this.modelFilter.dispatchEvent(new Event('change'))
    }

    processClick(e) {
        e.preventDefault()
        if (e.srcElement.classList.contains('model-file') || e.srcElement.classList.contains('fa-file')) {
            const elem = e.srcElement.classList.contains('model-file') ? e.srcElement : e.srcElement.parentElement
            this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
            this.hideModelList()
            this.modelFilter.focus()
            this.modelFilter.select()
        }
    }

    getPreviousVisibleSibling(elem) {
        const modelElements = Array.from(this.modelElements)
        const index = modelElements.indexOf(elem)
        if (index <= 0) {
            return undefined
        }

        return modelElements.slice(0, index).reverse().find(e => e.style.display === 'list-item')
    }

    getLastVisibleChild(elem) {
        let lastElementChild = elem.lastElementChild
        if (lastElementChild.style.display == 'list-item') return lastElementChild
        return this.getPreviousVisibleSibling(lastElementChild)
    }

    getNextVisibleSibling(elem) {
        const modelElements = Array.from(this.modelElements)
        const index = modelElements.indexOf(elem)
        return modelElements.slice(index + 1).find(e => e.style.display === 'list-item')
    }

    getFirstVisibleChild(elem) {
        let firstElementChild = elem.firstElementChild
        if (firstElementChild.style.display == 'list-item') return firstElementChild
        return this.getNextVisibleSibling(firstElementChild)
    }

    selectModelEntry(elem) {
        if (elem) {
            if (this.highlightedModelEntry !== undefined) {
                this.highlightedModelEntry.classList.remove('selected')
            }
            this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
            elem.classList.add('selected')
            elem.scrollIntoView({block: 'nearest'})
            this.highlightedModelEntry = elem
        }
    }

    selectPreviousFile() {
        const elem = this.getPreviousVisibleSibling(this.highlightedModelEntry)
        if (elem) {
            this.selectModelEntry(elem)
        }
        else
        {
            //this.highlightedModelEntry.parentElement.parentElement.scrollIntoView({block: 'nearest'})
            this.highlightedModelEntry.closest('.model-list').scrollTop = 0
        }
        this.modelFilter.select()
    }

    selectNextFile() {
        this.selectModelEntry(this.getNextVisibleSibling(this.highlightedModelEntry))
        this.modelFilter.select()
    }

    selectFirstFile() {
        this.selectModelEntry(this.modelList.querySelector('.model-file'))
        this.highlightedModelEntry.scrollIntoView({block: 'nearest'})
        this.modelFilter.select()
    }

    selectLastFile() {
        const elems = this.modelList.querySelectorAll('.model-file:last-child')
        this.selectModelEntry(elems[elems.length -1])
        this.modelFilter.select()
    }

    resetSelection() {
        this.hideModelList()
        this.showAllEntries()
        this.modelFilter.value = this.currentSelection.value
        this.modelFilter.focus()
        this.modelFilter.select()
    }

    validEntrySelected() {
        return (this.modelNoResult.style.display === 'none')
    }

    processKey(e) {
        switch (e.key) {
            case 'Escape':
                e.preventDefault()
                this.resetSelection()
                break
            case 'Enter':
                e.preventDefault()
                if (this.validEntrySelected()) {
                    if (this.modelList.style.display != 'block') {
                        this.showModelList()
                    }
                    else
                    {
                        this.saveCurrentSelection(this.highlightedModelEntry, this.highlightedModelEntry.innerText, this.highlightedModelEntry.dataset.path)
                        this.hideModelList()
                        this.showAllEntries()
                    }
                    this.modelFilter.focus()
                }
                else
                {
                    this.resetSelection()
                }
                break
            case 'ArrowUp':
                e.preventDefault()
                if (this.validEntrySelected()) {
                    this.selectPreviousFile()
                }
                break
            case 'ArrowDown':
                e.preventDefault()
                if (this.validEntrySelected()) {
                    this.selectNextFile()
                }
                break
            case 'ArrowLeft':
                if (this.modelList.style.display != 'block') {
                    e.preventDefault()
                }
                break
            case 'ArrowRight':
                if (this.modelList.style.display != 'block') {
                    e.preventDefault()
                }
                break
            case 'PageUp':
                e.preventDefault()
                if (this.validEntrySelected()) {
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                    this.selectPreviousFile()
                }
                break
            case 'PageDown':
                e.preventDefault()
                if (this.validEntrySelected()) {
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                    this.selectNextFile()
                }
                break
            case 'Home':
                //if (this.modelList.style.display != 'block') {
                    e.preventDefault()
                    if (this.validEntrySelected()) {
                        this.selectFirstFile()
                    }
                //}
                break
            case 'End':
                //if (this.modelList.style.display != 'block') {
                    e.preventDefault()
                    if (this.validEntrySelected()) {
                        this.selectLastFile()
                    }
                //}
                break
            default:
                //console.log(e.key)
        }
    }

    modelListFocus() {
        this.selectEntry()
        this.showAllEntries()
    }

    showModelList() {
        this.modelList.style.display = 'block'
        this.selectEntry()
        this.showAllEntries()
        //this.modelFilter.value = ''
        this.modelFilter.select() // preselect the entire string so user can just start typing.
        this.modelFilter.focus()
        this.modelFilter.style.cursor = 'auto'
    }

    hideModelList() {
        this.modelList.style.display = 'none'
        this.modelFilter.value = this.currentSelection.value
        this.modelFilter.style.cursor = ''
    }

    toggleModelList(e) {
        e.preventDefault()
        if (!this.modelFilter.disabled) {
            if (this.modelList.style.display != 'block') {
                this.showModelList()
            }
            else
            {
                this.hideModelList()
                this.modelFilter.select()
            }
        }
    }

    selectEntry(path) {
        if (path !== undefined) {
            const entries = this.modelElements;

            for (const elem of entries) {
                if (elem.dataset.path == path) {
                    this.saveCurrentSelection(elem, elem.innerText, elem.dataset.path)
                    this.highlightedModelEntry = elem
                    elem.scrollIntoView({block: 'nearest'})
                    break
                }
            }
        }

        if (this.currentSelection.elem !== undefined) {
            // select the previous element
            if (this.highlightedModelEntry !== undefined && this.highlightedModelEntry != this.currentSelection.elem) {
                this.highlightedModelEntry.classList.remove('selected')
            }
            this.currentSelection.elem.classList.add('selected')
            this.highlightedModelEntry = this.currentSelection.elem
            this.currentSelection.elem.scrollIntoView({block: 'nearest'})
        }
        else
        {
            this.selectFirstFile()
        }
    }

    highlightModelAtPosition(e) {
        let elem = document.elementFromPoint(e.clientX, e.clientY)

        if (elem.classList.contains('model-file')) {
            this.highlightModel(elem)
        }
    }

    highlightModel(elem) {
        if (elem.classList.contains('model-file')) {
            if (this.highlightedModelEntry !== undefined && this.highlightedModelEntry != elem) {
                this.highlightedModelEntry.classList.remove('selected')
            }
            elem.classList.add('selected')
            this.highlightedModelEntry = elem
        }
    }

    showAllEntries() {
        this.modelList.querySelectorAll('li').forEach(function(li) {
            if (li.id !== 'model-no-result') {
                li.style.display = 'list-item'
            }
        })
        this.modelNoResult.style.display = 'none'
    }

    filterList(e) {
        const filter = this.modelFilter.value.toLowerCase()
        let found = false
        let showAllChildren = false

        this.modelList.querySelectorAll('li').forEach(function(li) {
            if (li.classList.contains('model-folder')) {
                showAllChildren = false
            }
            if (filter == '') {
                li.style.display = 'list-item'
                found = true
            } else if (showAllChildren || li.textContent.toLowerCase().match(filter)) {
                li.style.display = 'list-item'
                if (li.classList.contains('model-folder') && li.firstChild.textContent.toLowerCase().match(filter)) {
                    showAllChildren = true
                }
                found = true
            } else {
                li.style.display = 'none'
            }
        })

        if (found) {
            this.modelResult.style.display = 'list-item'
            this.modelNoResult.style.display = 'none'
            const elem = this.getNextVisibleSibling(this.modelList.querySelector('.model-file'))
            this.highlightModel(elem)
            elem.scrollIntoView({block: 'nearest'})
        }
        else
        {
            this.modelResult.style.display = 'none'
            this.modelNoResult.style.display = 'list-item'
        }
        this.modelList.style.display = 'block'
    }

    /* MODEL LOADER */
    getElementDimensions(element) {
        // Clone the element
        const clone = element.cloneNode(true)

        // Copy the styles of the original element to the cloned element
        const originalStyles = window.getComputedStyle(element)
        for (let i = 0; i < originalStyles.length; i++) {
            const property = originalStyles[i]
            clone.style[property] = originalStyles.getPropertyValue(property)
        }

        // Set its visibility to hidden and display to inline-block
        clone.style.visibility = "hidden"
        clone.style.display = "inline-block"

        // Put the cloned element next to the original element
        element.parentNode.insertBefore(clone, element.nextSibling)

        // Get its width and height
        const width = clone.offsetWidth
        const height = clone.offsetHeight

        // Remove it from the DOM
        clone.remove()

        // Return its width and height
        return { width, height }
    }

    /**
     * @param {Array<string>} models
     */
    sortStringArray(models) {
        models.sort((a, b) => a.localeCompare(b, undefined, { sensitivity: 'base' }))
    }

    populateModels() {
        this.activeModel = this.modelFilter.dataset.path

        this.currentSelection = { elem: undefined, value: '', path: ''}
        this.highlightedModelEntry = undefined
        this.flatModelList = []

        if(this.modelList !== undefined) {
            this.modelList.remove()
            this.modelFilterArrow.remove()
        }
        this.createDropdown()
    }

    createDropdown() {
        // create dropdown entries
        let rootModelList = this.createRootModelList(this.inputModels)
        this.modelFilter.insertAdjacentElement('afterend', rootModelList)
        this.modelFilter.insertAdjacentElement(
            'afterend',
            this.createElement(
                'i',
                { id: `${this.modelFilter.id}-model-filter-arrow` },
                ['model-selector-arrow', 'fa-solid', 'fa-angle-down'],
            ),
        )
        this.modelFilter.classList.add('model-selector')
        this.modelFilterArrow = document.querySelector(`#${this.modelFilter.id}-model-filter-arrow`)
        if (this.modelFilterArrow) {
            this.modelFilterArrow.style.color = this.modelFilter.disabled ? 'dimgray' : ''
        }
        this.modelList = document.querySelector(`#${this.modelFilter.id}-model-list`)
        this.modelResult = document.querySelector(`#${this.modelFilter.id}-model-result`)
        this.modelNoResult = document.querySelector(`#${this.modelFilter.id}-model-no-result`)

        if (this.modelFilterInitialized !== true) {
            this.modelFilter.addEventListener('input', this.bind(this.filterList, this))
            this.modelFilter.addEventListener('focus', this.bind(this.modelListFocus, this))
            this.modelFilter.addEventListener('blur', this.bind(this.hideModelList, this))
            this.modelFilter.addEventListener('click', this.bind(this.showModelList, this))
            this.modelFilter.addEventListener('keydown', this.bind(this.processKey, this))

            this.modelFilterInitialized = true
        }
        this.modelFilterArrow.addEventListener('mousedown', this.bind(this.toggleModelList, this))
        this.modelList.addEventListener('mousemove', this.bind(this.highlightModelAtPosition, this))
        this.modelList.addEventListener('mousedown', this.bind(this.processClick, this))

        let mf = this.modelFilter
        this.modelFilter.addEventListener('focus', function() {
            let modelFilterStyle = window.getComputedStyle(mf)
            rootModelList.style.minWidth = modelFilterStyle.width
        })

        this.selectEntry(this.activeModel)
    }

    /**
     *
     * @param {string} tag
     * @param {object} attributes
     * @param {Array<string>} classes
     * @returns {HTMLElement}
     */
    createElement(tagName, attributes, classes, text, icon) {
        const element = document.createElement(tagName)
        if (attributes) {
            Object.entries(attributes).forEach(([key, value]) => {
                element.setAttribute(key, value)
            })
        }
        if (classes) {
            classes.forEach(className => element.classList.add(className))
        }
        if (icon) {
            let iconEl = document.createElement('i')
            iconEl.className = icon + ' icon'
            element.appendChild(iconEl)
        }
        if (text) {
            element.appendChild(document.createTextNode(text))
        }
        return element
    }

    /**
     * @param {Array<string | object} modelTree
     * @param {string} folderName
     * @param {boolean} isRootFolder
     * @returns {HTMLElement}
     */
    createModelNodeList(folderName, modelTree, isRootFolder) {
        const listElement = this.createElement('ul')

        const foldersMap = new Map()
        const modelsMap = new Map()

        modelTree.forEach(model => {
            if (Array.isArray(model)) {
                const [childFolderName, childModels] = model
                foldersMap.set(
                    childFolderName,
                    this.createModelNodeList(
                        `${folderName || ''}/${childFolderName}`,
                        childModels,
                        false,
                    ),
                )
            } else {
                const classes = ['model-file']
                if (isRootFolder) {
                    classes.push('in-root-folder')
                }
                // Remove the leading slash from the model path
                const fullPath = folderName ? `${folderName.substring(1)}/${model}` : model
                modelsMap.set(
                    model,
                    this.createElement('li', { 'data-path': fullPath }, classes, model, 'fa-regular fa-file'),
                )
            }
        })

        const childFolderNames = Array.from(foldersMap.keys())
        this.sortStringArray(childFolderNames)
        const folderElements = childFolderNames.map(name => foldersMap.get(name))

        const modelNames = Array.from(modelsMap.keys())
        this.sortStringArray(modelNames)
        const modelElements = modelNames.map(name => modelsMap.get(name))

        if (modelElements.length && folderName) {
            listElement.appendChild(this.createElement('li', undefined, ['model-folder'], folderName.substring(1), 'fa-solid fa-folder-open'))
        }

        // const allModelElements = isRootFolder ? [...folderElements, ...modelElements] : [...modelElements, ...folderElements]
        const allModelElements = [...modelElements, ...folderElements]
        allModelElements.forEach(e => listElement.appendChild(e))
        return listElement
    }

    /**
     * @param {object} modelTree
     * @returns {HTMLElement}
     */
    createRootModelList(modelTree) {
        const rootList = this.createElement(
            'ul',
            { id: `${this.modelFilter.id}-model-list` },
            ['model-list'],
        )
        rootList.appendChild(
            this.createElement(
                'li',
                { id: `${this.modelFilter.id}-model-no-result` },
                ['model-no-result'],
                'No result'
            ),
        )

        if (this.noneEntry) {
            rootList.appendChild(
                this.createElement(
                    'li',
                    { 'data-path': '' },
                    ['model-file', 'in-root-folder'],
                    this.noneEntry,
                ),
            )
        }

        if (modelTree.length > 0) {
            const containerListItem = this.createElement(
                'li',
                { id: `${this.modelFilter.id}-model-result` },
                ['model-result'],
            )
            //console.log(containerListItem)
            containerListItem.appendChild(this.createModelNodeList(undefined, modelTree, true))
            rootList.appendChild(containerListItem)
        }

        return rootList
    }
}

/* (RE)LOAD THE MODELS */
async function getModels() {
    try {
        modelsCache = await SD.getModels()
        modelsOptions = modelsCache['options']
        if ("scan-error" in modelsCache) {
            // let previewPane = document.getElementById('tab-content-wrapper')
            let previewPane = document.getElementById('preview')
            previewPane.style.background="red"
            previewPane.style.textAlign="center"
            previewPane.innerHTML = '<H1>🔥Malware alert!🔥</H1><h2>The file <i>' + modelsCache['scan-error'] + '</i> in your <tt>models/stable-diffusion</tt> folder is probably malware infected.</h2><h2>Please delete this file from the folder before proceeding!</h2>After deleting the file, reload this page.<br><br><button onClick="window.location.reload();">Reload Page</button>'
            makeImageBtn.disabled = true
        }

        /* This code should no longer be needed. Commenting out for now, will cleanup later.
        const sd_model_setting_key = "stable_diffusion_model"
        const vae_model_setting_key = "vae_model"
        const hypernetwork_model_key = "hypernetwork_model"

        const stableDiffusionOptions = modelsOptions['stable-diffusion']
        const vaeOptions = modelsOptions['vae']
        const hypernetworkOptions = modelsOptions['hypernetwork']

        // TODO: set default for model here too
        SETTINGS[sd_model_setting_key].default = stableDiffusionOptions[0]
        if (getSetting(sd_model_setting_key) == '' || SETTINGS[sd_model_setting_key].value == '') {
            setSetting(sd_model_setting_key, stableDiffusionOptions[0])
        }
        */

        // notify ModelDropdown objects to refresh
        document.dispatchEvent(new Event('refreshModels'))
    } catch (e) {
        console.log('get models error', e)
    }
}

// reload models button
document.querySelector('#reload-models').addEventListener('click', getModels)
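createRootModelList()/createModelNodeList() consume the per-model-type entries of modelsOptions, where a plain string is a model file and a two-element array is a sub-folder with its own children. A minimal sketch of that tree shape — the names are illustrative assumptions, not taken from the diff:

    // sketch only: the shape consumed by createModelNodeList()
    const exampleTree = [
        'sd-v1-4',                                   // hypothetical file in the root folder
        ['fine-tunes', ['portrait-v2', 'anime-v3']]  // hypothetical sub-folder with two files
    ]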
@@ -13,8 +13,15 @@ function initTheme() {
         .filter(sheet => sheet.href?.startsWith(window.location.origin))
         .flatMap(sheet => Array.from(sheet.cssRules))
         .forEach(rule => {
-            var selector = rule.selectorText; // TODO: also do selector == ":root", re-run un-set props
-            if (selector && selector.startsWith(".theme-")) {
+            var selector = rule.selectorText;
+            if (selector && selector.startsWith(".theme-") && !selector.includes(" ")) {
+                if (DEFAULT_THEME) { // re-add props that dont change (css needs this so they update correctly)
+                    Array.from(DEFAULT_THEME.rule.style)
+                        .filter(cssVariable => !Array.from(rule.style).includes(cssVariable))
+                        .forEach(cssVariable => {
+                            rule.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
+                        });
+                }
                 var theme_key = selector.substring(1);
                 THEMES.push({
                     key: theme_key,

@@ -60,14 +67,16 @@ function themeFieldChanged() {

     body.style = "";
     var theme = THEMES.find(t => t.key == theme_key);
+    let borderColor = undefined
     if (theme) {
-        // refresh variables incase they are back referencing
-        Array.from(DEFAULT_THEME.rule.style)
-            .filter(cssVariable => !Array.from(theme.rule.style).includes(cssVariable))
-            .forEach(cssVariable => {
-                body.style.setProperty(cssVariable, DEFAULT_THEME.rule.style.getPropertyValue(cssVariable));
-            });
+        borderColor = theme.rule.style.getPropertyValue('--input-border-color').trim()
+        if (!borderColor.startsWith('#')) {
+            borderColor = theme.rule.style.getPropertyValue('--theme-color-fallback')
+        }
+    } else {
+        borderColor = DEFAULT_THEME.rule.style.getPropertyValue('--theme-color-fallback')
     }
+    document.querySelector('meta[name="theme-color"]').setAttribute("content", borderColor)
 }

 themeField.addEventListener('change', themeFieldChanged);
@@ -1,32 +1,37 @@
|
|||||||
|
"use strict";
|
||||||
|
|
||||||
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
// https://gomakethings.com/finding-the-next-and-previous-sibling-elements-that-match-a-selector-with-vanilla-js/
|
||||||
function getNextSibling(elem, selector) {
|
function getNextSibling(elem, selector) {
|
||||||
// Get the next sibling element
|
// Get the next sibling element
|
||||||
var sibling = elem.nextElementSibling
|
let sibling = elem.nextElementSibling
|
||||||
|
|
||||||
// If there's no selector, return the first sibling
|
// If there's no selector, return the first sibling
|
||||||
if (!selector) return sibling
|
if (!selector) {
|
||||||
|
return sibling
|
||||||
|
}
|
||||||
|
|
||||||
// If the sibling matches our selector, use it
|
// If the sibling matches our selector, use it
|
||||||
// If not, jump to the next sibling and continue the loop
|
// If not, jump to the next sibling and continue the loop
|
||||||
while (sibling) {
|
while (sibling) {
|
||||||
if (sibling.matches(selector)) return sibling
|
if (sibling.matches(selector)) {
|
||||||
|
return sibling
|
||||||
|
}
|
||||||
sibling = sibling.nextElementSibling
|
sibling = sibling.nextElementSibling
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/* Panel Stuff */
|
/* Panel Stuff */
|
||||||
|
|
||||||
// true = open
|
// true = open
|
||||||
var COLLAPSIBLES_INITIALIZED = false;
|
let COLLAPSIBLES_INITIALIZED = false;
|
||||||
const COLLAPSIBLES_KEY = "collapsibles";
|
const COLLAPSIBLES_KEY = "collapsibles";
|
||||||
const COLLAPSIBLE_PANELS = []; // filled in by createCollapsibles with all the elements matching .collapsible
|
const COLLAPSIBLE_PANELS = []; // filled in by createCollapsibles with all the elements matching .collapsible
|
||||||
|
|
||||||
// on-init call this for any panels that are marked open
|
// on-init call this for any panels that are marked open
|
||||||
function toggleCollapsible(element) {
|
function toggleCollapsible(element) {
|
||||||
var collapsibleHeader = element.querySelector(".collapsible");
|
const collapsibleHeader = element.querySelector(".collapsible");
|
||||||
var handle = element.querySelector(".collapsible-handle");
|
const handle = element.querySelector(".collapsible-handle");
|
||||||
collapsibleHeader.classList.toggle("active")
|
collapsibleHeader.classList.toggle("active")
|
||||||
let content = getNextSibling(collapsibleHeader, '.collapsible-content')
|
let content = getNextSibling(collapsibleHeader, '.collapsible-content')
|
||||||
if (!collapsibleHeader.classList.contains("active")) {
|
if (!collapsibleHeader.classList.contains("active")) {
|
||||||
@@ -40,6 +45,7 @@ function toggleCollapsible(element) {
|
|||||||
handle.innerHTML = '➖' // minus
|
handle.innerHTML = '➖' // minus
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
document.dispatchEvent(new CustomEvent('collapsibleClick', { detail: collapsibleHeader }))
|
||||||
|
|
||||||
if (COLLAPSIBLES_INITIALIZED && COLLAPSIBLE_PANELS.includes(element)) {
|
if (COLLAPSIBLES_INITIALIZED && COLLAPSIBLE_PANELS.includes(element)) {
|
||||||
saveCollapsibles()
|
saveCollapsibles()
|
||||||
@@ -47,16 +53,16 @@ function toggleCollapsible(element) {
 }

 function saveCollapsibles() {
-    var values = {}
+    let values = {}
     COLLAPSIBLE_PANELS.forEach(element => {
-        var value = element.querySelector(".collapsible").className.indexOf("active") !== -1
+        let value = element.querySelector(".collapsible").className.indexOf("active") !== -1
         values[element.id] = value
     })
     localStorage.setItem(COLLAPSIBLES_KEY, JSON.stringify(values))
 }

 function createCollapsibles(node) {
-    var save = false
+    let save = false
     if (!node) {
         node = document
         save = true
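For reference, saveCollapsibles() persists a plain id-to-boolean map under the "collapsibles" key. A hypothetical snapshot of that value, using panel ids taken from the old_map further down:

// localStorage["collapsibles"] might hold:
// {"editor-settings": true, "editor-modifiers": false, "editor-inputs-prompt": true}
// true = that panel's .collapsible header currently has the "active" class (open).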
@@ -81,7 +87,7 @@ function createCollapsibles(node) {
         })
     })
     if (save) {
-        var saved = localStorage.getItem(COLLAPSIBLES_KEY)
+        let saved = localStorage.getItem(COLLAPSIBLES_KEY)
         if (!saved) {
             saved = tryLoadOldCollapsibles();
         }
@@ -89,9 +95,9 @@ function createCollapsibles(node) {
             saveCollapsibles()
             saved = localStorage.getItem(COLLAPSIBLES_KEY)
         }
-        var values = JSON.parse(saved)
+        let values = JSON.parse(saved)
         COLLAPSIBLE_PANELS.forEach(element => {
-            var value = element.querySelector(".collapsible").className.indexOf("active") !== -1
+            let value = element.querySelector(".collapsible").className.indexOf("active") !== -1
             if (values[element.id] != value) {
                 toggleCollapsible(element)
             }
@@ -101,17 +107,17 @@ function createCollapsibles(node) {
 }

 function tryLoadOldCollapsibles() {
-    var old_map = {
+    const old_map = {
         "advancedPanelOpen": "editor-settings",
         "modifiersPanelOpen": "editor-modifiers",
         "negativePromptPanelOpen": "editor-inputs-prompt"
     };
     if (localStorage.getItem(Object.keys(old_map)[0])) {
-        var result = {};
+        let result = {};
         Object.keys(old_map).forEach(key => {
-            var value = localStorage.getItem(key);
+            const value = localStorage.getItem(key);
             if (value !== null) {
-                result[old_map[key]] = value == true || value == "true"
+                result[old_map[key]] = (value == true || value == "true")
                 localStorage.removeItem(key)
             }
         });
@@ -150,17 +156,17 @@ function millisecondsToStr(milliseconds) {
         return (number > 1) ? 's' : ''
     }

-    var temp = Math.floor(milliseconds / 1000)
-    var hours = Math.floor((temp %= 86400) / 3600)
-    var s = ''
+    let temp = Math.floor(milliseconds / 1000)
+    let hours = Math.floor((temp %= 86400) / 3600)
+    let s = ''
     if (hours) {
         s += hours + ' hour' + numberEnding(hours) + ' '
     }
-    var minutes = Math.floor((temp %= 3600) / 60)
+    let minutes = Math.floor((temp %= 3600) / 60)
     if (minutes) {
         s += minutes + ' minute' + numberEnding(minutes) + ' '
     }
-    var seconds = temp % 60
+    let seconds = temp % 60
     if (!hours && minutes < 4 && seconds) {
         s += seconds + ' second' + numberEnding(seconds)
     }
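A quick worked example of the buffer `s` built by the lines above (only the logic shown in this hunk is assumed):

// millisecondsToStr(90 * 1000)   builds "1 minute 30 seconds"
// millisecondsToStr(3700 * 1000) builds "1 hour 1 minute " — seconds are
// dropped as soon as hours are present, or when minutes reach 4 or more.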
@@ -178,7 +184,7 @@ function BraceExpander() {
     function bracePair(tkns, iPosn, iNest, lstCommas) {
         if (iPosn >= tkns.length || iPosn < 0) return null;

-        var t = tkns[iPosn],
+        let t = tkns[iPosn],
             n = (t === '{') ? (
                 iNest + 1
             ) : (t === '}' ? (
@@ -198,7 +204,7 @@ function BraceExpander() {
     function andTree(dctSofar, tkns) {
         if (!tkns.length) return [dctSofar, []];

-        var dctParse = dctSofar ? dctSofar : {
+        let dctParse = dctSofar ? dctSofar : {
                 fn: and,
                 args: []
             },
@@ -231,14 +237,14 @@ function BraceExpander() {
     // Parse of a PARADIGM subtree
     function orTree(dctSofar, tkns, lstCommas) {
         if (!tkns.length) return [dctSofar, []];
-        var iLast = lstCommas.length;
+        let iLast = lstCommas.length;

         return {
             fn: or,
             args: splitsAt(
                 lstCommas, tkns
             ).map(function (x, i) {
-                var ts = x.slice(
+                let ts = x.slice(
                     1, i === iLast ? (
                         -1
                     ) : void 0
@@ -256,7 +262,7 @@ function BraceExpander() {
     // List of unescaped braces and commas, and remaining strings
     function tokens(str) {
         // Filter function excludes empty splitting artefacts
-        var toS = function (x) {
+        let toS = function (x) {
             return x.toString();
         };

@@ -270,7 +276,7 @@ function BraceExpander() {
     // PARSE TREE OPERATOR (1 of 2)
     // Each possible head * each possible tail
     function and(args) {
-        var lng = args.length,
+        let lng = args.length,
             head = lng ? args[0] : null,
             lstHead = "string" === typeof head ? (
                 [head]
@@ -330,7 +336,7 @@ function BraceExpander() {
     // s -> [s]
     this.expand = function(s) {
         // BRACE EXPRESSION PARSED
-        var dctParse = andTree(null, tokens(s))[0];
+        let dctParse = andTree(null, tokens(s))[0];

         // ABSTRACT SYNTAX TREE LOGGED
         // console.log(pp(dctParse));
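Reviewer note: `expand(s)` is the public entry point of BraceExpander (the "s -> [s]" comment above: one string in, a list of strings out). A hedged usage sketch, assuming the usual shell-style brace-expansion semantics implied by the "each possible head * each possible tail" comment — the exact output ordering and whitespace handling may differ slightly:

const expander = new BraceExpander()
expander.expand('a photo of a {cat,dog} at {dawn,dusk}')
// expected, roughly:
// ['a photo of a cat at dawn', 'a photo of a cat at dusk',
//  'a photo of a dog at dawn', 'a photo of a dog at dusk']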
@@ -341,12 +347,76 @@ function BraceExpander() {

 }

+/** Pause the execution of an async function until timer elapse.
+* @Returns a promise that will resolve after the specified timeout.
+*/
 function asyncDelay(timeout) {
     return new Promise(function(resolve, reject) {
         setTimeout(resolve, timeout, true)
     })
 }

+function PromiseSource() {
+    const srcPromise = new Promise((resolve, reject) => {
+        Object.defineProperties(this, {
+            resolve: { value: resolve, writable: false }
+            , reject: { value: reject, writable: false }
+        })
+    })
+    Object.defineProperties(this, {
+        promise: {value: makeQuerablePromise(srcPromise), writable: false}
+    })
+}
+
+/** A debounce is a higher-order function, which is a function that returns another function
+* that, as long as it continues to be invoked, will not be triggered.
+* The function will be called after it stops being called for N milliseconds.
+* If `immediate` is passed, trigger the function on the leading edge, instead of the trailing.
+* @Returns a promise that will resolve to func return value.
+*/
+function debounce (func, wait, immediate) {
+    if (typeof wait === "undefined") {
+        wait = 40
+    }
+    if (typeof wait !== "number") {
+        throw new Error("wait is not an number.")
+    }
+    let timeout = null
+    let lastPromiseSrc = new PromiseSource()
+    const applyFn = function(context, args) {
+        let result = undefined
+        try {
+            result = func.apply(context, args)
+        } catch (err) {
+            lastPromiseSrc.reject(err)
+        }
+        if (result instanceof Promise) {
+            result.then(lastPromiseSrc.resolve, lastPromiseSrc.reject)
+        } else {
+            lastPromiseSrc.resolve(result)
+        }
+    }
+    return function(...args) {
+        const callNow = Boolean(immediate && !timeout)
+        const context = this;
+        if (timeout) {
+            clearTimeout(timeout)
+        }
+        timeout = setTimeout(function () {
+            if (!immediate) {
+                applyFn(context, args)
+            }
+            lastPromiseSrc = new PromiseSource()
+            timeout = null
+        }, wait)
+        if (callNow) {
+            applyFn(context, args)
+        }
+        return lastPromiseSrc.promise
+    }
+}

 function preventNonNumericalInput(e) {
     e = e || window.event;
     let charCode = (typeof e.which == "undefined") ? e.keyCode : e.which;
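A hedged usage sketch for the debounce() helper added above. `promptField` and the stored key are hypothetical names used only for illustration; the point is that rapid calls collapse into one trailing call, and every call returns the queryable promise created by PromiseSource, which resolves with the wrapped function's return value once it actually runs:

const persistPrompt = debounce(function(text) {
    localStorage.setItem('prompt', text)   // illustrative side effect only
    return text.length
}, 500)

promptField.addEventListener('input', async function(e) {
    // Runs 500 ms after the user stops typing; earlier keystrokes share the same promise.
    const savedLength = await persistPrompt(e.target.value)
})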
@@ -358,3 +428,252 @@ function preventNonNumericalInput(e) {
         e.preventDefault();
     }
 }
+
+/** Returns the global object for the current execution environement.
+* @Returns window in a browser, global in node and self in a ServiceWorker.
+* @Notes Allows unit testing and use of the engine outside of a browser.
+*/
+function getGlobal() {
+    if (typeof globalThis === 'object') {
+        return globalThis
+    } else if (typeof global === 'object') {
+        return global
+    } else if (typeof self === 'object') {
+        return self
+    }
+    try {
+        return Function('return this')()
+    } catch {
+        // If the Function constructor fails, we're in a browser with eval disabled by CSP headers.
+        return window
+    } // Returns undefined if global can't be found.
+}
+
+/** Check if x is an Array or a TypedArray.
+* @Returns true if x is an Array or a TypedArray, false otherwise.
+*/
+function isArrayOrTypedArray(x) {
+    return Boolean(typeof x === 'object' && (Array.isArray(x) || (ArrayBuffer.isView(x) && !(x instanceof DataView))))
+}
+
+function makeQuerablePromise(promise) {
+    if (typeof promise !== 'object') {
+        throw new Error('promise is not an object.')
+    }
+    if (!(promise instanceof Promise)) {
+        throw new Error('Argument is not a promise.')
+    }
+    // Don't modify a promise that's been already modified.
+    if ('isResolved' in promise || 'isRejected' in promise || 'isPending' in promise) {
+        return promise
+    }
+    let isPending = true
+    let isRejected = false
+    let rejectReason = undefined
+    let isResolved = false
+    let resolvedValue = undefined
+    const qurPro = promise.then(
+        function(val){
+            isResolved = true
+            isPending = false
+            resolvedValue = val
+            return val
+        }
+        , function(reason) {
+            rejectReason = reason
+            isRejected = true
+            isPending = false
+            throw reason
+        }
+    )
+    Object.defineProperties(qurPro, {
+        'isResolved': {
+            get: () => isResolved
+        }
+        , 'resolvedValue': {
+            get: () => resolvedValue
+        }
+        , 'isPending': {
+            get: () => isPending
+        }
+        , 'isRejected': {
+            get: () => isRejected
+        }
+        , 'rejectReason': {
+            get: () => rejectReason
+        }
+    })
+    return qurPro
+}
+
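A short usage sketch for makeQuerablePromise() above, combined with the asyncDelay() helper from this same file. It only uses behaviour visible in the code: the wrapper adds read-only state getters so callers can poll a promise synchronously.

const task = makeQuerablePromise(asyncDelay(2000))
console.log(task.isPending)                       // true immediately after creation
task.then(function() {
    console.log(task.isResolved, task.isPending)  // true, false after ~2 seconds
    console.log(task.resolvedValue)               // true (the value setTimeout passes to resolve)
})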
+/* inserts custom html to allow prettifying of inputs */
+function prettifyInputs(root_element) {
+    root_element.querySelectorAll(`input[type="checkbox"]`).forEach(element => {
+        if (element.style.display === "none") {
+            return
+        }
+        var parent = element.parentNode;
+        if (!parent.classList.contains("input-toggle")) {
+            var wrapper = document.createElement("div");
+            wrapper.classList.add("input-toggle");
+            parent.replaceChild(wrapper, element);
+            wrapper.appendChild(element);
+            var label = document.createElement("label");
+            label.htmlFor = element.id;
+            wrapper.appendChild(label);
+        }
+    })
+}
+
+class GenericEventSource {
+    #events = {};
+    #types = []
+    constructor(...eventsTypes) {
+        if (Array.isArray(eventsTypes) && eventsTypes.length === 1 && Array.isArray(eventsTypes[0])) {
+            eventsTypes = eventsTypes[0]
+        }
+        this.#types.push(...eventsTypes)
+    }
+    get eventTypes() {
+        return this.#types
+    }
+    /** Add a new event listener
+     */
+    addEventListener(name, handler) {
+        if (!this.#types.includes(name)) {
+            throw new Error('Invalid event name.')
+        }
+        if (this.#events.hasOwnProperty(name)) {
+            this.#events[name].push(handler)
+        } else {
+            this.#events[name] = [handler]
+        }
+    }
+    /** Remove the event listener
+     */
+    removeEventListener(name, handler) {
+        if (!this.#events.hasOwnProperty(name)) {
+            return
+        }
+        const index = this.#events[name].indexOf(handler)
+        if (index != -1) {
+            this.#events[name].splice(index, 1)
+        }
+    }
+    fireEvent(name, ...args) {
+        if (!this.#types.includes(name)) {
+            throw new Error(`Event ${String(name)} missing from Events.types`)
+        }
+        if (!this.#events.hasOwnProperty(name)) {
+            return Promise.resolve()
+        }
+        if (!args || !args.length) {
+            args = []
+        }
+        const evs = this.#events[name]
+        if (evs.length <= 0) {
+            return Promise.resolve()
+        }
+        return Promise.allSettled(evs.map((callback) => {
+            try {
+                return Promise.resolve(callback.apply(SD, args))
+            } catch (ex) {
+                return Promise.reject(ex)
+            }
+        }))
+    }
+}
+
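A hedged usage sketch for GenericEventSource above. The event names are made up for illustration; note that fireEvent() invokes every handler with the global `SD` object as `this`, which is defined elsewhere (presumably in engine.js), so handlers only run where that global exists.

const events = new GenericEventSource('statusChange', 'idle')

events.addEventListener('statusChange', function(status) {
    console.log('status is now', status)
})

// fireEvent returns Promise.allSettled() over all handler results,
// so one failing handler never hides the others.
events.fireEvent('statusChange', 'rendering').then(function(results) {
    console.log(results.length, 'handler(s) settled')
})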
+class ServiceContainer {
+    #services = new Map()
+    #singletons = new Map()
+    constructor(...servicesParams) {
+        servicesParams.forEach(this.register.bind(this))
+    }
+    get services () {
+        return this.#services
+    }
+    get singletons() {
+        return this.#singletons
+    }
+    register(params) {
+        if (ServiceContainer.isConstructor(params)) {
+            if (typeof params.name !== 'string') {
+                throw new Error('params.name is not a string.')
+            }
+            params = {name:params.name, definition:params}
+        }
+        if (typeof params !== 'object') {
+            throw new Error('params is not an object.')
+        }
+        [ 'name',
+            'definition',
+        ].forEach((key) => {
+            if (!(key in params)) {
+                console.error('Invalid service %o registration.', params)
+                throw new Error(`params.${key} is not defined.`)
+            }
+        })
+        const opts = {definition: params.definition}
+        if ('dependencies' in params) {
+            if (Array.isArray(params.dependencies)) {
+                params.dependencies.forEach((dep) => {
+                    if (typeof dep !== 'string') {
+                        throw new Error('dependency name is not a string.')
+                    }
+                })
+                opts.dependencies = params.dependencies
+            } else {
+                throw new Error('params.dependencies is not an array.')
+            }
+        }
+        if (params.singleton) {
+            opts.singleton = true
+        }
+        this.#services.set(params.name, opts)
+        return Object.assign({name: params.name}, opts)
+    }
+    get(name) {
+        const ctorInfos = this.#services.get(name)
+        if (!ctorInfos) {
+            return
+        }
+        if(!ServiceContainer.isConstructor(ctorInfos.definition)) {
+            return ctorInfos.definition
+        }
+        if(!ctorInfos.singleton) {
+            return this._createInstance(ctorInfos)
+        }
+        const singletonInstance = this.#singletons.get(name)
+        if(singletonInstance) {
+            return singletonInstance
+        }
+        const newSingletonInstance = this._createInstance(ctorInfos)
+        this.#singletons.set(name, newSingletonInstance)
+        return newSingletonInstance
+    }
+
+    _getResolvedDependencies(service) {
+        let classDependencies = []
+        if(service.dependencies) {
+            classDependencies = service.dependencies.map(this.get.bind(this))
+        }
+        return classDependencies
+    }
+
+    _createInstance(service) {
+        if (!ServiceContainer.isClass(service.definition)) {
+            // Call as normal function.
+            return service.definition(...this._getResolvedDependencies(service))
+        }
+        // Use new
+        return new service.definition(...this._getResolvedDependencies(service))
+    }
+
+    static isClass(definition) {
+        return typeof definition === 'function' && Boolean(definition.prototype) && definition.prototype.constructor === definition
+    }
+    static isConstructor(definition) {
+        return typeof definition === 'function'
+    }
+}
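A hedged usage sketch for the ServiceContainer class above; the service names and classes here are invented for illustration only. It shows constructor-based registration, string dependencies resolved by name, and singleton caching, all of which are visible in the code:

class SettingsStore {
    constructor() { this.values = {} }
}
class RenderQueue {
    constructor(settings) { this.settings = settings }   // receives the resolved 'settings' service
}

const container = new ServiceContainer(
    { name: 'settings', definition: SettingsStore, singleton: true },
    { name: 'queue', definition: RenderQueue, dependencies: ['settings'] }
)

const queue = container.get('queue')        // new RenderQueue(<settings singleton>)
const settings = container.get('settings')  // same cached singleton on every call
console.log(queue.settings === settings)    // true

Passing a class directly (e.g. new ServiceContainer(SettingsStore)) also works: register() falls back to the constructor's own name as the service name.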
8
ui/media/manifest.webmanifest
Normal file
@@ -0,0 +1,8 @@
{
    "name": "Stable Diffusion UI",
    "display": "standalone",
    "display_override": [
        "window-controls-overlay"
    ],
    "theme_color": "#000000"
}
28
ui/plugins/ui/Autoscroll.plugin.js
Normal file
@@ -0,0 +1,28 @@
(function () {
    "use strict"

    let autoScroll = document.querySelector("#auto_scroll")

    // observe for changes in the preview pane
    var observer = new MutationObserver(function (mutations) {
        mutations.forEach(function (mutation) {
            if (mutation.target.className == 'img-batch') {
                Autoscroll(mutation.target)
            }
        })
    })

    observer.observe(document.getElementById('preview'), {
        childList: true,
        subtree: true
    })

    function Autoscroll(target) {
        if (autoScroll.checked && target !== null) {
            const img = target.querySelector('img')
            img.addEventListener('load', function() {
                img.closest('.imageTaskContainer').scrollIntoView()
            }, { once: true })
        }
    }
})()
95
ui/plugins/ui/Modifiers-dnd.plugin.js
Normal file
@@ -0,0 +1,95 @@
(function () { "use strict"
    if (typeof editorModifierTagsList !== 'object') {
        console.error('editorModifierTagsList missing...')
        return
    }

    const styleSheet = document.createElement("style");
    styleSheet.textContent = `
    .modifier-card-tiny.drag-sort-active {
        background: transparent;
        border: 2px dashed white;
        opacity:0.2;
    }
    `;
    document.head.appendChild(styleSheet);

    // observe for changes in tag list
    const observer = new MutationObserver(function (mutations) {
        // mutations.forEach(function (mutation) {
        if (editorModifierTagsList.childNodes.length > 0) {
            ModifierDragAndDrop(editorModifierTagsList)
        }
        // })
    })

    observer.observe(editorModifierTagsList, {
        childList: true
    })

    let current
    function ModifierDragAndDrop(target) {
        let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
        overlays.forEach (i => {
            i.parentElement.draggable = true;

            i.parentElement.ondragstart = (e) => {
                current = i
                i.parentElement.getElementsByClassName('modifier-card-image-overlay')[0].innerText = ''
                i.parentElement.draggable = true
                i.parentElement.classList.add('drag-sort-active')
                for(let item of document.querySelector('#editor-inputs-tags-list').getElementsByClassName('modifier-card-image-overlay')) {
                    if (item.parentElement.parentElement.getElementsByClassName('modifier-card-overlay')[0] != current) {
                        item.parentElement.parentElement.getElementsByClassName('modifier-card-image-overlay')[0].style.opacity = 0
                        if(item.parentElement.getElementsByClassName('modifier-card-image').length > 0) {
                            item.parentElement.getElementsByClassName('modifier-card-image')[0].style.filter = 'none'
                        }
                        item.parentElement.parentElement.style.transform = 'none'
                        item.parentElement.parentElement.style.boxShadow = 'none'
                    }
                    item.innerText = ''
                }
            }

            i.ondragenter = (e) => {
                e.preventDefault()
                if (i != current) {
                    let currentPos = 0, droppedPos = 0;
                    for (let it = 0; it < overlays.length; it++) {
                        if (current == overlays[it]) { currentPos = it; }
                        if (i == overlays[it]) { droppedPos = it; }
                    }

                    if (i.parentElement != current.parentElement) {
                        let currentPos = 0, droppedPos = 0
                        for (let it = 0; it < overlays.length; it++) {
                            if (current == overlays[it]) { currentPos = it }
                            if (i == overlays[it]) { droppedPos = it }
                        }
                        if (currentPos < droppedPos) {
                            current = i.parentElement.parentNode.insertBefore(current.parentElement, i.parentElement.nextSibling).getElementsByClassName('modifier-card-overlay')[0]
                        } else {
                            current = i.parentElement.parentNode.insertBefore(current.parentElement, i.parentElement).getElementsByClassName('modifier-card-overlay')[0]
                        }
                        // update activeTags
                        const tag = activeTags.splice(currentPos, 1)
                        activeTags.splice(droppedPos, 0, tag[0])
                        document.dispatchEvent(new Event('refreshImageModifiers'))
                    }
                }
            };

            i.ondragover = (e) => {
                e.preventDefault()
            }

            i.parentElement.ondragend = (e) => {
                i.parentElement.classList.remove('drag-sort-active')
                for(let item of document.querySelector('#editor-inputs-tags-list').getElementsByClassName('modifier-card-image-overlay')) {
                    item.style.opacity = ''
                    item.innerText = '-'
                }
            }
        })
    }
})()
66
ui/plugins/ui/Modifiers-wheel.plugin.js
Normal file
@@ -0,0 +1,66 @@
(function () { "use strict"
    if (typeof editorModifierTagsList !== 'object') {
        console.error('editorModifierTagsList missing...')
        return
    }

    // observe for changes in tag list
    const observer = new MutationObserver(function (mutations) {
        // mutations.forEach(function (mutation) {
        if (editorModifierTagsList.childNodes.length > 0) {
            ModifierMouseWheel(editorModifierTagsList)
        }
        // })
    })

    observer.observe(editorModifierTagsList, {
        childList: true
    })

    function ModifierMouseWheel(target) {
        let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
        overlays.forEach (i => {
            i.onwheel = (e) => {
                if (e.ctrlKey == true) {
                    e.preventDefault()

                    const delta = Math.sign(event.deltaY)
                    let s = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText
                    if (delta < 0) {
                        // wheel scrolling up
                        if (s.substring(0, 1) == '[' && s.substring(s.length-1) == ']') {
                            s = s.substring(1, s.length - 1)
                        }
                        else
                        {
                            if (s.substring(0, 10) !== '('.repeat(10) && s.substring(s.length-10) !== ')'.repeat(10)) {
                                s = '(' + s + ')'
                            }
                        }
                    }
                    else{
                        // wheel scrolling down
                        if (s.substring(0, 1) == '(' && s.substring(s.length-1) == ')') {
                            s = s.substring(1, s.length - 1)
                        }
                        else
                        {
                            if (s.substring(0, 10) !== '['.repeat(10) && s.substring(s.length-10) !== ']'.repeat(10)) {
                                s = '[' + s + ']'
                            }
                        }
                    }
                    i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].innerText = s
                    // update activeTags
                    for (let it = 0; it < overlays.length; it++) {
                        if (i == overlays[it]) {
                            activeTags[it].name = s
                            break
                        }
                    }
                    document.dispatchEvent(new Event('refreshImageModifiers'))
                }
            }
        })
    }
})()
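A worked example of the wheel handler above on a card labelled "oil painting", with Ctrl held while scrolling over the card overlay (the label text is hypothetical):

// wheel up   : "oil painting"       -> "(oil painting)"     (strengthen)
// wheel up   : "(oil painting)"     -> "((oil painting))"
// wheel down : "((oil painting))"   -> "(oil painting)"      (one pair removed)
// wheel down : "oil painting"       -> "[oil painting]"      (weaken)
// Nesting is roughly capped at ten '(' or '[' characters; the new label is
// written back to the card and to activeTags[...].name, and the plugin then
// fires 'refreshImageModifiers' so the rest of the UI can update.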
@@ -0,0 +1,3 @@
Custom plugins in this folder will be shipped to all the users by default.

This allows UI features to be built as plugins (testing our Plugins API, and keeping our core lean and modular).
29
ui/plugins/ui/SpecRunner.html
Normal file
@@ -0,0 +1,29 @@
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>Jasmine Spec Runner v4.5.0</title>

  <link rel="shortcut icon" type="image/png" href="./jasmine/jasmine_favicon.png">
  <link rel="stylesheet" href="./jasmine/jasmine.css">

  <script src="./jasmine/jasmine.js"></script>
  <script src="./jasmine/jasmine-html.js"></script>
  <script src="./jasmine/boot0.js"></script>
  <!-- optional: include a file here that configures the Jasmine env -->
  <script src="./jasmine/boot1.js"></script>

  <!-- include source files here... -->
  <script src="/media/js/utils.js?v=4"></script>
  <script src="/media/js/engine.js?v=1"></script>
  <!-- <script src="./engine.js?v=1"></script> -->
  <script src="/media/js/plugins.js?v=1"></script>

  <!-- include spec files here... -->
  <script src="./jasmineSpec.js"></script>

</head>

<body>
</body>
</html>
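The runner page above loads ./jasmineSpec.js after the source files. A minimal, hypothetical sketch of what such a spec file could contain, exercising one of the utils.js helpers that the page loads (standard Jasmine describe/it/expect API; not the actual spec shipped with this change):

describe("isArrayOrTypedArray", function() {
    it("accepts plain arrays and typed arrays", function() {
        expect(isArrayOrTypedArray([1, 2, 3])).toBe(true)
        expect(isArrayOrTypedArray(new Uint8Array(4))).toBe(true)
    })
    it("rejects DataView and non-array values", function() {
        expect(isArrayOrTypedArray(new DataView(new ArrayBuffer(4)))).toBe(false)
        expect(isArrayOrTypedArray("not an array")).toBe(false)
    })
})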
31
ui/plugins/ui/custom-modifiers.plugin.js
Normal file
@@ -0,0 +1,31 @@
(function() {
    PLUGINS['MODIFIERS_LOAD'].push({
        loader: function() {
            let customModifiers = localStorage.getItem(CUSTOM_MODIFIERS_KEY, '')
            customModifiersTextBox.value = customModifiers

            if (customModifiersGroupElement !== undefined) {
                customModifiersGroupElement.remove()
            }

            if (customModifiers && customModifiers.trim() !== '') {
                customModifiers = customModifiers.split('\n')
                customModifiers = customModifiers.filter(m => m.trim() !== '')
                customModifiers = customModifiers.map(function(m) {
                    return {
                        "modifier": m
                    }
                })

                let customGroup = {
                    'category': 'Custom Modifiers',
                    'modifiers': customModifiers
                }

                customModifiersGroupElement = createModifierGroup(customGroup, true)

                createCollapsibles(customModifiersGroupElement)
            }
        }
    })
})()
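A worked example of the mapping in the loader above. If localStorage[CUSTOM_MODIFIERS_KEY] holds the (hypothetical) value "detailed\n\ncinematic lighting\n", then after split, filter and map the group passed to createModifierGroup() is:

// {
//     category: 'Custom Modifiers',
//     modifiers: [ { modifier: 'detailed' }, { modifier: 'cinematic lighting' } ]
// }
// Blank lines are filtered out, and the group is rendered as a collapsible panel.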
64
ui/plugins/ui/jasmine/boot0.js
Normal file
@@ -0,0 +1,64 @@
[Verbatim boot0.js from the Jasmine 4.5.0 standalone distribution, under the MIT license (Copyright (c) 2008-2022 Pivotal Labs). Per its own header comment, it starts "booting" Jasmine: it obtains jasmineRequire (window.jasmineRequire or require('./jasmine.js')), builds the core via jasmineRequire.core(), exposes the `jasmine` global, injects the HTML-specific code with jasmineRequire.html(jasmine), creates the env with jasmine.getEnv(), builds the public interface via jasmineRequire.interface(jasmine, env), and copies every interface property (describe, it, expect, ...) onto the global object. It must load after jasmine.js and jasmine-html.js but before boot1.js and any source or spec files.]
132
ui/plugins/ui/jasmine/boot1.js
Normal file
@@ -0,0 +1,132 @@
[Verbatim boot1.js from the Jasmine 4.5.0 standalone distribution, carrying the same MIT license header as boot0.js above. It finishes booting Jasmine after boot0.js and before any source or spec files: it wraps the runner query string (spec, stopOnSpecFailure, stopSpecOnExpectationFailure, hideDisabled, random, seed) into the configuration, creates the HtmlReporter (wired to document.body, document.createElement and document.createTextNode) and registers it alongside jsApiReporter, installs an HtmlSpecFilter driven by the `spec` parameter, applies the settings with env.configure(config), and wraps window.onload so that htmlReporter.initialize() and env.execute() run once the page has loaded.]
964
ui/plugins/ui/jasmine/jasmine-html.js
Normal file
@@ -0,0 +1,964 @@
[Verbatim jasmine-html.js from the Jasmine 4.5.0 standalone distribution, carrying the same MIT license header as boot0.js above. It defines jasmineRequire.html(j$), which attaches j$.ResultsNode, j$.HtmlReporter, j$.QueryString and j$.HtmlSpecFilter, and implements the HtmlReporter itself: a ResultsStateBuilder that tracks executed, failed and pending specs; DOM builders for the banner, symbol summary, alert bar, duration, "run all" skipped-specs link, seed bar, deprecation warnings, failure details with stack traces and debug-log tables, and the suite/spec summary tree; plus the options menu (stop execution on spec failure, stop spec on expectation failure, random order, hide disabled tests), whose checkboxes navigate with updated query-string parameters.]
||||||
|
throwCheckbox.checked = config.stopSpecOnExpectationFailure;
|
||||||
|
throwCheckbox.onclick = function() {
|
||||||
|
navigateWithNewParam(
|
||||||
|
'stopSpecOnExpectationFailure',
|
||||||
|
!config.stopSpecOnExpectationFailure
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const randomCheckbox = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-random-order'
|
||||||
|
);
|
||||||
|
randomCheckbox.checked = config.random;
|
||||||
|
randomCheckbox.onclick = function() {
|
||||||
|
navigateWithNewParam('random', !config.random);
|
||||||
|
};
|
||||||
|
|
||||||
|
const hideDisabled = optionsMenuDom.querySelector(
|
||||||
|
'#jasmine-hide-disabled'
|
||||||
|
);
|
||||||
|
hideDisabled.checked = config.hideDisabled;
|
||||||
|
hideDisabled.onclick = function() {
|
||||||
|
navigateWithNewParam('hideDisabled', !config.hideDisabled);
|
||||||
|
};
|
||||||
|
|
||||||
|
const optionsTrigger = optionsMenuDom.querySelector('.jasmine-trigger'),
|
||||||
|
optionsPayload = optionsMenuDom.querySelector('.jasmine-payload'),
|
||||||
|
isOpen = /\bjasmine-open\b/;
|
||||||
|
|
||||||
|
optionsTrigger.onclick = function() {
|
||||||
|
if (isOpen.test(optionsPayload.className)) {
|
||||||
|
optionsPayload.className = optionsPayload.className.replace(
|
||||||
|
isOpen,
|
||||||
|
''
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
optionsPayload.className += ' jasmine-open';
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return optionsMenuDom;
|
||||||
|
}
|
||||||
|
|
||||||
|
function failureDescription(result, suite) {
|
||||||
|
const wrapper = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-description' },
|
||||||
|
createDom(
|
||||||
|
'a',
|
||||||
|
{ title: result.description, href: specHref(result) },
|
||||||
|
result.description
|
||||||
|
)
|
||||||
|
);
|
||||||
|
let suiteLink;
|
||||||
|
|
||||||
|
while (suite && suite.parent) {
|
||||||
|
wrapper.insertBefore(createTextNode(' > '), wrapper.firstChild);
|
||||||
|
suiteLink = createDom(
|
||||||
|
'a',
|
||||||
|
{ href: suiteHref(suite) },
|
||||||
|
suite.result.description
|
||||||
|
);
|
||||||
|
wrapper.insertBefore(suiteLink, wrapper.firstChild);
|
||||||
|
|
||||||
|
suite = suite.parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
return wrapper;
|
||||||
|
}
|
||||||
|
|
||||||
|
function suiteHref(suite) {
|
||||||
|
const els = [];
|
||||||
|
|
||||||
|
while (suite && suite.parent) {
|
||||||
|
els.unshift(suite.result.description);
|
||||||
|
suite = suite.parent;
|
||||||
|
}
|
||||||
|
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('spec', els.join(' '))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function addDeprecationWarnings(result, runnableType) {
|
||||||
|
if (result && result.deprecationWarnings) {
|
||||||
|
for (let i = 0; i < result.deprecationWarnings.length; i++) {
|
||||||
|
const warning = result.deprecationWarnings[i].message;
|
||||||
|
deprecationWarnings.push({
|
||||||
|
message: warning,
|
||||||
|
stack: result.deprecationWarnings[i].stack,
|
||||||
|
runnableName: result.fullName,
|
||||||
|
runnableType: runnableType
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function createExpander(stackTrace) {
|
||||||
|
const expandLink = createDom('a', { href: '#' }, 'Show stack trace');
|
||||||
|
const root = createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-expander' },
|
||||||
|
expandLink,
|
||||||
|
createDom(
|
||||||
|
'div',
|
||||||
|
{ className: 'jasmine-expander-contents jasmine-stack-trace' },
|
||||||
|
stackTrace
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
expandLink.addEventListener('click', function(e) {
|
||||||
|
e.preventDefault();
|
||||||
|
|
||||||
|
if (root.classList.contains('jasmine-expanded')) {
|
||||||
|
root.classList.remove('jasmine-expanded');
|
||||||
|
expandLink.textContent = 'Show stack trace';
|
||||||
|
} else {
|
||||||
|
root.classList.add('jasmine-expanded');
|
||||||
|
expandLink.textContent = 'Hide stack trace';
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return root;
|
||||||
|
}
|
||||||
|
|
||||||
|
function find(selector) {
|
||||||
|
return getContainer().querySelector('.jasmine_html-reporter ' + selector);
|
||||||
|
}
|
||||||
|
|
||||||
|
function clearPrior() {
|
||||||
|
const oldReporter = find('');
|
||||||
|
|
||||||
|
if (oldReporter) {
|
||||||
|
getContainer().removeChild(oldReporter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function createDom(type, attrs, childrenArrayOrVarArgs) {
|
||||||
|
const el = createElement(type);
|
||||||
|
let children;
|
||||||
|
|
||||||
|
if (j$.isArray_(childrenArrayOrVarArgs)) {
|
||||||
|
children = childrenArrayOrVarArgs;
|
||||||
|
} else {
|
||||||
|
children = [];
|
||||||
|
|
||||||
|
for (let i = 2; i < arguments.length; i++) {
|
||||||
|
children.push(arguments[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (let i = 0; i < children.length; i++) {
|
||||||
|
const child = children[i];
|
||||||
|
|
||||||
|
if (typeof child === 'string') {
|
||||||
|
el.appendChild(createTextNode(child));
|
||||||
|
} else {
|
||||||
|
if (child) {
|
||||||
|
el.appendChild(child);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const attr in attrs) {
|
||||||
|
if (attr == 'className') {
|
||||||
|
el[attr] = attrs[attr];
|
||||||
|
} else {
|
||||||
|
el.setAttribute(attr, attrs[attr]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return el;
|
||||||
|
}
|
||||||
|
|
||||||
|
function pluralize(singular, count) {
|
||||||
|
const word = count == 1 ? singular : singular + 's';
|
||||||
|
|
||||||
|
return '' + count + ' ' + word;
|
||||||
|
}
|
||||||
|
|
||||||
|
function specHref(result) {
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('spec', result.fullName)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function seedHref(seed) {
|
||||||
|
// include window.location.pathname to fix issue with karma-jasmine-html-reporter in angular: see https://github.com/jasmine/jasmine/issues/1906
|
||||||
|
return (
|
||||||
|
(window.location.pathname || '') +
|
||||||
|
addToExistingQueryString('seed', seed)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function defaultQueryString(key, value) {
|
||||||
|
return '?' + key + '=' + value;
|
||||||
|
}
|
||||||
|
|
||||||
|
function setMenuModeTo(mode) {
|
||||||
|
htmlReporterMain.setAttribute('class', 'jasmine_html-reporter ' + mode);
|
||||||
|
}
|
||||||
|
|
||||||
|
function noExpectations(result) {
|
||||||
|
const allExpectations =
|
||||||
|
result.failedExpectations.length + result.passedExpectations.length;
|
||||||
|
|
||||||
|
return (
|
||||||
|
allExpectations === 0 &&
|
||||||
|
(result.status === 'passed' || result.status === 'failed')
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function hasActiveSpec(resultNode) {
|
||||||
|
if (resultNode.type == 'spec' && resultNode.result.status != 'excluded') {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (resultNode.type == 'suite') {
|
||||||
|
for (let i = 0, j = resultNode.children.length; i < j; i++) {
|
||||||
|
if (hasActiveSpec(resultNode.children[i])) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return HtmlReporter;
|
||||||
|
};
|
||||||
|
|
||||||
|
jasmineRequire.HtmlSpecFilter = function() {
  function HtmlSpecFilter(options) {
    const filterString =
      options &&
      options.filterString() &&
      options.filterString().replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&');
    const filterPattern = new RegExp(filterString);

    this.matches = function(specName) {
      return filterPattern.test(specName);
    };
  }

  return HtmlSpecFilter;
};

jasmineRequire.ResultsNode = function() {
  function ResultsNode(result, type, parent) {
    this.result = result;
    this.type = type;
    this.parent = parent;

    this.children = [];

    this.addChild = function(result, type) {
      this.children.push(new ResultsNode(result, type, this));
    };

    this.last = function() {
      return this.children[this.children.length - 1];
    };

    this.updateResult = function(result) {
      this.result = result;
    };
  }

  return ResultsNode;
};

jasmineRequire.QueryString = function() {
  function QueryString(options) {
    this.navigateWithNewParam = function(key, value) {
      options.getWindowLocation().search = this.fullStringWithNewParam(
        key,
        value
      );
    };

    this.fullStringWithNewParam = function(key, value) {
      const paramMap = queryStringToParamMap();
      paramMap[key] = value;
      return toQueryString(paramMap);
    };

    this.getParam = function(key) {
      return queryStringToParamMap()[key];
    };

    return this;

    function toQueryString(paramMap) {
      const qStrPairs = [];
      for (const prop in paramMap) {
        qStrPairs.push(
          encodeURIComponent(prop) + '=' + encodeURIComponent(paramMap[prop])
        );
      }
      return '?' + qStrPairs.join('&');
    }

    function queryStringToParamMap() {
      const paramStr = options.getWindowLocation().search.substring(1);
      let params = [];
      const paramMap = {};

      if (paramStr.length > 0) {
        params = paramStr.split('&');
        for (let i = 0; i < params.length; i++) {
          const p = params[i].split('=');
          let value = decodeURIComponent(p[1]);
          if (value === 'true' || value === 'false') {
            value = JSON.parse(value);
          }
          paramMap[decodeURIComponent(p[0])] = value;
        }
      }

      return paramMap;
    }
  }

  return QueryString;
};
301    ui/plugins/ui/jasmine/jasmine.css    Normal file
10468  ui/plugins/ui/jasmine/jasmine.js    Normal file
BIN    ui/plugins/ui/jasmine/jasmine_favicon.png    Normal file (After: 1.5 KiB)
412    ui/plugins/ui/jasmineSpec.js    Normal file
@@ -0,0 +1,412 @@
"use strict"
|
||||||
|
|
||||||
|
const JASMINE_SESSION_ID = `jasmine-${String(Date.now()).slice(8)}`
|
||||||
|
|
||||||
|
beforeEach(function () {
|
||||||
|
jasmine.DEFAULT_TIMEOUT_INTERVAL = 15 * 60 * 1000 // Test timeout after 15 minutes
|
||||||
|
jasmine.addMatchers({
|
||||||
|
toBeOneOf: function () {
|
||||||
|
return {
|
||||||
|
compare: function (actual, expected) {
|
||||||
|
return {
|
||||||
|
pass: expected.includes(actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
describe('stable-diffusion-ui', function() {
|
||||||
|
beforeEach(function() {
|
||||||
|
expect(typeof SD).toBe('object')
|
||||||
|
expect(typeof SD.serverState).toBe('object')
|
||||||
|
expect(typeof SD.serverState.status).toBe('string')
|
||||||
|
})
|
||||||
|
it('should be able to reach the backend', async function() {
|
||||||
|
expect(SD.serverState.status).toBe(SD.ServerStates.unavailable)
|
||||||
|
SD.sessionId = JASMINE_SESSION_ID
|
||||||
|
await SD.init()
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
})
|
||||||
|
|
||||||
|
it('enforce the current task state', function() {
|
||||||
|
const task = new SD.Task()
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.init)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.pending)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.init)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.waiting)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.waiting)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.processing)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.processing)
|
||||||
|
expect(task.isPending).toBeTrue()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.pending)
|
||||||
|
}).toThrowError()
|
||||||
|
|
||||||
|
task._setStatus(SD.TaskStatus.failed)
|
||||||
|
expect(task.status).toBe(SD.TaskStatus.failed)
|
||||||
|
expect(task.isPending).toBeFalse()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.processing)
|
||||||
|
}).toThrowError()
|
||||||
|
expect(function() {
|
||||||
|
task._setStatus(SD.TaskStatus.completed)
|
||||||
|
}).toThrowError()
|
||||||
|
})
|
||||||
|
it('should be able to run tasks', async function() {
|
||||||
|
expect(typeof SD.Task.run).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield 1 + 1).toBe(4)
|
||||||
|
expect(yield 2 + 2).toBe(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toBe(12)
|
||||||
|
expect(yield 4 + 4).toBe(16)
|
||||||
|
return 8 + 8
|
||||||
|
})('start')
|
||||||
|
const callback = function({value, done}) {
|
||||||
|
return {value: 2 * value, done}
|
||||||
|
}
|
||||||
|
expect(await SD.Task.run(promiseGenerator, {callback})).toBe(32)
|
||||||
|
})
|
||||||
|
it('should be able to queue tasks', async function() {
|
||||||
|
expect(typeof SD.Task.enqueue).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield 1 + 1).toBe(4)
|
||||||
|
expect(yield 2 + 2).toBe(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toBe(12)
|
||||||
|
expect(yield 4 + 4).toBe(16)
|
||||||
|
return 8 + 8
|
||||||
|
})('start')
|
||||||
|
const callback = function({value, done}) {
|
||||||
|
return {value: 2 * value, done}
|
||||||
|
}
|
||||||
|
const gen = SD.Task.asGenerator({generator: promiseGenerator, callback})
|
||||||
|
expect(await SD.Task.enqueue(gen)).toBe(32)
|
||||||
|
})
|
||||||
|
it('should be able to chain handlers', async function() {
|
||||||
|
expect(typeof SD.Task.enqueue).toBe('function')
|
||||||
|
const promiseGenerator = (function*(val) {
|
||||||
|
expect(val).toBe('start')
|
||||||
|
expect(yield {test: '1'}).toEqual({test: '1', foo: 'bar'})
|
||||||
|
expect(yield 2 + 2).toEqual(8)
|
||||||
|
yield asyncDelay(500)
|
||||||
|
expect(yield 3 + 3).toEqual(12)
|
||||||
|
expect(yield {test: 4}).toEqual({test: 8, foo: 'bar'})
|
||||||
|
return {test: 8}
|
||||||
|
})('start')
|
||||||
|
const gen1 = SD.Task.asGenerator({generator: promiseGenerator, callback: function({value, done}) {
|
||||||
|
if (typeof value === "object") {
|
||||||
|
value['foo'] = 'bar'
|
||||||
|
}
|
||||||
|
return {value, done}
|
||||||
|
}})
|
||||||
|
const gen2 = SD.Task.asGenerator({generator: gen1, callback: function({value, done}) {
|
||||||
|
if (typeof value === 'number') {
|
||||||
|
value = 2 * value
|
||||||
|
}
|
||||||
|
if (typeof value === 'object' && typeof value.test === 'number') {
|
||||||
|
value.test = 2 * value.test
|
||||||
|
}
|
||||||
|
return {value, done}
|
||||||
|
}})
|
||||||
|
expect(await SD.Task.enqueue(gen2)).toEqual({test:32, foo: 'bar'})
|
||||||
|
})
|
||||||
|
describe('ServiceContainer', function() {
|
||||||
|
it('should be able to register providers', function() {
|
||||||
|
const cont = new ServiceContainer(
|
||||||
|
function foo() {
|
||||||
|
this.bar = ''
|
||||||
|
},
|
||||||
|
function bar() {
|
||||||
|
return () => 0
|
||||||
|
},
|
||||||
|
{ name: 'zero', definition: 0 },
|
||||||
|
{ name: 'ctx', definition: () => Object.create(null), singleton: true },
|
||||||
|
{ name: 'test',
|
||||||
|
definition: (ctx, missing, one, foo) => {
|
||||||
|
expect(ctx).toEqual({ran: true})
|
||||||
|
expect(one).toBe(1)
|
||||||
|
expect(typeof foo).toBe('object')
|
||||||
|
expect(foo.bar).toBeDefined()
|
||||||
|
expect(typeof missing).toBe('undefined')
|
||||||
|
return {foo: 'bar'}
|
||||||
|
}, dependencies: ['ctx', 'missing', 'one', 'foo']
|
||||||
|
}
|
||||||
|
)
|
||||||
|
const fooObj = cont.get('foo')
|
||||||
|
expect(typeof fooObj).toBe('object')
|
||||||
|
fooObj.ran = true
|
||||||
|
|
||||||
|
const ctx = cont.get('ctx')
|
||||||
|
expect(ctx).toEqual({})
|
||||||
|
ctx.ran = true
|
||||||
|
|
||||||
|
const bar = cont.get('bar')
|
||||||
|
expect(typeof bar).toBe('function')
|
||||||
|
expect(bar()).toBe(0)
|
||||||
|
|
||||||
|
cont.register({name: 'one', definition: 1})
|
||||||
|
const test = cont.get('test')
|
||||||
|
expect(typeof test).toBe('object')
|
||||||
|
expect(test.foo).toBe('bar')
|
||||||
|
})
|
||||||
|
})
|
||||||
|
it('should be able to stream data in chunks', async function() {
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
const nbr_steps = 15
|
||||||
|
let res = await fetch('/render', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/json'
|
||||||
|
},
|
||||||
|
body: JSON.stringify({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"negative_prompt": "",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"seed": Math.floor(Math.random() * 10000000),
|
||||||
|
|
||||||
|
"sampler": "plms",
|
||||||
|
"use_stable_diffusion_model": "sd-v1-4",
|
||||||
|
"num_inference_steps": nbr_steps,
|
||||||
|
"guidance_scale": 7.5,
|
||||||
|
|
||||||
|
"numOutputsParallel": 1,
|
||||||
|
"stream_image_progress": true,
|
||||||
|
"show_only_filtered_image": true,
|
||||||
|
"output_format": "jpeg",
|
||||||
|
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
expect(res.ok).toBeTruthy()
|
||||||
|
const renderRequest = await res.json()
|
||||||
|
expect(typeof renderRequest.stream).toBe('string')
|
||||||
|
expect(renderRequest.task).toBeDefined()
|
||||||
|
|
||||||
|
// Wait for server status to update.
|
||||||
|
await SD.waitUntil(() => {
|
||||||
|
console.log('Waiting for %s to be received...', renderRequest.task)
|
||||||
|
return (!SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)])
|
||||||
|
}, 250, 10 * 60 * 1000)
|
||||||
|
// Wait for task to start on server.
|
||||||
|
await SD.waitUntil(() => {
|
||||||
|
console.log('Waiting for %s to start...', renderRequest.task)
|
||||||
|
return !SD.serverState.tasks || SD.serverState.tasks[String(renderRequest.task)] !== 'pending'
|
||||||
|
}, 250)
|
||||||
|
|
||||||
|
const reader = new SD.ChunkedStreamReader(renderRequest.stream)
|
||||||
|
const parseToString = reader.parse
|
||||||
|
reader.parse = function(value) {
|
||||||
|
value = parseToString.call(this, value)
|
||||||
|
if (!value || value.length <= 0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return reader.readStreamAsJSON(value.join(''))
|
||||||
|
}
|
||||||
|
reader.onNext = function({done, value}) {
|
||||||
|
console.log(value)
|
||||||
|
if (typeof value === 'object' && 'status' in value) {
|
||||||
|
done = true
|
||||||
|
}
|
||||||
|
return {done, value}
|
||||||
|
}
|
||||||
|
let lastUpdate = undefined
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
//for await (const stepUpdate of reader) {
|
||||||
|
for await (const stepUpdate of reader.open()) {
|
||||||
|
console.log('ChunkedStreamReader received ', stepUpdate)
|
||||||
|
lastUpdate = stepUpdate
|
||||||
|
if (complete) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(1)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.total_steps).toBe(nbr_steps)
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for(let i=1; i <= 5; ++i) {
|
||||||
|
res = await fetch(renderRequest.stream)
|
||||||
|
expect(res.ok).toBeTruthy()
|
||||||
|
const cachedResponse = await res.json()
|
||||||
|
console.log('Cache test %s received %o', i, cachedResponse)
|
||||||
|
expect(lastUpdate).toEqual(cachedResponse)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('should be able to make renders', function() {
|
||||||
|
beforeEach(function() {
|
||||||
|
expect(SD.isServerAvailable()).toBeTrue()
|
||||||
|
})
|
||||||
|
it('basic inline request', async function() {
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
const result = await SD.render({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"show_only_filtered_image": false,
|
||||||
|
//"use_face_correction": 'GFPGANv1.3',
|
||||||
|
"use_upscale": "RealESRGAN_x4plus",
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
}, function(event) {
|
||||||
|
console.log(this, event)
|
||||||
|
if ('update' in event) {
|
||||||
|
const stepUpdate = event.update
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(2)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
console.log(result)
|
||||||
|
expect(result.status).toBe('succeeded')
|
||||||
|
expect(result.output).toHaveSize(2)
|
||||||
|
})
|
||||||
|
it('post and reader request', async function() {
|
||||||
|
const renderTask = new SD.RenderTask({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"seed": SD.MAX_SEED_VALUE,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
})
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.init)
|
||||||
|
|
||||||
|
const timeout = -1
|
||||||
|
const renderRequest = await renderTask.post(timeout)
|
||||||
|
expect(typeof renderRequest.stream).toBe('string')
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.waiting)
|
||||||
|
expect(renderTask.streamUrl).toBe(renderRequest.stream)
|
||||||
|
|
||||||
|
await renderTask.waitUntil({state: SD.TaskStatus.processing, callback: () => console.log('Waiting for render task to start...') })
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.processing)
|
||||||
|
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
//for await (const stepUpdate of renderTask.reader) {
|
||||||
|
for await (const stepUpdate of renderTask.reader.open()) {
|
||||||
|
console.log(stepUpdate)
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(1)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
expect(renderTask.status).toBe(SD.TaskStatus.completed)
|
||||||
|
expect(renderTask.result.status).toBe('succeeded')
|
||||||
|
expect(renderTask.result.output).toHaveSize(1)
|
||||||
|
})
|
||||||
|
it('queued request', async function() {
|
||||||
|
let stepCount = 0
|
||||||
|
let complete = false
|
||||||
|
const renderTask = new SD.RenderTask({
|
||||||
|
"prompt": "a photograph of an astronaut riding a horse",
|
||||||
|
"width": 128,
|
||||||
|
"height": 128,
|
||||||
|
"num_inference_steps": 10,
|
||||||
|
"show_only_filtered_image": false,
|
||||||
|
//"use_face_correction": 'GFPGANv1.3',
|
||||||
|
"use_upscale": "RealESRGAN_x4plus",
|
||||||
|
"session_id": JASMINE_SESSION_ID,
|
||||||
|
})
|
||||||
|
await renderTask.enqueue(function(event) {
|
||||||
|
console.log(this, event)
|
||||||
|
if ('update' in event) {
|
||||||
|
const stepUpdate = event.update
|
||||||
|
if (complete || (stepUpdate.status && stepUpdate.step === stepUpdate.total_steps)) {
|
||||||
|
expect(stepUpdate.status).toBe('succeeded')
|
||||||
|
expect(stepUpdate.output).toHaveSize(2)
|
||||||
|
} else {
|
||||||
|
expect(stepUpdate.step).toBe(stepCount)
|
||||||
|
if (stepUpdate.step === stepUpdate.total_steps) {
|
||||||
|
complete = true
|
||||||
|
} else {
|
||||||
|
stepCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
console.log(renderTask.result)
|
||||||
|
expect(renderTask.result.status).toBe('succeeded')
|
||||||
|
expect(renderTask.result.output).toHaveSize(2)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
describe('# Special cases', function() {
|
||||||
|
it('should throw an exception on set for invalid sessionId', function() {
|
||||||
|
expect(function() {
|
||||||
|
SD.sessionId = undefined
|
||||||
|
}).toThrowError("Can't set sessionId to undefined.")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
const loadCompleted = window.onload
|
||||||
|
let loadEvent = undefined
|
||||||
|
window.onload = function(evt) {
|
||||||
|
loadEvent = evt
|
||||||
|
}
|
||||||
|
if (!PLUGINS.SELFTEST) {
|
||||||
|
PLUGINS.SELFTEST = {}
|
||||||
|
}
|
||||||
|
loadUIPlugins().then(function() {
|
||||||
|
console.log('loadCompleted', loadEvent)
|
||||||
|
describe('@Plugins', function() {
|
||||||
|
it('exposes hooks to override', function() {
|
||||||
|
expect(typeof PLUGINS.IMAGE_INFO_BUTTONS).toBe('object')
|
||||||
|
expect(typeof PLUGINS.TASK_CREATE).toBe('object')
|
||||||
|
})
|
||||||
|
describe('supports selftests', function() { // Hook to allow plugins to define tests.
|
||||||
|
const pluginsTests = Object.keys(PLUGINS.SELFTEST).filter((key) => PLUGINS.SELFTEST.hasOwnProperty(key))
|
||||||
|
if (!pluginsTests || pluginsTests.length <= 0) {
|
||||||
|
it('but nothing loaded...', function() {
|
||||||
|
expect(true).toBeTruthy()
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for (const pTest of pluginsTests) {
|
||||||
|
describe(pTest, function() {
|
||||||
|
const testFn = PLUGINS.SELFTEST[pTest]
|
||||||
|
return Promise.resolve(testFn.call(jasmine, pTest))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
loadCompleted.call(window, loadEvent)
|
||||||
|
})
|
||||||
458    ui/plugins/ui/merge.plugin.js    Normal file
@@ -0,0 +1,458 @@
|
|||||||
|
(function() {
|
||||||
|
"use strict"
|
||||||
|
|
||||||
|
///////////////////// Function section
|
||||||
|
function smoothstep(x) {
|
||||||
|
return x * x * (3 - 2 * x)
|
||||||
|
}
|
||||||
|
|
||||||
|
function smootherstep(x) {
|
||||||
|
return x * x * x * (x * (x * 6 - 15) + 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
function smootheststep(x) {
|
||||||
|
let y = -20 * Math.pow(x, 7)
|
||||||
|
y += 70 * Math.pow(x, 6)
|
||||||
|
y -= 84 * Math.pow(x, 5)
|
||||||
|
y += 35 * Math.pow(x, 4)
|
||||||
|
return y
|
||||||
|
}
|
||||||
|
function getCurrentTime() {
|
||||||
|
const now = new Date();
|
||||||
|
let hours = now.getHours();
|
||||||
|
let minutes = now.getMinutes();
|
||||||
|
let seconds = now.getSeconds();
|
||||||
|
|
||||||
|
hours = hours < 10 ? `0${hours}` : hours;
|
||||||
|
minutes = minutes < 10 ? `0${minutes}` : minutes;
|
||||||
|
seconds = seconds < 10 ? `0${seconds}` : seconds;
|
||||||
|
|
||||||
|
return `${hours}:${minutes}:${seconds}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function addLogMessage(message) {
|
||||||
|
const logContainer = document.getElementById('merge-log');
|
||||||
|
logContainer.innerHTML += `<i>${getCurrentTime()}</i> ${message}<br>`;
|
||||||
|
|
||||||
|
// Scroll to the bottom of the log
|
||||||
|
logContainer.scrollTop = logContainer.scrollHeight;
|
||||||
|
|
||||||
|
document.querySelector('#merge-log-container').style.display = 'block'
|
||||||
|
}
|
||||||
|
|
||||||
|
function addLogSeparator() {
|
||||||
|
const logContainer = document.getElementById('merge-log');
|
||||||
|
logContainer.innerHTML += '<hr>'
|
||||||
|
|
||||||
|
logContainer.scrollTop = logContainer.scrollHeight;
|
||||||
|
}
|
||||||
|
|
||||||
|
function drawDiagram(fn) {
|
||||||
|
const SIZE = 300
|
||||||
|
const canvas = document.getElementById('merge-canvas');
|
||||||
|
canvas.height = canvas.width = SIZE
|
||||||
|
const ctx = canvas.getContext('2d');
|
||||||
|
|
||||||
|
// Draw coordinate system
|
||||||
|
ctx.scale(1, -1);
|
||||||
|
ctx.translate(0, -canvas.height);
|
||||||
|
ctx.lineWidth = 1;
|
||||||
|
ctx.beginPath();
|
||||||
|
|
||||||
|
ctx.strokeStyle = 'white'
|
||||||
|
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
|
||||||
|
ctx.stroke()
|
||||||
|
ctx.beginPath()
|
||||||
|
ctx.setLineDash([1,2])
|
||||||
|
const n = SIZE / 10
|
||||||
|
for (let i=n; i<SIZE; i+=n) {
|
||||||
|
ctx.moveTo(0,i)
|
||||||
|
ctx.lineTo(SIZE,i)
|
||||||
|
ctx.moveTo(i,0)
|
||||||
|
ctx.lineTo(i,SIZE)
|
||||||
|
}
|
||||||
|
ctx.stroke()
|
||||||
|
ctx.beginPath()
|
||||||
|
ctx.setLineDash([])
|
||||||
|
ctx.beginPath();
|
||||||
|
ctx.strokeStyle = 'black'
|
||||||
|
ctx.lineWidth = 3;
|
||||||
|
// Plot function
|
||||||
|
const numSamples = 20;
|
||||||
|
for (let i = 0; i <= numSamples; i++) {
|
||||||
|
const x = i / numSamples;
|
||||||
|
const y = fn(x);
|
||||||
|
|
||||||
|
const canvasX = x * SIZE;
|
||||||
|
const canvasY = y * SIZE;
|
||||||
|
|
||||||
|
if (i === 0) {
|
||||||
|
ctx.moveTo(canvasX, canvasY);
|
||||||
|
} else {
|
||||||
|
ctx.lineTo(canvasX, canvasY);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx.stroke()
|
||||||
|
// Plot alpha values (yellow boxes)
|
||||||
|
let start = parseFloat( document.querySelector('#merge-start').value )
|
||||||
|
let step = parseFloat( document.querySelector('#merge-step').value )
|
||||||
|
let iterations = document.querySelector('#merge-count').value>>0
|
||||||
|
ctx.beginPath()
|
||||||
|
ctx.fillStyle = "yellow"
|
||||||
|
for (let i=0; i< iterations; i++) {
|
||||||
|
const alpha = ( start + i * step ) / 100
|
||||||
|
const x = alpha*SIZE
|
||||||
|
const y = fn(alpha) * SIZE
|
||||||
|
if (x <= SIZE) {
|
||||||
|
ctx.rect(x-3,y-3,6,6)
|
||||||
|
ctx.fill()
|
||||||
|
} else {
|
||||||
|
ctx.strokeStyle = 'red'
|
||||||
|
ctx.moveTo(0,0); ctx.lineTo(0,SIZE); ctx.lineTo(SIZE,SIZE); ctx.lineTo(SIZE,0); ctx.lineTo(0,0); ctx.lineTo(SIZE,SIZE);
|
||||||
|
ctx.stroke()
|
||||||
|
addLogMessage('<i>Warning: maximum ratio is ≥ 100%</i>')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function updateChart() {
|
||||||
|
let fn = (x) => x
|
||||||
|
switch (document.querySelector('#merge-interpolation').value) {
|
||||||
|
case 'SmoothStep':
|
||||||
|
fn = smoothstep
|
||||||
|
break
|
||||||
|
case 'SmootherStep':
|
||||||
|
fn = smootherstep
|
||||||
|
break
|
||||||
|
case 'SmoothestStep':
|
||||||
|
fn = smootheststep
|
||||||
|
break
|
||||||
|
}
|
||||||
|
drawDiagram(fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
/////////////////////// Tab implementation
|
||||||
|
document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
|
||||||
|
<span id="tab-merge" class="tab">
|
||||||
|
<span><i class="fa fa-code-merge icon"></i> Merge models</span>
|
||||||
|
</span>
|
||||||
|
`)
|
||||||
|
|
||||||
|
document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
|
||||||
|
<div id="tab-content-merge" class="tab-content">
|
||||||
|
<div id="merge" class="tab-content-inner">
|
||||||
|
Loading..
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`)
|
||||||
|
|
||||||
|
const tabMerge = document.querySelector('#tab-merge')
|
||||||
|
if (tabMerge) {
|
||||||
|
linkTabContents(tabMerge)
|
||||||
|
}
|
||||||
|
const merge = document.querySelector('#merge')
|
||||||
|
if (!merge) {
|
||||||
|
// merge tab not found, don't execute the plugin code.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
document.querySelector('body').insertAdjacentHTML('beforeend', `
|
||||||
|
<style>
|
||||||
|
#tab-content-merge .tab-content-inner {
|
||||||
|
max-width: 100%;
|
||||||
|
padding: 10pt;
|
||||||
|
}
|
||||||
|
.merge-container {
|
||||||
|
margin-left: 15%;
|
||||||
|
margin-right: 15%;
|
||||||
|
text-align: left;
|
||||||
|
display: inline-grid;
|
||||||
|
grid-template-columns: 1fr 1fr;
|
||||||
|
grid-template-rows: auto auto auto;
|
||||||
|
gap: 0px 0px;
|
||||||
|
grid-auto-flow: row;
|
||||||
|
grid-template-areas:
|
||||||
|
"merge-input merge-config"
|
||||||
|
"merge-buttons merge-buttons";
|
||||||
|
}
|
||||||
|
.merge-container p {
|
||||||
|
margin-top: 3pt;
|
||||||
|
margin-bottom: 3pt;
|
||||||
|
}
|
||||||
|
.merge-config .tab-content {
|
||||||
|
background: var(--background-color1);
|
||||||
|
border-radius: 3pt;
|
||||||
|
}
|
||||||
|
.merge-config .tab-content-inner {
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
.merge-input {
|
||||||
|
grid-area: merge-input;
|
||||||
|
padding-left:1em;
|
||||||
|
}
|
||||||
|
.merge-config {
|
||||||
|
grid-area: merge-config;
|
||||||
|
padding:1em;
|
||||||
|
}
|
||||||
|
.merge-config input {
|
||||||
|
margin-bottom: 3px;
|
||||||
|
}
|
||||||
|
.merge-config select {
|
||||||
|
margin-bottom: 3px;
|
||||||
|
}
|
||||||
|
.merge-buttons {
|
||||||
|
grid-area: merge-buttons;
|
||||||
|
padding:1em;
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
#merge-button {
|
||||||
|
padding: 8px;
|
||||||
|
width:20em;
|
||||||
|
}
|
||||||
|
div#merge-log {
|
||||||
|
height:150px;
|
||||||
|
overflow-x:hidden;
|
||||||
|
overflow-y:scroll;
|
||||||
|
background:var(--background-color1);
|
||||||
|
border-radius: 3pt;
|
||||||
|
}
|
||||||
|
div#merge-log i {
|
||||||
|
color: hsl(var(--accent-hue), 100%, calc(2*var(--accent-lightness)));
|
||||||
|
font-family: monospace;
|
||||||
|
}
|
||||||
|
.disabled {
|
||||||
|
background: var(--background-color4);
|
||||||
|
color: var(--text-color);
|
||||||
|
}
|
||||||
|
#merge-type-tabs {
|
||||||
|
border-bottom: 1px solid black;
|
||||||
|
}
|
||||||
|
#merge-log-container {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
.merge-container #merge-warning {
|
||||||
|
color: rgb(153, 153, 153);
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
`)
|
||||||
|
|
||||||
|
merge.innerHTML = `
|
||||||
|
<div class="merge-container panel-box">
|
||||||
|
<div class="merge-input">
|
||||||
|
<p><label for="#mergeModelA">Select Model A:</label></p>
|
||||||
|
<input id="mergeModelA" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||||
|
<p><label for="#mergeModelB">Select Model B:</label></p>
|
||||||
|
<input id="mergeModelB" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
||||||
|
<br/><br/>
|
||||||
|
<p id="merge-warning"><small><b>Important:</b> Please merge models of similar type.<br/>For e.g. <code>SD 1.4</code> models with only <code>SD 1.4/1.5</code> models,<br/><code>SD 2.0</code> with <code>SD 2.0</code>-type, and <code>SD 2.1</code> with <code>SD 2.1</code>-type models.</small></p>
|
||||||
|
<br/>
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td><label for="#merge-filename">Output file name:</label></td>
|
||||||
|
<td><input id="merge-filename" size=24> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Base name of the output file.<br>Mix ratio and file suffix will be appended to this.</span></i></td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><label for="#merge-fp">Output precision:</label></td>
|
||||||
|
<td><select id="merge-fp">
|
||||||
|
<option value="fp16">fp16 (smaller file size)</option>
|
||||||
|
<option value="fp32">fp32 (larger file size)</option>
|
||||||
|
</select>
|
||||||
|
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Image generation uses fp16, so it's a good choice.<br>Use fp32 if you want to use the result models for more mixes</span></i>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td><label for="#merge-format">Output file format:</label></td>
|
||||||
|
<td><select id="merge-format">
|
||||||
|
<option value="safetensors">Safetensors (recommended)</option>
|
||||||
|
<option value="ckpt">CKPT/Pickle (legacy format)</option>
|
||||||
|
</select>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
</table>
|
||||||
|
<br/>
|
||||||
|
<div id="merge-log-container">
|
||||||
|
<p><label for="#merge-log">Log messages:</label></p>
|
||||||
|
<div id="merge-log"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="merge-config">
|
||||||
|
<div class="tab-container">
|
||||||
|
<span id="tab-merge-opts-single" class="tab active">
|
||||||
|
<span>Make a single file</span>
|
||||||
|
</span>
|
||||||
|
<span id="tab-merge-opts-batch" class="tab">
|
||||||
|
<span>Make multiple variations</span>
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<div id="tab-content-merge-opts-single" class="tab-content active">
|
||||||
|
<div class="tab-content-inner">
|
||||||
|
<small>Saves a single merged model file, at the specified merge ratio.</small><br/><br/>
|
||||||
|
<label for="#single-merge-ratio-slider">Merge ratio:</label>
|
||||||
|
<input id="single-merge-ratio-slider" name="single-merge-ratio-slider" class="editor-slider" value="50" type="range" min="1" max="1000">
|
||||||
|
<input id="single-merge-ratio" size=2 value="5">%
|
||||||
|
<i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Model A's contribution to the mix. The rest will be from Model B.</span></i>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div id="tab-content-merge-opts-batch" class="tab-content">
|
||||||
|
<div class="tab-content-inner">
|
||||||
|
<small>Saves multiple variations of the model, at different merge ratios.<br/>Each variation will be saved as a separate file.</small><br/><br/>
|
||||||
|
<table>
|
||||||
|
<tr><td><label for="#merge-count">Number of variations:</label></td>
|
||||||
|
<td> <input id="merge-count" size=2 value="5"></td>
|
||||||
|
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Number of models to create</span></i></td></tr>
|
||||||
|
<tr><td><label for="#merge-start">Starting merge ratio:</label></td>
|
||||||
|
<td> <input id="merge-start" size=2 value="5">%</td>
|
||||||
|
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Smallest share of model A in the mix</span></i></td></tr>
|
||||||
|
<tr><td><label for="#merge-step">Increment each step:</label></td>
|
||||||
|
<td> <input id="merge-step" size=2 value="10">%</td>
|
||||||
|
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Share of model A added into the mix per step</span></i></td></tr>
|
||||||
|
<tr><td><label for="#merge-interpolation">Interpolation model:</label></td>
|
||||||
|
<td> <select id="merge-interpolation">
|
||||||
|
<option>Exact</option>
|
||||||
|
<option>SmoothStep</option>
|
||||||
|
<option>SmootherStep</option>
|
||||||
|
<option>SmoothestStep</option>
|
||||||
|
</select></td>
|
||||||
|
<td> <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Sigmoid function to be applied to the model share before mixing</span></i></td></tr>
|
||||||
|
</table>
|
||||||
|
<br/>
|
||||||
|
<small>Preview of variation ratios:</small><br/>
|
||||||
|
<canvas id="merge-canvas" width="400" height="400"></canvas>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="merge-buttons">
|
||||||
|
<button id="merge-button" class="primaryButton">Merge models</button>
|
||||||
|
</div>
|
||||||
|
</div>`
|
||||||
|
|
||||||
|
const tabSettingsSingle = document.querySelector('#tab-merge-opts-single')
|
||||||
|
const tabSettingsBatch = document.querySelector('#tab-merge-opts-batch')
|
||||||
|
linkTabContents(tabSettingsSingle)
|
||||||
|
linkTabContents(tabSettingsBatch)
|
||||||
|
|
||||||
|
console.log('Activate')
|
||||||
|
let mergeModelAField = new ModelDropdown(document.querySelector('#mergeModelA'), 'stable-diffusion')
|
||||||
|
let mergeModelBField = new ModelDropdown(document.querySelector('#mergeModelB'), 'stable-diffusion')
|
||||||
|
updateChart()
|
||||||
|
|
||||||
|
// slider
|
||||||
|
const singleMergeRatioField = document.querySelector('#single-merge-ratio')
|
||||||
|
const singleMergeRatioSlider = document.querySelector('#single-merge-ratio-slider')
|
||||||
|
|
||||||
|
function updateSingleMergeRatio() {
|
||||||
|
singleMergeRatioField.value = singleMergeRatioSlider.value / 10
|
||||||
|
singleMergeRatioField.dispatchEvent(new Event("change"))
|
||||||
|
}
|
||||||
|
|
||||||
|
function updateSingleMergeRatioSlider() {
|
||||||
|
if (singleMergeRatioField.value < 0) {
|
||||||
|
singleMergeRatioField.value = 0
|
||||||
|
} else if (singleMergeRatioField.value > 100) {
|
||||||
|
singleMergeRatioField.value = 100
|
||||||
|
}
|
||||||
|
|
||||||
|
singleMergeRatioSlider.value = singleMergeRatioField.value * 10
|
||||||
|
singleMergeRatioSlider.dispatchEvent(new Event("change"))
|
||||||
|
}
|
||||||
|
|
||||||
|
singleMergeRatioSlider.addEventListener('input', updateSingleMergeRatio)
|
||||||
|
singleMergeRatioField.addEventListener('input', updateSingleMergeRatioSlider)
|
||||||
|
updateSingleMergeRatio()
|
||||||
|
|
||||||
|
document.querySelector('.merge-config').addEventListener('change', updateChart)
|
||||||
|
|
||||||
|
document.querySelector('#merge-button').addEventListener('click', async function(e) {
|
||||||
|
// Build request template
|
||||||
|
let model0 = document.querySelector('#mergeModelA').value
|
||||||
|
let model1 = document.querySelector('#mergeModelB').value
|
||||||
|
let request = { model0: model0, model1: model1 }
|
||||||
|
request['use_fp16'] = document.querySelector('#merge-fp').value == 'fp16'
|
||||||
|
let iterations = document.querySelector('#merge-count').value>>0
|
||||||
|
let start = parseFloat( document.querySelector('#merge-start').value )
|
||||||
|
let step = parseFloat( document.querySelector('#merge-step').value )
|
||||||
|
|
||||||
|
if (isTabActive(tabSettingsSingle)) {
|
||||||
|
start = parseFloat(singleMergeRatioField.value)
|
||||||
|
step = 0
|
||||||
|
iterations = 1
|
||||||
|
addLogMessage(`merge ratio = ${start}%`)
|
||||||
|
} else {
|
||||||
|
addLogMessage(`start = ${start}%`)
|
||||||
|
addLogMessage(`step = ${step}%`)
|
||||||
|
}
|
||||||
|
|
||||||
|
if (start + (iterations-1) * step >= 100) {
|
||||||
|
addLogMessage('<i>Aborting: maximum ratio is ≥ 100%</i>')
|
||||||
|
addLogMessage('Reduce the number of variations or the step size')
|
||||||
|
addLogSeparator()
|
||||||
|
document.querySelector('#merge-count').focus()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (document.querySelector('#merge-filename').value == "") {
|
||||||
|
addLogMessage('<i>Aborting: No output file name specified</i>')
|
||||||
|
addLogSeparator()
|
||||||
|
document.querySelector('#merge-filename').focus()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable merge button
|
||||||
|
e.target.disabled=true
|
||||||
|
e.target.classList.add('disabled')
|
||||||
|
let cursor = $("body").css("cursor");
|
||||||
|
let label = document.querySelector('#merge-button').innerHTML
|
||||||
|
$("body").css("cursor", "progress");
|
||||||
|
document.querySelector('#merge-button').innerHTML = 'Merging models ...'
|
||||||
|
|
||||||
|
addLogMessage("Merging models")
|
||||||
|
addLogMessage("Model A: "+model0)
|
||||||
|
addLogMessage("Model B: "+model1)
|
||||||
|
|
||||||
|
// Batch main loop
|
||||||
|
for (let i=0; i<iterations; i++) {
|
||||||
|
let alpha = ( start + i * step ) / 100
|
||||||
|
switch (document.querySelector('#merge-interpolation').value) {
|
||||||
|
case 'SmoothStep':
|
||||||
|
alpha = smoothstep(alpha)
|
||||||
|
break
|
||||||
|
case 'SmootherStep':
|
||||||
|
alpha = smootherstep(alpha)
|
||||||
|
break
|
||||||
|
case 'SmoothestStep':
|
||||||
|
alpha = smootheststep(alpha)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
addLogMessage(`merging batch job ${i+1}/${iterations}, alpha = ${alpha.toFixed(5)}...`)
|
||||||
|
|
||||||
|
request['out_path'] = document.querySelector('#merge-filename').value
|
||||||
|
request['out_path'] += '-' + alpha.toFixed(5) + '.' + document.querySelector('#merge-format').value
|
||||||
|
addLogMessage(` filename: ${request['out_path']}`)
|
||||||
|
|
||||||
|
request['ratio'] = alpha
|
||||||
|
let res = await fetch('/model/merge', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify(request) })
|
||||||
|
const data = await res.json();
|
||||||
|
addLogMessage(JSON.stringify(data))
|
||||||
|
}
|
||||||
|
addLogMessage("<b>Done.</b> The models have been saved to your <tt>models/stable-diffusion</tt> folder.")
|
||||||
|
addLogSeparator()
|
||||||
|
// Re-enable merge button
|
||||||
|
$("body").css("cursor", cursor);
|
||||||
|
document.querySelector('#merge-button').innerHTML = label
|
||||||
|
e.target.disabled=false
|
||||||
|
e.target.classList.remove('disabled')
|
||||||
|
|
||||||
|
// Update model list
|
||||||
|
stableDiffusionModelField.innerHTML = ''
|
||||||
|
vaeModelField.innerHTML = ''
|
||||||
|
hypernetworkModelField.innerHTML = ''
|
||||||
|
await getModels()
|
||||||
|
})
|
||||||
|
|
||||||
|
})()
|
||||||
53     ui/plugins/ui/modifiers-toggle.plugin.js    Normal file
@@ -0,0 +1,53 @@
(function () {
    "use strict"

    var styleSheet = document.createElement("style");
    styleSheet.textContent = `
        .modifier-card-tiny.modifier-toggle-inactive {
            background: transparent;
            border: 2px dashed red;
            opacity:0.2;
        }
    `;
    document.head.appendChild(styleSheet);

    // observe for changes in tag list
    var observer = new MutationObserver(function (mutations) {
        // mutations.forEach(function (mutation) {
        if (editorModifierTagsList.childNodes.length > 0) {
            ModifierToggle()
        }
        // })
    })

    observer.observe(editorModifierTagsList, {
        childList: true
    })

    function ModifierToggle() {
        let overlays = document.querySelector('#editor-inputs-tags-list').querySelectorAll('.modifier-card-overlay')
        overlays.forEach (i => {
            i.oncontextmenu = (e) => {
                e.preventDefault()

                if (i.parentElement.classList.contains('modifier-toggle-inactive')) {
                    i.parentElement.classList.remove('modifier-toggle-inactive')
                } else {
                    i.parentElement.classList.add('modifier-toggle-inactive')
                }
                // refresh activeTags
                let modifierName = i.parentElement.getElementsByClassName('modifier-card-label')[0].getElementsByTagName("p")[0].dataset.fullName
                activeTags = activeTags.map(obj => {
                    if (trimModifiers(obj.name) === trimModifiers(modifierName)) {
                        return {...obj, inactive: (obj.element.classList.contains('modifier-toggle-inactive'))};
                    }

                    return obj;
                });
                document.dispatchEvent(new Event('refreshImageModifiers'))
            }
        })
    }
})()
64     ui/plugins/ui/release-notes.plugin.js    Normal file
@@ -0,0 +1,64 @@
(function() {
    // Register selftests when loaded by jasmine.
    if (typeof PLUGINS?.SELFTEST === 'object') {
        PLUGINS.SELFTEST["release-notes"] = function() {
            it('should be able to fetch CHANGES.md', async function() {
                let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/main/CHANGES.md`)
                expect(releaseNotes.status).toBe(200)
            })
        }
    }

    document.querySelector('.tab-container')?.insertAdjacentHTML('beforeend', `
        <span id="tab-news" class="tab">
            <span><i class="fa fa-bolt icon"></i> What's new?</span>
        </span>
    `)

    document.querySelector('#tab-content-wrapper')?.insertAdjacentHTML('beforeend', `
        <div id="tab-content-news" class="tab-content">
            <div id="news" class="tab-content-inner">
                Loading..
            </div>
        </div>
    `)

    const tabNews = document.querySelector('#tab-news')
    if (tabNews) {
        linkTabContents(tabNews)
    }
    const news = document.querySelector('#news')
    if (!news) {
        // news tab not found, don't execute the plugin code.
        return
    }

    document.querySelector('body').insertAdjacentHTML('beforeend', `
        <style>
            #tab-content-news .tab-content-inner {
                max-width: 100%;
                text-align: left;
                padding: 10pt;
            }
        </style>
    `)

    loadScript('/media/js/marked.min.js').then(async function() {
        let appConfig = await fetch('/get/app_config')
        if (!appConfig.ok) {
            console.error('[release-notes] Failed to get app_config.')
            return
        }
        appConfig = await appConfig.json()

        const updateBranch = appConfig.update_branch || 'main'

        let releaseNotes = await fetch(`https://raw.githubusercontent.com/cmdr2/stable-diffusion-ui/${updateBranch}/CHANGES.md`)
        if (!releaseNotes.ok) {
            console.error('[release-notes] Failed to get CHANGES.md.')
            return
        }
        releaseNotes = await releaseNotes.text()
        news.innerHTML = marked.parse(releaseNotes)
    })
})()
25     ui/plugins/ui/selftest.plugin.js    Normal file
@@ -0,0 +1,25 @@
/* SD-UI Selftest Plugin.js
*/
(function() { "use strict"
    const ID_PREFIX = "selftest-plugin"

    const links = document.getElementById("community-links")
    if (!links) {
        console.error('%s the ID "community-links" cannot be found.', ID_PREFIX)
        return
    }

    // Add link to Jasmine SpecRunner
    const pluginLink = document.createElement('li')
    const options = {
        'stopSpecOnExpectationFailure': "true",
        'stopOnSpecFailure': 'false',
        'random': 'false',
        'hideDisabled': 'false'
    }
    const optStr = Object.entries(options).map(([key, val]) => `${key}=${val}`).join('&')
    pluginLink.innerHTML = `<a id="${ID_PREFIX}-starttest" href="${location.protocol}/plugins/core/SpecRunner.html?${optStr}" target="_blank"><i class="fa-solid fa-vial-circle-check"></i> Start SelfTest</a>`
    links.appendChild(pluginLink)

    console.log('%s loaded!', ID_PREFIX)
})()
@@ -1,108 +0,0 @@
import json

class Request:
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    precision: str = "autocast" # or "full"
    save_to_disk_path: str = None
    turbo: bool = True
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    use_stable_diffusion_model: str = "sd-v1-4"
    use_vae_model: str = None
    show_only_filtered_image: bool = False
    output_format: str = "jpeg" # or "png"

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

    def json(self):
        return {
            "session_id": self.session_id,
            "prompt": self.prompt,
            "negative_prompt": self.negative_prompt,
            "num_outputs": self.num_outputs,
            "num_inference_steps": self.num_inference_steps,
            "guidance_scale": self.guidance_scale,
            "width": self.width,
            "height": self.height,
            "seed": self.seed,
            "prompt_strength": self.prompt_strength,
            "sampler": self.sampler,
            "use_face_correction": self.use_face_correction,
            "use_upscale": self.use_upscale,
            "use_stable_diffusion_model": self.use_stable_diffusion_model,
            "use_vae_model": self.use_vae_model,
            "output_format": self.output_format,
        }

    def __str__(self):
        return f'''
    session_id: {self.session_id}
    prompt: {self.prompt}
    negative_prompt: {self.negative_prompt}
    seed: {self.seed}
    num_inference_steps: {self.num_inference_steps}
    sampler: {self.sampler}
    guidance_scale: {self.guidance_scale}
    w: {self.width}
    h: {self.height}
    precision: {self.precision}
    save_to_disk_path: {self.save_to_disk_path}
    turbo: {self.turbo}
    use_full_precision: {self.use_full_precision}
    use_face_correction: {self.use_face_correction}
    use_upscale: {self.use_upscale}
    use_stable_diffusion_model: {self.use_stable_diffusion_model}
    use_vae_model: {self.use_vae_model}
    show_only_filtered_image: {self.show_only_filtered_image}
    output_format: {self.output_format}

    stream_progress_updates: {self.stream_progress_updates}
    stream_image_progress: {self.stream_image_progress}'''

class Image:
    data: str # base64
    seed: int
    is_nsfw: bool
    path_abs: str = None

    def __init__(self, data, seed):
        self.data = data
        self.seed = seed

    def json(self):
        return {
            "data": self.data,
            "seed": self.seed,
            "path_abs": self.path_abs,
        }

class Response:
    request: Request
    images: list

    def json(self):
        res = {
            "status": 'succeeded',
            "request": self.request.json(),
            "output": [],
        }

        for image in self.images:
            res["output"].append(image.json())

        return res
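For readers tracing the refactor, here is a minimal usage sketch of the Request/Image/Response classes deleted above. The field names come from the module itself; everything else (the demo values, the final print) is illustrative only and not code from the repository.

    req = Request()
    req.session_id = "demo-session"          # used to group outputs on disk
    req.prompt = "a photo of an astronaut"
    req.num_inference_steps = 25
    req.sampler = "euler_a"                  # one of the samplers listed above
    req.stream_progress_updates = True       # ask the renderer to yield progress JSON

    res = Response()
    res.request = req
    res.images = [Image(data="<base64-encoded image>", seed=req.seed)]

    # Response.json() nests the request and the image list, ready for json.dumps()
    print(res.json()["status"])              # -> 'succeeded'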
@@ -1,332 +0,0 @@
diff --git a/optimizedSD/ddpm.py b/optimizedSD/ddpm.py
index b967b55..35ef520 100644
--- a/optimizedSD/ddpm.py
+++ b/optimizedSD/ddpm.py
@@ -22,7 +22,7 @@ from ldm.util import exists, default, instantiate_from_config
 from ldm.modules.diffusionmodules.util import make_beta_schedule
 from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
 from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff
+from .samplers import CompVisDenoiser, get_ancestral_step, to_d, append_dims,linear_multistep_coeff

 def disabled_train(self):
     """Overwrite model.train with this function to make sure train/eval mode
@@ -506,6 +506,8 @@ class UNet(DDPM):

         x_latent = noise if x0 is None else x0
         # sampling
+        if sampler in ('ddim', 'dpm2', 'heun', 'dpm2_a', 'lms') and not hasattr(self, 'ddim_timesteps'):
+            self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)

         if sampler == "plms":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
@@ -528,39 +530,46 @@ class UNet(DDPM):
         elif sampler == "ddim":
             samples = self.ddim_sampling(x_latent, conditioning, S, unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning,
-                                         mask = mask,init_latent=x_T,use_original_steps=False)
+                                         mask = mask,init_latent=x_T,use_original_steps=False,
+                                         callback=callback, img_callback=img_callback)

         elif sampler == "euler":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "euler_a":
             self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=False)
             samples = self.euler_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale)
+                                                    unconditional_guidance_scale=unconditional_guidance_scale,
+                                                    img_callback=img_callback)

         elif sampler == "dpm2":
             samples = self.dpm_2_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                          unconditional_guidance_scale=unconditional_guidance_scale)
+                                          unconditional_guidance_scale=unconditional_guidance_scale,
+                                          img_callback=img_callback)
         elif sampler == "heun":
             samples = self.heun_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                         unconditional_guidance_scale=unconditional_guidance_scale)
+                                         unconditional_guidance_scale=unconditional_guidance_scale,
+                                         img_callback=img_callback)

         elif sampler == "dpm2_a":
             samples = self.dpm_2_ancestral_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale)
+                                                    unconditional_guidance_scale=unconditional_guidance_scale,
+                                                    img_callback=img_callback)


         elif sampler == "lms":
             samples = self.lms_sampling(self.alphas_cumprod,x_latent, S, conditioning, unconditional_conditioning=unconditional_conditioning,
-                                        unconditional_guidance_scale=unconditional_guidance_scale)
+                                        unconditional_guidance_scale=unconditional_guidance_scale,
+                                        img_callback=img_callback)
+
+        yield from samples

         if(self.turbo):
             self.model1.to("cpu")
             self.model2.to("cpu")

-        return samples
-
     @torch.no_grad()
     def plms_sampling(self, cond,b, img,
                       ddim_use_original_steps=False,
@@ -599,10 +608,10 @@ class UNet(DDPM):
             old_eps.append(e_t)
             if len(old_eps) >= 4:
                 old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(pred_x0, i)

-        return img
+        yield from img_callback(img, len(iterator)-1)

     @torch.no_grad()
     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -706,7 +715,8 @@ class UNet(DDPM):

     @torch.no_grad()
     def ddim_sampling(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-                      mask = None,init_latent=None,use_original_steps=False):
+                      mask = None,init_latent=None,use_original_steps=False,
+                      callback=None, img_callback=None):

         timesteps = self.ddim_timesteps
         timesteps = timesteps[:t_start]
@@ -730,10 +740,13 @@ class UNet(DDPM):
                                          unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning)

+            if callback: yield from callback(i)
+            if img_callback: yield from img_callback(x_dec, i)
+
         if mask is not None:
-            return x0 * mask + (1. - mask) * x_dec
+            x_dec = x0 * mask + (1. - mask) * x_dec

-        return x_dec
+        yield from img_callback(x_dec, len(iterator)-1)


     @torch.no_grad()
@@ -779,13 +792,16 @@ class UNet(DDPM):


     @torch.no_grad()
-    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def euler_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None,callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args
         cvd = CompVisDenoiser(ac)
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Euler Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -807,13 +823,18 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             # Euler method
             x = x + d * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)

     @torch.no_grad()
-    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None):
+    def euler_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with Euler method steps."""
         extra_args = {} if extra_args is None else extra_args

@@ -822,6 +843,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Euler Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):

@@ -837,17 +860,22 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Euler method
             dt = sigma_down - sigmas[i]
             x = x + d * dt
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)



     @torch.no_grad()
-    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def heun_sampling(self, ac, x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                      img_callback=None):
         """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args

@@ -855,6 +883,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running Heun Sampling with {len(sigmas) - 1} timesteps")
+

         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
@@ -876,6 +906,9 @@ class UNet(DDPM):
             d = to_d(x, sigma_hat, denoised)
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             dt = sigmas[i + 1] - sigma_hat
             if sigmas[i + 1] == 0:
                 # Euler method
@@ -895,11 +928,13 @@ class UNet(DDPM):
                 d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
                 d_prime = (d + d_2) / 2
                 x = x + d_prime * dt
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)

     @torch.no_grad()
-    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    def dpm_2_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1,extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.,
+                       img_callback=None):
         """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
         extra_args = {} if extra_args is None else extra_args

@@ -907,6 +942,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running DPM2 Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):
             gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
@@ -924,7 +961,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

-
+            if img_callback: yield from img_callback(x, i)

             d = to_d(x, sigma_hat, denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
@@ -945,11 +982,13 @@ class UNet(DDPM):

             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)


     @torch.no_grad()
-    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None):
+    def dpm_2_ancestral_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None,
+                                 img_callback=None):
         """Ancestral sampling with DPM-Solver inspired second-order steps."""
         extra_args = {} if extra_args is None else extra_args

@@ -957,6 +996,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running DPM2 Ancestral Sampling with {len(sigmas) - 1} timesteps")
+
         s_in = x.new_ones([x.shape[0]]).half()
         for i in trange(len(sigmas) - 1, disable=disable):

@@ -973,6 +1014,9 @@ class UNet(DDPM):
             sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
             if callback is not None:
                 callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+            if img_callback: yield from img_callback(x, i)
+
             d = to_d(x, sigmas[i], denoised)
             # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
             sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
@@ -993,11 +1037,13 @@ class UNet(DDPM):
             d_2 = to_d(x_2, sigma_mid, denoised_2)
             x = x + d_2 * dt_2
             x = x + torch.randn_like(x) * sigma_up
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)

     @torch.no_grad()
-    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4):
+    def lms_sampling(self,ac,x, S, cond, unconditional_conditioning = None, unconditional_guidance_scale = 1, extra_args=None, callback=None, disable=None, order=4,
+                     img_callback=None):
         extra_args = {} if extra_args is None else extra_args
         s_in = x.new_ones([x.shape[0]])

@@ -1005,6 +1051,8 @@ class UNet(DDPM):
         sigmas = cvd.get_sigmas(S)
         x = x*sigmas[0]

+        print(f"Running LMS Sampling with {len(sigmas) - 1} timesteps")
+
         ds = []
         for i in trange(len(sigmas) - 1, disable=disable):

@@ -1017,6 +1065,7 @@ class UNet(DDPM):
             e_t_uncond, e_t = (x_in + eps * c_out).chunk(2)
             denoised = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

+            if img_callback: yield from img_callback(x, i)

             d = to_d(x, sigmas[i], denoised)
             ds.append(d)
@@ -1027,4 +1076,5 @@ class UNet(DDPM):
             cur_order = min(i + 1, order)
             coeffs = [linear_multistep_coeff(cur_order, sigmas.cpu(), i, j) for j in range(cur_order)]
             x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
-        return x
+
+        yield from img_callback(x, len(sigmas)-1)
diff --git a/optimizedSD/openaimodelSplit.py b/optimizedSD/openaimodelSplit.py
index abc3098..7a32ffe 100644
--- a/optimizedSD/openaimodelSplit.py
+++ b/optimizedSD/openaimodelSplit.py
@@ -13,7 +13,7 @@ from ldm.modules.diffusionmodules.util import (
     normalization,
     timestep_embedding,
 )
-from splitAttention import SpatialTransformer
+from .splitAttention import SpatialTransformer


 class AttentionPool2d(nn.Module):
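The patch above rewrites each sampler to `yield from img_callback(...)` instead of returning the final latents, which is what lets the UI stream intermediate previews. A rough sketch of that generator protocol, using hypothetical names (`fake_sampler`, `on_image`) rather than the repository's real API:

    def on_image(x, i):
        # yield a progress payload for the UI, then give control back to the sampler
        yield {"step": i, "latents": x}

    def fake_sampler(steps, img_callback=None):
        x = [0.0]  # stand-in for the latent tensor
        for i in range(steps):
            x[0] += 1.0
            if img_callback:
                yield from img_callback(x, i)   # stream progress out of the sampler
        yield from img_callback(x, steps)       # the final image is also yielded, not returned

    for update in fake_sampler(3, img_callback=on_image):
        print(update)   # the web layer turns each update into a streamed chunk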
@@ -1,168 +0,0 @@
import os
import torch
import traceback
import re

COMPARABLE_GPU_PERCENTILE = 0.65 # if a GPU's free_mem is within this % of the GPU with the most free_mem, it will be picked

mem_free_threshold = 0

def get_device_delta(render_devices, active_devices):
    '''
    render_devices: 'cpu', or 'auto' or ['cuda:N'...]
    active_devices: ['cpu', 'cuda:N'...]
    '''

    if render_devices in ('cpu', 'auto'):
        render_devices = [render_devices]
    elif render_devices is not None:
        if isinstance(render_devices, str):
            render_devices = [render_devices]
        if isinstance(render_devices, list) and len(render_devices) > 0:
            render_devices = list(filter(lambda x: x.startswith('cuda:'), render_devices))
            if len(render_devices) == 0:
                raise Exception('Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}')

            render_devices = list(filter(lambda x: is_device_compatible(x), render_devices))
            if len(render_devices) == 0:
                raise Exception('Sorry, none of the render_devices configured in config.json are compatible with Stable Diffusion')
        else:
            raise Exception('Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}')
    else:
        render_devices = ['auto']

    if 'auto' in render_devices:
        render_devices = auto_pick_devices(active_devices)
        if 'cpu' in render_devices:
            print('WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!')

    active_devices = set(active_devices)
    render_devices = set(render_devices)

    devices_to_start = render_devices - active_devices
    devices_to_stop = active_devices - render_devices

    return devices_to_start, devices_to_stop

def auto_pick_devices(currently_active_devices):
    global mem_free_threshold

    if not torch.cuda.is_available(): return ['cpu']

    device_count = torch.cuda.device_count()
    if device_count == 1:
        return ['cuda:0'] if is_device_compatible('cuda:0') else ['cpu']

    print('Autoselecting GPU. Using most free memory.')
    devices = []
    for device in range(device_count):
        device = f'cuda:{device}'
        if not is_device_compatible(device):
            continue

        mem_free, mem_total = torch.cuda.mem_get_info(device)
        mem_free /= float(10**9)
        mem_total /= float(10**9)
        device_name = torch.cuda.get_device_name(device)
        print(f'{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb')
        devices.append({'device': device, 'device_name': device_name, 'mem_free': mem_free})

    devices.sort(key=lambda x:x['mem_free'], reverse=True)
    max_mem_free = devices[0]['mem_free']
    curr_mem_free_threshold = COMPARABLE_GPU_PERCENTILE * max_mem_free
    mem_free_threshold = max(curr_mem_free_threshold, mem_free_threshold)

    # Auto-pick algorithm:
    # 1. Pick the top 75 percentile of the GPUs, sorted by free_mem.
    # 2. Also include already-running devices (GPU-only), otherwise their free_mem will
    #    always be very low (since their VRAM contains the model).
    #    These already-running devices probably aren't terrible, since they were picked in the past.
    #    Worst case, the user can restart the program and that'll get rid of them.
    devices = list(filter((lambda x: x['mem_free'] > mem_free_threshold or x['device'] in currently_active_devices), devices))
    devices = list(map(lambda x: x['device'], devices))
    return devices

def device_init(thread_data, device):
    '''
    This function assumes the 'device' has already been verified to be compatible.
    `get_device_delta()` has already filtered out incompatible devices.
    '''

    validate_device_id(device, log_prefix='device_init')

    if device == 'cpu':
        thread_data.device = 'cpu'
        thread_data.device_name = get_processor_name()
        print('Render device CPU available as', thread_data.device_name)
        return

    thread_data.device_name = torch.cuda.get_device_name(device)
    thread_data.device = device

    # Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
    device_name = thread_data.device_name.lower()
    thread_data.force_full_precision = ('nvidia' in device_name or 'geforce' in device_name) and (' 1660' in device_name or ' 1650' in device_name)
    if thread_data.force_full_precision:
        print('forcing full precision on NVIDIA 16xx cards, to avoid green images. GPU detected: ', thread_data.device_name)
        # Apply force_full_precision now before models are loaded.
        thread_data.precision = 'full'

    print(f'Setting {device} as active')
    torch.cuda.device(device)

    return

def validate_device_id(device, log_prefix=''):
    def is_valid():
        if not isinstance(device, str):
            return False
        if device == 'cpu':
            return True
        if not device.startswith('cuda:') or not device[5:].isnumeric():
            return False
        return True

    if not is_valid():
        raise EnvironmentError(f"{log_prefix}: device id should be 'cpu', or 'cuda:N' (where N is an integer index for the GPU). Got: {device}")

def is_device_compatible(device):
    '''
    Returns True/False, and prints any compatibility errors
    '''
    try:
        validate_device_id(device, log_prefix='is_device_compatible')
    except EnvironmentError as e:
        print(str(e))
        return False

    if device == 'cpu': return True
    # Memory check
    try:
        _, mem_total = torch.cuda.mem_get_info(device)
        mem_total /= float(10**9)
        if mem_total < 3.0:
            print(f'GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion')
            return False
    except RuntimeError as e:
        print(str(e))
        return False
    return True

def get_processor_name():
    try:
        import platform, subprocess
        if platform.system() == "Windows":
            return platform.processor()
        elif platform.system() == "Darwin":
            os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
            command = "sysctl -n machdep.cpu.brand_string"
            return subprocess.check_output(command).strip()
        elif platform.system() == "Linux":
            command = "cat /proc/cpuinfo"
            all_info = subprocess.check_output(command, shell=True).decode().strip()
            for line in all_info.split("\n"):
                if "model name" in line:
                    return re.sub(".*model name.*:", "", line, 1).strip()
    except:
        print(traceback.format_exc())
    return "cpu"
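A short illustrative sketch of how `get_device_delta()` above can be wired into a startup routine; the `config` dict and the start/stop loop are hypothetical, not code from the repository:

    # Illustrative only: deciding which render threads to start and stop.
    config = {"render_devices": "auto"}          # could also be "cpu" or ["cuda:0", "cuda:1"]
    currently_running = ["cuda:0"]               # devices that already have a render thread

    to_start, to_stop = get_device_delta(config["render_devices"], currently_running)

    for device in to_start:
        print(f"would start a render thread on {device}")
    for device in to_stop:
        print(f"would stop the render thread on {device}")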
@@ -1,13 +0,0 @@
diff --git a/environment.yaml b/environment.yaml
index 7f25da8..306750f 100644
--- a/environment.yaml
+++ b/environment.yaml
@@ -23,6 +23,8 @@ dependencies:
     - torch-fidelity==0.3.0
     - transformers==4.19.2
     - torchmetrics==0.6.0
+    - pywavelets==1.3.0
+    - pandas==1.4.4
     - kornia==0.6
     - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
     - -e git+https://github.com/openai/CLIP.git@main#egg=clip
@@ -1,778 +0,0 @@
"""runtime.py: torch device owned by a thread.
Notes:
    Avoid device switching, transferring all the models will get too complex.
    To use a different device, signal the current render device to exit
    and then start a new clean thread for the new device.
"""
import json
import os, re
import traceback
import torch
import numpy as np
from gc import collect as gc_collect
from omegaconf import OmegaConf
from PIL import Image, ImageOps
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import nullcontext
from einops import rearrange, repeat
from ldm.util import instantiate_from_config
from optimizedSD.optimUtils import split_weighted_subprompts
from transformers import logging

from gfpgan import GFPGANer
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import uuid

logging.set_verbosity_error()

# consts
config_yaml = "optimizedSD/v1-inference.yaml"
filename_regex = re.compile('[^a-zA-Z0-9]')
force_gfpgan_to_cuda0 = True # workaround: gfpgan currently works only on cuda:0

# api stuff
from sd_internal import device_manager
from . import Request, Response, Image as ResponseImage
import base64
from io import BytesIO
#from colorama import Fore

from threading import local as LocalThreadVars
thread_data = LocalThreadVars()

def thread_init(device):
    # Thread bound properties
    thread_data.stop_processing = False
    thread_data.temp_images = {}

    thread_data.ckpt_file = None
    thread_data.vae_file = None
    thread_data.gfpgan_file = None
    thread_data.real_esrgan_file = None

    thread_data.model = None
    thread_data.modelCS = None
    thread_data.modelFS = None
    thread_data.model_gfpgan = None
    thread_data.model_real_esrgan = None

    thread_data.model_is_half = False
    thread_data.model_fs_is_half = False
    thread_data.device = None
    thread_data.device_name = None
    thread_data.unet_bs = 1
    thread_data.precision = 'autocast'
    thread_data.sampler_plms = None
    thread_data.sampler_ddim = None

    thread_data.turbo = False
    thread_data.force_full_precision = False
    thread_data.reduced_memory = True

    device_manager.device_init(thread_data, device)

def load_model_ckpt():
    if not thread_data.ckpt_file: raise ValueError(f'Thread ckpt_file is undefined.')
    if not os.path.exists(thread_data.ckpt_file + '.ckpt'): raise FileNotFoundError(f'Cannot find {thread_data.ckpt_file}.ckpt')

    if not thread_data.precision:
        thread_data.precision = 'full' if thread_data.force_full_precision else 'autocast'

    if not thread_data.unet_bs:
        thread_data.unet_bs = 1

    if thread_data.device == 'cpu':
        thread_data.precision = 'full'

    print('loading', thread_data.ckpt_file + '.ckpt', 'to device', thread_data.device, 'using precision', thread_data.precision)
    sd = load_model_from_config(thread_data.ckpt_file + '.ckpt')
    li, lo = [], []
    for key, value in sd.items():
        sp = key.split(".")
        if (sp[0]) == "model":
            if "input_blocks" in sp:
                li.append(key)
            elif "middle_block" in sp:
                li.append(key)
            elif "time_embed" in sp:
                li.append(key)
            else:
                lo.append(key)
    for key in li:
        sd["model1." + key[6:]] = sd.pop(key)
    for key in lo:
        sd["model2." + key[6:]] = sd.pop(key)

    config = OmegaConf.load(f"{config_yaml}")

    model = instantiate_from_config(config.modelUNet)
    _, _ = model.load_state_dict(sd, strict=False)
    model.eval()
    model.cdevice = torch.device(thread_data.device)
    model.unet_bs = thread_data.unet_bs
    model.turbo = thread_data.turbo
    # if thread_data.device != 'cpu':
    #     model.to(thread_data.device)
    #if thread_data.reduced_memory:
    #    model.model1.to("cpu")
    #    model.model2.to("cpu")
    thread_data.model = model

    modelCS = instantiate_from_config(config.modelCondStage)
    _, _ = modelCS.load_state_dict(sd, strict=False)
    modelCS.eval()
    modelCS.cond_stage_model.device = torch.device(thread_data.device)
    # if thread_data.device != 'cpu':
    #     if thread_data.reduced_memory:
    #         modelCS.to('cpu')
    #     else:
    #         modelCS.to(thread_data.device) # Preload on device if not already there.
    thread_data.modelCS = modelCS

    modelFS = instantiate_from_config(config.modelFirstStage)
    _, _ = modelFS.load_state_dict(sd, strict=False)

    if thread_data.vae_file is not None:
        for model_extension in ['.ckpt', '.vae.pt']:
            if os.path.exists(thread_data.vae_file + model_extension):
                print(f"Loading VAE weights from: {thread_data.vae_file}{model_extension}")
                vae_ckpt = torch.load(thread_data.vae_file + model_extension, map_location="cpu")
                vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
                modelFS.first_stage_model.load_state_dict(vae_dict, strict=False)
                break
            else:
                print(f'Cannot find VAE file: {thread_data.vae_file}{model_extension}')

    modelFS.eval()
    # if thread_data.device != 'cpu':
    #     if thread_data.reduced_memory:
    #         modelFS.to('cpu')
    #     else:
    #         modelFS.to(thread_data.device) # Preload on device if not already there.
    thread_data.modelFS = modelFS
    del sd

    if thread_data.device != "cpu" and thread_data.precision == "autocast":
        thread_data.model.half()
        thread_data.modelCS.half()
        thread_data.modelFS.half()
        thread_data.model_is_half = True
        thread_data.model_fs_is_half = True
    else:
        thread_data.model_is_half = False
        thread_data.model_fs_is_half = False

    print(f'''loaded model
    model file: {thread_data.ckpt_file}.ckpt
    model.device: {model.device}
    modelCS.device: {modelCS.cond_stage_model.device}
    modelFS.device: {thread_data.modelFS.device}
    using precision: {thread_data.precision}''')

def unload_filters():
    if thread_data.model_gfpgan is not None:
        if thread_data.device != 'cpu': thread_data.model_gfpgan.gfpgan.to('cpu')

        del thread_data.model_gfpgan
    thread_data.model_gfpgan = None

    if thread_data.model_real_esrgan is not None:
        if thread_data.device != 'cpu': thread_data.model_real_esrgan.model.to('cpu')

        del thread_data.model_real_esrgan
    thread_data.model_real_esrgan = None

    gc()

def unload_models():
    if thread_data.model is not None:
        print('Unloading models...')
        if thread_data.device != 'cpu':
            thread_data.modelFS.to('cpu')
            thread_data.modelCS.to('cpu')
            thread_data.model.model1.to("cpu")
            thread_data.model.model2.to("cpu")

        del thread_data.model
        del thread_data.modelCS
        del thread_data.modelFS

    thread_data.model = None
    thread_data.modelCS = None
    thread_data.modelFS = None

    gc()

def wait_model_move_to(model, target_device): # Send to target_device and wait until complete.
    if thread_data.device == target_device: return
    start_mem = torch.cuda.memory_allocated(thread_data.device) / 1e6
    if start_mem <= 0: return
    model_name = model.__class__.__name__
    print(f'Device {thread_data.device} - Sending model {model_name} to {target_device} | Memory transfer starting. Memory Used: {round(start_mem)}Mb')
    start_time = time.time()
    model.to(target_device)
    time_step = start_time
    WARNING_TIMEOUT = 1.5 # seconds - Show activity in console after timeout.
    last_mem = start_mem
    is_transfering = True
    while is_transfering:
        time.sleep(0.5) # 500ms
        mem = torch.cuda.memory_allocated(thread_data.device) / 1e6
        is_transfering = bool(mem > 0 and mem < last_mem) # still stuff loaded, but less than last time.
        last_mem = mem
        if not is_transfering:
            break
        if time.time() - time_step > WARNING_TIMEOUT: # Long delay, print to console to show activity.
            print(f'Device {thread_data.device} - Waiting for Memory transfer. Memory Used: {round(mem)}Mb, Transferred: {round(start_mem - mem)}Mb')
            time_step = time.time()
    print(f'Device {thread_data.device} - {model_name} Moved: {round(start_mem - last_mem)}Mb in {round(time.time() - start_time, 3)} seconds to {target_device}')
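`wait_model_move_to()` above works by polling `torch.cuda.memory_allocated()` until a `.to()` transfer stops shrinking the allocation. A stand-alone sketch of that polling idea, with illustrative parameter values (the poll interval and the 30-second timeout are assumptions, not taken from the code above):

    import time
    import torch

    def wait_for_vram_to_settle(device, poll=0.5, timeout=30.0):
        """Block until allocated VRAM on `device` stops shrinking (or timeout passes)."""
        deadline = time.time() + timeout
        last = torch.cuda.memory_allocated(device) / 1e6
        while time.time() < deadline:
            time.sleep(poll)
            now = torch.cuda.memory_allocated(device) / 1e6
            if now <= 0 or now >= last:      # nothing left to move, or it stopped shrinking
                return now
            last = now
        return last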
def load_model_gfpgan():
    if thread_data.gfpgan_file is None: raise ValueError(f'Thread gfpgan_file is undefined.')
    model_path = thread_data.gfpgan_file + ".pth"
    device = 'cuda:0' if force_gfpgan_to_cuda0 else thread_data.device
    thread_data.model_gfpgan = GFPGANer(device=torch.device(device), model_path=model_path, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
    print('loaded', thread_data.gfpgan_file, 'to', thread_data.model_gfpgan.device, 'precision', thread_data.precision)

def load_model_real_esrgan():
    if thread_data.real_esrgan_file is None: raise ValueError(f'Thread real_esrgan_file is undefined.')
    model_path = thread_data.real_esrgan_file + ".pth"

    RealESRGAN_models = {
        'RealESRGAN_x4plus': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4),
        'RealESRGAN_x4plus_anime_6B': RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
    }

    model_to_use = RealESRGAN_models[thread_data.real_esrgan_file]

    if thread_data.device == 'cpu':
        thread_data.model_real_esrgan = RealESRGANer(device=torch.device(thread_data.device), scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=False) # cpu does not support half
        #thread_data.model_real_esrgan.device = torch.device(thread_data.device)
        thread_data.model_real_esrgan.model.to('cpu')
    else:
        thread_data.model_real_esrgan = RealESRGANer(device=torch.device(thread_data.device), scale=2, model_path=model_path, model=model_to_use, pre_pad=0, half=thread_data.model_is_half)

    thread_data.model_real_esrgan.model.name = thread_data.real_esrgan_file
    print('loaded ', thread_data.real_esrgan_file, 'to', thread_data.model_real_esrgan.device, 'precision', thread_data.precision)


def get_session_out_path(disk_path, session_id):
    if disk_path is None: return None
    if session_id is None: return None

    session_out_path = os.path.join(disk_path, filename_regex.sub('_',session_id))
    os.makedirs(session_out_path, exist_ok=True)
    return session_out_path

def get_base_path(disk_path, session_id, prompt, img_id, ext, suffix=None):
    if disk_path is None: return None
    if session_id is None: return None
    if ext is None: raise Exception('Missing ext')

    session_out_path = get_session_out_path(disk_path, session_id)

    prompt_flattened = filename_regex.sub('_', prompt)[:50]

    if suffix is not None:
        return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}_{suffix}.{ext}")
    return os.path.join(session_out_path, f"{prompt_flattened}_{img_id}.{ext}")

def apply_filters(filter_name, image_data, model_path=None):
    print(f'Applying filter {filter_name}...')
    gc() # Free space before loading new data.

    if filter_name == 'gfpgan':
        if isinstance(image_data, torch.Tensor):
            image_data.to('cuda:0' if force_gfpgan_to_cuda0 else thread_data.device)

        if model_path is not None and model_path != thread_data.gfpgan_file:
            thread_data.gfpgan_file = model_path
            load_model_gfpgan()
        elif not thread_data.model_gfpgan:
            load_model_gfpgan()
        if thread_data.model_gfpgan is None: raise Exception('Model "gfpgan" not loaded.')
        print('enhance with', thread_data.gfpgan_file, 'on', thread_data.model_gfpgan.device, 'precision', thread_data.precision)
        _, _, output = thread_data.model_gfpgan.enhance(image_data[:,:,::-1], has_aligned=False, only_center_face=False, paste_back=True)
        image_data = output[:,:,::-1]

    if filter_name == 'real_esrgan':
        if isinstance(image_data, torch.Tensor):
            image_data.to(thread_data.device)

        if model_path is not None and model_path != thread_data.real_esrgan_file:
            thread_data.real_esrgan_file = model_path
            load_model_real_esrgan()
        elif not thread_data.model_real_esrgan:
            load_model_real_esrgan()
        if thread_data.model_real_esrgan is None: raise Exception('Model "gfpgan" not loaded.')
        print('enhance with', thread_data.real_esrgan_file, 'on', thread_data.model_real_esrgan.device, 'precision', thread_data.precision)
        output, _ = thread_data.model_real_esrgan.enhance(image_data[:,:,::-1])
        image_data = output[:,:,::-1]

    return image_data

def mk_img(req: Request):
    try:
        yield from do_mk_img(req)
    except Exception as e:
        print(traceback.format_exc())

        if thread_data.device != 'cpu':
            thread_data.modelFS.to('cpu')
            thread_data.modelCS.to('cpu')
            thread_data.model.model1.to("cpu")
            thread_data.model.model2.to("cpu")

        gc() # Release from memory.
        yield json.dumps({
            "status": 'failed',
            "detail": str(e)
        })

def update_temp_img(req, x_samples):
    partial_images = []
    for i in range(req.num_outputs):
        x_sample_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
        x_sample = torch.clamp((x_sample_ddim + 1.0) / 2.0, min=0.0, max=1.0)
        x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
        x_sample = x_sample.astype(np.uint8)
        img = Image.fromarray(x_sample)
        buf = BytesIO()
        img.save(buf, format='JPEG')
        buf.seek(0)

        del img, x_sample, x_sample_ddim
        # don't delete x_samples, it is used in the code that called this callback

        thread_data.temp_images[str(req.session_id) + '/' + str(i)] = buf
        partial_images.append({'path': f'/image/tmp/{req.session_id}/{i}'})
    return partial_images

# Build and return the appropriate generator for do_mk_img
def get_image_progress_generator(req, extra_props=None):
    if not req.stream_progress_updates:
        def empty_callback(x_samples, i): return x_samples
        return empty_callback

    thread_data.partial_x_samples = None
    last_callback_time = -1
    def img_callback(x_samples, i):
        nonlocal last_callback_time

        thread_data.partial_x_samples = x_samples
        step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
        last_callback_time = time.time()

        progress = {"step": i, "step_time": step_time}
        if extra_props is not None:
            progress.update(extra_props)

        if req.stream_image_progress and i % 5 == 0:
            progress['output'] = update_temp_img(req, x_samples)

        yield json.dumps(progress)

        if thread_data.stop_processing:
            raise UserInitiatedStop("User requested that we stop processing")
    return img_callback
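`get_image_progress_generator()` and `mk_img()` above yield JSON strings: per-step progress payloads, and finally either a 'succeeded' response or a 'failed' one. A hedged sketch of a client-side consumer of that stream follows; the `render_stream` iterable is hypothetical, since in the real app the chunks arrive over a streaming HTTP response.

    import json

    def consume(render_stream):
        for chunk in render_stream:
            update = json.loads(chunk)
            if "step" in update:
                print(f"step {update['step']} took {update.get('step_time', -1):.2f}s")
                for partial in update.get("output", []):
                    print("  preview available at", partial["path"])
            elif update.get("status") == "succeeded":
                return update["output"]          # list of {data, seed, path_abs} dicts
            elif update.get("status") == "failed":
                raise RuntimeError(update.get("detail", "render failed"))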
def do_mk_img(req: Request):
    thread_data.stop_processing = False

    res = Response()
    res.request = req
    res.images = []

    thread_data.temp_images.clear()

    # custom model support:
    #  the req.use_stable_diffusion_model needs to be a valid path
    #  to the ckpt file (without the extension).
    if not os.path.exists(req.use_stable_diffusion_model + '.ckpt'): raise FileNotFoundError(f'Cannot find {req.use_stable_diffusion_model}.ckpt')

    needs_model_reload = False
    if not thread_data.model or thread_data.ckpt_file != req.use_stable_diffusion_model or thread_data.vae_file != req.use_vae_model:
        thread_data.ckpt_file = req.use_stable_diffusion_model
        thread_data.vae_file = req.use_vae_model
        needs_model_reload = True

    if thread_data.device != 'cpu':
        if (thread_data.precision == 'autocast' and (req.use_full_precision or not thread_data.model_is_half)) or \
            (thread_data.precision == 'full' and not req.use_full_precision and not thread_data.force_full_precision):
            thread_data.precision = 'full' if req.use_full_precision else 'autocast'
            needs_model_reload = True

    if needs_model_reload:
        unload_models()
        unload_filters()
        load_model_ckpt()

    if thread_data.turbo != req.turbo:
        thread_data.turbo = req.turbo
        thread_data.model.turbo = req.turbo

    # Start by cleaning memory, loading and unloading things can leave memory allocated.
    gc()

    opt_prompt = req.prompt
    opt_seed = req.seed
    opt_n_iter = 1
    opt_C = 4
    opt_f = 8
    opt_ddim_eta = 0.0

    print(req, '\n device', torch.device(thread_data.device), "as", thread_data.device_name)
    print('\n\n Using precision:', thread_data.precision)

    seed_everything(opt_seed)

    batch_size = req.num_outputs
    prompt = opt_prompt
    assert prompt is not None
    data = [batch_size * [prompt]]

    if thread_data.precision == "autocast" and thread_data.device != "cpu":
        precision_scope = autocast
    else:
        precision_scope = nullcontext

    mask = None

    if req.init_image is None:
        handler = _txt2img

        init_latent = None
        t_enc = None
    else:
        handler = _img2img

        init_image = load_img(req.init_image, req.width, req.height)
        init_image = init_image.to(thread_data.device)

        if thread_data.device != "cpu" and thread_data.precision == "autocast":
            init_image = init_image.half()

        thread_data.modelFS.to(thread_data.device)

        init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
        init_latent = thread_data.modelFS.get_first_stage_encoding(thread_data.modelFS.encode_first_stage(init_image))  # move to latent space

        if req.mask is not None:
            mask = load_mask(req.mask, req.width, req.height, init_latent.shape[2], init_latent.shape[3], True).to(thread_data.device)
            mask = mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
            mask = repeat(mask, '1 ... -> b ...', b=batch_size)

            if thread_data.device != "cpu" and thread_data.precision == "autocast":
                mask = mask.half()

        # Send to CPU and wait until complete.
        wait_model_move_to(thread_data.modelFS, 'cpu')

        assert 0. <= req.prompt_strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_enc = int(req.prompt_strength * req.num_inference_steps)
        print(f"target t_enc is {t_enc} steps")

    if req.save_to_disk_path is not None:
        session_out_path = get_session_out_path(req.save_to_disk_path, req.session_id)
    else:
        session_out_path = None

    with torch.no_grad():
        for n in trange(opt_n_iter, desc="Sampling"):
            for prompts in tqdm(data, desc="data"):

                with precision_scope("cuda"):
                    if thread_data.reduced_memory:
                        thread_data.modelCS.to(thread_data.device)
                    uc = None
                    if req.guidance_scale != 1.0:
                        uc = thread_data.modelCS.get_learned_conditioning(batch_size * [req.negative_prompt])
                    if isinstance(prompts, tuple):
                        prompts = list(prompts)

                    subprompts, weights = split_weighted_subprompts(prompts[0])
                    if len(subprompts) > 1:
                        c = torch.zeros_like(uc)
                        totalWeight = sum(weights)
                        # normalize each "sub prompt" and add it
                        for i in range(len(subprompts)):
                            weight = weights[i]
                            # if not skip_normalize:
                            weight = weight / totalWeight
                            c = torch.add(c, thread_data.modelCS.get_learned_conditioning(subprompts[i]), alpha=weight)
                    else:
                        c = thread_data.modelCS.get_learned_conditioning(prompts)

                    if thread_data.reduced_memory:
                        thread_data.modelFS.to(thread_data.device)

                    n_steps = req.num_inference_steps if req.init_image is None else t_enc
                    img_callback = get_image_progress_generator(req, {"total_steps": n_steps})

                    # run the handler
                    try:
                        print('Running handler...')
                        if handler == _txt2img:
                            x_samples = _txt2img(req.width, req.height, req.num_outputs, req.num_inference_steps, req.guidance_scale, None, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, req.sampler)
                        else:
                            x_samples = _img2img(init_latent, t_enc, batch_size, req.guidance_scale, c, uc, req.num_inference_steps, opt_ddim_eta, opt_seed, img_callback, mask)

                        if req.stream_progress_updates:
                            yield from x_samples
                        if hasattr(thread_data, 'partial_x_samples'):
                            if thread_data.partial_x_samples is not None:
                                x_samples = thread_data.partial_x_samples
                            del thread_data.partial_x_samples
                    except UserInitiatedStop:
                        if not hasattr(thread_data, 'partial_x_samples'):
                            continue
                        if thread_data.partial_x_samples is None:
                            del thread_data.partial_x_samples
                            continue
                        x_samples = thread_data.partial_x_samples
                        del thread_data.partial_x_samples

                    print("decoding images")
                    img_data = [None] * batch_size
                    for i in range(batch_size):
                        x_samples_ddim = thread_data.modelFS.decode_first_stage(x_samples[i].unsqueeze(0))
                        x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c")
                        x_sample = x_sample.astype(np.uint8)
                        img_data[i] = x_sample
                    del x_samples, x_samples_ddim, x_sample

                    if thread_data.reduced_memory:
                        # Send to CPU and wait until complete.
                        wait_model_move_to(thread_data.modelFS, 'cpu')

                    print("saving images")
                    for i in range(batch_size):
                        img = Image.fromarray(img_data[i])
                        img_id = base64.b64encode(int(time.time()+i).to_bytes(8, 'big')).decode() # Generate unique ID based on time.
                        img_id = img_id.translate({43:None, 47:None, 61:None})[-8:] # Remove + / = and keep last 8 chars.

                        has_filters = (req.use_face_correction is not None and req.use_face_correction.startswith('GFPGAN')) or \
                                      (req.use_upscale is not None and req.use_upscale.startswith('RealESRGAN'))

                        return_orig_img = not has_filters or not req.show_only_filtered_image

                        if thread_data.stop_processing:
                            return_orig_img = True

                        if req.save_to_disk_path is not None:
                            if return_orig_img:
                                img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format)
                                save_image(img, img_out_path)
                            meta_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, 'txt')
                            save_metadata(meta_out_path, req, prompts[0], opt_seed)

                        if return_orig_img:
                            img_str = img_to_base64_str(img, req.output_format)
                            res_image_orig = ResponseImage(data=img_str, seed=opt_seed)
                            res.images.append(res_image_orig)

                            if req.save_to_disk_path is not None:
                                res_image_orig.path_abs = img_out_path
                        del img

                        if has_filters and not thread_data.stop_processing:
                            filters_applied = []
                            if req.use_face_correction:
                                img_data[i] = apply_filters('gfpgan', img_data[i], req.use_face_correction)
                                filters_applied.append(req.use_face_correction)
                            if req.use_upscale:
                                img_data[i] = apply_filters('real_esrgan', img_data[i], req.use_upscale)
                                filters_applied.append(req.use_upscale)
                            if (len(filters_applied) > 0):
                                filtered_image = Image.fromarray(img_data[i])
                                filtered_img_data = img_to_base64_str(filtered_image, req.output_format)
                                response_image = ResponseImage(data=filtered_img_data, seed=opt_seed)
                                res.images.append(response_image)
                                if req.save_to_disk_path is not None:
                                    filtered_img_out_path = get_base_path(req.save_to_disk_path, req.session_id, prompts[0], img_id, req.output_format, "_".join(filters_applied))
                                    save_image(filtered_image, filtered_img_out_path)
                                    response_image.path_abs = filtered_img_out_path
                                del filtered_image
                        # Filter Applied, move to next seed
                        opt_seed += 1

                    # if thread_data.reduced_memory:
                    #     unload_filters()
                    del img_data
                    gc()
                    if thread_data.device != 'cpu':
                        print(f'memory_final = {round(torch.cuda.memory_allocated(thread_data.device) / 1e6, 2)}Mb')

    print('Task completed')
    yield json.dumps(res.json())

def save_image(img, img_out_path):
    try:
        img.save(img_out_path)
    except:
        print('could not save the file', traceback.format_exc())

def save_metadata(meta_out_path, req, prompt, opt_seed):
    metadata = f'''{prompt}
Width: {req.width}
Height: {req.height}
Seed: {opt_seed}
Steps: {req.num_inference_steps}
Guidance Scale: {req.guidance_scale}
Prompt Strength: {req.prompt_strength}
Use Face Correction: {req.use_face_correction}
Use Upscaling: {req.use_upscale}
Sampler: {req.sampler}
Negative Prompt: {req.negative_prompt}
Stable Diffusion model: {req.use_stable_diffusion_model + '.ckpt'}
VAE model: {req.use_vae_model}
'''
    try:
        with open(meta_out_path, 'w', encoding='utf-8') as f:
            f.write(metadata)
    except:
        print('could not save the file', traceback.format_exc())

def _txt2img(opt_W, opt_H, opt_n_samples, opt_ddim_steps, opt_scale, start_code, opt_C, opt_f, opt_ddim_eta, c, uc, opt_seed, img_callback, mask, sampler_name):
    shape = [opt_n_samples, opt_C, opt_H // opt_f, opt_W // opt_f]

    # Send to CPU and wait until complete.
    wait_model_move_to(thread_data.modelCS, 'cpu')

    if sampler_name == 'ddim':
        thread_data.model.make_schedule(ddim_num_steps=opt_ddim_steps, ddim_eta=opt_ddim_eta, verbose=False)

    samples_ddim = thread_data.model.sample(
        S=opt_ddim_steps,
        conditioning=c,
        seed=opt_seed,
        shape=shape,
        verbose=False,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        eta=opt_ddim_eta,
        x_T=start_code,
        img_callback=img_callback,
        mask=mask,
        sampler = sampler_name,
    )
    yield from samples_ddim

def _img2img(init_latent, t_enc, batch_size, opt_scale, c, uc, opt_ddim_steps, opt_ddim_eta, opt_seed, img_callback, mask):
    # encode (scaled latent)
    z_enc = thread_data.model.stochastic_encode(
        init_latent,
        torch.tensor([t_enc] * batch_size).to(thread_data.device),
        opt_seed,
        opt_ddim_eta,
        opt_ddim_steps,
    )
    x_T = None if mask is None else init_latent

    # decode it
    samples_ddim = thread_data.model.sample(
        t_enc,
        c,
        z_enc,
        unconditional_guidance_scale=opt_scale,
        unconditional_conditioning=uc,
        img_callback=img_callback,
        mask=mask,
        x_T=x_T,
        sampler = 'ddim'
    )
    yield from samples_ddim

def gc():
    gc_collect()
    if thread_data.device == 'cpu':
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

# internal

def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

def load_model_from_config(ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    return sd

# utils
class UserInitiatedStop(Exception):
    pass

def load_img(img_str, w0, h0):
    image = base64_str_to_img(img_str).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from base64")
    if h0 is not None and w0 is not None:
        h, w = h0, w0

    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
    image = image.resize((w, h), resample=Image.Resampling.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.*image - 1.
|
|
||||||
|
|
||||||
def load_mask(mask_str, h0, w0, newH, newW, invert=False):
|
|
||||||
image = base64_str_to_img(mask_str).convert("RGB")
|
|
||||||
w, h = image.size
|
|
||||||
print(f"loaded input mask of size ({w}, {h})")
|
|
||||||
|
|
||||||
if invert:
|
|
||||||
print("inverted")
|
|
||||||
image = ImageOps.invert(image)
|
|
||||||
# where_0, where_1 = np.where(image == 0), np.where(image == 255)
|
|
||||||
# image[where_0], image[where_1] = 255, 0
|
|
||||||
|
|
||||||
if h0 is not None and w0 is not None:
|
|
||||||
h, w = h0, w0
|
|
||||||
|
|
||||||
w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64
|
|
||||||
|
|
||||||
print(f"New mask size ({w}, {h})")
|
|
||||||
image = image.resize((newW, newH), resample=Image.Resampling.LANCZOS)
|
|
||||||
image = np.array(image)
|
|
||||||
|
|
||||||
image = image.astype(np.float32) / 255.0
|
|
||||||
image = image[None].transpose(0, 3, 1, 2)
|
|
||||||
image = torch.from_numpy(image)
|
|
||||||
return image
|
|
||||||
|
|
||||||
# https://stackoverflow.com/a/61114178
|
|
||||||
def img_to_base64_str(img, output_format="PNG"):
|
|
||||||
buffered = BytesIO()
|
|
||||||
img.save(buffered, format=output_format)
|
|
||||||
buffered.seek(0)
|
|
||||||
img_byte = buffered.getvalue()
|
|
||||||
mime_type = "image/png" if output_format.lower() == "png" else "image/jpeg"
|
|
||||||
img_str = f"data:{mime_type};base64," + base64.b64encode(img_byte).decode()
|
|
||||||
return img_str
|
|
||||||
|
|
||||||
def base64_str_to_buffer(img_str):
|
|
||||||
mime_type = "image/png" if img_str.startswith("data:image/png;") else "image/jpeg"
|
|
||||||
img_str = img_str[len(f"data:{mime_type};base64,"):]
|
|
||||||
data = base64.b64decode(img_str)
|
|
||||||
buffered = BytesIO(data)
|
|
||||||
return buffered
|
|
||||||
|
|
||||||
def base64_str_to_img(img_str):
|
|
||||||
buffered = base64_str_to_buffer(img_str)
|
|
||||||
img = Image.open(buffered)
|
|
||||||
return img
|
|
||||||
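
# Illustrative round-trip sketch (not part of the original file): img_to_base64_str()
# produces a "data:<mime>;base64,..." URL and base64_str_to_img() reverses it.
# Assumes Pillow is available; the variable names here are examples only.
#
#   from PIL import Image
#   im = Image.new("RGB", (64, 64), color=(255, 0, 0))
#   data_url = img_to_base64_str(im, output_format="PNG")   # "data:image/png;base64,..."
#   restored = base64_str_to_img(data_url)
#   assert restored.size == (64, 64)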
@@ -1,548 +0,0 @@
"""task_manager.py: manage tasks dispatching and render threads.
|
|
||||||
Notes:
|
|
||||||
render_threads should be the only hard reference held by the manager to the threads.
|
|
||||||
Use weak_thread_data to store all other data using weak keys.
|
|
||||||
This will allow for garbage collection after the thread dies.
|
|
||||||
"""
|
|
||||||
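# Illustrative sketch of the note above (assumed, not from the original file): entries
# keyed by a thread object in a WeakKeyDictionary disappear once the last hard reference
# to that thread is dropped, so per-thread bookkeeping does not outlive the thread.
#
#   import threading, weakref, gc as _gc
#   data = weakref.WeakKeyDictionary()
#   t = threading.Thread(target=lambda: None)
#   data[t] = {'device': 'cuda:0', 'alive': True}
#   t.start(); t.join()
#   del t            # drop the only hard reference
#   _gc.collect()
#   assert len(data) == 0   # the entry was collected along with the thread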
import json
import traceback

TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout

import torch
import queue, threading, time, weakref
from typing import Any, Generator, Hashable, Optional, Union

from pydantic import BaseModel
from sd_internal import Request, Response, runtime, device_manager

THREAD_NAME_PREFIX = 'Runtime-Render/'
ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.

DEVICE_START_TIMEOUT = 60 # seconds - Maximum time to wait for a render device to init.

class SymbolClass(type): # Print nicely formatted Symbol names.
    def __repr__(self): return self.__qualname__
    def __str__(self): return self.__name__
class Symbol(metaclass=SymbolClass): pass

class ServerStates:
    class Init(Symbol): pass
    class LoadingModel(Symbol): pass
    class Online(Symbol): pass
    class Rendering(Symbol): pass
    class Unavailable(Symbol): pass

class RenderTask(): # Task with output queue and completion lock.
    def __init__(self, req: Request):
        self.request: Request = req # Initial Request
        self.response: Any = None # Copy of the last response
        self.render_device = None # Select the task affinity. (Not used to change active devices).
        self.temp_images:list = [None] * req.num_outputs * (1 if req.show_only_filtered_image else 2)
        self.error: Exception = None
        self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
        self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
    async def read_buffer_generator(self):
        try:
            while not self.buffer_queue.empty():
                res = self.buffer_queue.get(block=False)
                self.buffer_queue.task_done()
                yield res
        except queue.Empty as e: yield

# defaults from https://huggingface.co/blog/stable_diffusion
class ImageRequest(BaseModel):
    session_id: str = "session"
    prompt: str = ""
    negative_prompt: str = ""
    init_image: str = None # base64
    mask: str = None # base64
    num_outputs: int = 1
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512
    seed: int = 42
    prompt_strength: float = 0.8
    sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
    # allow_nsfw: bool = False
    save_to_disk_path: str = None
    turbo: bool = True
    use_cpu: bool = False ##TODO Remove after UI and plugins transition.
    render_device: str = None # Select the task affinity. (Not used to change active devices).
    use_full_precision: bool = False
    use_face_correction: str = None # or "GFPGANv1.3"
    use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
    use_stable_diffusion_model: str = "sd-v1-4"
    use_vae_model: str = None
    show_only_filtered_image: bool = False
    output_format: str = "jpeg" # or "png"

    stream_progress_updates: bool = False
    stream_image_progress: bool = False

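# Illustrative request body (assumed values, not from the original file): the fields map
# one-to-one onto ImageRequest above, so a minimal JSON payload for POST /render could
# look like this; omitted fields fall back to the defaults declared in the model.
#
#   {
#     "session_id": "demo-session",
#     "prompt": "a photograph of an astronaut riding a horse",
#     "num_outputs": 1,
#     "num_inference_steps": 50,
#     "guidance_scale": 7.5,
#     "width": 512,
#     "height": 512,
#     "seed": 42,
#     "sampler": "plms",
#     "output_format": "jpeg",
#     "stream_progress_updates": true
#   }
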
class FilterRequest(BaseModel):
    session_id: str = "session"
    model: str = None
    name: str = ""
    init_image: str = None # base64
    width: int = 512
    height: int = 512
    save_to_disk_path: str = None
    turbo: bool = True
    render_device: str = None
    use_full_precision: bool = False
    output_format: str = "jpeg" # or "png"

# Temporary cache that allows querying task results for a short time after they are completed.
class TaskCache():
    def __init__(self):
        self._base = dict()
        self._lock: threading.Lock = threading.Lock()
    def _get_ttl_time(self, ttl: int) -> int:
        return int(time.time()) + ttl
    def _is_expired(self, timestamp: int) -> bool:
        return int(time.time()) >= timestamp
    def clean(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.clean' + ERR_LOCK_FAILED)
        try:
            # Create a list of expired keys to delete
            to_delete = []
            for key in self._base:
                ttl, _ = self._base[key]
                if self._is_expired(ttl):
                    to_delete.append(key)
            # Remove Items
            for key in to_delete:
                del self._base[key]
                print(f'Session {key} expired. Data removed.')
        finally:
            self._lock.release()
    def clear(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.clear' + ERR_LOCK_FAILED)
        try: self._base.clear()
        finally: self._lock.release()
    def delete(self, key: Hashable) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.delete' + ERR_LOCK_FAILED)
        try:
            if key not in self._base:
                return False
            del self._base[key]
            return True
        finally:
            self._lock.release()
    def keep(self, key: Hashable, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.keep' + ERR_LOCK_FAILED)
        try:
            if key in self._base:
                _, value = self._base.get(key)
                self._base[key] = (self._get_ttl_time(ttl), value)
                return True
            return False
        finally:
            self._lock.release()
    def put(self, key: Hashable, value: Any, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.put' + ERR_LOCK_FAILED)
        try:
            self._base[key] = (
                self._get_ttl_time(ttl), value
            )
        except Exception as e:
            print(str(e))
            print(traceback.format_exc())
            return False
        else:
            return True
        finally:
            self._lock.release()
    def tryGet(self, key: Hashable) -> Any:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('TaskCache.tryGet' + ERR_LOCK_FAILED)
        try:
            ttl, value = self._base.get(key, (None, None))
            if ttl is not None and self._is_expired(ttl):
                print(f'Session {key} expired. Discarding data.')
                del self._base[key]
                return None
            return value
        finally:
            self._lock.release()

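# Illustrative usage sketch (assumed, not from the original file): TaskCache stores
# (expiry_timestamp, value) pairs behind a lock; keep() refreshes the TTL and tryGet()
# drops entries that have already expired.
#
#   cache = TaskCache()
#   cache.put('session-1', {'status': 'pending'}, ttl=TASK_TTL)
#   cache.keep('session-1', TASK_TTL)        # refresh the expiry time
#   value = cache.tryGet('session-1')        # the stored value, or None if expired
#   cache.clean()                            # evict any expired sessions
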
manager_lock = threading.RLock()
render_threads = []
current_state = ServerStates.Init
current_state_error:Exception = None
current_model_path = None
current_vae_path = None
tasks_queue = []
task_cache = TaskCache()
default_model_to_load = None
default_vae_to_load = None
weak_thread_data = weakref.WeakKeyDictionary()

def preload_model(ckpt_file_path=None, vae_file_path=None):
    global current_state, current_state_error, current_model_path, current_vae_path
    if ckpt_file_path == None:
        ckpt_file_path = default_model_to_load
    if vae_file_path == None:
        vae_file_path = default_vae_to_load
    if ckpt_file_path == current_model_path and vae_file_path == current_vae_path:
        return
    current_state = ServerStates.LoadingModel
    try:
        from . import runtime
        runtime.thread_data.ckpt_file = ckpt_file_path
        runtime.thread_data.vae_file = vae_file_path
        runtime.load_model_ckpt()
        current_model_path = ckpt_file_path
        current_vae_path = vae_file_path
        current_state_error = None
        current_state = ServerStates.Online
    except Exception as e:
        current_model_path = None
        current_vae_path = None
        current_state_error = e
        current_state = ServerStates.Unavailable
        print(traceback.format_exc())

def thread_get_next_task():
    from . import runtime
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        print('Render thread on device', runtime.thread_data.device, 'failed to acquire manager lock.')
        return None
    if len(tasks_queue) <= 0:
        manager_lock.release()
        return None
    task = None
    try:  # Select a render task.
        for queued_task in tasks_queue:
            if queued_task.request.use_face_correction and runtime.thread_data.device == 'cpu' and is_alive() == 1:
                queued_task.error = Exception('The CPU cannot be used to run this task currently. Please remove "Fix incorrect faces" from Image Settings and try again.')
                task = queued_task
                break
            if queued_task.render_device and runtime.thread_data.device != queued_task.render_device:
                # Is asking for a specific render device.
                if is_alive(queued_task.render_device) > 0:
                    continue  # requested device alive, skip current one.
                else:
                    # Requested device is not active, return error to UI.
                    queued_task.error = Exception(queued_task.render_device + ' is not currently active.')
                    task = queued_task
                    break
            if not queued_task.render_device and runtime.thread_data.device == 'cpu' and is_alive() > 1:
                # Not asking for any specific device; the CPU wants to grab the task but other render devices are alive.
                continue  # Skip Tasks, don't run on CPU unless there is nothing else or the user asked for it.
            task = queued_task
            break
        if task is not None:
            del tasks_queue[tasks_queue.index(task)]
        return task
    finally:
        manager_lock.release()

def thread_render(device):
    global current_state, current_state_error, current_model_path, current_vae_path
    from . import runtime
    try:
        runtime.thread_init(device)
    except Exception as e:
        print(traceback.format_exc())
        weak_thread_data[threading.current_thread()] = {
            'error': e
        }
        return
    weak_thread_data[threading.current_thread()] = {
        'device': runtime.thread_data.device,
        'device_name': runtime.thread_data.device_name,
        'alive': True
    }
    if runtime.thread_data.device != 'cpu' or is_alive() == 1:
        preload_model()
        current_state = ServerStates.Online
    while True:
        task_cache.clean()
        if not weak_thread_data[threading.current_thread()]['alive']:
            print(f'Shutting down thread for device {runtime.thread_data.device}')
            runtime.unload_models()
            runtime.unload_filters()
            return
        if isinstance(current_state_error, SystemExit):
            current_state = ServerStates.Unavailable
            return
        task = thread_get_next_task()
        if task is None:
            time.sleep(1)
            continue
        if task.error is not None:
            print(task.error)
            task.response = {"status": 'failed', "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        if current_state_error:
            task.error = current_state_error
            task.response = {"status": 'failed', "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        print(f'Session {task.request.session_id} starting task {id(task)} on {runtime.thread_data.device_name}')
        if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
        try:
            if runtime.thread_data.device == 'cpu' and is_alive() > 1:
                # CPU is not the only device. Keep track of active time to unload resources later.
                runtime.thread_data.lastActive = time.time()
            # Open data generator.
            res = runtime.mk_img(task.request)
            if current_model_path == task.request.use_stable_diffusion_model:
                current_state = ServerStates.Rendering
            else:
                current_state = ServerStates.LoadingModel
            # Start reading from generator.
            dataQueue = None
            if task.request.stream_progress_updates:
                dataQueue = task.buffer_queue
            for result in res:
                if current_state == ServerStates.LoadingModel:
                    current_state = ServerStates.Rendering
                    current_model_path = task.request.use_stable_diffusion_model
                    current_vae_path = task.request.use_vae_model
                if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
                    runtime.thread_data.stop_processing = True
                    if isinstance(current_state_error, StopAsyncIteration):
                        task.error = current_state_error
                        current_state_error = None
                        print(f'Session {task.request.session_id} sent cancel signal for task {id(task)}')
                if dataQueue:
                    dataQueue.put(result)
                if isinstance(result, str):
                    result = json.loads(result)
                task.response = result
                if 'output' in result:
                    for out_obj in result['output']:
                        if 'path' in out_obj:
                            img_id = out_obj['path'][out_obj['path'].rindex('/') + 1:]
                            task.temp_images[int(img_id)] = runtime.thread_data.temp_images[out_obj['path'][11:]]
                        elif 'data' in out_obj:
                            buf = runtime.base64_str_to_buffer(out_obj['data'])
                            task.temp_images[result['output'].index(out_obj)] = buf
                # Before looping back to the generator, mark cache as still alive.
                task_cache.keep(task.request.session_id, TASK_TTL)
        except Exception as e:
            task.error = e
            print(traceback.format_exc())
            continue
        finally:
            # Task completed
            task.lock.release()
        task_cache.keep(task.request.session_id, TASK_TTL)
        if isinstance(task.error, StopAsyncIteration):
            print(f'Session {task.request.session_id} task {id(task)} cancelled!')
        elif task.error is not None:
            print(f'Session {task.request.session_id} task {id(task)} failed!')
        else:
            print(f'Session {task.request.session_id} task {id(task)} completed by {runtime.thread_data.device_name}.')
        current_state = ServerStates.Online

def get_cached_task(session_id:str, update_ttl:bool=False):
    # By calling keep before tryGet, the entry won't be discarded if it has just expired.
    if update_ttl and not task_cache.keep(session_id, TASK_TTL):
        # Failed to keep task, already gone.
        return None
    return task_cache.tryGet(session_id)

def get_devices():
    devices = {
        'all': {},
        'active': {},
    }

    def get_device_info(device):
        if device == 'cpu':
            return {'name': device_manager.get_processor_name()}

        mem_free, mem_total = torch.cuda.mem_get_info(device)
        mem_free /= float(10**9)
        mem_total /= float(10**9)

        return {
            'name': torch.cuda.get_device_name(device),
            'mem_free': mem_free,
            'mem_total': mem_total,
        }

    # list the compatible devices
    gpu_count = torch.cuda.device_count()
    for device in range(gpu_count):
        device = f'cuda:{device}'
        if not device_manager.is_device_compatible(device):
            continue

        devices['all'].update({device: get_device_info(device)})

    devices['all'].update({'cpu': get_device_info('cpu')})

    # list the activated devices
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('get_devices' + ERR_LOCK_FAILED)
    try:
        for rthread in render_threads:
            if not rthread.is_alive():
                continue
            weak_data = weak_thread_data.get(rthread)
            if not weak_data or not 'device' in weak_data or not 'device_name' in weak_data:
                continue
            device = weak_data['device']
            devices['active'].update({device: get_device_info(device)})
    finally:
        manager_lock.release()

    return devices

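# Illustrative shape of the value returned by get_devices() above (device names and
# numbers are made up): 'all' lists every compatible device, 'active' only those with a
# live render thread; GPU entries also report free/total memory in GB (mem_get_info / 10**9).
#
#   {
#       'all': {
#           'cuda:0': {'name': 'Example GPU', 'mem_free': 10.2, 'mem_total': 12.9},
#           'cpu': {'name': 'Example CPU'},
#       },
#       'active': {
#           'cuda:0': {'name': 'Example GPU', 'mem_free': 10.2, 'mem_total': 12.9},
#       },
#   }
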
def is_alive(device=None):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('is_alive' + ERR_LOCK_FAILED)
    nbr_alive = 0
    try:
        for rthread in render_threads:
            if device is not None:
                weak_data = weak_thread_data.get(rthread)
                if weak_data is None or not 'device' in weak_data or weak_data['device'] is None:
                    continue
                thread_device = weak_data['device']
                if thread_device != device:
                    continue
            if rthread.is_alive():
                nbr_alive += 1
        return nbr_alive
    finally:
        manager_lock.release()

def start_render_thread(device):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('start_render_thread' + ERR_LOCK_FAILED)
    print('Start new Rendering Thread on device', device)
    try:
        rthread = threading.Thread(target=thread_render, kwargs={'device': device})
        rthread.daemon = True
        rthread.name = THREAD_NAME_PREFIX + device
        rthread.start()
        render_threads.append(rthread)
    finally:
        manager_lock.release()
    timeout = DEVICE_START_TIMEOUT
    while not rthread.is_alive() or not rthread in weak_thread_data or not 'device' in weak_thread_data[rthread]:
        if rthread in weak_thread_data and 'error' in weak_thread_data[rthread]:
            print(rthread, device, 'error:', weak_thread_data[rthread]['error'])
            return False
        if timeout <= 0:
            return False
        timeout -= 1
        time.sleep(1)
    return True

def stop_render_thread(device):
    try:
        device_manager.validate_device_id(device, log_prefix='stop_render_thread')
    except:
        print(traceback.format_exc())
        return False

    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('stop_render_thread' + ERR_LOCK_FAILED)
    print('Stopping Rendering Thread on device', device)

    try:
        thread_to_remove = None
        for rthread in render_threads:
            weak_data = weak_thread_data.get(rthread)
            if weak_data is None or not 'device' in weak_data or weak_data['device'] is None:
                continue
            thread_device = weak_data['device']
            if thread_device == device:
                weak_data['alive'] = False
                thread_to_remove = rthread
                break
        if thread_to_remove is not None:
            render_threads.remove(thread_to_remove)
            return True
    finally:
        manager_lock.release()

    return False

def update_render_threads(render_devices, active_devices):
    devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
    print('devices_to_start', devices_to_start)
    print('devices_to_stop', devices_to_stop)

    for device in devices_to_stop:
        if is_alive(device) <= 0:
            print(device, 'is not alive')
            continue
        if not stop_render_thread(device):
            print(device, 'could not stop render thread')

    for device in devices_to_start:
        if is_alive(device) >= 1:
            print(device, 'already registered.')
            continue
        if not start_render_thread(device):
            print(device, 'failed to start.')

    if is_alive() <= 0: # No running devices, probably invalid user config.
        raise EnvironmentError('ERROR: No active render devices! Please verify the "render_devices" value in config.json')

    print('active devices', get_devices()['active'])

def shutdown_event(): # Signal render thread to close on shutdown
    global current_state_error
    current_state_error = SystemExit('Application shutting down.')

def render(req : ImageRequest):
    if is_alive() <= 0: # Render thread is dead
        raise ChildProcessError('Rendering thread has died.')
    # Alive, check if task in cache
    task = task_cache.tryGet(req.session_id)
    if task and not task.response and not task.error and not task.lock.locked():
        # Unstarted task pending, deny queueing more than one.
        raise ConnectionRefusedError(f'Session {req.session_id} has an already pending task.')
    #
    from . import runtime
    r = Request()
    r.session_id = req.session_id
    r.prompt = req.prompt
    r.negative_prompt = req.negative_prompt
    r.init_image = req.init_image
    r.mask = req.mask
    r.num_outputs = req.num_outputs
    r.num_inference_steps = req.num_inference_steps
    r.guidance_scale = req.guidance_scale
    r.width = req.width
    r.height = req.height
    r.seed = req.seed
    r.prompt_strength = req.prompt_strength
    r.sampler = req.sampler
    # r.allow_nsfw = req.allow_nsfw
    r.turbo = req.turbo
    r.use_full_precision = req.use_full_precision
    r.save_to_disk_path = req.save_to_disk_path
    r.use_upscale = req.use_upscale
    r.use_face_correction = req.use_face_correction
    r.use_stable_diffusion_model = req.use_stable_diffusion_model
    r.use_vae_model = req.use_vae_model
    r.show_only_filtered_image = req.show_only_filtered_image
    r.output_format = req.output_format

    r.stream_progress_updates = True # the underlying implementation only supports streaming
    r.stream_image_progress = req.stream_image_progress

    if not req.stream_progress_updates:
        r.stream_image_progress = False

    new_task = RenderTask(r)

    if task_cache.put(r.session_id, new_task, TASK_TTL):
        # Use twice the normal timeout for adding user requests.
        # Tries to force task_cache.put to fail before tasks_queue.put would.
        if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
            try:
                tasks_queue.append(new_task)
                return new_task
            finally:
                manager_lock.release()
    raise RuntimeError('Failed to add task to cache.')
ui/server.py
@@ -1,402 +0,0 @@
"""server.py: FastAPI SD-UI Web Host.
|
|
||||||
Notes:
|
|
||||||
async endpoints always run on the main thread. Without they run on the thread pool.
|
|
||||||
"""
|
|
||||||
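# Minimal sketch of the note above (illustrative names, not part of this file): FastAPI
# runs `async def` endpoints on the event loop, while plain `def` endpoints are dispatched
# to a worker thread pool, so blocking work belongs in the latter.
#
#   from fastapi import FastAPI
#   import threading, time
#
#   demo = FastAPI()
#
#   @demo.get('/async-endpoint')
#   async def on_event_loop():
#       return {'thread': threading.current_thread().name}   # event loop thread
#
#   @demo.get('/sync-endpoint')
#   def on_thread_pool():
#       time.sleep(1)   # blocking call is fine here; it runs in the thread pool
#       return {'thread': threading.current_thread().name}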
import json
import traceback

import sys
import os

SD_DIR = os.getcwd()
print('started in ', SD_DIR)

SD_UI_DIR = os.getenv('SD_UI_PATH', None)
sys.path.append(os.path.dirname(SD_UI_DIR))

CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, '..', 'scripts'))
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'models'))
UI_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, '..', 'plugins', 'ui'))

OUTPUT_DIRNAME = "Stable Diffusion UI" # in the user's home folder
TASK_TTL = 15 * 60 # Discard last session's task timeout
APP_CONFIG_DEFAULTS = {
    # auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
    'render_devices': 'auto', # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
    'update_branch': 'main',
    'ui': {
        'open_browser_on_start': True,
    },
}
APP_CONFIG_DEFAULT_MODELS = [
    # needed to support the legacy installations
    'custom-model', # Check if user has a custom model, use it first.
    'sd-v1-4', # Default fallback.
]

from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
import logging
#import queue, threading, time
from typing import Any, Generator, Hashable, List, Optional, Union

from sd_internal import Request, Response, task_manager

app = FastAPI()

modifiers_cache = None
outpath = os.path.join(os.path.expanduser("~"), OUTPUT_DIRNAME)

os.makedirs(UI_PLUGINS_DIR, exist_ok=True)

# don't show access log entries for URLs that start with the given prefix
ACCESS_LOG_SUPPRESS_PATH_PREFIXES = ['/ping', '/image', '/modifier-thumbnails']

NOCACHE_HEADERS={"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}

app.mount('/media', StaticFiles(directory=os.path.join(SD_UI_DIR, 'media')), name="media")
app.mount('/plugins', StaticFiles(directory=UI_PLUGINS_DIR), name="plugins")

def getConfig(default_val=APP_CONFIG_DEFAULTS):
    try:
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
        if not os.path.exists(config_json_path):
            return default_val
        with open(config_json_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        print(str(e))
        print(traceback.format_exc())
        return default_val

def setConfig(config):
    try: # config.json
        config_json_path = os.path.join(CONFIG_DIR, 'config.json')
        with open(config_json_path, 'w', encoding='utf-8') as f:
            json.dump(config, f)
    except:
        print(traceback.format_exc())

    try: # config.bat
        config_bat_path = os.path.join(CONFIG_DIR, 'config.bat')
        config_bat = []

        if 'update_branch' in config:
            config_bat.append(f"@set update_branch={config['update_branch']}")
        if os.getenv('SD_UI_BIND_PORT') is not None:
            config_bat.append(f"@set SD_UI_BIND_PORT={os.getenv('SD_UI_BIND_PORT')}")
        if os.getenv('SD_UI_BIND_IP') is not None:
            config_bat.append(f"@set SD_UI_BIND_IP={os.getenv('SD_UI_BIND_IP')}")

        if len(config_bat) > 0:
            with open(config_bat_path, 'w', encoding='utf-8') as f:
                f.write('\r\n'.join(config_bat))
    except:
        print(traceback.format_exc())

    try: # config.sh
        config_sh_path = os.path.join(CONFIG_DIR, 'config.sh')
        config_sh = ['#!/bin/bash']

        if 'update_branch' in config:
            config_sh.append(f"export update_branch={config['update_branch']}")
        if os.getenv('SD_UI_BIND_PORT') is not None:
            config_sh.append(f"export SD_UI_BIND_PORT={os.getenv('SD_UI_BIND_PORT')}")
        if os.getenv('SD_UI_BIND_IP') is not None:
            config_sh.append(f"export SD_UI_BIND_IP={os.getenv('SD_UI_BIND_IP')}")

        if len(config_sh) > 1:
            with open(config_sh_path, 'w', encoding='utf-8') as f:
                f.write('\n'.join(config_sh))
    except:
        print(traceback.format_exc())

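# Illustrative config.json (assumed values, not from the original file): getConfig() and
# setConfig() above read and write a file of this shape in CONFIG_DIR; the 'model' section
# is what save_model_to_config() further below maintains.
#
#   {
#     "render_devices": "auto",
#     "update_branch": "main",
#     "ui": { "open_browser_on_start": true },
#     "model": { "stable-diffusion": "sd-v1-4" }
#   }
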
def resolve_model_to_use(model_name:str, model_type:str, model_dir:str, model_extensions:list, default_models=[]):
    model_dirs = [os.path.join(MODELS_DIR, model_dir), SD_DIR]
    if not model_name: # When None try user configured model.
        config = getConfig()
        if 'model' in config and model_type in config['model']:
            model_name = config['model'][model_type]
    if model_name:
        # Check models directory
        models_dir_path = os.path.join(MODELS_DIR, model_dir, model_name)
        for model_extension in model_extensions:
            if os.path.exists(models_dir_path + model_extension):
                return models_dir_path
            if os.path.exists(model_name + model_extension):
                # Direct Path to file
                model_name = os.path.abspath(model_name)
                return model_name
    # Default locations
    if model_name in default_models:
        default_model_path = os.path.join(SD_DIR, model_name)
        for model_extension in model_extensions:
            if os.path.exists(default_model_path + model_extension):
                return default_model_path
    # Can't find requested model, check the default paths.
    for default_model in default_models:
        for model_dir in model_dirs:
            default_model_path = os.path.join(model_dir, default_model)
            for model_extension in model_extensions:
                if os.path.exists(default_model_path + model_extension):
                    if model_name is not None:
                        print(f'Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}')
                    return default_model_path
    raise Exception('No valid models found.')

def resolve_ckpt_to_use(model_name:str=None):
    return resolve_model_to_use(model_name, model_type='stable-diffusion', model_dir='stable-diffusion', model_extensions=['.ckpt'], default_models=APP_CONFIG_DEFAULT_MODELS)

def resolve_vae_to_use(model_name:str=None):
    try:
        return resolve_model_to_use(model_name, model_type='vae', model_dir='vae', model_extensions=['.vae.pt', '.ckpt'], default_models=[])
    except:
        return None

class SetAppConfigRequest(BaseModel):
    update_branch: str = None
    render_devices: Union[List[str], List[int], str, int] = None
    model_vae: str = None
    ui_open_browser_on_start: bool = None

@app.post('/app_config')
async def setAppConfig(req : SetAppConfigRequest):
    config = getConfig()
    if req.update_branch is not None:
        config['update_branch'] = req.update_branch
    if req.render_devices is not None:
        update_render_devices_in_config(config, req.render_devices)
    if req.ui_open_browser_on_start is not None:
        if 'ui' not in config:
            config['ui'] = {}
        config['ui']['open_browser_on_start'] = req.ui_open_browser_on_start
    try:
        setConfig(config)

        if req.render_devices:
            update_render_threads()

        return JSONResponse({'status': 'OK'}, headers=NOCACHE_HEADERS)
    except Exception as e:
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))

def getModels():
    models = {
        'active': {
            'stable-diffusion': 'sd-v1-4',
            'vae': '',
        },
        'options': {
            'stable-diffusion': ['sd-v1-4'],
            'vae': [],
        },
    }

    def listModels(models_dirname, model_type, model_extensions):
        models_dir = os.path.join(MODELS_DIR, models_dirname)
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)

        for file in os.listdir(models_dir):
            for model_extension in model_extensions:
                if file.endswith(model_extension):
                    model_name = file[:-len(model_extension)]
                    models['options'][model_type].append(model_name)

        models['options'][model_type] = [*set(models['options'][model_type])] # remove duplicates
        models['options'][model_type].sort()

    # custom models
    listModels(models_dirname='stable-diffusion', model_type='stable-diffusion', model_extensions=['.ckpt'])
    listModels(models_dirname='vae', model_type='vae', model_extensions=['.vae.pt', '.ckpt'])

    # legacy
    custom_weight_path = os.path.join(SD_DIR, 'custom-model.ckpt')
    if os.path.exists(custom_weight_path):
        models['options']['stable-diffusion'].append('custom-model')

    return models

def getUIPlugins():
    plugins = []

    for file in os.listdir(UI_PLUGINS_DIR):
        if file.endswith('.plugin.js'):
            plugins.append(f'/plugins/{file}')

    return plugins

@app.get('/get/{key:path}')
def read_web_data(key:str=None):
    if not key: # /get without parameters, stable-diffusion easter egg.
        raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot
    elif key == 'app_config':
        config = getConfig(default_val=None)
        if config is None:
            config = APP_CONFIG_DEFAULTS
        return JSONResponse(config, headers=NOCACHE_HEADERS)
    elif key == 'devices':
        config = getConfig()
        devices = task_manager.get_devices()
        devices['config'] = config.get('render_devices', "auto")
        return JSONResponse(devices, headers=NOCACHE_HEADERS)
    elif key == 'models':
        return JSONResponse(getModels(), headers=NOCACHE_HEADERS)
    elif key == 'modifiers': return FileResponse(os.path.join(SD_UI_DIR, 'modifiers.json'), headers=NOCACHE_HEADERS)
    elif key == 'output_dir': return JSONResponse({ 'output_dir': outpath }, headers=NOCACHE_HEADERS)
    elif key == 'ui_plugins': return JSONResponse(getUIPlugins(), headers=NOCACHE_HEADERS)
    else:
        raise HTTPException(status_code=404, detail=f'Request for unknown {key}') # HTTP404 Not Found

@app.get('/ping') # Get server and optionally session status.
def ping(session_id:str=None):
    if task_manager.is_alive() <= 0: # Check that render threads are alive.
        if task_manager.current_state_error: raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
        raise HTTPException(status_code=500, detail='Render thread is dead.')
    if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration): raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
    # Alive
    response = {'status': str(task_manager.current_state)}
    if session_id:
        task = task_manager.get_cached_task(session_id, update_ttl=True)
        if task:
            response['task'] = id(task)
            if task.lock.locked():
                response['session'] = 'running'
            elif isinstance(task.error, StopAsyncIteration):
                response['session'] = 'stopped'
            elif task.error:
                response['session'] = 'error'
            elif not task.buffer_queue.empty():
                response['session'] = 'buffer'
            elif task.response:
                response['session'] = 'completed'
            else:
                response['session'] = 'pending'
    response['devices'] = task_manager.get_devices()
    return JSONResponse(response, headers=NOCACHE_HEADERS)

def save_model_to_config(ckpt_model_name, vae_model_name):
    config = getConfig()
    if 'model' not in config:
        config['model'] = {}

    config['model']['stable-diffusion'] = ckpt_model_name
    config['model']['vae'] = vae_model_name

    if vae_model_name is None or vae_model_name == "":
        del config['model']['vae']

    setConfig(config)

def update_render_devices_in_config(config, render_devices):
    if render_devices not in ('cpu', 'auto') and not render_devices.startswith('cuda:'):
        raise HTTPException(status_code=400, detail=f'Invalid render device requested: {render_devices}')

    if render_devices.startswith('cuda:'):
        render_devices = render_devices.split(',')

    config['render_devices'] = render_devices

@app.post('/render')
def render(req : task_manager.ImageRequest):
    try:
        save_model_to_config(req.use_stable_diffusion_model, req.use_vae_model)
        req.use_stable_diffusion_model = resolve_ckpt_to_use(req.use_stable_diffusion_model)
        req.use_vae_model = resolve_vae_to_use(req.use_vae_model)
        new_task = task_manager.render(req)
        response = {
            'status': str(task_manager.current_state),
            'queue': len(task_manager.tasks_queue),
            'stream': f'/image/stream/{req.session_id}/{id(new_task)}',
            'task': id(new_task)
        }
        return JSONResponse(response, headers=NOCACHE_HEADERS)
    except ChildProcessError as e: # Render thread is dead
        raise HTTPException(status_code=500, detail=f'Rendering thread has died.') # HTTP500 Internal Server Error
    except ConnectionRefusedError as e: # Unstarted task pending, deny queueing more than one.
        raise HTTPException(status_code=503, detail=f'Session {req.session_id} has an already pending task.') # HTTP503 Service Unavailable
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/image/stream/{session_id:str}/{task_id:int}')
def stream(session_id:str, task_id:int):
    #TODO Move to WebSockets ??
    task = task_manager.get_cached_task(session_id, update_ttl=True)
    if not task: raise HTTPException(status_code=410, detail='No request received.') # HTTP410 Gone
    if (id(task) != task_id): raise HTTPException(status_code=409, detail=f'Wrong task id received. Expected:{id(task)}, Received:{task_id}') # HTTP409 Conflict
    if task.buffer_queue.empty() and not task.lock.locked():
        if task.response:
            #print(f'Session {session_id} sending cached response')
            return JSONResponse(task.response, headers=NOCACHE_HEADERS)
        raise HTTPException(status_code=425, detail='Too Early, task not started yet.') # HTTP425 Too Early
    #print(f'Session {session_id} opened live render stream {id(task.buffer_queue)}')
    return StreamingResponse(task.read_buffer_generator(), media_type='application/json')

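# Illustrative client flow (assumed session id, host and port): POST /render returns the
# task id plus a stream URL, which a client then follows for progress and the final result.
# Uses the third-party `requests` package purely as an example.
#
#   import requests
#
#   payload = {'session_id': 'demo-session', 'prompt': 'an astronaut riding a horse'}
#   info = requests.post('http://localhost:9000/render', json=payload).json()
#   # info contains 'status', 'queue', 'task' and 'stream' (e.g. '/image/stream/demo-session/<task id>')
#
#   with requests.get('http://localhost:9000' + info['stream'], stream=True) as resp:
#       for segment in resp.iter_content(chunk_size=None):
#           print(segment[:80])   # JSON progress segments, ending with the final response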
@app.get('/image/stop')
def stop(session_id:str=None):
    if not session_id:
        if task_manager.current_state == task_manager.ServerStates.Online or task_manager.current_state == task_manager.ServerStates.Unavailable:
            raise HTTPException(status_code=409, detail='Not currently running any tasks.') # HTTP409 Conflict
        task_manager.current_state_error = StopAsyncIteration('')
        return {'OK'}
    task = task_manager.get_cached_task(session_id, update_ttl=False)
    if not task: raise HTTPException(status_code=404, detail=f'Session {session_id} has no active task.') # HTTP404 Not Found
    if isinstance(task.error, StopAsyncIteration): raise HTTPException(status_code=409, detail=f'Session {session_id} task is already stopped.') # HTTP409 Conflict
    task.error = StopAsyncIteration('')
    return {'OK'}

@app.get('/image/tmp/{session_id}/{img_id:int}')
def get_image(session_id, img_id):
    task = task_manager.get_cached_task(session_id, update_ttl=True)
    if not task: raise HTTPException(status_code=410, detail=f'Session {session_id} has not submitted a task.') # HTTP410 Gone
    if not task.temp_images[img_id]: raise HTTPException(status_code=425, detail='Too Early, task data is not available yet.') # HTTP425 Too Early
    try:
        img_data = task.temp_images[img_id]
        img_data.seek(0)
        return StreamingResponse(img_data, media_type='image/jpeg')
    except KeyError as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get('/')
def read_root():
    return FileResponse(os.path.join(SD_UI_DIR, 'index.html'), headers=NOCACHE_HEADERS)

@app.on_event("shutdown")
def shutdown_event(): # Signal render thread to close on shutdown
    task_manager.current_state_error = SystemExit('Application shutting down.')

# don't log certain requests
class LogSuppressFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        path = record.getMessage()
        for prefix in ACCESS_LOG_SUPPRESS_PATH_PREFIXES:
            if path.find(prefix) != -1:
                return False
        return True
logging.getLogger('uvicorn.access').addFilter(LogSuppressFilter())

# Start the task_manager
task_manager.default_model_to_load = resolve_ckpt_to_use()
task_manager.default_vae_to_load = resolve_vae_to_use()

def update_render_threads():
    config = getConfig()
    render_devices = config.get('render_devices', 'auto')
    active_devices = task_manager.get_devices()['active'].keys()

    print('requesting for render_devices', render_devices)
    task_manager.update_render_threads(render_devices, active_devices)

update_render_threads()

# start the browser ui
def open_browser():
    config = getConfig()
    ui = config.get('ui', {})
    if ui.get('open_browser_on_start', True):
        import webbrowser; webbrowser.open('http://localhost:9000')

open_browser()