"""task_manager.py: manage tasks dispatching and render threads.
Notes:
    render_threads should be the only hard reference held by the manager to the threads.
    Use weak_thread_data to store all other data using weak keys.
    This will allow for garbage collection after the thread dies.
"""
import json
import traceback

import logging

TASK_TTL = 15 * 60 # seconds - cached tasks from the last session are discarded after this timeout

import torch
import queue, threading, time, weakref
from typing import Any, Hashable

from sd_internal import TaskData, device_manager
from diffusionkit.types import GenerateImageRequest

log = logging.getLogger()

THREAD_NAME_PREFIX = ''
ERR_LOCK_FAILED = ' failed to acquire lock within timeout.'
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.

DEVICE_START_TIMEOUT = 60 # seconds - Maximum time to wait for a render device to init.

class SymbolClass(type): # Print nicely formatted Symbol names.
    def __repr__(self): return self.__qualname__
    def __str__(self): return self.__name__

class Symbol(metaclass=SymbolClass): pass

class ServerStates:
    class Init(Symbol): pass
    class LoadingModel(Symbol): pass
    class Online(Symbol): pass
    class Rendering(Symbol): pass
    class Unavailable(Symbol): pass

class RenderTask(): # Task with output queue and completion lock.
    def __init__(self, req: GenerateImageRequest, task_data: TaskData):
        task_data.request_id = id(self)
        self.render_request: GenerateImageRequest = req # Initial Request
        self.task_data: TaskData = task_data
        self.response: Any = None # Copy of the last response
        self.render_device = None # Select the task affinity. (Not used to change active devices).
        self.temp_images: list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
        self.error: Exception = None
        self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
        self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
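
    # Streams out whatever JSON segments are currently buffered; the consumer (presumably the
    # HTTP layer sending partial results) re-polls while the task is still pending.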
    async def read_buffer_generator(self):
        try:
            while not self.buffer_queue.empty():
                res = self.buffer_queue.get(block=False)
                self.buffer_queue.task_done()
                yield res
        except queue.Empty as e: yield

    @property
    def status(self):
        if self.lock.locked():
            return 'running'
        if isinstance(self.error, StopAsyncIteration):
            return 'stopped'
        if self.error:
            return 'error'
        if not self.buffer_queue.empty():
            return 'buffer'
        if self.response:
            return 'completed'
        return 'pending'

    @property
    def is_pending(self):
        return bool(not self.response and not self.error)

# Temporary cache that allows querying task results for a short time after they are completed.
class DataCache():
    def __init__(self):
        self._base = dict()
        self._lock: threading.Lock = threading.Lock()

    def _get_ttl_time(self, ttl: int) -> int:
        return int(time.time()) + ttl

    def _is_expired(self, timestamp: int) -> bool:
        return int(time.time()) >= timestamp
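
    # clean() is called periodically from the render threads' main loop to evict expired entries.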
    def clean(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.clean' + ERR_LOCK_FAILED)
        try:
            # Create a list of expired keys to delete
            to_delete = []
            for key in self._base:
                ttl, _ = self._base[key]
                if self._is_expired(ttl):
                    to_delete.append(key)
            # Remove Items
            for key in to_delete:
                (_, val) = self._base[key]
                if isinstance(val, RenderTask):
                    log.debug(f'RenderTask {key} expired. Data removed.')
                elif isinstance(val, SessionState):
                    log.debug(f'Session {key} expired. Data removed.')
                else:
                    log.debug(f'Key {key} expired. Data removed.')
                del self._base[key]
        finally:
            self._lock.release()

    def clear(self) -> None:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.clear' + ERR_LOCK_FAILED)
        try: self._base.clear()
        finally: self._lock.release()

    def delete(self, key: Hashable) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.delete' + ERR_LOCK_FAILED)
        try:
            if key not in self._base:
                return False
            del self._base[key]
            return True
        finally:
            self._lock.release()

    def keep(self, key: Hashable, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.keep' + ERR_LOCK_FAILED)
        try:
            if key in self._base:
                _, value = self._base.get(key)
                self._base[key] = (self._get_ttl_time(ttl), value)
                return True
            return False
        finally:
            self._lock.release()

    def put(self, key: Hashable, value: Any, ttl: int) -> bool:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.put' + ERR_LOCK_FAILED)
        try:
            self._base[key] = (
                self._get_ttl_time(ttl), value
            )
        except Exception as e:
            log.error(traceback.format_exc())
            return False
        else:
            return True
        finally:
            self._lock.release()

    def tryGet(self, key: Hashable) -> Any:
        if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('DataCache.tryGet' + ERR_LOCK_FAILED)
        try:
            ttl, value = self._base.get(key, (None, None))
            if ttl is not None and self._is_expired(ttl):
                log.debug(f'Session {key} expired. Discarding data.')
                del self._base[key]
                return None
            return value
        finally:
            self._lock.release()

manager_lock = threading.RLock()
render_threads = []
current_state = ServerStates.Init
current_state_error: Exception = None
tasks_queue = []
session_cache = DataCache()
task_cache = DataCache()
weak_thread_data = weakref.WeakKeyDictionary()
idle_event: threading.Event = threading.Event()
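# idle_event is set by render() whenever a new task is queued; idle render threads wait on it
# (with a short timeout) instead of busy-polling the queue.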

class SessionState():
    def __init__(self, id: str):
        self._id = id
        self._tasks_ids = []

    @property
    def id(self):
        return self._id

    @property
    def tasks(self):
        tasks = []
        for task_id in self._tasks_ids:
            task = task_cache.tryGet(task_id)
            if task:
                tasks.append(task)
        return tasks

    def put(self, task, ttl=TASK_TTL):
        task_id = id(task)
        self._tasks_ids.append(task_id)
        if not task_cache.put(task_id, task, ttl):
            return False
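        # Keep the per-session history bounded: remember at most twice as many task ids as there
        # are render threads, dropping the oldest first.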
        while len(self._tasks_ids) > len(render_threads) * 2:
            self._tasks_ids.pop(0)
        return True

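# Picks the next task this thread should run: prefers tasks pinned to this thread's device,
# reports an error for tasks pinned to a device that is no longer active, and keeps the CPU
# thread from grabbing unpinned work while GPU threads are alive.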
def thread_get_next_task():
    from sd_internal import renderer
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
        log.warn(f'Render thread on device: {renderer.context.device} failed to acquire manager lock.')
        return None
    if len(tasks_queue) <= 0:
        manager_lock.release()
        return None
    task = None
    try: # Select a render task.
        for queued_task in tasks_queue:
            if queued_task.render_device and renderer.context.device != queued_task.render_device:
                # Asking for a specific render device.
                if is_alive(queued_task.render_device) > 0:
                    continue # Requested device is alive, skip the current one.
                else:
                    # Requested device is not active, return error to UI.
                    queued_task.error = Exception(queued_task.render_device + ' is not currently active.')
                    task = queued_task
                    break
            if not queued_task.render_device and renderer.context.device == 'cpu' and is_alive() > 1:
                # Not asking for any specific device, but the CPU wants to grab the task while other render devices are alive.
                continue # Skip task; don't run on CPU unless there is nothing else or the user asked for it.
            task = queued_task
            break
        if task is not None:
            del tasks_queue[tasks_queue.index(task)]
        return task
    finally:
        manager_lock.release()
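
# Main loop for one render thread: initialize the renderer on the requested device, register the
# thread's metadata in weak_thread_data, load the default models, then keep pulling tasks until
# the thread is told to shut down.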
def thread_render(device):
    global current_state, current_state_error

    from sd_internal import renderer, model_manager
    try:
        renderer.init(device)
    except Exception as e:
        log.error(traceback.format_exc())
        weak_thread_data[threading.current_thread()] = {
            'error': e
        }
        return

    weak_thread_data[threading.current_thread()] = {
        'device': renderer.context.device,
        'device_name': renderer.context.device_name,
        'alive': True
    }

    current_state = ServerStates.LoadingModel
    model_manager.load_default_models(renderer.context)

    current_state = ServerStates.Online

    while True:
        session_cache.clean()
        task_cache.clean()
        if not weak_thread_data[threading.current_thread()]['alive']:
            log.info(f'Shutting down thread for device {renderer.context.device}')
            model_manager.unload_all(renderer.context)
            return
        if isinstance(current_state_error, SystemExit):
            current_state = ServerStates.Unavailable
            return
        task = thread_get_next_task()
        if task is None:
            idle_event.clear()
            idle_event.wait(timeout=1)
            continue
        if task.error is not None:
            log.error(task.error)
            task.response = {"status": 'failed', "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        if current_state_error:
            task.error = current_state_error
            task.response = {"status": 'failed', "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            continue
        log.info(f'Session {task.task_data.session_id} starting task {id(task)} on {renderer.context.device_name}')
        if not task.lock.acquire(blocking=False): raise Exception('Got locked task from queue.')
        try:
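            # step_callback is invoked by the renderer between diffusion steps; it propagates
            # shutdown (SystemExit) and per-task cancellation (StopAsyncIteration) by asking the
            # renderer to stop processing.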
            def step_callback():
                global current_state_error

                if isinstance(current_state_error, SystemExit) or isinstance(current_state_error, StopAsyncIteration) or isinstance(task.error, StopAsyncIteration):
                    renderer.context.stop_processing = True
                    if isinstance(current_state_error, StopAsyncIteration):
                        task.error = current_state_error
                        current_state_error = None
                        log.info(f'Session {task.task_data.session_id} sent cancel signal for task {id(task)}')

            current_state = ServerStates.LoadingModel
            model_manager.resolve_model_paths(task.task_data)
            model_manager.set_vram_optimizations(renderer.context, task.task_data)
            model_manager.reload_models_if_necessary(renderer.context, task.task_data)

            current_state = ServerStates.Rendering
            task.response = renderer.make_images(task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback)
            # Before looping back to the generator, mark cache as still alive.
            task_cache.keep(id(task), TASK_TTL)
            session_cache.keep(task.task_data.session_id, TASK_TTL)
        except Exception as e:
            task.error = e
            task.response = {"status": 'failed', "detail": str(task.error)}
            task.buffer_queue.put(json.dumps(task.response))
            log.error(traceback.format_exc())
            continue
        finally:
            # Task completed
            task.lock.release()
        task_cache.keep(id(task), TASK_TTL)
        session_cache.keep(task.task_data.session_id, TASK_TTL)
        if isinstance(task.error, StopAsyncIteration):
            log.info(f'Session {task.task_data.session_id} task {id(task)} cancelled!')
        elif task.error is not None:
            log.info(f'Session {task.task_data.session_id} task {id(task)} failed!')
        else:
            log.info(f'Session {task.task_data.session_id} task {id(task)} completed by {renderer.context.device_name}.')
        current_state = ServerStates.Online

def get_cached_task(task_id: str, update_ttl: bool=False):
    # Calling keep() before tryGet() refreshes the TTL, so an entry that had just expired is not discarded.
    if update_ttl and not task_cache.keep(task_id, TASK_TTL):
        # Failed to keep task, already gone.
        return None
    return task_cache.tryGet(task_id)

def get_cached_session(session_id: str, update_ttl: bool=False):
    if update_ttl:
        session_cache.keep(session_id, TASK_TTL)
    session = session_cache.tryGet(session_id)
    if not session:
        session = SessionState(session_id)
        session_cache.put(session_id, session, TASK_TTL)
    return session

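# Reports every compatible device under 'all' and, under 'active', the devices that currently
# have a live render thread registered in weak_thread_data.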
def get_devices():
    devices = {
        'all': {},
        'active': {},
    }

    def get_device_info(device):
        if device == 'cpu':
            return {'name': device_manager.get_processor_name()}

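        # mem_get_info returns bytes; report the values in (decimal) gigabytes.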
        mem_free, mem_total = torch.cuda.mem_get_info(device)
        mem_free /= float(10**9)
        mem_total /= float(10**9)

        return {
            'name': torch.cuda.get_device_name(device),
            'mem_free': mem_free,
            'mem_total': mem_total,
        }

    # list the compatible devices
    gpu_count = torch.cuda.device_count()
    for device in range(gpu_count):
        device = f'cuda:{device}'
        if not device_manager.is_device_compatible(device):
            continue

        devices['all'].update({device: get_device_info(device)})

    devices['all'].update({'cpu': get_device_info('cpu')})

    # list the activated devices
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('get_devices' + ERR_LOCK_FAILED)
    try:
        for rthread in render_threads:
            if not rthread.is_alive():
                continue
            weak_data = weak_thread_data.get(rthread)
            if not weak_data or not 'device' in weak_data or not 'device_name' in weak_data:
                continue
            device = weak_data['device']
            devices['active'].update({device: get_device_info(device)})
    finally:
        manager_lock.release()

    return devices
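
# Counts the render threads that are still alive; pass a device id to count only the threads
# bound to that device.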
def is_alive(device=None):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('is_alive' + ERR_LOCK_FAILED)
    nbr_alive = 0
    try:
        for rthread in render_threads:
            if device is not None:
                weak_data = weak_thread_data.get(rthread)
                if weak_data is None or not 'device' in weak_data or weak_data['device'] is None:
                    continue
                thread_device = weak_data['device']
                if thread_device != device:
                    continue
            if rthread.is_alive():
                nbr_alive += 1
        return nbr_alive
    finally:
        manager_lock.release()

def start_render_thread(device):
    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('start_render_thread' + ERR_LOCK_FAILED)
    log.info(f'Start new Rendering Thread on device: {device}')
    try:
        rthread = threading.Thread(target=thread_render, kwargs={'device': device})
        rthread.daemon = True
        rthread.name = THREAD_NAME_PREFIX + device
        rthread.start()
        render_threads.append(rthread)
    finally:
        manager_lock.release()
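    # Wait (up to DEVICE_START_TIMEOUT seconds) for the new thread to either register its device
    # in weak_thread_data or report an initialization error.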
    timeout = DEVICE_START_TIMEOUT
    while not rthread.is_alive() or not rthread in weak_thread_data or not 'device' in weak_thread_data[rthread]:
        if rthread in weak_thread_data and 'error' in weak_thread_data[rthread]:
            log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
            return False
        if timeout <= 0:
            return False
        timeout -= 1
        time.sleep(1)
    return True

def stop_render_thread(device):
    try:
        device_manager.validate_device_id(device, log_prefix='stop_render_thread')
    except:
        log.error(traceback.format_exc())
        return False

    if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT): raise Exception('stop_render_thread' + ERR_LOCK_FAILED)
    log.info(f'Stopping Rendering Thread on device: {device}')

    try:
        thread_to_remove = None
        for rthread in render_threads:
            weak_data = weak_thread_data.get(rthread)
            if weak_data is None or not 'device' in weak_data or weak_data['device'] is None:
                continue
            thread_device = weak_data['device']
            if thread_device == device:
                weak_data['alive'] = False
                thread_to_remove = rthread
                break
        if thread_to_remove is not None:
            render_threads.remove(thread_to_remove)
            return True
    finally:
        manager_lock.release()

    return False
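
# Reconciles the running render threads against the requested configuration: stops threads for
# devices that were removed and starts threads for devices that were added.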
def update_render_threads(render_devices, active_devices):
    devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
    log.debug(f'devices_to_start: {devices_to_start}')
    log.debug(f'devices_to_stop: {devices_to_stop}')

    for device in devices_to_stop:
        if is_alive(device) <= 0:
            log.debug(f'{device} is not alive')
            continue
        if not stop_render_thread(device):
            log.warn(f'{device} could not stop render thread')

    for device in devices_to_start:
        if is_alive(device) >= 1:
            log.debug(f'{device} already registered.')
            continue
        if not start_render_thread(device):
            log.warn(f'{device} failed to start.')

    if is_alive() <= 0: # No running devices, probably invalid user config.
        raise EnvironmentError('ERROR: No active render devices! Please verify the "render_devices" value in config.json')

    log.debug(f"active devices: {get_devices()['active']}")

def shutdown_event(): # Signal render thread to close on shutdown
    global current_state_error
    current_state_error = SystemExit('Application shutting down.')
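
# Queues a new render job: requires at least one live render thread, rejects sessions that already
# have more pending tasks than there are live threads, then appends the task and wakes idle threads.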
def render(render_req: GenerateImageRequest, task_data: TaskData):
    current_thread_count = is_alive()
    if current_thread_count <= 0: # Render thread is dead
        raise ChildProcessError('Rendering thread has died.')

    # Alive, check if task in cache
    session = get_cached_session(task_data.session_id, update_ttl=True)
    pending_tasks = list(filter(lambda t: t.is_pending, session.tasks))
    if current_thread_count < len(pending_tasks):
        raise ConnectionRefusedError(f'Session {task_data.session_id} already has {len(pending_tasks)} pending tasks out of {current_thread_count}.')

    new_task = RenderTask(render_req, task_data)
    if session.put(new_task, TASK_TTL):
        # Use twice the normal timeout for adding user requests.
        # Tries to force session.put to fail before tasks_queue.put would.
        if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
            try:
                tasks_queue.append(new_task)
                idle_event.set()
                return new_task
            finally:
                manager_lock.release()
    raise RuntimeError('Failed to add task to cache.')