formatting with black so pre-commit does not fail

Italo Maia 2025-02-18 18:30:46 +01:00
parent 888be2383e
commit a5c80a6cc7


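Context: black rewrites the Python source in place and the repository's pre-commit hooks then check the formatting, so the hunks below are mechanical re-wraps and quote normalisations with no behavioural change. As a minimal sketch (not part of this commit), the same pass can be reproduced locally before committing, assuming black and pre-commit are installed and using podman_compose.py as an illustrative target path:

import subprocess
import sys

TARGET = "podman_compose.py"  # illustrative path; point it at the files you touched

def main() -> int:
    # Report what black would change, without modifying anything yet.
    if subprocess.run(["black", "--check", "--diff", TARGET]).returncode != 0:
        # Apply the same mechanical reformatting that this commit contains.
        subprocess.run(["black", TARGET], check=True)
    # Re-run all configured hooks so the eventual commit is not rejected.
    return subprocess.run(["pre-commit", "run", "--all-files"]).returncode

if __name__ == "__main__":
    sys.exit(main())

In practice this is usually just "black ." followed by "pre-commit run --all-files"; black reads its line-length and target-version settings from the [tool.black] table in pyproject.toml when one is present.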
@@ -7,7 +7,8 @@
# https://docs.docker.com/compose/django/
# https://docs.docker.com/compose/wordpress/
# TODO: podman pod logs --color -n -f pod_testlogs
from __future__ import annotations # If you see an error here, use Python 3.7 or greater
# If you see an error here, use Python 3.7 or greater
from __future__ import annotations
import argparse
import asyncio.exceptions
@@ -160,8 +161,10 @@ def parse_short_mount(mount_str, basedir):
# User-relative path
# - ~/configs:/etc/configs/:ro
mount_type = "bind"
if os.name != 'nt' or (os.name == 'nt' and ".sock" not in mount_src):
mount_src = os.path.abspath(os.path.join(basedir, os.path.expanduser(mount_src)))
if os.name != "nt" or (os.name == "nt" and ".sock" not in mount_src):
mount_src = os.path.abspath(
os.path.join(basedir, os.path.expanduser(mount_src))
)
else:
# Named volume
# - datavolume:/var/lib/mysql
@@ -214,11 +217,13 @@ def fix_mount_dict(compose, mount_dict, srv_name):
# handle anonymous or implied volume
if not source:
# missing source
vol["name"] = "_".join([
compose.project_name,
srv_name,
hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
])
vol["name"] = "_".join(
[
compose.project_name,
srv_name,
hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
]
)
elif not name:
external = vol.get("external")
if isinstance(external, dict):
@@ -265,10 +270,12 @@ def rec_subs(value, subs_dict):
do bash-like substitution in value and if list of dictionary do that recursively
"""
if isinstance(value, dict):
if 'environment' in value and isinstance(value['environment'], dict):
if "environment" in value and isinstance(value["environment"], dict):
# Load service's environment variables
subs_dict = subs_dict.copy()
svc_envs = {k: v for k, v in value['environment'].items() if k not in subs_dict}
svc_envs = {
k: v for k, v in value["environment"].items() if k not in subs_dict
}
# we need to add `svc_envs` to the `subs_dict` so that it can evaluate the
# service environment that reference to another service environment.
svc_envs = rec_subs(svc_envs, subs_dict)
@@ -389,7 +396,9 @@ async def assert_volume(compose, mount_dict):
if mount_dict["type"] == "bind":
basedir = os.path.realpath(compose.dirname)
mount_src = mount_dict["source"]
mount_src = os.path.realpath(os.path.join(basedir, os.path.expanduser(mount_src)))
mount_src = os.path.realpath(
os.path.join(basedir, os.path.expanduser(mount_src))
)
if not os.path.exists(mount_src):
try:
os.makedirs(mount_src, exist_ok=True)
@@ -405,7 +414,9 @@ async def assert_volume(compose, mount_dict):
# podman volume list --format '{{.Name}}\t{{.MountPoint}}' \
# -f 'label=io.podman.compose.project=HERE'
try:
_ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode("utf-8")
_ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode(
"utf-8"
)
except subprocess.CalledProcessError as e:
if is_ext:
raise RuntimeError(f"External volume [{vol_name}] does not exists") from e
@@ -427,10 +438,14 @@ async def assert_volume(compose, mount_dict):
args.extend(["--opt", f"{opt}={value}"])
args.append(vol_name)
await compose.podman.output([], "volume", args)
_ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode("utf-8")
_ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode(
"utf-8"
)
def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument
def mount_desc_to_mount_args(
compose, mount_desc, srv_name, cnt_name
): # pylint: disable=unused-argument
mount_type = mount_desc.get("type")
vol = mount_desc.get("_vol") if mount_type == "volume" else None
source = vol["name"] if vol else mount_desc.get("source")
@@ -493,7 +508,9 @@ def container_to_ulimit_build_args(cnt, podman_args):
ulimit_to_ulimit_args(build.get("ulimits", []), podman_args)
def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument
def mount_desc_to_volume_args(
compose, mount_desc, srv_name, cnt_name
): # pylint: disable=unused-argument
mount_type = mount_desc["type"]
if mount_type not in ("bind", "volume"):
raise ValueError("unknown mount type:" + mount_type)
@@ -504,9 +521,13 @@ def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name): # pylin
target = mount_desc["target"]
opts = []
propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(",")))
propagations = set(
filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(","))
)
if mount_type != "bind":
propagations.update(filteri(mount_desc.get("bind", {}).get("propagation", "").split(",")))
propagations.update(
filteri(mount_desc.get("bind", {}).get("propagation", "").split(","))
)
opts.extend(propagations)
# --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
# [rw|ro]
@@ -574,7 +595,9 @@ def get_secret_args(compose, cnt, secret, podman_is_building=False):
"""
secret_name = secret if isinstance(secret, str) else secret.get("source")
if not secret_name or secret_name not in compose.declared_secrets.keys():
raise ValueError(f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}')
raise ValueError(
f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}'
)
declared_secret = compose.declared_secrets[secret_name]
source_file = declared_secret.get("file")
@@ -596,7 +619,9 @@ def get_secret_args(compose, cnt, secret, podman_is_building=False):
if source_file:
# assemble path for source file first, because we need it for all cases
basedir = compose.dirname
source_file = os.path.realpath(os.path.join(basedir, os.path.expanduser(source_file)))
source_file = os.path.realpath(
os.path.join(basedir, os.path.expanduser(source_file))
)
if podman_is_building:
# pass file secrets to "podman build" with param --secret
@@ -642,7 +667,9 @@ def get_secret_args(compose, cnt, secret, podman_is_building=False):
secret_opts += f",gid={secret_gid}" if secret_gid else ""
secret_opts += f",mode={secret_mode}" if secret_mode else ""
secret_opts += f",type={secret_type}" if secret_type else ""
secret_opts += f",target={secret_target}" if secret_target and secret_type == "env" else ""
secret_opts += (
f",target={secret_target}" if secret_target and secret_type == "env" else ""
)
# The target option is only valid for type=env,
# which in an ideal world would work
# for type=mount as well.
@@ -655,9 +682,9 @@ def get_secret_args(compose, cnt, secret, podman_is_building=False):
)
if ext_name and ext_name != secret_name:
raise ValueError(err_str.format(secret_name, ext_name))
if secret_target and secret_target != secret_name and secret_type != 'env':
if secret_target and secret_target != secret_name and secret_type != "env":
raise ValueError(err_str.format(secret_target, secret_name))
if secret_target and secret_type != 'env':
if secret_target and secret_type != "env":
log.warning(
'WARNING: Service "%s" uses target: "%s" for secret: "%s".'
+ " That is un-supported and a no-op and is ignored.",
@@ -668,7 +695,9 @@ def get_secret_args(compose, cnt, secret, podman_is_building=False):
return ["--secret", "{}{}".format(secret_name, secret_opts)]
raise ValueError(
'ERROR: unparsable secret: "{}", service: "{}"'.format(secret_name, cnt["_service"])
'ERROR: unparsable secret: "{}", service: "{}"'.format(
secret_name, cnt["_service"]
)
)
@@ -702,26 +731,32 @@ def container_to_gpu_res_args(cnt, podman_args):
device_ids = device.get("device_ids", "all")
if device_ids != "all" and len(device_ids) > 0:
for device_id in device_ids:
podman_args.extend((
"--device",
f"nvidia.com/gpu={device_id}",
))
podman_args.extend(
(
"--device",
f"nvidia.com/gpu={device_id}",
)
)
gpu_on = True
continue
if count != "all":
for device_id in range(count):
podman_args.extend((
"--device",
f"nvidia.com/gpu={device_id}",
))
podman_args.extend(
(
"--device",
f"nvidia.com/gpu={device_id}",
)
)
gpu_on = True
continue
podman_args.extend((
"--device",
"nvidia.com/gpu=all",
))
podman_args.extend(
(
"--device",
"nvidia.com/gpu=all",
)
)
gpu_on = True
if gpu_on:
@@ -749,27 +784,35 @@ def container_to_cpu_res_args(cnt, podman_args):
# add args
cpus = cpus_limit_v3 or cpus_limit_v2
if cpus:
podman_args.extend((
"--cpus",
str(cpus),
))
podman_args.extend(
(
"--cpus",
str(cpus),
)
)
if cpu_shares_v2:
podman_args.extend((
"--cpu-shares",
str(cpu_shares_v2),
))
podman_args.extend(
(
"--cpu-shares",
str(cpu_shares_v2),
)
)
mem = mem_limit_v3 or mem_limit_v2
if mem:
podman_args.extend((
"-m",
str(mem).lower(),
))
podman_args.extend(
(
"-m",
str(mem).lower(),
)
)
mem_res = mem_res_v3 or mem_res_v2
if mem_res:
podman_args.extend((
"--memory-reservation",
str(mem_res).lower(),
))
podman_args.extend(
(
"--memory-reservation",
str(mem_res).lower(),
)
)
def port_dict_to_str(port_desc):
@@ -836,10 +879,12 @@ def get_network_create_args(net_desc, proj_name, net_name):
if net_desc.get("x-podman.disable_dns"):
args.append("--disable-dns")
if net_desc.get("x-podman.dns"):
args.extend((
"--dns",
",".join(norm_as_list(net_desc.get("x-podman.dns"))),
))
args.extend(
(
"--dns",
",".join(norm_as_list(net_desc.get("x-podman.dns"))),
)
)
if isinstance(ipam_config_ls, dict):
ipam_config_ls = [ipam_config_ls]
@@ -880,7 +925,9 @@ async def assert_cnt_nets(compose, cnt):
await compose.podman.output([], "network", ["exists", net_name])
except subprocess.CalledProcessError as e:
if is_ext:
raise RuntimeError(f"External network [{net_name}] does not exists") from e
raise RuntimeError(
f"External network [{net_name}] does not exists"
) from e
args = get_network_create_args(net_desc, compose.project_name, net_name)
await compose.podman.output([], "network", args)
await compose.podman.output([], "network", ["exists", net_name])
@@ -968,13 +1015,17 @@ def get_net_args_from_networks(compose, cnt):
if is_list(multiple_nets):
multiple_nets = {net: {} for net in multiple_nets}
else:
multiple_nets = {net: net_config or {} for net, net_config in multiple_nets.items()}
multiple_nets = {
net: net_config or {} for net, net_config in multiple_nets.items()
}
# if a mac_address was specified on the container level, we need to check that it is not
# specified on the network level as well
if mac_address is not None:
for net_config in multiple_nets.values():
network_mac = net_config.get("mac_address", net_config.get("x-podman.mac_address"))
network_mac = net_config.get(
"mac_address", net_config.get("x-podman.mac_address")
)
if network_mac is not None:
raise RuntimeError(
f"conflicting mac addresses {mac_address} and {network_mac}:"
@@ -1198,10 +1249,12 @@ async def container_to_args(compose, cnt, detached=True, no_deps=False):
# If it's a string, it's equivalent to specifying CMD-SHELL
if isinstance(healthcheck_test, str):
# podman does not add shell to handle command with whitespace
podman_args.extend([
"--healthcheck-command",
"/bin/sh -c " + cmd_quote(healthcheck_test),
])
podman_args.extend(
[
"--healthcheck-command",
"/bin/sh -c " + cmd_quote(healthcheck_test),
]
)
elif is_list(healthcheck_test):
healthcheck_test = healthcheck_test.copy()
# If it's a list, first item is either NONE, CMD or CMD-SHELL.
@@ -1237,24 +1290,26 @@ async def container_to_args(compose, cnt, detached=True, no_deps=False):
podman_args.extend(["--healthcheck-retries", str(healthcheck["retries"])])
# handle podman extension
if 'x-podman' in cnt:
if "x-podman" in cnt:
raise ValueError(
'Configuration under x-podman has been migrated to x-podman.uidmaps and '
'x-podman.gidmaps fields'
"Configuration under x-podman has been migrated to x-podman.uidmaps and "
"x-podman.gidmaps fields"
)
rootfs_mode = False
for uidmap in cnt.get('x-podman.uidmaps', []):
for uidmap in cnt.get("x-podman.uidmaps", []):
podman_args.extend(["--uidmap", uidmap])
for gidmap in cnt.get('x-podman.gidmaps', []):
for gidmap in cnt.get("x-podman.gidmaps", []):
podman_args.extend(["--gidmap", gidmap])
if cnt.get("x-podman.no_hosts", False):
podman_args.extend(["--no-hosts"])
rootfs = cnt.get('x-podman.rootfs')
rootfs = cnt.get("x-podman.rootfs")
if rootfs is not None:
rootfs_mode = True
podman_args.extend(["--rootfs", rootfs])
log.warning("WARNING: x-podman.rootfs and image both specified, image field ignored")
log.warning(
"WARNING: x-podman.rootfs and image both specified, image field ignored"
)
if not rootfs_mode:
podman_args.append(cnt["image"]) # command, ..etc.
@@ -1296,7 +1351,10 @@ class ServiceDependencyCondition(Enum):
try:
return docker_to_podman_cond[value]
except KeyError:
raise ValueError(f"Value '{value}' is not a valid condition for a service dependency") # pylint: disable=raise-missing-from
# pylint: disable-next=raise-missing-from
raise ValueError(
f"Value '{value}' is not a valid condition for a service dependency"
)
class ServiceDependency:
@@ -1314,7 +1372,7 @@ class ServiceDependency:
def __hash__(self):
# Compute hash based on the frozenset of items to ensure order does not matter
return hash(('name', self._name) + ('condition', self._condition))
return hash(("name", self._name) + ("condition", self._condition))
def __eq__(self, other):
# Compare equality based on dictionary content
@@ -1370,7 +1428,9 @@ def flat_deps(services, with_extends=False):
links_ls = srv.get("links", [])
if not is_list(links_ls):
links_ls = [links_ls]
deps.update([ServiceDependency(c.split(":")[0], "service_started") for c in links_ls])
deps.update(
[ServiceDependency(c.split(":")[0], "service_started") for c in links_ls]
)
for c in links_ls:
if ":" in c:
dep_name, dep_alias = c.split(":")
@@ -1432,7 +1492,9 @@ class Podman:
if p.returncode == 0:
return stdout_data
raise subprocess.CalledProcessError(p.returncode, " ".join(cmd_ls), stderr_data)
raise subprocess.CalledProcessError(
p.returncode, " ".join(cmd_ls), stderr_data
)
async def _readchunk(self, reader):
try:
@@ -1527,7 +1589,9 @@ class Podman:
err_t.add_done_callback(task_reference.discard)
else:
p = await asyncio.create_subprocess_exec(*cmd_ls, close_fds=False) # pylint: disable=consider-using-with
p = await asyncio.create_subprocess_exec(
*cmd_ls, close_fds=False
) # pylint: disable=consider-using-with
try:
exit_code = await p.wait()
@@ -1638,7 +1702,7 @@ def normalize_service(service, sub_dir=""):
# the dependency service_started is set by default
# unless requested otherwise.
for k, v in deps.items():
v.setdefault('condition', 'service_started')
v.setdefault("condition", "service_started")
service["depends_on"] = deps
return service
@@ -1697,13 +1761,17 @@ def rec_merge_one(target, source):
if not isinstance(value2, type(value)):
value_type = type(value)
value2_type = type(value2)
raise ValueError(f"can't merge value of [{key}] of type {value_type} and {value2_type}")
raise ValueError(
f"can't merge value of [{key}] of type {value_type} and {value2_type}"
)
if is_list(value2):
if key == "volumes":
# clean duplicate mount targets
pts = {v.split(":", 2)[1] for v in value2 if ":" in v}
del_ls = [
ix for (ix, v) in enumerate(value) if ":" in v and v.split(":", 2)[1] in pts
ix
for (ix, v) in enumerate(value)
if ":" in v and v.split(":", 2)[1] in pts
]
for ix in reversed(del_ls):
del value[ix]
@@ -1846,14 +1914,16 @@ class PodmanCompose:
if args.dry_run is False:
log.fatal("Binary %s has not been found.", podman_path)
sys.exit(1)
self.podman = Podman(self, podman_path, args.dry_run, asyncio.Semaphore(args.parallel))
self.podman = Podman(
self, podman_path, args.dry_run, asyncio.Semaphore(args.parallel)
)
if not args.dry_run:
# just to make sure podman is running
try:
self.podman_version = (await self.podman.output(["--version"], "", [])).decode(
"utf-8"
).strip() or ""
self.podman_version = (
await self.podman.output(["--version"], "", [])
).decode("utf-8").strip() or ""
self.podman_version = (self.podman_version.split() or [""])[-1]
except subprocess.CalledProcessError:
self.podman_version = None
@@ -1908,7 +1978,7 @@ class PodmanCompose:
"pass files with -f"
)
sys.exit(-1)
ex = map(lambda x: x == '-' or os.path.exists(x), files)
ex = map(lambda x: x == "-" or os.path.exists(x), files)
missing = [fn0 for ex0, fn0 in zip(ex, files) if not ex0]
if missing:
log.fatal("missing files: %s", missing)
@@ -1937,20 +2007,26 @@ class PodmanCompose:
dotenv_path = os.path.realpath(args.env_file)
dotenv_dict.update(dotenv_to_dict(dotenv_path))
os.environ.update({
key: value for key, value in dotenv_dict.items() if key.startswith("PODMAN_")
})
os.environ.update(
{
key: value
for key, value in dotenv_dict.items()
if key.startswith("PODMAN_")
}
)
self.environ = dotenv_dict
self.environ.update(dict(os.environ))
# see: https://docs.docker.com/compose/reference/envvars/
# see: https://docs.docker.com/compose/env-file/
self.environ.update({
"COMPOSE_PROJECT_DIR": dirname,
"COMPOSE_FILE": pathsep.join(relative_files),
"COMPOSE_PATH_SEPARATOR": pathsep,
})
self.environ.update(
{
"COMPOSE_PROJECT_DIR": dirname,
"COMPOSE_FILE": pathsep.join(relative_files),
"COMPOSE_PATH_SEPARATOR": pathsep,
}
)
if args and 'env' in args and args.env:
if args and "env" in args and args.env:
env_vars = norm_as_dict(args.env)
self.environ.update(env_vars)
@@ -1964,7 +2040,7 @@ class PodmanCompose:
except StopIteration:
break
if filename.strip().split('/')[-1] == '-':
if filename.strip().split("/")[-1] == "-":
content = yaml.safe_load(sys.stdin)
else:
with open(filename, "r", encoding="utf-8") as f:
@@ -1978,10 +2054,12 @@ class PodmanCompose:
content = normalize(content)
# log(filename, json.dumps(content, indent = 2))
content = rec_subs(content, self.environ)
if isinstance(services := content.get('services'), dict):
if isinstance(services := content.get("services"), dict):
for service in services.values():
if 'extends' in service and (service_file := service['extends'].get('file')):
service['extends']['file'] = os.path.join(
if "extends" in service and (
service_file := service["extends"].get("file")
):
service["extends"]["file"] = os.path.join(
os.path.dirname(filename), service_file
)
@@ -1989,14 +2067,18 @@ class PodmanCompose:
# If `include` is used, append included files to files
include = compose.get("include")
if include:
files.extend([os.path.join(os.path.dirname(filename), i) for i in include])
files.extend(
[os.path.join(os.path.dirname(filename), i) for i in include]
)
# As compose obj is updated and tested with every loop, not deleting `include`
# from it, results in it being tested again and again, original values for
# `include` be appended to `files`, and, included files be processed for ever.
# Solution is to remove 'include' key from compose obj. This doesn't break
# having `include` present and correctly processed in included files
del compose["include"]
resolved_services = self._resolve_profiles(compose.get("services", {}), set(args.profile))
resolved_services = self._resolve_profiles(
compose.get("services", {}), set(args.profile)
)
compose["services"] = resolved_services
if not getattr(args, "no_normalize", None):
compose = normalize_final(compose, self.dirname)
@@ -2014,10 +2096,14 @@ class PodmanCompose:
if project_name is None:
# More strict then actually needed for simplicity:
# podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]*
project_name = self.environ.get("COMPOSE_PROJECT_NAME", dir_basename.lower())
project_name = self.environ.get(
"COMPOSE_PROJECT_NAME", dir_basename.lower()
)
project_name = norm_re.sub("", project_name)
if not project_name:
raise RuntimeError(f"Project name [{dir_basename}] normalized to empty")
raise RuntimeError(
f"Project name [{dir_basename}] normalized to empty"
)
self.project_name = project_name
self.environ.update({"COMPOSE_PROJECT_NAME": self.project_name})
@@ -2031,11 +2117,15 @@ class PodmanCompose:
# NOTE: maybe add "extends.service" to _deps at this stage
flat_deps(services, with_extends=True)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = sorted(
[(len(srv["_deps"]), name) for name, srv in services.items()]
)
service_names = [name for _, name in service_names]
resolve_extends(services, service_names, self.environ)
flat_deps(services)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = sorted(
[(len(srv["_deps"]), name) for name, srv in services.items()]
)
service_names = [name for _, name in service_names]
nets = compose.get("networks", {})
if not nets:
@@ -2061,7 +2151,9 @@ class PodmanCompose:
for name, srv in services.items():
srv_nets = srv.get("networks", self.default_net)
srv_nets = (
list(srv_nets.keys()) if isinstance(srv_nets, dict) else norm_as_list(srv_nets)
list(srv_nets.keys())
if isinstance(srv_nets, dict)
else norm_as_list(srv_nets)
)
allnets.update(srv_nets)
given_nets = set(nets.keys())
@@ -2092,7 +2184,9 @@ class PodmanCompose:
container_names_by_service = {}
self.services = services
for service_name, service_desc in services.items():
replicas = try_int(service_desc.get("deploy", {}).get("replicas"), fallback=1)
replicas = try_int(
service_desc.get("deploy", {}).get("replicas"), fallback=1
)
container_names_by_service[service_name] = []
for num in range(1, replicas + 1):
@@ -2110,16 +2204,20 @@ class PodmanCompose:
**service_desc,
}
x_podman = service_desc.get("x-podman")
rootfs_mode = x_podman is not None and x_podman.get("rootfs") is not None
rootfs_mode = (
x_podman is not None and x_podman.get("rootfs") is not None
)
if "image" not in cnt and not rootfs_mode:
cnt["image"] = f"{project_name}_{service_name}"
labels = norm_as_list(cnt.get("labels"))
cnt["ports"] = norm_ports(cnt.get("ports"))
labels.extend(podman_compose_labels)
labels.extend([
f"com.docker.compose.container-number={num}",
"com.docker.compose.service=" + service_name,
])
labels.extend(
[
f"com.docker.compose.container-number={num}",
"com.docker.compose.service=" + service_name,
]
)
cnt["labels"] = labels
cnt["_service"] = service_name
cnt["_project"] = project_name
@@ -2133,7 +2231,9 @@ class PodmanCompose:
and mnt_dict["source"] not in self.vols
):
vol_name = mnt_dict["source"]
raise RuntimeError(f"volume [{vol_name}] not defined in top level")
raise RuntimeError(
f"volume [{vol_name}] not defined in top level"
)
self.container_names_by_service = container_names_by_service
self.all_services = set(container_names_by_service.keys())
container_by_name = {c["name"]: c for c in given_containers}
@@ -2169,7 +2269,9 @@ class PodmanCompose:
for name, config in defined_services.items():
service_profiles = set(config.get("profiles", []))
if not service_profiles or requested_profiles.intersection(service_profiles):
if not service_profiles or requested_profiles.intersection(
service_profiles
):
services[name] = config
return services
@@ -2179,26 +2281,35 @@ class PodmanCompose:
subparsers = parser.add_subparsers(title="command", dest="command")
subparser = subparsers.add_parser("help", help="show help")
for cmd_name, cmd in self.commands.items():
subparser = subparsers.add_parser(cmd_name, help=cmd.desc) # pylint: disable=protected-access
subparser = subparsers.add_parser(
cmd_name, help=cmd.desc
) # pylint: disable=protected-access
for cmd_parser in cmd._parse_args: # pylint: disable=protected-access
cmd_parser(subparser)
self.global_args = parser.parse_args(argv)
if self.global_args.in_pod is not None and self.global_args.in_pod.lower() not in (
'',
'true',
'1',
'false',
'0',
if (
self.global_args.in_pod is not None
and self.global_args.in_pod.lower()
not in (
"",
"true",
"1",
"false",
"0",
)
):
raise ValueError(
f'Invalid --in-pod value: \'{self.global_args.in_pod}\'. '
'It must be set to either of: empty value, true, 1, false, 0'
f"Invalid --in-pod value: '{self.global_args.in_pod}'. "
"It must be set to either of: empty value, true, 1, false, 0"
)
if self.global_args.in_pod == '' or self.global_args.in_pod is None:
if self.global_args.in_pod == "" or self.global_args.in_pod is None:
self.global_args.in_pod_bool = None
else:
self.global_args.in_pod_bool = self.global_args.in_pod.lower() in ('true', '1')
self.global_args.in_pod_bool = self.global_args.in_pod.lower() in (
"true",
"1",
)
if self.global_args.version:
self.global_args.command = "version"
@@ -2292,7 +2403,9 @@ class PodmanCompose:
action="store_true",
)
parser.add_argument(
"--parallel", type=int, default=os.environ.get("COMPOSE_PARALLEL_LIMIT", sys.maxsize)
"--parallel",
type=int,
default=os.environ.get("COMPOSE_PARALLEL_LIMIT", sys.maxsize),
)
parser.add_argument(
"--verbose",
@@ -2500,7 +2613,9 @@ def container_to_build_args(compose, cnt, args, path_exists, cleanup_callbacks=N
dockerfile_inline = str(dockerfile_inline)
# Error if both `dockerfile_inline` and `dockerfile` are set
if dockerfile and dockerfile_inline:
raise OSError("dockerfile_inline and dockerfile can't be used simultaneously")
raise OSError(
"dockerfile_inline and dockerfile can't be used simultaneously"
)
dockerfile = tempfile.NamedTemporaryFile(delete=False, suffix=".containerfile")
dockerfile.write(dockerfile_inline.encode())
dockerfile.close()
@@ -2534,7 +2649,9 @@ def container_to_build_args(compose, cnt, args, path_exists, cleanup_callbacks=N
if "platform" in cnt:
build_args.extend(["--platform", cnt["platform"]])
for secret in build_desc.get("secrets", []):
build_args.extend(get_secret_args(compose, cnt, secret, podman_is_building=True))
build_args.extend(
get_secret_args(compose, cnt, secret, podman_is_building=True)
)
for tag in build_desc.get("tags", []):
build_args.extend(["-t", tag])
labels = build_desc.get("labels", [])
@@ -2557,10 +2674,12 @@ def container_to_build_args(compose, cnt, args, path_exists, cleanup_callbacks=N
build_args.append("--pull")
args_list = norm_as_list(build_desc.get("args", {}))
for build_arg in args_list + args.build_arg:
build_args.extend((
"--build-arg",
build_arg,
))
build_args.extend(
(
"--build-arg",
build_arg,
)
)
for cache_img in build_desc.get("cache_from", []):
build_args.extend(["--cache-from", cache_img])
for cache_img in build_desc.get("cache_to", []):
@@ -2673,12 +2792,14 @@ async def check_dep_conditions(compose: PodmanCompose, deps: set) -> None:
log.debug(
"dependencies for condition %s have been fulfilled on containers %s",
condition.value,
', '.join(deps_cd),
", ".join(deps_cd),
)
break
except subprocess.CalledProcessError as _exc:
output = list(
((_exc.stdout or b"") + (_exc.stderr or b"")).decode().split('\n')
((_exc.stdout or b"") + (_exc.stderr or b""))
.decode()
.split("\n")
)
log.debug(
'Podman wait returned an error (%d) when executing "%s": %s',
@@ -2690,7 +2811,11 @@ async def check_dep_conditions(compose: PodmanCompose, deps: set) -> None:
async def run_container(
compose: PodmanCompose, name: str, deps: set, command: tuple, log_formatter: str = None
compose: PodmanCompose,
name: str,
deps: set,
command: tuple,
log_formatter: str = None,
):
"""runs a container after waiting for its dependencies to be fulfilled"""
@@ -2707,10 +2832,12 @@ async def run_container(
def deps_from_container(args, cnt):
if args.no_deps:
return set()
return cnt['_deps']
return cnt["_deps"]
@cmd_run(podman_compose, "up", "Create and start the entire stack or some of its services")
@cmd_run(
podman_compose, "up", "Create and start the entire stack or some of its services"
)
async def compose_up(compose: PodmanCompose, args):
excluded = get_excluded(compose, args)
if not args.no_build:
@@ -2757,7 +2884,10 @@ async def compose_up(compose: PodmanCompose, args):
subproc = await compose.podman.run([], podman_command, podman_args)
if podman_command == "run" and subproc is not None:
await run_container(
compose, cnt["name"], deps_from_container(args, cnt), ([], "start", [cnt["name"]])
compose,
cnt["name"],
deps_from_container(args, cnt),
([], "start", [cnt["name"]]),
)
if args.no_start or args.detach or args.dry_run:
return
@@ -2771,12 +2901,16 @@ async def compose_up(compose: PodmanCompose, args):
max_service_length = 0
for cnt in compose.containers:
curr_length = len(cnt["_service"])
max_service_length = curr_length if curr_length > max_service_length else max_service_length
max_service_length = (
curr_length if curr_length > max_service_length else max_service_length
)
tasks = set()
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGINT, lambda: [t.cancel("User exit") for t in tasks])
loop.add_signal_handler(
signal.SIGINT, lambda: [t.cancel("User exit") for t in tasks]
)
for i, cnt in enumerate(compose.containers):
# Add colored service prefix to output by piping output through sed
@@ -2868,7 +3002,8 @@ async def compose_down(compose: PodmanCompose, args):
podman_stop_args.extend(["-t", str(timeout)])
down_tasks.append(
asyncio.create_task(
compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]]), name=cnt["name"]
compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]]),
name=cnt["name"],
)
)
await asyncio.gather(*down_tasks)
@@ -2920,7 +3055,11 @@ async def compose_down(compose: PodmanCompose, args):
@cmd_run(podman_compose, "ps", "show status of containers")
async def compose_ps(compose, args):
ps_args = ["-a", "--filter", f"label=io.podman.compose.project={compose.project_name}"]
ps_args = [
"-a",
"--filter",
f"label=io.podman.compose.project={compose.project_name}",
]
if args.quiet is True:
ps_args.extend(["--format", "{{.ID}}"])
elif args.format:
@@ -2965,7 +3104,10 @@ async def compose_run(compose, args):
await compose.commands["up"](compose, up_args)
build_args = argparse.Namespace(
services=[args.service], if_not_exists=(not args.build), build_arg=[], **args.__dict__
services=[args.service],
if_not_exists=(not args.build),
build_arg=[],
**args.__dict__,
)
await compose.commands["build"](compose, build_args)
@@ -2982,7 +3124,9 @@ async def compose_run(compose, args):
def compose_run_update_container_from_args(compose, cnt, args):
# adjust one-off container options
name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536))
name0 = "{}_{}_tmp{}".format(
compose.project_name, args.service, random.randrange(0, 65536)
)
cnt["name"] = args.name or name0
if args.entrypoint:
cnt["entrypoint"] = args.entrypoint
@@ -2992,7 +3136,9 @@ def compose_run_update_container_from_args(compose, cnt, args):
cnt["working_dir"] = args.workdir
env = dict(cnt.get("environment", {}))
if args.env:
additional_env_vars = dict(map(lambda each: each.split("=", maxsplit=1), args.env))
additional_env_vars = dict(
map(lambda each: each.split("=", maxsplit=1), args.env)
)
env.update(additional_env_vars)
cnt["environment"] = env
if not args.service_ports:
@@ -3042,7 +3188,12 @@ def compose_exec_args(cnt, container_name, args):
env = dict(cnt.get("environment", {}))
if args.env:
additional_env_vars = dict(
map(lambda each: each.split("=", maxsplit=1) if "=" in each else (each, None), args.env)
map(
lambda each: (
each.split("=", maxsplit=1) if "=" in each else (each, None)
),
args.env,
)
)
env.update(additional_env_vars)
for name, value in env.items():
@@ -3079,7 +3230,9 @@ async def transfer_service_status(compose, args, action):
timeout = str_to_seconds(timeout_str)
if timeout is not None:
podman_args.extend(["-t", str(timeout)])
tasks.append(asyncio.create_task(compose.podman.run([], action, podman_args + [target])))
tasks.append(
asyncio.create_task(compose.podman.run([], action, podman_args + [target]))
)
await asyncio.gather(*tasks)
@@ -3188,7 +3341,9 @@ async def compose_unpause(compose, args):
await compose.podman.run([], "unpause", targets)
@cmd_run(podman_compose, "kill", "Kill one or more running containers with a specific signal")
@cmd_run(
podman_compose, "kill", "Kill one or more running containers with a specific signal"
)
async def compose_kill(compose, args):
# to ensure that the user did not execute the command by mistake
if not args.services and not args.all:
@@ -3258,7 +3413,9 @@ async def compose_images(compose, args):
if args.quiet is True:
for img in img_containers:
name = img["name"]
output = await compose.podman.output([], "images", ["--quiet", img["image"]])
output = await compose.podman.output(
[], "images", ["--quiet", img["image"]]
)
data.append(output.decode("utf-8").split())
else:
data.append(["CONTAINER", "REPOSITORY", "TAG", "IMAGE ID", "SIZE", ""])
@@ -3317,13 +3474,17 @@ def compose_up_parse(parser):
help="Detached mode: Run container in the background, print new container name. \
Incompatible with --abort-on-container-exit.",
)
parser.add_argument("--no-color", action="store_true", help="Produce monochrome output.")
parser.add_argument(
"--no-color", action="store_true", help="Produce monochrome output."
)
parser.add_argument(
"--quiet-pull",
action="store_true",
help="Pull without printing progress information.",
)
parser.add_argument("--no-deps", action="store_true", help="Don't start linked services.")
parser.add_argument(
"--no-deps", action="store_true", help="Don't start linked services."
)
parser.add_argument(
"--force-recreate",
action="store_true",
@@ -3422,7 +3583,9 @@ def compose_run_parse(parser):
action="store_true",
help="Detached mode: Run container in the background, print new container name.",
)
parser.add_argument("--name", type=str, default=None, help="Assign a name to the container")
parser.add_argument(
"--name", type=str, default=None, help="Assign a name to the container"
)
parser.add_argument(
"--entrypoint",
type=str,
@@ -3446,7 +3609,9 @@ def compose_run_parse(parser):
parser.add_argument(
"-u", "--user", type=str, default=None, help="Run as specified username or uid"
)
parser.add_argument("--no-deps", action="store_true", help="Don't start linked services")
parser.add_argument(
"--no-deps", action="store_true", help="Don't start linked services"
)
parser.add_argument(
"--rm",
action="store_true",
@@ -3572,15 +3737,21 @@ def compose_logs_parse(parser):
action="store_true",
help="Output the container name in the log",
)
parser.add_argument("--since", help="Show logs since TIMESTAMP", type=str, default=None)
parser.add_argument("-t", "--timestamps", action="store_true", help="Show timestamps.")
parser.add_argument(
"--since", help="Show logs since TIMESTAMP", type=str, default=None
)
parser.add_argument(
"-t", "--timestamps", action="store_true", help="Show timestamps."
)
parser.add_argument(
"--tail",
help="Number of lines to show from the end of the logs for each container.",
type=str,
default="all",
)
parser.add_argument("--until", help="Show logs until TIMESTAMP", type=str, default=None)
parser.add_argument(
"--until", help="Show logs until TIMESTAMP", type=str, default=None
)
parser.add_argument(
"services", metavar="services", nargs="*", default=None, help="service names"
)
@@ -3605,7 +3776,9 @@ def compose_pull_parse(parser):
default=False,
help="Also pull unprefixed images for services which have a build section",
)
parser.add_argument("services", metavar="services", nargs="*", help="services to pull")
parser.add_argument(
"services", metavar="services", nargs="*", help="services to pull"
)
@cmd_parse(podman_compose, "push")
@@ -3615,12 +3788,16 @@ def compose_push_parse(parser):
action="store_true",
help="Push what it can and ignores images with push failures. (not implemented)",
)
parser.add_argument("services", metavar="services", nargs="*", help="services to push")
parser.add_argument(
"services", metavar="services", nargs="*", help="services to push"
)
@cmd_parse(podman_compose, "ps")
def compose_ps_parse(parser):
parser.add_argument("-q", "--quiet", help="Only display container IDs", action="store_true")
parser.add_argument(
"-q", "--quiet", help="Only display container IDs", action="store_true"
)
@cmd_parse(podman_compose, ["build", "up"])
@@ -3723,7 +3900,9 @@ def compose_kill_parse(parser):
@cmd_parse(podman_compose, "images")
def compose_images_parse(parser):
parser.add_argument("-q", "--quiet", help="Only display images IDs", action="store_true")
parser.add_argument(
"-q", "--quiet", help="Only display images IDs", action="store_true"
)
@cmd_parse(podman_compose, ["stats"])