Merge pull request #870 from p12tic/ruff

Use ruff for formatting
Povilas Kanapickas 2024-03-07 18:28:52 +02:00 committed by GitHub
commit 9e29891aa7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 236 additions and 331 deletions


@ -5,37 +5,18 @@ on:
- pull_request
jobs:
lint-black:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install psf/black requirements
run: |
sudo apt-get update
sudo apt-get install -y python3 python3-venv
- uses: psf/black@stable
with:
options: "--check --verbose"
version: "~= 23.3"
lint-pylint:
lint-ruff:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
python-version: ["3.11"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
- uses: actions/checkout@v3
name: Set up Python ${{ matrix.python-version }}
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
- name: Analysing the code with ruff
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install pylint
- name: Analysing the code with pylint
run: |
python -m compileall podman_compose.py
pylint podman_compose.py
# pylint $(git ls-files '*.py')
pip install -r test-requirements.txt
ruff format --check
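
Note: the new lint-ruff job amounts to installing the pinned test tooling and running the formatter in check mode. A minimal local equivalent, as a sketch assuming a POSIX shell and the test-requirements.txt pin added in this PR:

    python3 -m venv .venv && . .venv/bin/activate   # optional isolated environment
    pip install -r test-requirements.txt            # installs the pinned ruff (0.3.1)
    ruff format --check                             # exits non-zero if any file would be reformatted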


@ -50,11 +50,7 @@ def is_dict(dict_object):
def is_list(list_object):
return (
not is_str(list_object)
and not is_dict(list_object)
and hasattr(list_object, "__iter__")
)
return not is_str(list_object) and not is_dict(list_object) and hasattr(list_object, "__iter__")
# identity filter
@ -170,9 +166,7 @@ def parse_short_mount(mount_str, basedir):
# User-relative path
# - ~/configs:/etc/configs/:ro
mount_type = "bind"
mount_src = os.path.abspath(
os.path.join(basedir, os.path.expanduser(mount_src))
)
mount_src = os.path.abspath(os.path.join(basedir, os.path.expanduser(mount_src)))
else:
# Named volume
# - datavolume:/var/lib/mysql
@ -225,13 +219,11 @@ def fix_mount_dict(compose, mount_dict, proj_name, srv_name):
# handle anonymous or implied volume
if not source:
# missing source
vol["name"] = "_".join(
[
proj_name,
srv_name,
hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
]
)
vol["name"] = "_".join([
proj_name,
srv_name,
hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
])
elif not name:
external = vol.get("external", None)
if isinstance(external, dict):
@ -382,9 +374,7 @@ async def assert_volume(compose, mount_dict):
if mount_dict["type"] == "bind":
basedir = os.path.realpath(compose.dirname)
mount_src = mount_dict["source"]
mount_src = os.path.realpath(
os.path.join(basedir, os.path.expanduser(mount_src))
)
mount_src = os.path.realpath(os.path.join(basedir, os.path.expanduser(mount_src)))
if not os.path.exists(mount_src):
try:
os.makedirs(mount_src, exist_ok=True)
@ -425,9 +415,7 @@ async def assert_volume(compose, mount_dict):
_ = (await compose.podman.output([], "volume", ["inspect", vol_name])).decode("utf-8")
def mount_desc_to_mount_args(
compose, mount_desc, srv_name, cnt_name
): # pylint: disable=unused-argument
def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument
mount_type = mount_desc.get("type", None)
vol = mount_desc.get("_vol", None) if mount_type == "volume" else None
source = vol["name"] if vol else mount_desc.get("source", None)
@ -475,9 +463,7 @@ def container_to_ulimit_args(cnt, podman_args):
podman_args.extend(["--ulimit", i])
def mount_desc_to_volume_args(
compose, mount_desc, srv_name, cnt_name
): # pylint: disable=unused-argument
def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name): # pylint: disable=unused-argument
mount_type = mount_desc["type"]
if mount_type not in ("bind", "volume"):
raise ValueError("unknown mount type:" + mount_type)
@ -488,13 +474,9 @@ def mount_desc_to_volume_args(
target = mount_desc["target"]
opts = []
propagations = set(
filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(","))
)
propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(",")))
if mount_type != "bind":
propagations.update(
filteri(mount_desc.get("bind", {}).get("propagation", "").split(","))
)
propagations.update(filteri(mount_desc.get("bind", {}).get("propagation", "").split(",")))
opts.extend(propagations)
# --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
# [rw|ro]
@ -554,9 +536,7 @@ async def get_mount_args(compose, cnt, volume):
def get_secret_args(compose, cnt, secret):
secret_name = secret if is_str(secret) else secret.get("source", None)
if not secret_name or secret_name not in compose.declared_secrets.keys():
raise ValueError(
f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}'
)
raise ValueError(f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}')
declared_secret = compose.declared_secrets[secret_name]
source_file = declared_secret.get("file", None)
@ -577,9 +557,7 @@ def get_secret_args(compose, cnt, secret):
else:
dest_file = target
basedir = compose.dirname
source_file = os.path.realpath(
os.path.join(basedir, os.path.expanduser(source_file))
)
source_file = os.path.realpath(os.path.join(basedir, os.path.expanduser(source_file)))
volume_ref = ["--volume", f"{source_file}:{dest_file}:ro,rprivate,rbind"]
if uid or gid or mode:
sec = target if target else secret_name
@ -620,9 +598,7 @@ def get_secret_args(compose, cnt, secret):
return ["--secret", "{}{}".format(secret_name, secret_opts)]
raise ValueError(
'ERROR: unparsable secret: "{}", service: "{}"'.format(
secret_name, cnt["_service"]
)
'ERROR: unparsable secret: "{}", service: "{}"'.format(secret_name, cnt["_service"])
)
@ -647,35 +623,27 @@ def container_to_res_args(cnt, podman_args):
# add args
cpus = cpus_limit_v3 or cpus_limit_v2
if cpus:
podman_args.extend(
(
"--cpus",
str(cpus),
)
)
podman_args.extend((
"--cpus",
str(cpus),
))
if cpu_shares_v2:
podman_args.extend(
(
"--cpu-shares",
str(cpu_shares_v2),
)
)
podman_args.extend((
"--cpu-shares",
str(cpu_shares_v2),
))
mem = mem_limit_v3 or mem_limit_v2
if mem:
podman_args.extend(
(
"-m",
str(mem).lower(),
)
)
podman_args.extend((
"-m",
str(mem).lower(),
))
mem_res = mem_res_v3 or mem_res_v2
if mem_res:
podman_args.extend(
(
"--memory-reservation",
str(mem_res).lower(),
)
)
podman_args.extend((
"--memory-reservation",
str(mem_res).lower(),
))
def port_dict_to_str(port_desc):
@ -731,16 +699,12 @@ async def assert_cnt_nets(compose, cnt):
is_ext = net_desc.get("external", None)
ext_desc = is_ext if is_dict(is_ext) else {}
default_net_name = net if is_ext else f"{proj_name}_{net}"
net_name = (
ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
)
net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
try:
await compose.podman.output([], "network", ["exists", net_name])
except subprocess.CalledProcessError as e:
if is_ext:
raise RuntimeError(
f"External network [{net_name}] does not exists"
) from e
raise RuntimeError(f"External network [{net_name}] does not exists") from e
args = [
"create",
"--label",
@ -843,12 +807,10 @@ def get_net_args(compose, cnt):
if not ip6:
ip6 = net_value.get("ipv6_address", None)
net_priority = net_value.get("priority", 0)
prioritized_cnt_nets.append(
(
net_priority,
net_key,
)
)
prioritized_cnt_nets.append((
net_priority,
net_key,
))
# sort dict by priority
prioritized_cnt_nets.sort(reverse=True)
cnt_nets = [net_key for _, net_key in prioritized_cnt_nets]
@ -859,9 +821,7 @@ def get_net_args(compose, cnt):
is_ext = net_desc.get("external", None)
ext_desc = is_ext if is_dict(is_ext) else {}
default_net_name = net if is_ext else f"{proj_name}_{net}"
net_name = (
ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
)
net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
net_names.append(net_name)
net_names_str = ",".join(net_names)
@ -874,11 +834,7 @@ def get_net_args(compose, cnt):
is_ext = net_desc.get("external", None)
ext_desc = is_ext if is_dict(is_ext) else {}
default_net_name = net_ if is_ext else f"{proj_name}_{net_}"
net_name = (
ext_desc.get("name", None)
or net_desc.get("name", None)
or default_net_name
)
net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
ipv4 = multiple_nets[net_].get("ipv4_address", None)
ipv6 = multiple_nets[net_].get("ipv6_address", None)
@ -890,9 +846,7 @@ def get_net_args(compose, cnt):
net_args.extend(["--network", f"{net_name}:ip={ipv4}"])
else:
if is_bridge:
net_args.extend(
["--net", net_names_str, "--network-alias", ",".join(aliases)]
)
net_args.extend(["--net", net_names_str, "--network-alias", ",".join(aliases)])
if ip:
net_args.append(f"--ip={ip}")
if ip6:
@ -1041,9 +995,10 @@ async def container_to_args(compose, cnt, detached=True):
# If it's a string, it's equivalent to specifying CMD-SHELL
if is_str(healthcheck_test):
# podman does not add shell to handle command with whitespace
podman_args.extend(
["--healthcheck-command", "/bin/sh -c " + cmd_quote(healthcheck_test)]
)
podman_args.extend([
"--healthcheck-command",
"/bin/sh -c " + cmd_quote(healthcheck_test),
])
elif is_list(healthcheck_test):
healthcheck_test = healthcheck_test.copy()
# If it's a list, first item is either NONE, CMD or CMD-SHELL.
@ -1158,7 +1113,13 @@ def flat_deps(services, with_extends=False):
class Podman:
def __init__(self, compose, podman_path="podman", dry_run=False, semaphore: asyncio.Semaphore = asyncio.Semaphore(sys.maxsize)):
def __init__(
self,
compose,
podman_path="podman",
dry_run=False,
semaphore: asyncio.Semaphore = asyncio.Semaphore(sys.maxsize),
):
self.compose = compose
self.podman_path = podman_path
self.dry_run = dry_run
@ -1171,9 +1132,7 @@ class Podman:
cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args
log(cmd_ls)
p = await asyncio.subprocess.create_subprocess_exec(
*cmd_ls,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
*cmd_ls, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout_data, stderr_data = await p.communicate()
@ -1202,7 +1161,7 @@ class Podman:
log_formatter=None,
*,
# Intentionally mutable default argument to hold references to tasks
task_reference=set()
task_reference=set(),
) -> int:
async with self.semaphore:
cmd_args = list(map(str, cmd_args or []))
@ -1258,18 +1217,20 @@ class Podman:
async def volume_ls(self, proj=None):
if not proj:
proj = self.compose.project_name
output = (await self.output(
[],
"volume",
[
"ls",
"--noheading",
"--filter",
f"label=io.podman.compose.project={proj}",
"--format",
"{{.Name}}",
],
)).decode("utf-8")
output = (
await self.output(
[],
"volume",
[
"ls",
"--noheading",
"--filter",
f"label=io.podman.compose.project={proj}",
"--format",
"{{.Name}}",
],
)
).decode("utf-8")
volumes = output.splitlines()
return volumes
@ -1333,9 +1294,7 @@ def normalize_service_final(service: dict, project_dir: str) -> dict:
context = build if is_str(build) else build.get("context", ".")
context = os.path.normpath(os.path.join(project_dir, context))
dockerfile = (
"Dockerfile"
if is_str(build)
else service["build"].get("dockerfile", "Dockerfile")
"Dockerfile" if is_str(build) else service["build"].get("dockerfile", "Dockerfile")
)
if not is_dict(service["build"]):
service["build"] = {}
@ -1377,17 +1336,13 @@ def rec_merge_one(target, source):
if not isinstance(value2, type(value)):
value_type = type(value)
value2_type = type(value2)
raise ValueError(
f"can't merge value of [{key}] of type {value_type} and {value2_type}"
)
raise ValueError(f"can't merge value of [{key}] of type {value_type} and {value2_type}")
if is_list(value2):
if key == "volumes":
# clean duplicate mount targets
pts = {v.split(":", 2)[1] for v in value2 if ":" in v}
del_ls = [
ix
for (ix, v) in enumerate(value)
if ":" in v and v.split(":", 2)[1] in pts
ix for (ix, v) in enumerate(value) if ":" in v and v.split(":", 2)[1] in pts
]
for ix in reversed(del_ls):
del value[ix]
@ -1490,11 +1445,11 @@ class PodmanCompose:
self.merged_yaml = None
self.yaml_hash = ""
self.console_colors = [
"\x1B[1;32m",
"\x1B[1;33m",
"\x1B[1;34m",
"\x1B[1;35m",
"\x1B[1;36m",
"\x1b[1;32m",
"\x1b[1;33m",
"\x1b[1;34m",
"\x1b[1;35m",
"\x1b[1;36m",
]
def assert_services(self, services):
@ -1534,10 +1489,9 @@ class PodmanCompose:
if not args.dry_run:
# just to make sure podman is running
try:
self.podman_version = (
(await self.podman.output(["--version"], "", [])).decode("utf-8").strip()
or ""
)
self.podman_version = (await self.podman.output(["--version"], "", [])).decode(
"utf-8"
).strip() or ""
self.podman_version = (self.podman_version.split() or [""])[-1]
except subprocess.CalledProcessError:
self.podman_version = None
@ -1603,24 +1557,18 @@ class PodmanCompose:
# TODO: remove next line
os.chdir(dirname)
os.environ.update(
{
key: value
for key, value in dotenv_dict.items()
if key.startswith("PODMAN_")
}
)
os.environ.update({
key: value for key, value in dotenv_dict.items() if key.startswith("PODMAN_")
})
self.environ = dict(os.environ)
self.environ.update(dotenv_dict)
# see: https://docs.docker.com/compose/reference/envvars/
# see: https://docs.docker.com/compose/env-file/
self.environ.update(
{
"COMPOSE_PROJECT_DIR": dirname,
"COMPOSE_FILE": pathsep.join(relative_files),
"COMPOSE_PATH_SEPARATOR": pathsep,
}
)
self.environ.update({
"COMPOSE_PROJECT_DIR": dirname,
"COMPOSE_FILE": pathsep.join(relative_files),
"COMPOSE_PATH_SEPARATOR": pathsep,
})
compose = {}
# Iterate over files primitively to allow appending to files in-loop
files_iter = iter(files)
@ -1636,8 +1584,7 @@ class PodmanCompose:
# log(filename, json.dumps(content, indent = 2))
if not isinstance(content, dict):
sys.stderr.write(
"Compose file does not contain a top level object: %s\n"
% filename
"Compose file does not contain a top level object: %s\n" % filename
)
sys.exit(1)
content = normalize(content)
@ -1654,9 +1601,7 @@ class PodmanCompose:
# Solution is to remove 'include' key from compose obj. This doesn't break
# having `include` present and correctly processed in included files
del compose["include"]
resolved_services = self._resolve_profiles(
compose.get("services", {}), set(args.profile)
)
resolved_services = self._resolve_profiles(compose.get("services", {}), set(args.profile))
compose["services"] = resolved_services
if not getattr(args, "no_normalize", None):
compose = normalize_final(compose, self.dirname)
@ -1674,14 +1619,11 @@ class PodmanCompose:
if project_name is None:
# More strict then actually needed for simplicity: podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]*
project_name = (
self.environ.get("COMPOSE_PROJECT_NAME", None)
or dir_basename.lower()
self.environ.get("COMPOSE_PROJECT_NAME", None) or dir_basename.lower()
)
project_name = norm_re.sub("", project_name)
if not project_name:
raise RuntimeError(
f"Project name [{dir_basename}] normalized to empty"
)
raise RuntimeError(f"Project name [{dir_basename}] normalized to empty")
self.project_name = project_name
self.environ.update({"COMPOSE_PROJECT_NAME": self.project_name})
@ -1695,15 +1637,11 @@ class PodmanCompose:
# NOTE: maybe add "extends.service" to _deps at this stage
flat_deps(services, with_extends=True)
service_names = sorted(
[(len(srv["_deps"]), name) for name, srv in services.items()]
)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = [name for _, name in service_names]
resolve_extends(services, service_names, self.environ)
flat_deps(services)
service_names = sorted(
[(len(srv["_deps"]), name) for name, srv in services.items()]
)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = [name for _, name in service_names]
nets = compose.get("networks", None) or {}
if not nets:
@ -1719,9 +1657,7 @@ class PodmanCompose:
allnets = set()
for name, srv in services.items():
srv_nets = srv.get("networks", None) or default_net
srv_nets = (
list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets)
)
srv_nets = list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets)
allnets.update(srv_nets)
given_nets = set(nets.keys())
missing_nets = allnets - given_nets
@ -1772,12 +1708,10 @@ class PodmanCompose:
labels = norm_as_list(cnt.get("labels", None))
cnt["ports"] = norm_ports(cnt.get("ports", None))
labels.extend(podman_compose_labels)
labels.extend(
[
f"com.docker.compose.container-number={num}",
"com.docker.compose.service=" + service_name,
]
)
labels.extend([
f"com.docker.compose.container-number={num}",
"com.docker.compose.service=" + service_name,
])
cnt["labels"] = labels
cnt["_service"] = service_name
cnt["_project"] = project_name
@ -1791,9 +1725,7 @@ class PodmanCompose:
and mnt_dict["source"] not in self.vols
):
vol_name = mnt_dict["source"]
raise RuntimeError(
f"volume [{vol_name}] not defined in top level"
)
raise RuntimeError(f"volume [{vol_name}] not defined in top level")
self.container_names_by_service = container_names_by_service
self.all_services = set(container_names_by_service.keys())
container_by_name = {c["name"]: c for c in given_containers}
@ -1824,9 +1756,7 @@ class PodmanCompose:
for name, config in defined_services.items():
service_profiles = set(config.get("profiles", []))
if not service_profiles or requested_profiles.intersection(
service_profiles
):
if not service_profiles or requested_profiles.intersection(service_profiles):
services[name] = config
return services
@ -1836,9 +1766,7 @@ class PodmanCompose:
subparsers = parser.add_subparsers(title="command", dest="command")
subparser = subparsers.add_parser("help", help="show help")
for cmd_name, cmd in self.commands.items():
subparser = subparsers.add_parser(
cmd_name, help=cmd.desc
) # pylint: disable=protected-access
subparser = subparsers.add_parser(cmd_name, help=cmd.desc) # pylint: disable=protected-access
for cmd_parser in cmd._parse_args: # pylint: disable=protected-access
cmd_parser(subparser)
self.global_args = parser.parse_args()
@ -1932,9 +1860,7 @@ class PodmanCompose:
action="store_true",
)
parser.add_argument(
"--parallel",
type=int,
default=os.environ.get("COMPOSE_PARALLEL_LIMIT", sys.maxsize)
"--parallel", type=int, default=os.environ.get("COMPOSE_PARALLEL_LIMIT", sys.maxsize)
)
@ -2179,12 +2105,10 @@ async def build_one(compose, args, cnt):
args_list = norm_as_list(build_desc.get("args", {}))
for build_arg in args_list + args.build_arg:
build_args.extend(
(
"--build-arg",
build_arg,
)
)
build_args.extend((
"--build-arg",
build_arg,
))
build_args.append(ctx)
status = await compose.podman.run([], "build", build_args)
return status
@ -2243,9 +2167,7 @@ def get_excluded(compose, args):
return excluded
@cmd_run(
podman_compose, "up", "Create and start the entire stack or some of its services"
)
@cmd_run(podman_compose, "up", "Create and start the entire stack or some of its services")
async def compose_up(compose: PodmanCompose, args):
proj_name = compose.project_name
excluded = get_excluded(compose, args)
@ -2256,17 +2178,19 @@ async def compose_up(compose: PodmanCompose, args):
log("Build command failed")
hashes = (
(await compose.podman.output(
[],
"ps",
[
"--filter",
f"label=io.podman.compose.project={proj_name}",
"-a",
"--format",
'{{ index .Labels "io.podman.compose.config-hash"}}',
],
))
(
await compose.podman.output(
[],
"ps",
[
"--filter",
f"label=io.podman.compose.project={proj_name}",
"-a",
"--format",
'{{ index .Labels "io.podman.compose.config-hash"}}',
],
)
)
.decode("utf-8")
.splitlines()
)
@ -2301,9 +2225,7 @@ async def compose_up(compose: PodmanCompose, args):
max_service_length = 0
for cnt in compose.containers:
curr_length = len(cnt["_service"])
max_service_length = (
curr_length if curr_length > max_service_length else max_service_length
)
max_service_length = curr_length if curr_length > max_service_length else max_service_length
tasks = set()
@ -2315,9 +2237,7 @@ async def compose_up(compose: PodmanCompose, args):
color_idx = i % len(compose.console_colors)
color = compose.console_colors[color_idx]
space_suffix = " " * (max_service_length - len(cnt["_service"]) + 1)
log_formatter = "{}[{}]{}|\x1B[0m".format(
color, cnt["_service"], space_suffix
)
log_formatter = "{}[{}]{}|\x1b[0m".format(color, cnt["_service"], space_suffix)
if cnt["_service"] in excluded:
log("** skipping: ", cnt["name"])
continue
@ -2325,7 +2245,7 @@ async def compose_up(compose: PodmanCompose, args):
tasks.add(
asyncio.create_task(
compose.podman.run([], "start", ["-a", cnt["name"]], log_formatter=log_formatter),
name=cnt["_service"]
name=cnt["_service"],
)
)
@ -2384,7 +2304,11 @@ async def compose_down(compose, args):
timeout = str_to_seconds(timeout_str)
if timeout is not None:
podman_stop_args.extend(["-t", str(timeout)])
down_tasks.append(asyncio.create_task(compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]]), name=cnt["name"]))
down_tasks.append(
asyncio.create_task(
compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]]), name=cnt["name"]
)
)
await asyncio.gather(*down_tasks)
for cnt in containers:
if cnt["_service"] in excluded:
@ -2392,17 +2316,19 @@ async def compose_down(compose, args):
await compose.podman.run([], "rm", [cnt["name"]])
if args.remove_orphans:
names = (
(await compose.podman.output(
[],
"ps",
[
"--filter",
f"label=io.podman.compose.project={compose.project_name}",
"-a",
"--format",
"{{ .Names }}",
],
))
(
await compose.podman.output(
[],
"ps",
[
"--filter",
f"label=io.podman.compose.project={compose.project_name}",
"-a",
"--format",
"{{ .Names }}",
],
)
)
.decode("utf-8")
.splitlines()
)
@ -2470,23 +2396,18 @@ async def compose_run(compose, args):
no_cache=False,
build_arg=[],
parallel=1,
remove_orphans=True
remove_orphans=True,
)
)
await compose.commands["up"](compose, up_args)
build_args = argparse.Namespace(
services=[args.service],
if_not_exists=(not args.build),
build_arg=[],
**args.__dict__
services=[args.service], if_not_exists=(not args.build), build_arg=[], **args.__dict__
)
await compose.commands["build"](compose, build_args)
# adjust one-off container options
name0 = "{}_{}_tmp{}".format(
compose.project_name, args.service, random.randrange(0, 65536)
)
name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536))
cnt["name"] = args.name or name0
if args.entrypoint:
cnt["entrypoint"] = args.entrypoint
@ -2692,9 +2613,7 @@ async def compose_unpause(compose, args):
await compose.podman.run([], "unpause", targets)
@cmd_run(
podman_compose, "kill", "Kill one or more running containers with a specific signal"
)
@cmd_run(podman_compose, "kill", "Kill one or more running containers with a specific signal")
async def compose_kill(compose, args):
# to ensure that the user did not execute the command by mistake
if not args.services and not args.all:
@ -2787,17 +2706,13 @@ def compose_up_parse(parser):
help="Detached mode: Run container in the background, print new container name. \
Incompatible with --abort-on-container-exit.",
)
parser.add_argument(
"--no-color", action="store_true", help="Produce monochrome output."
)
parser.add_argument("--no-color", action="store_true", help="Produce monochrome output.")
parser.add_argument(
"--quiet-pull",
action="store_true",
help="Pull without printing progress information.",
)
parser.add_argument(
"--no-deps", action="store_true", help="Don't start linked services."
)
parser.add_argument("--no-deps", action="store_true", help="Don't start linked services.")
parser.add_argument(
"--force-recreate",
action="store_true",
@ -2893,9 +2808,7 @@ def compose_run_parse(parser):
action="store_true",
help="Detached mode: Run container in the background, print new container name.",
)
parser.add_argument(
"--name", type=str, default=None, help="Assign a name to the container"
)
parser.add_argument("--name", type=str, default=None, help="Assign a name to the container")
parser.add_argument(
"--entrypoint",
type=str,
@ -2919,9 +2832,7 @@ def compose_run_parse(parser):
parser.add_argument(
"-u", "--user", type=str, default=None, help="Run as specified username or uid"
)
parser.add_argument(
"--no-deps", action="store_true", help="Don't start linked services"
)
parser.add_argument("--no-deps", action="store_true", help="Don't start linked services")
parser.add_argument(
"--rm",
action="store_true",
@ -3047,21 +2958,15 @@ def compose_logs_parse(parser):
action="store_true",
help="Output the container name in the log",
)
parser.add_argument(
"--since", help="Show logs since TIMESTAMP", type=str, default=None
)
parser.add_argument(
"-t", "--timestamps", action="store_true", help="Show timestamps."
)
parser.add_argument("--since", help="Show logs since TIMESTAMP", type=str, default=None)
parser.add_argument("-t", "--timestamps", action="store_true", help="Show timestamps.")
parser.add_argument(
"--tail",
help="Number of lines to show from the end of the logs for each " "container.",
type=str,
default="all",
)
parser.add_argument(
"--until", help="Show logs until TIMESTAMP", type=str, default=None
)
parser.add_argument("--until", help="Show logs until TIMESTAMP", type=str, default=None)
parser.add_argument(
"services", metavar="services", nargs="*", default=None, help="service names"
)
@ -3086,9 +2991,7 @@ def compose_pull_parse(parser):
default=False,
help="Also pull unprefixed images for services which have a build section",
)
parser.add_argument(
"services", metavar="services", nargs="*", help="services to pull"
)
parser.add_argument("services", metavar="services", nargs="*", help="services to pull")
@cmd_parse(podman_compose, "push")
@ -3098,16 +3001,12 @@ def compose_push_parse(parser):
action="store_true",
help="Push what it can and ignores images with push failures. (not implemented)",
)
parser.add_argument(
"services", metavar="services", nargs="*", help="services to push"
)
parser.add_argument("services", metavar="services", nargs="*", help="services to push")
@cmd_parse(podman_compose, "ps")
def compose_ps_parse(parser):
parser.add_argument(
"-q", "--quiet", help="Only display container IDs", action="store_true"
)
parser.add_argument("-q", "--quiet", help="Only display container IDs", action="store_true")
@cmd_parse(podman_compose, ["build", "up"])
@ -3239,11 +3138,14 @@ def compose_format_parse(parser):
help="Pretty-print container statistics to JSON or using a Go template",
)
async def async_main():
await podman_compose.run()
def main():
asyncio.run(async_main())
if __name__ == "__main__":
main()

pyproject.toml (new file, 15 lines)

@ -0,0 +1,15 @@
[tool.ruff]
line-length = 100
target-version = "py38"
[tool.ruff.lint]
select = ["W", "E", "F", "I"]
ignore = [
]
[tool.ruff.lint.isort]
force-single-line = true
[tool.ruff.format]
preview = true # needed for quote-style
quote-style = "preserve"
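
The settings above are read automatically whenever ruff runs from the repository root; a hedged sketch of how they map onto the stock ruff CLI (the explicit "." path is an assumption, ruff defaults to the current directory):

    ruff check .        # lints with only the selected rule families: W, E, F and I (import sorting)
    ruff check --fix .  # applies safe fixes, e.g. splitting imports per force-single-line
    ruff format .       # rewrites to line-length 100 while preserving the existing quote style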


@ -71,9 +71,7 @@ def test_normalize_service():
def test__parse_compose_file_when_multiple_composes() -> None:
for base_template, override_template, expected_template in copy.deepcopy(
test_cases_merges
):
for base_template, override_template, expected_template in copy.deepcopy(test_cases_merges):
for key in test_keys:
base, override, expected = template_to_expression(
base_template, override_template, expected_template, key


@ -243,9 +243,7 @@ test_cases_with_merges = [
# running full parse over merged
#
def test__parse_compose_file_when_multiple_composes() -> None:
for test_input, test_override, expected_result in copy.deepcopy(
test_cases_with_merges
):
for test_input, test_override, expected_result in copy.deepcopy(test_cases_with_merges):
compose_test_1 = {"services": {"test-service": test_input}}
compose_test_2 = {"services": {"test-service": test_override}}
dump_yaml(compose_test_1, "test-compose-1.yaml")
@ -273,9 +271,7 @@ def test__parse_compose_file_when_multiple_composes() -> None:
assert compose_expected == actual_compose
def set_args(
podman_compose: PodmanCompose, file_names: list[str], no_normalize: bool
) -> None:
def set_args(podman_compose: PodmanCompose, file_names: list[str], no_normalize: bool) -> None:
podman_compose.global_args = argparse.Namespace()
podman_compose.global_args.file = file_names
podman_compose.global_args.project_name = None


@ -2,9 +2,7 @@ import os
from setuptools import setup
try:
README = open(
os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8"
).read()
README = open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8").read()
except: # noqa: E722 # pylint: disable=bare-except
README = ""
@ -39,15 +37,7 @@ setup(
"pyyaml",
"python-dotenv",
],
extras_require={
"devel": [
"flake8",
"black",
"pylint",
"pre-commit",
"coverage"
]
}
extras_require={"devel": ["ruff", "pre-commit", "coverage"]},
# test_suite='tests',
# tests_require=[
# 'coverage',
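
With the devel extra trimmed to ruff, pre-commit and coverage, contributor setup reduces to a single install; a sketch under the assumption that an editable install is the intended workflow (this PR itself does not document one):

    pip install -e ".[devel]"   # podman-compose plus ruff, pre-commit and coverage
    pre-commit install          # only useful if the repository ships a pre-commit config with a ruff hook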


@ -1,9 +1,25 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
coverage==7.4.3
pytest==8.0.2
tox==4.13.0
ruff==0.3.1
coverage
pytest
tox
black
flake8
# The packages below are transitive dependencies of the packages above and are included here
# to make testing reproducible.
# To refresh, create a new virtualenv and do:
# pip install -r requirements.txt -r test-requirements.txt
# pip freeze > test-requirements.txt
# and edit test-requirements.txt to add this comment
cachetools==5.3.3
chardet==5.2.0
colorama==0.4.6
distlib==0.3.8
filelock==3.13.1
iniconfig==2.0.0
packaging==23.2
platformdirs==4.2.0
pluggy==1.4.0
pyproject-api==1.6.1
python-dotenv==1.0.1
PyYAML==6.0.1
virtualenv==20.25.1


@ -2,6 +2,7 @@
Defines global pytest fixtures available to all tests.
"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import os


@ -77,9 +77,7 @@ def test_podman_compose_extends_w_empty_service():
"python3",
str(main_path.joinpath("podman_compose.py")),
"-f",
str(
main_path.joinpath("tests", "extends_w_empty_service", "docker-compose.yml")
),
str(main_path.joinpath("tests", "extends_w_empty_service", "docker-compose.yml")),
"up",
"-d",
]


@ -3,6 +3,7 @@ test_podman_compose_config.py
Tests the podman-compose config command which is used to return defined compose services.
"""
# pylint: disable=redefined-outer-name
import os
from test_podman_compose import capture
@ -50,9 +51,7 @@ def test_config_no_profiles(podman_compose_path, profile_compose_file):
),
],
)
def test_config_profiles(
podman_compose_path, profile_compose_file, profiles, expected_services
):
def test_config_profiles(podman_compose_path, profile_compose_file, profiles, expected_services):
"""
Tests podman-compose
:param podman_compose_path: The fixture used to specify the path to the podman compose file.


@ -3,6 +3,7 @@ test_podman_compose_up_down.py
Tests the podman compose up and down commands used to create and remove services.
"""
# pylint: disable=redefined-outer-name
import os
import time
@ -17,7 +18,7 @@ def test_exit_from(podman_compose_path, test_path):
podman_compose_path,
"-f",
os.path.join(test_path, "exit-from", "docker-compose.yaml"),
"up"
"up",
]
out, _, return_code = capture(up_cmd + ["--exit-code-from", "sh1"])
@ -42,7 +43,7 @@ def test_run(podman_compose_path, test_path):
"sleep",
"/bin/sh",
"-c",
"wget -q -O - http://web:8000/hosts"
"wget -q -O - http://web:8000/hosts",
]
out, _, return_code = capture(run_cmd)
@ -61,7 +62,7 @@ def test_run(podman_compose_path, test_path):
"sleep",
"/bin/sh",
"-c",
"wget -q -O - http://web:8000/hosts"
"wget -q -O - http://web:8000/hosts",
]
out, _, return_code = capture(run_cmd)
@ -83,8 +84,6 @@ def test_run(podman_compose_path, test_path):
def test_up_with_ports(podman_compose_path, test_path):
up_cmd = [
"coverage",
"run",
@ -93,7 +92,7 @@ def test_up_with_ports(podman_compose_path, test_path):
os.path.join(test_path, "ports", "docker-compose.yml"),
"up",
"-d",
"--force-recreate"
"--force-recreate",
]
down_cmd = [
@ -103,21 +102,19 @@ def test_up_with_ports(podman_compose_path, test_path):
"-f",
os.path.join(test_path, "ports", "docker-compose.yml"),
"down",
"--volumes"
"--volumes",
]
try:
out, _, return_code = capture(up_cmd)
assert return_code == 0
finally:
out, _, return_code = capture(down_cmd)
assert return_code == 0
def test_down_with_vols(podman_compose_path, test_path):
up_cmd = [
"coverage",
"run",
@ -125,7 +122,7 @@ def test_down_with_vols(podman_compose_path, test_path):
"-f",
os.path.join(test_path, "vol", "docker-compose.yaml"),
"up",
"-d"
"-d",
]
down_cmd = [
@ -135,7 +132,7 @@ def test_down_with_vols(podman_compose_path, test_path):
"-f",
os.path.join(test_path, "vol", "docker-compose.yaml"),
"down",
"--volumes"
"--volumes",
]
try:
@ -157,8 +154,20 @@ def test_down_with_vols(podman_compose_path, test_path):
def test_down_with_orphans(podman_compose_path, test_path):
container_id, _ , return_code = capture(["podman", "run", "--rm", "-d", "busybox", "/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"])
container_id, _, return_code = capture([
"podman",
"run",
"--rm",
"-d",
"busybox",
"/bin/busybox",
"httpd",
"-f",
"-h",
"/etc/",
"-p",
"8000",
])
down_cmd = [
"coverage",
@ -168,7 +177,7 @@ def test_down_with_orphans(podman_compose_path, test_path):
os.path.join(test_path, "ports", "docker-compose.yml"),
"down",
"--volumes",
"--remove-orphans"
"--remove-orphans",
]
out, _, return_code = capture(down_cmd)
@ -177,4 +186,3 @@ def test_down_with_orphans(podman_compose_path, test_path):
_, _, exists = capture(["podman", "container", "exists", container_id.decode("utf-8")])
assert exists == 1


@ -3,6 +3,7 @@ test_podman_compose_up_down.py
Tests the podman compose up and down commands used to create and remove services.
"""
# pylint: disable=redefined-outer-name
import os
from test_podman_compose import capture