2019-05-09 22:15:05 +02:00
|
|
|
#! /usr/bin/python3
|
2019-08-10 17:08:21 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2019-03-04 10:30:14 +01:00
|
|
|
|
2019-03-04 22:58:25 +01:00
|
|
|
# https://docs.docker.com/compose/compose-file/#service-configuration-reference
|
|
|
|
# https://docs.docker.com/samples/
|
|
|
|
# https://docs.docker.com/compose/gettingstarted/
|
|
|
|
# https://docs.docker.com/compose/django/
|
|
|
|
# https://docs.docker.com/compose/wordpress/
|
|
|
|
|
2019-03-24 00:39:22 +01:00
|
|
|
import sys
|
2019-03-04 10:30:14 +01:00
|
|
|
import os
|
|
|
|
import argparse
|
|
|
|
import subprocess
|
2021-09-06 06:45:50 +02:00
|
|
|
import textwrap
|
2019-03-04 10:30:14 +01:00
|
|
|
import time
|
2019-03-24 00:08:26 +01:00
|
|
|
import re
|
2019-06-09 02:26:13 +02:00
|
|
|
import hashlib
|
2019-08-10 13:11:28 +02:00
|
|
|
import random
|
2019-10-05 21:37:14 +02:00
|
|
|
import json
|
2019-03-04 22:58:25 +01:00
|
|
|
|
2019-08-17 22:39:42 +02:00
|
|
|
from threading import Thread
|
|
|
|
|
2019-10-04 19:36:30 +02:00
|
|
|
import shlex
|
|
|
|
|
2019-07-08 22:53:38 +02:00
|
|
|
try:
|
|
|
|
from shlex import quote as cmd_quote
|
|
|
|
except ImportError:
|
|
|
|
from pipes import quote as cmd_quote
|
|
|
|
|
2019-05-09 22:15:05 +02:00
|
|
|
# import fnmatch
|
2019-03-04 22:58:25 +01:00
|
|
|
# fnmatch.fnmatchcase(env, "*_HOST")
|
2019-03-04 10:30:14 +01:00
|
|
|
|
|
|
|
import yaml
|
2021-12-10 00:01:45 +01:00
|
|
|
from dotenv import dotenv_values
|
2019-03-04 10:30:14 +01:00
|
|
|
|
2021-12-21 22:15:52 +01:00
|
|
|
__version__ = '1.0.3'
|
2019-09-03 17:38:57 +02:00
|
|
|
|
2019-05-09 22:16:40 +02:00
|
|
|
# helper functions
|
2021-12-24 17:55:30 +01:00
|
|
|
# Type-test helpers used throughout to normalize compose values.
is_str = lambda s: isinstance(s, str)
# True for mapping values (dicts).
is_dict = lambda d: isinstance(d, dict)
# True for iterable values that are neither strings nor dicts (lists, tuples, ...).
is_list = lambda l: not is_str(l) and not is_dict(l) and hasattr(l, "__iter__")
# identity filter: drops falsy items from an iterable
filteri = lambda a: filter(lambda i: i, a)
|
2019-05-09 22:15:05 +02:00
|
|
|
|
2019-05-09 22:16:40 +02:00
|
|
|
def try_int(i, fallback=None):
    """Convert *i* to int, returning *fallback* when conversion is impossible."""
    try:
        return int(i)
    except (ValueError, TypeError):
        # not a number (or None): fall through to the fallback
        pass
    return fallback
|
|
|
|
|
2021-06-22 22:30:22 +02:00
|
|
|
def try_float(i, fallback=None):
    """Convert *i* to float, returning *fallback* when conversion is impossible."""
    try:
        return float(i)
    except (ValueError, TypeError):
        # not a number (or None): fall through to the fallback
        pass
    return fallback
|
|
|
|
|
2021-12-24 17:55:30 +01:00
|
|
|
def log(*msgs, sep=" ", end="\n"):
    """Write *msgs* joined by *sep* plus *end* to stderr and flush immediately."""
    rendered = sep.join("{}".format(msg) for msg in msgs) + end
    sys.stderr.write(rendered)
    sys.stderr.flush()
|
|
|
|
|
2019-06-09 02:26:13 +02:00
|
|
|
# Matches mount sources that are host paths (absolute "/", home "~" or
# relative "./" / "../") as opposed to named volumes.
# NOTE: raw string — the previous non-raw "^[~/\.]" used the invalid
# escape sequence "\." (DeprecationWarning; SyntaxWarning on Python >= 3.12).
dir_re = re.compile(r"^[~/.]")
# Valid volume/mount option tokens: SELinux relabeling (z/Z), overlay (O),
# chown (U), propagation modes, bind flags and exec/dev/suid toggles.
propagation_re = re.compile(r"^(?:z|Z|O|U|r?shared|r?slave|r?private|r?unbindable|r?bind|(?:no)?(?:exec|dev|suid))$")
# Characters that are NOT allowed in a normalized (project/container) name.
norm_re = re.compile('[^-_a-z0-9]')
# Splits a version string into alternating digit and non-digit runs
# for natural-order ("strverscmp") comparison.
num_split_re = re.compile(r'(\d+|\D+)')
|
2019-08-14 17:22:36 +02:00
|
|
|
|
2021-05-17 14:03:47 +02:00
|
|
|
# podman subcommands recognized by this tool
# NOTE(review): presumably used to wire per-subcommand extra arguments
# through to podman — confirm against the code that consumes this tuple.
PODMAN_CMDS = (
    "pull", "push", "build", "inspect",
    "run", "start", "stop", "rm", "volume",
)
|
|
|
|
|
2021-10-24 16:35:36 +02:00
|
|
|
def ver_as_list(a):
    """Split version string *a* into int/str chunks for natural-order comparison."""
    # digit runs become ints, everything else stays a string
    return [try_int(i, i) for i in num_split_re.findall(a)]
|
|
|
|
|
|
|
|
def strverscmp_lt(a, b):
    """Return True when version string *a* sorts strictly before *b* in natural order."""
    # None is treated like the empty string so comparisons never raise
    left = ver_as_list(a or '')
    right = ver_as_list(b or '')
    return left < right
|
|
|
|
|
2019-06-09 02:26:13 +02:00
|
|
|
def parse_short_mount(mount_str, basedir):
    """
    Parse a compose short volume syntax string ("[SRC:]DST[:OPTS]") into a
    long-syntax mount dict with keys type/source/target plus parsed options.

    *basedir* is the compose file's directory; relative/home host paths are
    resolved against it. Raises ValueError on more than 3 colon-separated
    parts or on an unrecognized option token.
    """
    mount_a = mount_str.split(':')
    mount_opt_dict = {}
    mount_opt = None
    if len(mount_a) == 1:
        # Anonymous: Just specify a path and let the engine creates the volume
        # - /var/lib/mysql
        mount_src, mount_dst = None, mount_str
    elif len(mount_a) == 2:
        mount_src, mount_dst = mount_a
        # dest must start with / like /foo:/var/lib/mysql
        # otherwise it's option like /var/lib/mysql:rw
        if not mount_dst.startswith('/'):
            mount_dst, mount_opt = mount_a
            mount_src = None
    elif len(mount_a) == 3:
        mount_src, mount_dst, mount_opt = mount_a
    else:
        raise ValueError("could not parse mount "+mount_str)
    if mount_src and dir_re.match(mount_src):
        # Source looks like a host path -> bind mount. Examples:
        # Specify an absolute path mapping
        # - /opt/data:/var/lib/mysql
        # Path on the host, relative to the Compose file
        # - ./cache:/tmp/cache
        # User-relative path
        # - ~/configs:/etc/configs/:ro
        mount_type = "bind"
        mount_src = os.path.realpath(os.path.join(basedir, os.path.expanduser(mount_src)))
    else:
        # Named volume
        # - datavolume:/var/lib/mysql
        mount_type = "volume"
    # split the option suffix into individual tokens, dropping empties
    mount_opts = filteri((mount_opt or '').split(','))
    propagation_opts = []
    for opt in mount_opts:
        if opt == 'ro': mount_opt_dict["read_only"] = True
        elif opt == 'rw': mount_opt_dict["read_only"] = False
        elif opt in ('consistent', 'delegated', 'cached'):
            # macOS-style consistency hints, recorded for compatibility
            mount_opt_dict["consistency"] = opt
        elif propagation_re.match(opt):
            # z/Z/O/U, propagation modes, bind flags: collected below
            propagation_opts.append(opt)
        else:
            # TODO: ignore
            raise ValueError("unknown mount option "+opt)
    # propagation options are stored under "bind" regardless of mount type
    mount_opt_dict["bind"] = dict(propagation=','.join(propagation_opts))
    return dict(type=mount_type, source=mount_src, target=mount_dst, **mount_opt_dict)
|
|
|
|
|
2020-04-18 17:39:59 +02:00
|
|
|
# NOTE: if a named volume is used but not defined it
|
|
|
|
# gives ERROR: Named volume "abc" is used in service "xyz"
|
|
|
|
# but no declaration was found in the volumes section.
|
|
|
|
# unless it's anonymous-volume
|
|
|
|
|
2021-10-14 00:30:44 +02:00
|
|
|
def fix_mount_dict(compose, mount_dict, proj_name, srv_name):
    """
    in-place fix mount dictionary to:
    - define _vol to be the corresponding top-level volume
    - if name is missing it would be source prefixed with project
    - if no source it would be generated
    """
    # if already applied nothing todo
    if "_vol" in mount_dict: return mount_dict
    if mount_dict["type"] == "volume":
        vols = compose.vols
        source = mount_dict.get("source", None)
        # look up the top-level volume declaration for this source (may be {})
        vol = (vols.get(source, None) or {}) if source else {}
        name = vol.get('name', None)
        mount_dict["_vol"] = vol
        # handle anonymouse or implied volume
        if not source:
            # missing source: derive a deterministic name from project,
            # service and a hash of the target path
            vol["name"] = "_".join([
                proj_name, srv_name,
                hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
            ])
        elif not name:
            # external volumes may carry their own name; otherwise the
            # volume name is the source prefixed with the project name
            external = vol.get("external", None)
            ext_name = external.get("name", None) if isinstance(external, dict) else None
            vol["name"] = ext_name if ext_name else f"{proj_name}_{source}"
    return mount_dict
|
2019-05-09 22:16:40 +02:00
|
|
|
|
2019-03-24 00:08:26 +01:00
|
|
|
# docker and docker-compose support subset of bash variable substitution
|
|
|
|
# https://docs.docker.com/compose/compose-file/#variable-substitution
|
|
|
|
# https://docs.docker.com/compose/env-file/
|
|
|
|
# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
|
|
|
|
# $VARIABLE
|
|
|
|
# ${VARIABLE}
|
|
|
|
# ${VARIABLE:-default} default if not set or empty
|
|
|
|
# ${VARIABLE-default} default if not set
|
|
|
|
# ${VARIABLE:?err} raise error if not set or empty
|
|
|
|
# ${VARIABLE?err} raise error if not set
|
|
|
|
# $$ means $
|
|
|
|
|
2021-05-05 23:49:42 +02:00
|
|
|
var_re = re.compile(r"""
|
|
|
|
\$(?:
|
|
|
|
(?P<escaped>\$) |
|
|
|
|
(?P<named>[_a-zA-Z][_a-zA-Z0-9]*) |
|
|
|
|
(?:{
|
|
|
|
(?P<braced>[_a-zA-Z][_a-zA-Z0-9]*)
|
2021-12-29 12:46:45 +01:00
|
|
|
(?:(?P<empty>:)?(?:
|
2021-12-30 11:19:53 +01:00
|
|
|
(?:-(?P<default>[^}]*)) |
|
|
|
|
(?:\?(?P<err>[^}]*))
|
2021-12-29 12:46:45 +01:00
|
|
|
))?
|
2021-05-05 23:49:42 +02:00
|
|
|
})
|
|
|
|
)
|
|
|
|
""", re.VERBOSE)
|
2019-03-24 00:08:26 +01:00
|
|
|
|
2021-11-13 22:27:43 +01:00
|
|
|
def rec_subs(value, subs_dict):
    """
    do bash-like substitution in value and if list of dictionary do that recursively
    """
    if is_dict(value):
        # substitute inside every value of the mapping
        value = dict([(k, rec_subs(v, subs_dict)) for k, v in value.items()])
    elif is_str(value):
        def convert(m):
            # replace one var_re match with its substitution
            if m.group("escaped") is not None:
                # "$$" -> literal "$"
                return "$"
            name = m.group("named") or m.group("braced")
            value = subs_dict.get(name)
            # the ":" modifier makes an empty value count as unset
            if value == "" and m.group('empty'):
                value = None
            if value is not None:
                return "%s" % value
            if m.group("err") is not None:
                # ${VAR?err} / ${VAR:?err}: unset variable is fatal
                raise RuntimeError(m.group("err"))
            # otherwise fall back to the declared default (or "")
            return m.group("default") or ""
        value = var_re.sub(convert, value)
    elif hasattr(value, "__iter__"):
        # lists/tuples: substitute element-wise
        value = [rec_subs(i, subs_dict) for i in value]
    return value
|
|
|
|
|
2019-03-04 22:58:25 +01:00
|
|
|
def norm_as_list(src):
    """
    given a dictionary {key1:value1, key2: None} or list
    return a list of ["key1=value1", "key2"]
    """
    if src is None:
        return []
    if is_dict(src):
        # keys with a value render as "key=value"; bare keys stay as-is
        return [k if v is None else "{}={}".format(k, v) for k, v in src.items()]
    if is_list(src):
        return list(src)
    # scalar: wrap it
    return [src]
|
2019-03-04 10:30:14 +01:00
|
|
|
|
2019-03-23 20:42:04 +01:00
|
|
|
|
2019-03-04 22:58:25 +01:00
|
|
|
def norm_as_dict(src):
    """
    given a list ["key1=value1", "key2"]
    return a dictionary {key1:value1, key2: None}
    """
    if src is None:
        dst = {}
    elif is_dict(src):
        dst = dict(src)
    elif is_list(src):
        # split each non-empty item at the first "=" only
        dst = [i.split("=", 1) for i in src if i]
        # items without "=" map to a None value
        dst = dict([(a if len(a) == 2 else (a[0], None)) for a in dst])
    elif is_str(src):
        # a single "key=value" (or bare "key") string
        key, value = src.split("=", 1) if "=" in src else (src, None)
        dst = {key: value}
    else:
        raise ValueError("dictionary or iterable is expected")
    return dst
|
|
|
|
|
2019-09-11 17:50:00 +02:00
|
|
|
def norm_ulimit(inner_value):
    # Normalize a ulimit value (dict/list/scalar) to podman's "soft:hard" form.
    if is_dict(inner_value):
        if not inner_value.keys() & {"soft", "hard"}:
            raise ValueError("expected at least one soft or hard limit")
        # when only one limit is given, use it for both soft and hard
        soft = inner_value.get("soft", inner_value.get("hard", None))
        hard = inner_value.get("hard", inner_value.get("soft", None))
        return "{}:{}".format(soft, hard)
    elif is_list(inner_value): return norm_ulimit(norm_as_dict(inner_value))
    # if int or string return as is
    return inner_value
|
|
|
|
|
2021-11-21 00:23:29 +01:00
|
|
|
#def tr_identity(project_name, given_containers):
|
|
|
|
# pod_name = f'pod_{project_name}'
|
|
|
|
# pod = dict(name=pod_name)
|
|
|
|
# containers = []
|
|
|
|
# for cnt in given_containers:
|
|
|
|
# containers.append(dict(cnt, pod=pod_name))
|
|
|
|
# return [pod], containers
|
|
|
|
|
|
|
|
def tr_identity(project_name, given_containers):
    """Identity transformation: no pods, shallow copies of the given containers."""
    copies = [dict(cnt) for cnt in given_containers]
    return [], copies
|
|
|
|
|
2019-03-23 20:42:04 +01:00
|
|
|
|
2020-04-18 17:39:59 +02:00
|
|
|
def assert_volume(compose, mount_dict):
    """
    inspect volume to get directory
    create volume if needed
    """
    vol = mount_dict.get("_vol", None)
    if mount_dict["type"] == "bind":
        # bind mount: make sure the host directory exists
        basedir = os.path.realpath(compose.dirname)
        mount_src = mount_dict["source"]
        mount_src = os.path.realpath(os.path.join(basedir, os.path.expanduser(mount_src)))
        if not os.path.exists(mount_src):
            try:
                os.makedirs(mount_src, exist_ok=True)
            except OSError:
                # best effort; podman will report the real failure later
                pass
        return
    # nothing to do for non-volumes, external volumes, or unnamed volumes
    if mount_dict["type"] != "volume" or not vol or vol.get("external", None) or not vol.get("name", None): return
    proj_name = compose.project_name
    vol_name = vol["name"]
    log("podman volume inspect {vol_name} || podman volume create {vol_name}".format(vol_name=vol_name))
    # TODO: might move to using "volume list"
    # podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE'
    try: out = compose.podman.output([], "volume", ["inspect", vol_name]).decode('utf-8')
    except subprocess.CalledProcessError:
        # volume does not exist yet: create it, tagged with project labels
        labels = vol.get("labels", None) or []
        args = [
            "create",
            "--label", "io.podman.compose.project={}".format(proj_name),
            "--label", "com.docker.compose.project={}".format(proj_name),
        ]
        for item in norm_as_list(labels):
            args.extend(["--label", item])
        driver = vol.get("driver", None)
        if driver:
            args.extend(["--driver", driver])
        driver_opts = vol.get("driver_opts", None) or {}
        for opt, value in driver_opts.items():
            args.extend(["--opt", "{opt}={value}".format(opt=opt, value=value)])
        args.append(vol_name)
        compose.podman.output([], "volume", args)
        # re-inspect so failures surface immediately after creation
        out = compose.podman.output([], "volume", ["inspect", vol_name]).decode('utf-8')
|
2019-06-09 02:26:13 +02:00
|
|
|
|
2020-04-18 17:39:59 +02:00
|
|
|
def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name):
    """Render a long-syntax mount description as a podman --mount argument string."""
    mount_type = mount_desc.get("type", None)
    vol = mount_desc.get("_vol", None) if mount_type == "volume" else None
    # named volumes resolve their source from the fixed top-level volume
    source = vol["name"] if vol else mount_desc.get("source", None)
    target = mount_desc["target"]
    opt_ls = []
    type_opts = mount_desc.get(mount_type, None)
    if type_opts:
        # TODO: we might need to add mount_dict[mount_type]["propagation"] = "z"
        propagation = type_opts.get("propagation", None)
        if propagation:
            opt_ls.append("{}-propagation={}".format(mount_type, propagation))
    if mount_desc.get("read_only", False):
        opt_ls.append("ro")
    if mount_type == 'tmpfs':
        tmpfs_opts = mount_desc.get("tmpfs", {})
        for key, template in (("size", "tmpfs-size={}"), ("mode", "tmpfs-mode={}")):
            val = tmpfs_opts.get(key, None)
            if val:
                opt_ls.append(template.format(val))
    opts = ",".join(opt_ls)
    # trailing rstrip(",") drops the separator when there are no options
    if mount_type == 'bind':
        return "type=bind,source={source},destination={target},{opts}".format(
            source=source, target=target, opts=opts).rstrip(",")
    if mount_type == 'volume':
        return "type=volume,source={source},destination={target},{opts}".format(
            source=source, target=target, opts=opts).rstrip(",")
    if mount_type == 'tmpfs':
        return "type=tmpfs,destination={target},{opts}".format(
            target=target, opts=opts).rstrip(",")
    raise ValueError("unknown mount type:"+mount_type)
|
|
|
|
|
2020-02-27 10:30:53 +01:00
|
|
|
def container_to_ulimit_args(cnt, podman_args):
    # Append --ulimit arguments derived from the service's `ulimits` entry
    # (mutates podman_args in place).
    ulimit = cnt.get('ulimits', [])
    if ulimit is not None:
        # ulimit can be a single value, i.e. ulimit: host
        if is_str(ulimit):
            podman_args.extend(['--ulimit', ulimit])
        # or a dictionary or list:
        else:
            ulimit = norm_as_dict(ulimit)
            # each entry renders as "name=soft:hard" (see norm_ulimit)
            ulimit = [ "{}={}".format(ulimit_key, norm_ulimit(inner_value)) for ulimit_key, inner_value in ulimit.items()]
            for i in ulimit:
                podman_args.extend(['--ulimit', i])
|
|
|
|
|
2020-04-18 17:39:59 +02:00
|
|
|
def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name):
    # Render a bind/volume mount description as a podman -v/--volume
    # argument string "source:target[:opts]".
    mount_type = mount_desc["type"]
    if mount_type != 'bind' and mount_type != 'volume':
        raise ValueError("unknown mount type:"+mount_type)
    vol = mount_desc.get("_vol", None) if mount_type=="volume" else None
    source = vol["name"] if vol else mount_desc.get("source", None)
    if not source:
        raise ValueError(f"missing mount source for {mount_type} on {srv_name}")
    target = mount_desc["target"]
    opts = []

    # collect propagation options recorded under the mount's own type key...
    propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(',')))
    if mount_type != 'bind':
        # ...plus any stored under "bind" (parse_short_mount puts them there)
        propagations.update(filteri(mount_desc.get('bind', {}).get("propagation", "").split(',')))
    opts.extend(propagations)
    # --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
    # [rw|ro]
    # [z|Z]
    # [[r]shared|[r]slave|[r]private]|[r]unbindable
    # [[r]bind]
    # [noexec|exec]
    # [nodev|dev]
    # [nosuid|suid]
    # [O]
    # [U]
    read_only = mount_desc.get("read_only", None)
    if read_only is not None:
        opts.append('ro' if read_only else 'rw')
    args = f'{source}:{target}'
    if opts: args += ':' + ','.join(opts)
    return args
|
|
|
|
|
2022-01-21 23:15:05 +01:00
|
|
|
def get_mnt_dict(compose, cnt, volume):
    # Normalize one service volume entry (short string or long dict) into
    # a fixed mount dict (see parse_short_mount / fix_mount_dict).
    proj_name = compose.project_name
    srv_name = cnt['_service']
    basedir = compose.dirname
    if is_str(volume):
        volume = parse_short_mount(volume, basedir)
    return fix_mount_dict(compose, volume, proj_name, srv_name)
|
2020-12-21 11:24:17 +01:00
|
|
|
|
2022-01-21 23:15:05 +01:00
|
|
|
def get_mount_args(compose, cnt, volume):
    # Build the podman CLI arguments for one volume entry of a container,
    # ensuring the backing volume / host directory exists first.
    volume = get_mnt_dict(compose, cnt, volume)
    proj_name = compose.project_name
    srv_name = cnt['_service']
    mount_type = volume["type"]
    assert_volume(compose, volume)
    if compose._prefer_volume_over_mount:
        if mount_type == 'tmpfs':
            # TODO: --tmpfs /tmp:rw,size=787448k,mode=1777
            args = volume['target']
            tmpfs_opts = volume.get("tmpfs", {})
            opts = []
            size = tmpfs_opts.get("size", None)
            if size: opts.append('size={}'.format(size))
            mode = tmpfs_opts.get("mode", None)
            if mode: opts.append('mode={}'.format(mode))
            if opts: args += ':' + ','.join(opts)
            return ['--tmpfs', args]
        else:
            args = mount_desc_to_volume_args(compose, volume, srv_name, cnt['name'])
            return ['-v', args]
    else:
        args = mount_desc_to_mount_args(compose, volume, srv_name, cnt['name'])
        return ['--mount', args]
|
2020-02-27 10:30:53 +01:00
|
|
|
|
2021-07-21 18:22:07 +02:00
|
|
|
|
|
|
|
def get_secret_args(compose, cnt, secret):
    # Translate one service `secrets` entry (string or dict) into podman
    # arguments: a read-only bind mount for file-backed secrets, or a
    # --secret flag for external/named podman secrets.
    secret_name = secret if is_str(secret) else secret.get('source', None)
    if not secret_name or secret_name not in compose.declared_secrets.keys():
        raise ValueError(
            'ERROR: undeclared secret: "{}", service: "{}"'
            .format(secret, cnt['_service'])
        )
    declared_secret = compose.declared_secrets[secret_name]

    source_file = declared_secret.get('file', None)
    dest_file = ''
    secret_opts = ''

    # per-service overrides are only available with the dict form
    target = None if is_str(secret) else secret.get('target', None)
    uid = None if is_str(secret) else secret.get('uid', None)
    gid = None if is_str(secret) else secret.get('gid', None)
    mode = None if is_str(secret) else secret.get('mode', None)

    if source_file:
        if not target:
            dest_file = '/run/secrets/{}'.format(secret_name)
        elif not target.startswith("/"):
            # relative target lands under /run/secrets
            dest_file = '/run/secrets/{}'.format(target if target else secret_name)
        else:
            dest_file = target
        volume_ref = [
            '--volume', '{}:{}:ro,rprivate,rbind'.format(source_file, dest_file)
        ]
        if uid or gid or mode:
            # bind-mounted file secrets cannot change ownership/permissions
            log(
                'WARNING: Service "{}" uses secret "{}" with uid, gid, or mode.'
                .format(cnt['_service'], target if target else secret_name)
                + ' These fields are not supported by this implementation of the Compose file'
            )
        return volume_ref
    # v3.5 and up added external flag, earlier the spec
    # only required a name to be specified.
    # docker-compose does not support external secrets outside of swarm mode.
    # However accessing these via podman is trivial
    # since these commands are directly translated to
    # podman-create commands, albiet we can only support a 1:1 mapping
    # at the moment
    if declared_secret.get('external', False) or declared_secret.get('name', None):
        secret_opts += ',uid={}'.format(uid) if uid else ''
        secret_opts += ',gid={}'.format(gid) if gid else ''
        secret_opts += ',mode={}'.format(mode) if mode else ''
        # The target option is only valid for type=env,
        # which in an ideal world would work
        # for type=mount as well.
        # having a custom name for the external secret
        # has the same problem as well
        ext_name = declared_secret.get('name', None)
        err_str = 'ERROR: Custom name/target reference "{}" for mounted external secret "{}" is not supported'
        if ext_name and ext_name != secret_name:
            raise ValueError(err_str.format(secret_name, ext_name))
        elif target and target != secret_name:
            raise ValueError(err_str.format(target, secret_name))
        elif target:
            log('WARNING: Service "{}" uses target: "{}" for secret: "{}".'
                .format(cnt['_service'], target, secret_name)
                + ' That is un-supported and a no-op and is ignored.')
        return [ '--secret', '{}{}'.format(secret_name, secret_opts) ]

    # neither file-backed nor external/named: nothing we can do with it
    raise ValueError('ERROR: unparseable secret: "{}", service: "{}"'
        .format(secret_name, cnt['_service']))
|
|
|
|
|
|
|
|
|
2021-06-22 22:30:22 +02:00
|
|
|
def container_to_res_args(cnt, podman_args):
    # Append CPU/memory limit arguments to podman_args (in place), merging
    # compose v2 top-level keys with the v3 deploy.resources section;
    # when both are present the v3 value wins.
    # v2 < https://docs.docker.com/compose/compose-file/compose-file-v2/#cpu-and-other-resources
    cpus_limit_v2 = try_float(cnt.get('cpus', None), None)
    cpu_shares_v2 = try_int(cnt.get('cpu_shares', None), None)
    mem_limit_v2 = cnt.get('mem_limit', None)
    mem_res_v2 = cnt.get('mem_reservation', None)
    # v3 < https://docs.docker.com/compose/compose-file/compose-file-v3/#resources
    # spec < https://github.com/compose-spec/compose-spec/blob/master/deploy.md#resources
    deploy = cnt.get('deploy', None) or {}
    res = deploy.get('resources', None) or {}
    limits = res.get('limits', None) or {}
    cpus_limit_v3 = try_float(limits.get('cpus', None), None)
    mem_limit_v3 = limits.get('memory', None)
    reservations = res.get('reservations', None) or {}
    #cpus_res_v3 = try_float(reservations.get('cpus', None), None)
    mem_res_v3 = reservations.get('memory', None)
    # add args
    cpus = cpus_limit_v3 or cpus_limit_v2
    if cpus:
        podman_args.extend(('--cpus', str(cpus),))
    if cpu_shares_v2:
        podman_args.extend(('--cpu-shares', str(cpu_shares_v2),))
    mem = mem_limit_v3 or mem_limit_v2
    if mem:
        # lower-cased so size suffixes like "512M" become "512m"
        podman_args.extend(('-m', str(mem).lower(),))
    mem_res = mem_res_v3 or mem_res_v2
    if mem_res:
        podman_args.extend(('--memory-reservation', str(mem_res).lower(),))
|
2021-06-22 22:30:22 +02:00
|
|
|
|
2021-10-09 23:43:01 +02:00
|
|
|
def port_dict_to_str(port_desc):
    """Convert a long-syntax port mapping dict to a short-syntax string."""
    # NOTE: `mode: host|ingress` is ignored
    cnt_port = port_desc.get("target", None)
    published = port_desc.get("published", None) or ""
    host_ip = port_desc.get("host_ip", None)
    protocol = port_desc.get("protocol", None) or "tcp"
    if not cnt_port:
        raise ValueError("target container port must be specified")
    if host_ip:
        mapping = f"{host_ip}:{published}:{cnt_port}"
    elif published:
        mapping = f"{published}:{cnt_port}"
    else:
        mapping = f"{cnt_port}"
    # protocol suffix only for non-default protocols
    if protocol != "tcp":
        mapping = f"{mapping}/{protocol}"
    return mapping
|
|
|
|
|
|
|
|
def norm_ports(ports_in):
    """Normalize a service's ports entry into a list of short-syntax strings."""
    ports_in = ports_in or []
    if isinstance(ports_in, str):
        # a single mapping given as a bare string
        ports_in = [ports_in]
    normalized = []
    for entry in ports_in:
        if isinstance(entry, dict):
            # long syntax: render to the short form
            entry = port_dict_to_str(entry)
        elif not isinstance(entry, str):
            raise TypeError("port should be either string or dict")
        normalized.append(entry)
    return normalized
|
|
|
|
|
2021-11-21 00:23:29 +01:00
|
|
|
def assert_cnt_nets(compose, cnt):
    """
    create missing networks
    """
    net = cnt.get("network_mode", None)
    # non-bridge network modes (host, container:, service:) need no
    # compose-managed network
    if net and not net.startswith("bridge"):
        return
    proj_name = compose.project_name
    nets = compose.networks
    default_net = compose.default_net
    cnt_nets = cnt.get("networks", None)
    if cnt_nets and is_dict(cnt_nets):
        cnt_nets = list(cnt_nets.keys())
    cnt_nets = norm_as_list(cnt_nets or default_net)
    for net in cnt_nets:
        net_desc = nets[net] or {}
        is_ext = net_desc.get("external", None)
        ext_desc = is_ext if is_dict(is_ext) else {}
        # external networks keep their own name; others get a project prefix
        default_net_name = net if is_ext else f"{proj_name}_{net}"
        net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
        try: compose.podman.output([], "network", ["exists", net_name])
        except subprocess.CalledProcessError:
            # network missing: external ones are fatal, others get created
            if is_ext:
                raise RuntimeError(f"External network [{net_name}] does not exists")
            args = [
                "create",
                "--label", "io.podman.compose.project={}".format(proj_name),
                "--label", "com.docker.compose.project={}".format(proj_name),
            ]
            # TODO: add more options here, like driver, internal, ..etc
            labels = net_desc.get("labels", None) or []
            for item in norm_as_list(labels):
                args.extend(["--label", item])
            if net_desc.get("internal", None):
                args.append("--internal")
            driver = net_desc.get("driver", None)
            if driver:
                args.extend(("--driver", driver))
            ipam_config_ls = (net_desc.get("ipam", None) or {}).get("config", None) or []
            # a single ipam config may be given as a bare dict
            if is_dict(ipam_config_ls):
                ipam_config_ls=[ipam_config_ls]
            for ipam in ipam_config_ls:
                subnet = ipam.get("subnet", None)
                ip_range = ipam.get("ip_range", None)
                gateway = ipam.get("gateway", None)
                if subnet: args.extend(("--subnet", subnet))
                if ip_range: args.extend(("--ip-range", ip_range))
                if gateway: args.extend(("--gateway", gateway))
            args.append(net_name)
            compose.podman.output([], "network", args)
            # sanity check: the network must exist after creation
            compose.podman.output([], "network", ["exists", net_name])
|
|
|
|
|
|
|
|
def get_net_args(compose, cnt):
    # Build the --network / --network-alias / --ip arguments for a container.
    service_name = cnt["service_name"]
    net = cnt.get("network_mode", None)
    if net:
        if net=="host":
            return ['--network', net]
        if net.startswith("service:"):
            # share the network namespace of another service's first container
            other_srv = net.split(":", 1)[1].strip()
            other_cnt = compose.container_names_by_service[other_srv][0]
            return ['--network', f"container:{other_cnt}"]
        if net.startswith("container:"):
            other_cnt = net.split(":",1)[1].strip()
            return ['--network', f"container:{other_cnt}"]
    proj_name = compose.project_name
    default_net = compose.default_net
    nets = compose.networks
    cnt_nets = cnt.get("networks", None)
    aliases = [service_name]
    # NOTE: from podman manpage:
    # NOTE: A container will only have access to aliases on the first network that it joins. This is a limitation that will be removed in a later release.
    ip = None
    if cnt_nets and is_dict(cnt_nets):
        # dict form: collect aliases from every network entry,
        # but only the first ipv4_address encountered is used
        for net_key, net_value in cnt_nets.items():
            aliases.extend(norm_as_list(net_value.get("aliases", None)))
            if ip: continue
            ip = net_value.get("ipv4_address", None)
        cnt_nets = list(cnt_nets.keys())
    cnt_nets = norm_as_list(cnt_nets or default_net)
    net_names = set()
    for net in cnt_nets:
        net_desc = nets[net] or {}
        is_ext = net_desc.get("external", None)
        ext_desc = is_ext if is_dict(is_ext) else {}
        # same naming rule as assert_cnt_nets: external keeps its name,
        # otherwise the network name is prefixed with the project
        default_net_name = net if is_ext else f"{proj_name}_{net}"
        net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
        net_names.add(net_name)
    net_names_str = ",".join(net_names)
    net_args = ["--net", net_names_str, "--network-alias", ",".join(aliases)]
    if ip:
        net_args.append(f"--ip={ip}")
    return net_args
|
|
|
|
|
2021-11-21 00:23:29 +01:00
|
|
|
|
2021-05-17 14:03:47 +02:00
|
|
|
def container_to_args(compose, cnt, detached=True):
    """
    Translate one normalized container/service dict *cnt* into the argv list
    passed to `podman create`/`podman run` (everything after the subcommand).

    :param compose: the PodmanCompose instance (used for dirname and mount/net helpers)
    :param cnt: normalized container description (service dict plus name/num/_service keys)
    :param detached: when True, add "-d" so the container runs in the background
    :return: list of str arguments, ending with the image name and its command
    :raises TypeError: when a port entry is neither a string nor a dict
    :raises ValueError: when the healthcheck section is malformed
    """
    # TODO: double check -e , --add-host, -v, --read-only
    dirname = compose.dirname
    pod = cnt.get('pod', None) or ''
    podman_args = [
        '--name={}'.format(cnt.get('name', None)),
    ]

    if detached:
        podman_args.append("-d")

    if pod:
        podman_args.append('--pod={}'.format(pod))
    sec = norm_as_list(cnt.get("security_opt", None))
    for s in sec:
        podman_args.extend(['--security-opt', s])
    ann = norm_as_list(cnt.get("annotations", None))
    for a in ann:
        podman_args.extend(['--annotation', a])
    if cnt.get('read_only', None):
        podman_args.append('--read-only')
    for i in cnt.get('labels', []):
        podman_args.extend(['--label', i])
    for c in cnt.get('cap_add', []):
        podman_args.extend(['--cap-add', c])
    for c in cnt.get('cap_drop', []):
        podman_args.extend(['--cap-drop', c])
    for d in cnt.get('devices', []):
        podman_args.extend(['--device', d])
    # env_file may be a single path or a list; paths are relative to the compose file
    env_file = cnt.get('env_file', [])
    if is_str(env_file): env_file = [env_file]
    for i in env_file:
        i = os.path.realpath(os.path.join(dirname, i))
        podman_args.extend(['--env-file', i])
    env = norm_as_list(cnt.get('environment', {}))
    for e in env:
        podman_args.extend(['-e', e])
    tmpfs_ls = cnt.get('tmpfs', [])
    if is_str(tmpfs_ls): tmpfs_ls = [tmpfs_ls]
    for i in tmpfs_ls:
        podman_args.extend(['--tmpfs', i])
    for volume in cnt.get('volumes', []):
        podman_args.extend(get_mount_args(compose, cnt, volume))

    # validate referenced networks exist, then emit --net/--network-alias/--ip
    assert_cnt_nets(compose, cnt)
    podman_args.extend(get_net_args(compose, cnt))

    log = cnt.get('logging')
    if log is not None:
        podman_args.append(f'--log-driver={log.get("driver", "k8s-file")}')
        log_opts = log.get('options') or {}
        podman_args += [f'--log-opt={name}={value}' for name, value in log_opts.items()]
    for secret in cnt.get('secrets', []):
        podman_args.extend(get_secret_args(compose, cnt, secret))
    for i in cnt.get('extra_hosts', []):
        podman_args.extend(['--add-host', i])
    for i in cnt.get('expose', []):
        podman_args.extend(['--expose', i])
    if cnt.get('publishall', None):
        podman_args.append('-P')
    ports = cnt.get('ports', None) or []
    if isinstance(ports, str):
        ports = [ports]
    for port in ports:
        if isinstance(port, dict):
            # long-syntax port mapping -> "host:container/proto" string
            port = port_dict_to_str(port)
        elif not isinstance(port, str):
            raise TypeError("port should be either string or dict")
        podman_args.extend(['-p', port])

    user = cnt.get('user', None)
    if user is not None:
        podman_args.extend(['-u', user])
    if cnt.get('working_dir', None) is not None:
        podman_args.extend(['-w', cnt['working_dir']])
    if cnt.get('hostname', None):
        podman_args.extend(['--hostname', cnt['hostname']])
    if cnt.get('shm_size', None):
        podman_args.extend(['--shm-size', '{}'.format(cnt['shm_size'])])
    if cnt.get('stdin_open', None):
        podman_args.append('-i')
    if cnt.get('stop_signal', None):
        podman_args.extend(['--stop-signal', cnt['stop_signal']])
    for i in cnt.get('sysctls', []):
        podman_args.extend(['--sysctl', i])
    if cnt.get('tty', None):
        podman_args.append('--tty')
    if cnt.get('privileged', None):
        podman_args.append('--privileged')
    # 'build' is a compose-only policy; podman --pull does not know it
    pull_policy = cnt.get('pull_policy', None)
    if pull_policy is not None and pull_policy!='build':
        podman_args.extend(['--pull', pull_policy])
    if cnt.get('restart', None) is not None:
        podman_args.extend(['--restart', cnt['restart']])
    container_to_ulimit_args(cnt, podman_args)
    container_to_res_args(cnt, podman_args)
    # currently podman shipped by fedora does not package this
    if cnt.get('init', None):
        podman_args.append('--init')
    if cnt.get('init-path', None):
        podman_args.extend(['--init-path', cnt['init-path']])
    entrypoint = cnt.get('entrypoint', None)
    if entrypoint is not None:
        if is_str(entrypoint):
            entrypoint = shlex.split(entrypoint)
        # podman expects the list form as a JSON array string
        podman_args.extend(['--entrypoint', json.dumps(entrypoint)])

    # WIP: healthchecks are still work in progress
    healthcheck = cnt.get('healthcheck', None) or {}
    if not is_dict(healthcheck):
        raise ValueError("'healthcheck' must be an key-value mapping")
    healthcheck_test = healthcheck.get('test', None)
    if healthcheck_test:
        # If it's a string, it's equivalent to specifying CMD-SHELL
        if is_str(healthcheck_test):
            # podman does not add shell to handle command with whitespace
            podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test))])
        elif is_list(healthcheck_test):
            # copy before pop() so the caller's dict is not mutated
            healthcheck_test = healthcheck_test.copy()
            # If it's a list, first item is either NONE, CMD or CMD-SHELL.
            healthcheck_type = healthcheck_test.pop(0)
            if healthcheck_type == 'NONE':
                podman_args.append("--no-healthcheck")
            elif healthcheck_type == 'CMD':
                podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(
                    "' '".join([cmd_quote(i) for i in healthcheck_test])
                )])
            elif healthcheck_type == 'CMD-SHELL':
                if len(healthcheck_test)!=1:
                    raise ValueError("'CMD_SHELL' takes a single string after it")
                podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test[0]))])
            else:
                raise ValueError(
                    "unknown healthcheck test type [{}],\
 expecting NONE, CMD or CMD-SHELL."
                    .format(healthcheck_type)
                )
        else:
            raise ValueError("'healthcheck.test' either a string or a list")

    # interval, timeout and start_period are specified as durations.
    if 'interval' in healthcheck:
        podman_args.extend(['--healthcheck-interval', healthcheck['interval']])
    if 'timeout' in healthcheck:
        podman_args.extend(['--healthcheck-timeout', healthcheck['timeout']])
    if 'start_period' in healthcheck:
        podman_args.extend(['--healthcheck-start-period', healthcheck['start_period']])

    # convert other parameters to string
    if 'retries' in healthcheck:
        podman_args.extend(['--healthcheck-retries', '{}'.format(healthcheck['retries'])])

    podman_args.append(cnt['image'])  # command, ..etc.
    command = cnt.get('command', None)
    if command is not None:
        if is_str(command):
            podman_args.extend(shlex.split(command))
        else:
            podman_args.extend([str(i) for i in command])
    return podman_args
|
2019-03-04 10:30:14 +01:00
|
|
|
|
2019-10-05 21:37:14 +02:00
|
|
|
def rec_deps(services, service_name, start_point=None):
    """
    Recursively expand the "_deps" set of *service_name* so it contains
    every direct and transitive dependency, and return that (mutated) set.

    *start_point* remembers the service the expansion started from so that
    A->B->A cycles are not followed forever.
    """
    if not start_point:
        start_point = service_name
    deps = services[service_name]["_deps"]
    # iterate a snapshot: deps grows while we recurse
    for child in list(deps):
        # "A depends on A" contributes nothing
        if child == service_name:
            continue
        child_srv = services.get(child, None)
        if not child_srv:
            continue
        # NOTE: avoid creating loops, A->B->A
        if start_point and start_point in child_srv["_deps"]:
            continue
        deps.update(rec_deps(services, child, start_point))
    return deps
|
|
|
|
|
2019-10-05 21:37:14 +02:00
|
|
|
def flat_deps(services, with_extends=False):
    """
    (Re)build the "_deps" set of every service from its "extends",
    "depends_on" and "links" keys, then expand each set transitively
    via rec_deps.
    """
    for name, srv in services.items():
        srv["_deps"] = deps = set()
        if with_extends:
            # at this stage only the extended base service is a dependency
            base = srv.get("extends", {}).get("service", None)
            if base and base != name:
                deps.add(base)
            continue
        depends = srv.get("depends_on", None) or []
        if is_str(depends):
            depends = [depends]
        elif is_dict(depends):
            depends = list(depends.keys())
        deps.update(depends)
        # parse link to get service name and remove alias ("service:alias")
        links = srv.get("links", None) or []
        if not is_list(links):
            links = [links]
        deps.update(link.split(":")[0] for link in links)
    # second pass: make every "_deps" transitive
    for name in services:
        rec_deps(services, name)
|
2019-03-09 22:25:32 +01:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
###################
|
|
|
|
# podman and compose classes
|
|
|
|
###################
|
|
|
|
|
|
|
|
class Podman:
    """Thin wrapper around the podman executable (honors --dry-run)."""
    def __init__(self, compose, podman_path='podman', dry_run=False):
        # owning PodmanCompose instance (source of per-command extra args)
        self.compose = compose
        # path/name of the podman binary to invoke
        self.podman_path = podman_path
        # when True, commands are only logged, never executed
        self.dry_run = dry_run

    def output(self, podman_args, cmd='', cmd_args=None):
        """
        Run podman synchronously and return its captured stdout as bytes.

        :param podman_args: global podman flags (before the subcommand)
        :param cmd: podman subcommand, e.g. "volume"; may be empty
        :param cmd_args: arguments after the subcommand
        :raises subprocess.CalledProcessError: on non-zero exit
        """
        cmd_args = cmd_args or []
        # user-configured extra arguments for this subcommand
        xargs = self.compose.get_podman_args(cmd) if cmd else []
        cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args
        log(cmd_ls)
        return subprocess.check_output(cmd_ls)

    def run(self, podman_args, cmd='', cmd_args=None, wait=True, sleep=1, obj=None, log_formatter=None):
        """
        Spawn podman, optionally wait for it, and return the Popen object
        (or None in dry-run mode).

        :param wait: when True, block until podman exits and record exit code
        :param sleep: seconds to pause after the command (0/None to skip)
        :param obj: optional holder object; its .exit_code attribute is set
        :param log_formatter: optional argv of a process that receives podman's
            stdout on its stdin (used to add a colored per-service prefix)
        """
        if obj is not None:
            # reset before spawning; filled in below only when wait=True
            obj.exit_code = None
        cmd_args = list(map(str, cmd_args or []))
        xargs = self.compose.get_podman_args(cmd) if cmd else []
        cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args
        log(" ".join([str(i) for i in cmd_ls]))
        if self.dry_run:
            return None
        # subprocess.Popen(args, bufsize = 0, executable = None, stdin = None, stdout = None, stderr = None, preexec_fn = None, close_fds = False, shell = False, cwd = None, env = None, universal_newlines = False, startupinfo = None, creationflags = 0)
        if log_formatter is not None:
            # Pipe podman process output through log_formatter (which can add colored prefix)
            p = subprocess.Popen(cmd_ls, stdout=subprocess.PIPE)
            _ = subprocess.Popen(log_formatter, stdin=p.stdout)
            p.stdout.close()  # Allow p_process to receive a SIGPIPE if logging process exits.
        else:
            p = subprocess.Popen(cmd_ls)

        if wait:
            exit_code = p.wait()
            log("exit code:", exit_code)
            if obj is not None:
                obj.exit_code = exit_code

        if sleep:
            time.sleep(sleep)
        return p

    def volume_ls(self, proj=None):
        """
        List the names of podman volumes labeled as belonging to project
        *proj* (defaults to the current compose project).
        """
        if not proj:
            proj = self.compose.project_name
        output = self.output([], "volume", [
            "ls", "--noheading", "--filter", f"label=io.podman.compose.project={proj}",
            "--format", "{{.Name}}",
        ]).decode('utf-8')
        volumes = output.splitlines()
        return volumes
|
2021-11-22 19:34:40 +01:00
|
|
|
|
2019-10-05 21:37:14 +02:00
|
|
|
def normalize_service(service):
    """
    Normalize a single service dict in place and return it:
    wrap scalar values of list-valued keys, rewrite docker-style
    security_opt separators, canonicalize "environment"/"labels" into
    dicts and "extends" into its mapping form.
    """
    # these keys accept either one string or a list of strings
    for list_key in ("env_file", "security_opt", "volumes"):
        if list_key in service and is_str(service[list_key]):
            service[list_key] = [service[list_key]]
    # docker-compose writes "seccomp:unconfined"; podman wants "seccomp=unconfined"
    if "security_opt" in service:
        opts = service["security_opt"]
        for ix, opt in enumerate(opts):
            if opt in ("seccomp:unconfined", "apparmor:unconfined"):
                opts[ix] = opt.replace(":", "=")
    for map_key in ("environment", "labels"):
        if map_key in service:
            service[map_key] = norm_as_dict(service[map_key])
    # 'extends: foo' is shorthand for 'extends: {service: foo}'
    if "extends" in service and is_str(service["extends"]):
        service["extends"] = {"service": service["extends"]}
    return service
|
|
|
|
|
2019-09-08 01:20:48 +02:00
|
|
|
def normalize(compose):
    """
    Normalize every service of a parsed compose dict in place
    (see normalize_service) and return the same dict.
    """
    all_services = compose.get("services", None) or {}
    for srv in all_services.values():
        normalize_service(srv)
    return compose
|
|
|
|
|
2019-10-05 21:37:14 +02:00
|
|
|
def rec_merge_one(target, source):
    """
    Merge *source* into *target* recursively, in place, and return *target*.

    Keys only in source are copied; for keys in both: lists are concatenated
    (with "volumes" de-duplicated by mount target so source overrides win),
    dicts are merged recursively, and scalars from source replace target's.

    :raises ValueError: when the two values for a key have different types
    """
    copied = set()
    # pass 1: bring over keys that target does not have yet
    for key, src_value in source.items():
        if key in target:
            continue
        target[key] = src_value
        copied.add(key)
    # pass 2: reconcile keys present on both sides
    for key, dst_value in target.items():
        if key in copied:
            continue
        if key not in source:
            continue
        src_value = source[key]
        if type(src_value) != type(dst_value):
            raise ValueError("can't merge value of {} of type {} and {}".format(key, type(dst_value), type(src_value)))
        if is_list(src_value):
            if key == 'volumes':
                # clean duplicate mount targets: the source's mapping for a
                # given container path replaces the target's
                mount_pts = set([ v.split(':', 1)[1] for v in src_value if ":" in v ])
                stale = [ ix for ix, v in enumerate(dst_value) if ":" in v and v.split(':', 1)[1] in mount_pts ]
                for ix in reversed(stale):
                    del dst_value[ix]
            dst_value.extend(src_value)
        elif is_dict(src_value):
            rec_merge_one(dst_value, src_value)
        else:
            target[key] = src_value
    return target
|
|
|
|
|
2019-10-05 21:37:14 +02:00
|
|
|
def rec_merge(target, *sources):
    """
    Update *target* recursively from each dict in *sources*, in order,
    and return *target* (see rec_merge_one).
    """
    # initialize to target so the degenerate zero-source call returns the
    # (unchanged) target instead of raising UnboundLocalError on `ret`
    ret = target
    for source in sources:
        ret = rec_merge_one(target, source)
    return ret
|
|
|
|
|
2021-11-13 22:27:43 +01:00
|
|
|
def resolve_extends(services, service_names, environ):
    """
    Apply the compose "extends" mechanism in place: for every service that
    extends another (from this file or from an external one), replace it with
    the base service's settings merged under its own.

    :param services: name -> service dict mapping (mutated)
    :param service_names: names to process, expected in dependency order
    :param environ: environment used for ${VAR} substitution in external files
    """
    for name in service_names:
        service = services[name]
        ext = service.get("extends", {})
        # normalize the shorthand 'extends: other' form
        if is_str(ext): ext = {"service": ext}
        from_service_name = ext.get("service", None)
        if not from_service_name: continue
        filename = ext.get("file", None)
        if filename:
            # base service lives in another compose file: load, substitute, normalize
            with open(filename, 'r') as f:
                content = yaml.safe_load(f) or {}
            if "services" in content:
                content = content["services"]
            content = rec_subs(content, environ)
            from_service = content.get(from_service_name, {})
            normalize_service(from_service)
        else:
            # base service is in this file; copy so the original is untouched
            from_service = services.get(from_service_name, {}).copy()
            # internal bookkeeping and the base's own "extends" must not leak in
            del from_service["_deps"]
            try:
                del from_service["extends"]
            except KeyError:
                pass
        # the extending service's own keys win over the base's
        new_service = rec_merge({}, from_service, service)
        services[name] = new_service
|
|
|
|
|
2021-12-10 00:01:45 +01:00
|
|
|
def dotenv_to_dict(dotenv_path):
    """
    Parse a .env file into a dict of variables; a missing or non-file
    path yields an empty dict.
    """
    return dotenv_values(dotenv_path) if os.path.isfile(dotenv_path) else {}
|
2019-10-05 21:37:14 +02:00
|
|
|
|
2021-12-10 22:26:13 +01:00
|
|
|
# Default compose file names probed (in this order) in the working directory
# when neither -f/--file nor the COMPOSE_FILE environment variable is given.
COMPOSE_DEFAULT_LS = [
    "compose.yaml",
    "compose.yml",
    "compose.override.yaml",
    "compose.override.yml",
    "podman-compose.yaml",
    "podman-compose.yml",
    "docker-compose.yml",
    "docker-compose.yaml",
    "docker-compose.override.yml",
    "docker-compose.override.yaml",
    "container-compose.yml",
    "container-compose.yaml",
    "container-compose.override.yml",
    "container-compose.override.yaml",
]
|
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
class PodmanCompose:
|
|
|
|
    def __init__(self):
        """Initialize an empty compose state; filled in by run()/_parse_compose_file()."""
        # podman version string as reported by `podman --version`
        self.podman_version = None
        # exit code of the last waited-for podman invocation
        self.exit_code = None
        # registered subcommands: name -> handler (populated via decorators)
        self.commands = {}
        # argparse.Namespace of the parsed command line
        self.global_args = None
        self.project_name = None
        # directory of the (first) compose file; also the cwd while running
        self.dirname = None
        self.pods = None
        self.containers = None
        # top-level "volumes:" mapping of the merged compose file
        self.vols = None
        # top-level "networks:" mapping; "default" is implied when absent
        self.networks = {}
        self.default_net = "default"
        # top-level "secrets:" mapping
        self.declared_secrets = None
        self.container_names_by_service = None
        self.container_by_name = None
        self._prefer_volume_over_mount = True
        # ANSI color prefixes cycled through for per-service log output
        self.console_colors = ["\x1B[1;32m", "\x1B[1;33m", "\x1B[1;34m", "\x1B[1;35m", "\x1B[1;36m"]
|
2019-08-09 15:31:56 +02:00
|
|
|
|
2021-05-17 14:03:47 +02:00
|
|
|
    def get_podman_args(self, cmd):
        """
        Collect user-supplied extra podman arguments for subcommand *cmd*:
        the global --podman-args plus the matching --podman-<cmd>-args,
        each value split shell-style. Returns a flat list of strings.
        """
        xargs = []
        for args in self.global_args.podman_args:
            xargs.extend(shlex.split(args))
        # `podman create` reuses the arguments configured for `run`
        cmd_norm = cmd if cmd != 'create' else 'run'
        cmd_args = self.global_args.__dict__.get(f"podman_{cmd_norm}_args", None) or []
        for args in cmd_args:
            xargs.extend(shlex.split(args))
        return xargs
|
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
    def run(self):
        """
        Entry point: parse the command line, locate and sanity-check the
        podman binary, parse the compose file(s) (except for `version`),
        then dispatch to the selected subcommand handler.
        """
        args = self._parse_args()
        podman_path = args.podman_path
        if podman_path != 'podman':
            if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK):
                podman_path = os.path.realpath(podman_path)
            else:
                # this also works if podman hasn't been installed now
                if args.dry_run == False:
                    sys.stderr.write("Binary {} has not been found.\n".format(podman_path))
                    exit(1)
        self.podman = Podman(self, podman_path, args.dry_run)
        if not args.dry_run:
            # just to make sure podman is running
            try:
                self.podman_version = self.podman.output(["--version"], '', []).decode('utf-8').strip() or ""
                # keep only the trailing version number of "podman version X.Y.Z"
                self.podman_version = (self.podman_version.split() or [""])[-1]
            except subprocess.CalledProcessError:
                self.podman_version = None
            if not self.podman_version:
                sys.stderr.write("it seems that you do not have `podman` installed\n")
                exit(1)
            log("using podman version: "+self.podman_version)
        cmd_name = args.command
        # `version` must work without any compose file present
        if (cmd_name != "version"):
            self._parse_compose_file()
        cmd = self.commands[cmd_name]
        cmd(self, args)
|
|
|
|
|
|
|
|
    def _parse_compose_file(self):
        """
        Locate, load, substitute, normalize and merge the compose file(s),
        then derive all run-time state: project name, environment, networks,
        volumes, services, dependency-ordered container descriptions, and
        the pod/container mapping. Exits the process on missing/invalid files.
        """
        args = self.global_args
        cmd = args.command
        pathsep = os.environ.get("COMPOSE_PATH_SEPARATOR", None) or os.pathsep
        # when -f is not given: use COMPOSE_FILE, else probe the default names
        if not args.file:
            default_str = os.environ.get("COMPOSE_FILE", None)
            if default_str:
                default_ls = default_str.split(pathsep)
            else:
                default_ls = COMPOSE_DEFAULT_LS
            args.file = list(filter(os.path.exists, default_ls))
        files = args.file
        if not files:
            log("no compose.yaml, docker-compose.yml or container-compose.yml file found, pass files with -f")
            exit(-1)
        ex = map(os.path.exists, files)
        missing = [ fn0 for ex0, fn0 in zip(ex, files) if not ex0 ]
        if missing:
            log("missing files: ", missing)
            exit(1)
        # make absolute
        relative_files = files
        files = list(map(os.path.realpath, files))
        filename = files[0]
        project_name = args.project_name
        no_ansi = args.no_ansi
        no_cleanup = args.no_cleanup
        dry_run = args.dry_run
        host_env = None
        dirname = os.path.realpath(os.path.dirname(filename))
        dir_basename = os.path.basename(dirname)
        self.dirname = dirname
        # TODO: remove next line
        os.chdir(dirname)

        if not project_name:
            # More strict then actually needed for simplicity: podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]*
            project_name = os.environ.get("COMPOSE_PROJECT_NAME", None) or dir_basename.lower()
            project_name = norm_re.sub('', project_name)
            if not project_name:
                raise RuntimeError("Project name [{}] normalized to empty".format(dir_basename))

        self.project_name = project_name

        # environment: host env overlaid with the project's .env file
        dotenv_path = os.path.join(dirname, ".env")
        self.environ = dict(os.environ)
        dotenv_dict = dotenv_to_dict(dotenv_path)
        self.environ.update(dotenv_dict)
        # PODMAN_* vars from .env must be visible to the podman child process
        os.environ.update({ key: value for key, value in dotenv_dict.items() if key.startswith('PODMAN_')})
        # see: https://docs.docker.com/compose/reference/envvars/
        # see: https://docs.docker.com/compose/env-file/
        self.environ.update({
            "COMPOSE_FILE": os.path.basename(filename),
            "COMPOSE_PROJECT_NAME": self.project_name,
            "COMPOSE_PATH_SEPARATOR": pathsep,
        })
        # load every file, ${VAR}-substitute, then merge into one dict
        compose = {}
        for filename in files:
            with open(filename, 'r') as f:
                content = yaml.safe_load(f)
                #log(filename, json.dumps(content, indent = 2))
                if not isinstance(content, dict):
                    sys.stderr.write("Compose file does not contain a top level object: %s\n"%filename)
                    exit(1)
                content = normalize(content)
                #log(filename, json.dumps(content, indent = 2))
                content = rec_subs(content, self.environ)
                rec_merge(compose, content)
        self.merged_yaml = yaml.safe_dump(compose)
        compose['_dirname'] = dirname
        # debug mode
        if len(files)>1:
            log(" ** merged:\n", json.dumps(compose, indent = 2))
        ver = compose.get('version', None)
        services = compose.get('services', None)
        if services is None:
            services = {}
            log("WARNING: No services defined")

        # NOTE: maybe add "extends.service" to _deps at this stage
        # resolve "extends" first (in dependency order), then recompute deps
        flat_deps(services, with_extends=True)
        service_names = sorted([ (len(srv["_deps"]), name) for name, srv in services.items() ])
        service_names = [ name for _, name in service_names]
        resolve_extends(services, service_names, self.environ)
        flat_deps(services)
        service_names = sorted([ (len(srv["_deps"]), name) for name, srv in services.items() ])
        service_names = [ name for _, name in service_names]
        # networks: pick the implicit default and validate coverage
        nets = compose.get("networks", None) or {}
        if not nets:
            nets["default"] = None
        self.networks = nets
        if len(self.networks)==1:
            self.default_net = list(nets.keys())[0]
        elif "default" in nets:
            self.default_net = "default"
        else:
            self.default_net = None
        default_net = self.default_net
        allnets = set()
        for name, srv in services.items():
            srv_nets = srv.get("networks", None) or default_net
            srv_nets = list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets)
            allnets.update(srv_nets)
        given_nets = set(nets.keys())
        # NOTE(review): this flags declared-but-unused networks; the message
        # says "missing" — confirm the intended direction of the check
        missing_nets = given_nets - allnets
        if len(missing_nets):
            missing_nets_str= ",".join(missing_nets)
            raise RuntimeError(f"missing networks: {missing_nets_str}")
        # volumes: [...]
        self.vols = compose.get('volumes', {})
        podman_compose_labels = [
            "io.podman.compose.config-hash=123",
            "io.podman.compose.project=" + project_name,
            "io.podman.compose.version=0.0.1",
            "com.docker.compose.project=" + project_name,
            "com.docker.compose.project.working_dir=" + dirname,
            "com.docker.compose.project.config_files=" + ','.join(relative_files),
        ]
        # other top-levels:
        # networks: {driver: ...}
        # configs: {...}
        self.declared_secrets = compose.get('secrets', {})
        given_containers = []
        container_names_by_service = {}
        self.services = services
        # expand each service into one container dict per replica
        for service_name, service_desc in services.items():
            replicas = try_int(service_desc.get('deploy', {}).get('replicas', '1'))
            container_names_by_service[service_name] = []
            for num in range(1, replicas+1):
                name0 = "{project_name}_{service_name}_{num}".format(
                    project_name=project_name,
                    service_name=service_name,
                    num=num,
                )
                # container_name only applies to the first replica
                if num == 1:
                    name = service_desc.get("container_name", name0)
                else:
                    name = name0
                container_names_by_service[service_name].append(name)
                # log(service_name,service_desc)
                cnt = dict(name=name, num=num,
                    service_name=service_name, **service_desc)
                if 'image' not in cnt:
                    cnt['image'] = "{project_name}_{service_name}".format(
                        project_name=project_name,
                        service_name=service_name,
                    )
                labels = norm_as_list(cnt.get('labels', None))
                cnt["ports"] = norm_ports(cnt.get("ports", None))
                labels.extend(podman_compose_labels)
                labels.extend([
                    "com.docker.compose.container-number={}".format(num),
                    "com.docker.compose.service=" + service_name,
                ])
                cnt['labels'] = labels
                cnt['_service'] = service_name
                cnt['_project'] = project_name
                given_containers.append(cnt)
                # fail early when a named volume is not declared top-level
                volumes = cnt.get("volumes", None) or []
                for volume in volumes:
                    mnt_dict = get_mnt_dict(self, cnt, volume)
                    if mnt_dict.get("type", None)=="volume" and mnt_dict["source"] and mnt_dict["source"] not in self.vols:
                        vol_name = mnt_dict["source"]
                        raise RuntimeError(f"volume [{vol_name}] not defined in top level")
        self.container_names_by_service = container_names_by_service
        container_by_name = dict([(c["name"], c) for c in given_containers])
        #log("deps:", [(c["name"], c["_deps"]) for c in given_containers])
        given_containers = list(container_by_name.values())
        # start containers with fewer dependencies first
        given_containers.sort(key=lambda c: len(c.get('_deps', None) or []))
        #log("sorted:", [c["name"] for c in given_containers])
        pods, containers = tr_identity(project_name, given_containers)
        self.pods = pods
        self.containers = containers
        self.container_by_name = dict([ (c["name"], c) for c in containers])
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
def _parse_args(self):
|
2021-09-06 06:45:50 +02:00
|
|
|
parser = argparse.ArgumentParser(
|
|
|
|
formatter_class=argparse.RawTextHelpFormatter
|
|
|
|
)
|
2019-08-09 15:31:56 +02:00
|
|
|
self._init_global_parser(parser)
|
2019-08-10 17:08:21 +02:00
|
|
|
subparsers = parser.add_subparsers(title='command', dest='command')
|
2019-09-08 01:20:48 +02:00
|
|
|
subparser = subparsers.add_parser('help', help='show help')
|
2019-08-10 17:08:21 +02:00
|
|
|
for cmd_name, cmd in self.commands.items():
|
|
|
|
subparser = subparsers.add_parser(cmd_name, help=cmd._cmd_desc)
|
|
|
|
for cmd_parser in cmd._parse_args:
|
|
|
|
cmd_parser(subparser)
|
2019-08-09 15:31:56 +02:00
|
|
|
self.global_args = parser.parse_args()
|
2021-09-08 23:33:07 +02:00
|
|
|
if self.global_args.version:
|
|
|
|
self.global_args.command = "version"
|
2019-09-08 01:20:48 +02:00
|
|
|
if not self.global_args.command or self.global_args.command=='help':
|
2019-09-02 23:19:07 +02:00
|
|
|
parser.print_help()
|
|
|
|
exit(-1)
|
2019-08-10 17:08:21 +02:00
|
|
|
return self.global_args
|
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
def _init_global_parser(self, parser):
|
2021-09-08 23:33:07 +02:00
|
|
|
parser.add_argument("-v", "--version",
|
|
|
|
help="show version", action='store_true')
|
2019-08-09 15:31:56 +02:00
|
|
|
parser.add_argument("-f", "--file",
|
|
|
|
help="Specify an alternate compose file (default: docker-compose.yml)",
|
2019-09-08 01:20:48 +02:00
|
|
|
metavar='file', action='append', default=[])
|
2019-08-09 15:31:56 +02:00
|
|
|
parser.add_argument("-p", "--project-name",
|
|
|
|
help="Specify an alternate project name (default: directory name)",
|
|
|
|
type=str, default=None)
|
|
|
|
parser.add_argument("--podman-path",
|
|
|
|
help="Specify an alternate path to podman (default: use location in $PATH variable)",
|
|
|
|
type=str, default="podman")
|
2021-05-17 14:03:47 +02:00
|
|
|
parser.add_argument("--podman-args",
|
|
|
|
help="custom global arguments to be passed to `podman`",
|
|
|
|
metavar='args', action='append', default=[])
|
|
|
|
for podman_cmd in PODMAN_CMDS:
|
|
|
|
parser.add_argument(f"--podman-{podman_cmd}-args",
|
|
|
|
help=f"custom arguments to be passed to `podman {podman_cmd}`",
|
|
|
|
metavar='args', action='append', default=[])
|
2019-08-09 15:31:56 +02:00
|
|
|
parser.add_argument("--no-ansi",
|
|
|
|
help="Do not print ANSI control characters", action='store_true')
|
|
|
|
parser.add_argument("--no-cleanup",
|
|
|
|
help="Do not stop and remove existing pod & containers", action='store_true')
|
|
|
|
parser.add_argument("--dry-run",
|
|
|
|
help="No action; perform a simulation of commands", action='store_true')
|
|
|
|
|
|
|
|
# Module-level singleton: the decorator classes below register commands
# and their argument parsers onto this instance at import time.
podman_compose = PodmanCompose()


###################
# decorators to add commands and parse options
###################
|
|
|
class cmd_run:
    """Decorator that registers a function as a podman-compose sub-command.

    The decorated function is stored in ``compose.commands[cmd_name]`` and
    annotated with the metadata (`_cmd_name`, `_cmd_desc`, `_parse_args`)
    later consumed by ``PodmanCompose._parse_args``.
    """

    def __init__(self, compose, cmd_name, cmd_desc):
        self.compose = compose
        self.cmd_name = cmd_name
        self.cmd_desc = cmd_desc

    def __call__(self, func):
        def wrapped(*args, **kw):
            return func(*args, **kw)

        # attach registration metadata used when building the CLI parser
        for attr, value in (
            ("_compose", self.compose),
            ("_cmd_name", self.cmd_name),
            ("_cmd_desc", self.cmd_desc),
            ("_parse_args", []),
        ):
            setattr(wrapped, attr, value)
        self.compose.commands[self.cmd_name] = wrapped
        return wrapped
|
|
|
|
|
|
|
|
class cmd_parse:
    """Decorator that attaches an argument-definition callback to one or
    more already-registered commands (see ``cmd_run``).

    ``cmd_names`` may be a single command name or a list of names.
    """

    def __init__(self, compose, cmd_names):
        self.compose = compose
        # normalize to a list so __call__ can always iterate
        self.cmd_names = cmd_names if is_list(cmd_names) else [cmd_names]

    def __call__(self, func):
        def wrapped(*args, **kw):
            return func(*args, **kw)

        # register the callback on every named command's parser hook list
        for name in self.cmd_names:
            self.compose.commands[name]._parse_args.append(wrapped)
        return wrapped
|
|
|
|
|
|
|
|
###################
|
|
|
|
# actual commands
|
|
|
|
###################
|
|
|
|
|
2019-10-04 19:57:07 +02:00
|
|
|
@cmd_run(podman_compose, 'version', 'show version')
def compose_version(compose, args):
    """Print podman-compose's version, then podman's own version.

    Honors ``--short`` (bare version number) and ``--format json``.
    """
    if getattr(args, 'short', False):
        print(__version__)
        return
    if getattr(args, 'format', 'pretty') == 'json':
        res = {"version": __version__}
        print(json.dumps(res))
        return
    # fixed typo in the log message: was "podman-composer"
    log("podman-compose version", __version__)
    compose.podman.run(["--version"], "", [], sleep=0)
|
2019-10-04 19:57:07 +02:00
|
|
|
|
2021-08-05 11:24:35 +02:00
|
|
|
def is_local(container: dict) -> bool:
    """Test if a container's image is local, i.e. it is
    * prefixed with localhost/
    * has a build section and is not prefixed
    """
    if "build" in container:
        # a built image with no registry prefix is local
        return "/" not in container["image"]
    return container["image"].startswith("localhost/")
|
|
|
|
|
|
|
|
@cmd_run(podman_compose, "pull", "pull stack images")
|
2019-08-09 15:31:56 +02:00
|
|
|
def compose_pull(compose, args):
|
2021-08-05 11:24:35 +02:00
|
|
|
img_containers = [cnt for cnt in compose.containers if "image" in cnt]
|
|
|
|
images = {cnt["image"] for cnt in img_containers}
|
|
|
|
if not args.force_local:
|
|
|
|
local_images = {cnt["image"] for cnt in img_containers if is_local(cnt)}
|
|
|
|
images -= local_images
|
2020-11-27 12:00:34 +01:00
|
|
|
for image in images:
|
2021-05-17 14:03:47 +02:00
|
|
|
compose.podman.run([], "pull", [image], sleep=0)
|
2019-04-19 17:24:30 +02:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
@cmd_run(podman_compose, 'push', 'push stack images')
def compose_push(compose, args):
    """Push images that have a build section, optionally restricted to
    the services named on the command line."""
    wanted = set(args.services)
    for cnt in compose.containers:
        if 'build' not in cnt:
            continue
        # an empty selection means "push everything buildable"
        if wanted and cnt['_service'] not in wanted:
            continue
        compose.podman.run([], "push", [cnt["image"]], sleep=0)
|
2019-08-09 15:31:56 +02:00
|
|
|
|
2019-08-17 22:39:42 +02:00
|
|
|
def build_one(compose, args, cnt):
    """Build the image of one container if it has a `build` section.

    With ``--if-not-exists`` the build is skipped when the image is
    already present locally.
    """
    if 'build' not in cnt:
        return
    if getattr(args, 'if_not_exists', None):
        # probe for an existing image; a CalledProcessError means "absent"
        try:
            img_id = compose.podman.output(
                [], 'inspect', ['-t', 'image', '-f', '{{.Id}}', cnt["image"]])
        except subprocess.CalledProcessError:
            img_id = None
        if img_id:
            return
    build_desc = cnt['build']
    if not hasattr(build_desc, 'items'):
        # short form: `build: <context>`
        build_desc = dict(context=build_desc)
    ctx = build_desc.get('context', '.')
    dockerfile = build_desc.get("dockerfile", None)
    if dockerfile:
        dockerfile = os.path.join(ctx, dockerfile)
    else:
        # probe the usual spellings; fall back to the last candidate so the
        # existence check below raises a clear error when none is found
        candidates = [
            os.path.join(ctx, alt)
            for alt in (
                'Containerfile', 'ContainerFile', 'containerfile',
                'Dockerfile', 'DockerFile', 'dockerfile',
            )
        ]
        dockerfile = next(
            (c for c in candidates if os.path.exists(c)), candidates[-1])
    if not os.path.exists(dockerfile):
        raise OSError("Dockerfile not found in "+ctx)
    build_args = ["-t", cnt["image"], "-f", dockerfile]
    if "target" in build_desc:
        build_args.extend(["--target", build_desc["target"]])
    container_to_ulimit_args(cnt, build_args)
    if getattr(args, 'no_cache', None):
        build_args.append("--no-cache")
    if getattr(args, 'pull_always', None):
        build_args.append("--pull-always")
    elif getattr(args, 'pull', None):
        build_args.append("--pull")
    # compose-file build args plus any --build-arg from the command line
    for build_arg in norm_as_list(build_desc.get('args', {})) + args.build_arg:
        build_args.extend(("--build-arg", build_arg,))
    build_args.append(ctx)
    compose.podman.run([], "build", build_args, sleep=0)
|
2019-08-17 22:39:42 +02:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
@cmd_run(podman_compose, 'build', 'build stack images')
def compose_build(compose, args):
    """Build images for the requested services, or for every container
    when no services are named.

    Raises:
        ValueError: when a named service is unknown.
    """
    if args.services:
        container_names_by_service = compose.container_names_by_service
        for service in args.services:
            try:
                cnt = compose.container_by_name[container_names_by_service[service][0]]
            except (KeyError, IndexError):
                # narrowed from a bare `except:`, which would also have
                # swallowed KeyboardInterrupt/SystemExit
                raise ValueError("unknown service: " + service)
            build_one(compose, args, cnt)
    else:
        for cnt in compose.containers:
            build_one(compose, args, cnt)
|
2019-03-23 20:42:04 +01:00
|
|
|
|
2019-08-10 13:11:28 +02:00
|
|
|
def create_pods(compose, args):
    """Create one podman pod per compose pod, forwarding port mappings."""
    for pod in compose.pods:
        pod_args = [
            "create",
            "--name={}".format(pod["name"]),
        ]
        #if compose.podman_version and not strverscmp_lt(compose.podman_version, "3.4.0"):
        #    pod_args.append("--infra-name={}_infra".format(pod["name"]))
        ports = pod.get("ports", None) or []
        # a single mapping may be given as a bare string
        if isinstance(ports, str):
            ports = [ports]
        for mapping in ports:
            pod_args.extend(['-p', str(mapping)])
        compose.podman.run([], "pod", pod_args)
|
2019-03-23 20:42:04 +01:00
|
|
|
|
2021-05-28 19:06:45 +02:00
|
|
|
|
2019-08-17 22:39:42 +02:00
|
|
|
def up_specific(compose, args):
    """Stub for `up SERVICE...`: gathers (currently empty) dependency
    lists, then always raises NotImplementedError."""
    deps = []
    if not args.no_deps:
        # dependency resolution is not implemented yet; extend with nothing
        for service in args.services:
            deps.extend([])
    # args.always_recreate_deps
    log("services", args.services)
    raise NotImplementedError("starting specific services is not yet implemented")
|
|
|
|
|
2021-11-13 23:28:43 +01:00
|
|
|
def get_excluded(compose, args):
    """Return the set of service names the current command must skip.

    With no explicit services nothing is excluded; otherwise everything
    except the requested services and their dependencies is excluded.
    """
    excluded = set()
    if args.services:
        excluded = set(compose.services)
        for service in args.services:
            excluded -= compose.services[service]['_deps']
            excluded.discard(service)
    log("** excluding: ", excluded)
    return excluded
|
2019-08-17 22:39:42 +02:00
|
|
|
|
2021-11-13 23:28:43 +01:00
|
|
|
@cmd_run(podman_compose, 'up', 'Create and start the entire stack or some of its services')
def compose_up(compose, args):
    """Create and (unless --no-start/-d/--dry-run) attach to the stack.

    Builds missing images, optionally tears the stack down first
    (--force-recreate), creates the pods and containers, then streams
    each container's output on its own thread with a colored prefix.
    """
    excluded = get_excluded(compose, args)
    if not args.no_build:
        # `podman build` does not cache, so don't always build
        build_args = argparse.Namespace(
            if_not_exists=(not args.build),
            **args.__dict__)
        compose.commands['build'](compose, build_args)

    # TODO: implement check hash label for change
    if args.force_recreate:
        down_args = argparse.Namespace(**dict(args.__dict__, volumes=False))
        compose.commands['down'](compose, down_args)
    # args.no_recreate disables check for changes (which is not implemented)

    podman_command = 'run' if args.detach and not args.no_start else 'create'

    create_pods(compose, args)
    for cnt in compose.containers:
        if cnt["_service"] in excluded:
            log("** skipping: ", cnt['name'])
            continue
        podman_args = container_to_args(compose, cnt, detached=args.detach)
        subproc = compose.podman.run([], podman_command, podman_args)
        if podman_command == 'run' and subproc and subproc.returncode:
            # `run` failed (e.g. container already exists): try starting it
            compose.podman.run([], 'start', [cnt['name']])
    if args.no_start or args.detach or args.dry_run:
        return
    # TODO: handle already existing
    # TODO: if error creating do not enter loop
    # TODO: colors if sys.stdout.isatty()
    # idiom fix: getattr instead of poking args.__dict__ directly
    exit_code_from = getattr(args, 'exit_code_from', None)
    if exit_code_from:
        args.abort_on_container_exit = True

    threads = []

    # longest service name, used to align the log prefixes
    max_service_length = 0
    for cnt in compose.containers:
        max_service_length = max(max_service_length, len(cnt["_service"]))

    for i, cnt in enumerate(compose.containers):
        # Add colored service prefix to output by piping output through sed
        color_idx = i % len(compose.console_colors)
        color = compose.console_colors[color_idx]
        space_suffix = ' ' * (max_service_length - len(cnt["_service"]) + 1)
        # fix: the separator used to be written "\ " — an invalid escape
        # sequence in a non-raw Python string (SyntaxWarning on 3.12+);
        # sed treats an escaped space identically to a plain space
        log_formatter = 's/^/{}[{}]{}|\x1B[0m /;'.format(color, cnt["_service"], space_suffix)
        log_formatter = ["sed", "-e", log_formatter]
        if cnt["_service"] in excluded:
            log("** skipping: ", cnt['name'])
            continue
        # TODO: remove sleep from podman.run
        # pass `compose` so the selected service records its exit code
        obj = compose if exit_code_from == cnt['_service'] else None
        thread = Thread(
            target=compose.podman.run,
            args=[[], 'start', ['-a', cnt['name']]],
            kwargs={"obj": obj, "log_formatter": log_formatter},
            daemon=True, name=cnt['name'])
        thread.start()
        threads.append(thread)
        time.sleep(1)

    while threads:
        # NOTE(review): threads is mutated while iterated; the outer while
        # re-enters until the list drains, so no thread is permanently missed
        for thread in threads:
            thread.join(timeout=1.0)
            if not thread.is_alive():
                threads.remove(thread)
                if args.abort_on_container_exit:
                    time.sleep(1)
                    exit_code = compose.exit_code if compose.exit_code is not None else -1
                    exit(exit_code)
|
2019-03-23 20:42:04 +01:00
|
|
|
|
2021-12-23 00:17:34 +01:00
|
|
|
def get_volume_names(compose, cnt):
    """Return the names of the named volumes mounted by one container."""
    proj_name = compose.project_name
    basedir = compose.dirname
    srv_name = cnt['_service']
    names = []
    for volume in cnt.get('volumes', []):
        # short string syntax is expanded to the long mount-dict form first
        if is_str(volume):
            volume = parse_short_mount(volume, basedir)
        volume = fix_mount_dict(compose, volume, proj_name, srv_name)
        if volume["type"] != 'volume':
            continue
        names.append((volume.get("_vol", None) or {}).get("name", None))
    return names
|
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
@cmd_run(podman_compose, 'down', 'tear down entire stack')
def compose_down(compose, args):
    """Stop and remove the stack's containers (in reverse order), then
    optionally remove named volumes (-v) and finally the pods.

    Excluded services (and, when tearing down volumes, the volumes they
    still use) are left untouched; pods are only removed when nothing
    was excluded.
    """
    # removed unused local `proj_name` (was assigned but never read)
    excluded = get_excluded(compose, args)
    podman_args = []
    timeout = getattr(args, 'timeout', None)
    if timeout is None:
        timeout = 1
    podman_args.extend(['-t', "{}".format(timeout)])
    containers = list(reversed(compose.containers))

    for cnt in containers:
        if cnt["_service"] in excluded:
            continue
        compose.podman.run([], "stop", [*podman_args, cnt["name"]], sleep=0)
    for cnt in containers:
        if cnt["_service"] in excluded:
            continue
        compose.podman.run([], "rm", [cnt["name"]], sleep=0)
    if args.volumes:
        # keep volumes still referenced by services we are NOT tearing down
        vol_names_to_keep = set()
        for cnt in containers:
            if cnt["_service"] not in excluded:
                continue
            vol_names_to_keep.update(get_volume_names(compose, cnt))
        log("keep", vol_names_to_keep)
        for volume_name in compose.podman.volume_ls():
            if volume_name in vol_names_to_keep:
                continue
            compose.podman.run([], "volume", ["rm", volume_name])

    if excluded:
        return
    for pod in compose.pods:
        compose.podman.run([], "pod", ["rm", pod["name"]], sleep=0)
|
|
|
|
|
2019-10-04 21:56:51 +02:00
|
|
|
@cmd_run(podman_compose, 'ps', 'show status of containers')
def compose_ps(compose, args):
    """List the project's containers; with --quiet print only their IDs."""
    proj_name = compose.project_name
    # idiom fix: plain truthiness instead of `== True`; the shared
    # project-label filter is built once instead of duplicated per branch
    ps_args = ["-a", "--filter", f"label=io.podman.compose.project={proj_name}"]
    if args.quiet:
        ps_args[1:1] = ["--format", "{{.ID}}"]
    compose.podman.run([], "ps", ps_args)
|
2019-10-04 21:56:51 +02:00
|
|
|
|
2019-08-10 13:11:28 +02:00
|
|
|
@cmd_run(podman_compose, 'run', 'create a container similar to a service to run a one-off command')
def compose_run(compose, args):
    """Run a one-off command in a throw-away container cloned from a
    service, bringing its dependencies up first unless --no-deps.

    Exits with the podman process's return code.
    """
    create_pods(compose, args)
    container_names = compose.container_names_by_service[args.service]
    container_name = container_names[0]
    # copy so the one-off tweaks don't leak into the shared container dict
    cnt = dict(compose.container_by_name[container_name])
    deps = cnt["_deps"]
    if not args.no_deps:
        up_args = argparse.Namespace(**dict(args.__dict__,
            detach=True, services=deps,
            # defaults
            no_build=False, build=None, force_recreate=False, no_start=False, no_cache=False, build_arg=[],
            )
        )
        compose.commands['up'](compose, up_args)
    # adjust one-off container options
    name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536))
    cnt["name"] = args.name or name0
    if args.entrypoint: cnt["entrypoint"] = args.entrypoint
    if args.user: cnt["user"] = args.user
    if args.workdir: cnt["working_dir"] = args.workdir
    env = dict(cnt.get('environment', {}))
    if args.env:
        # fix: split on the first '=' only, so values may contain '='
        # (e.g. -e TOKEN=a=b previously raised ValueError)
        additional_env_vars = dict(map(lambda each: each.split('=', 1), args.env))
        env.update(additional_env_vars)
    cnt['environment'] = env
    if not args.service_ports:
        for k in ("expose", "publishall", "ports"):
            try: del cnt[k]
            except KeyError: pass
    if args.volume:
        # TODO: handle volumes
        pass
    cnt['tty'] = False if args.T else True
    if args.cnt_command is not None and len(args.cnt_command) > 0:
        cnt['command'] = args.cnt_command
    # can't restart and --rm
    if args.rm and 'restart' in cnt:
        del cnt['restart']
    # run podman
    podman_args = container_to_args(compose, cnt, args.detach)
    if not args.detach:
        podman_args.insert(1, '-i')
        if args.rm:
            podman_args.insert(1, '--rm')
    p = compose.podman.run([], 'run', podman_args, sleep=0)
    exit(p.returncode)
|
2019-11-17 08:09:41 +01:00
|
|
|
|
2021-04-26 13:31:09 +02:00
|
|
|
@cmd_run(podman_compose, 'exec', 'execute a command in a running container')
def compose_exec(compose, args):
    """Execute a command inside a running service container (podman exec).

    --index selects among replicas; exits with podman's return code.
    """
    container_names = compose.container_names_by_service[args.service]
    container_name = container_names[args.index - 1]
    cnt = compose.container_by_name[container_name]
    podman_args = ['--interactive']
    if args.privileged: podman_args += ['--privileged']
    if args.user: podman_args += ['--user', args.user]
    if args.workdir: podman_args += ['--workdir', args.workdir]
    if not args.T: podman_args += ['--tty']
    env = dict(cnt.get('environment', {}))
    if args.env:
        # fix: split on the first '=' only, so values may contain '='
        # (e.g. -e TOKEN=a=b previously raised ValueError)
        additional_env_vars = dict(map(lambda each: each.split('=', 1), args.env))
        env.update(additional_env_vars)
    for name, value in env.items():
        podman_args += ['--env', "%s=%s" % (name, value)]
    podman_args += [container_name]
    if args.cnt_command is not None and len(args.cnt_command) > 0:
        podman_args += args.cnt_command
    p = compose.podman.run([], 'exec', podman_args, sleep=0)
    exit(p.returncode)
|
2021-04-26 13:31:09 +02:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
def transfer_service_status(compose, args, action):
    """Apply `action` ('start'/'stop'/'restart') to every container of
    the requested services; stop/restart walk the list in reverse."""
    # TODO: handle dependencies, handle creations
    container_names_by_service = compose.container_names_by_service
    if not args.services:
        # no explicit selection: act on every known service
        args.services = container_names_by_service.keys()
    targets = []
    for service in args.services:
        if service not in container_names_by_service:
            raise ValueError("unknown service: " + service)
        targets.extend(container_names_by_service[service])
    if action in ('stop', 'restart'):
        targets.reverse()
    podman_args = []
    timeout = getattr(args, 'timeout', None)
    if timeout is not None:
        podman_args.extend(['-t', "{}".format(timeout)])
    for target in targets:
        compose.podman.run([], action, podman_args + [target], sleep=0)
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
@cmd_run(podman_compose, 'start', 'start specific services')
def compose_start(compose, args):
    # thin wrapper delegating to the shared start/stop/restart helper
    transfer_service_status(compose, args, 'start')
|
|
|
|
|
|
|
|
@cmd_run(podman_compose, 'stop', 'stop specific services')
def compose_stop(compose, args):
    # thin wrapper delegating to the shared start/stop/restart helper
    transfer_service_status(compose, args, 'stop')
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
@cmd_run(podman_compose, 'restart', 'restart specific services')
def compose_restart(compose, args):
    # thin wrapper delegating to the shared start/stop/restart helper
    transfer_service_status(compose, args, 'restart')
|
2019-03-23 21:04:07 +01:00
|
|
|
|
2019-11-05 08:42:53 +01:00
|
|
|
@cmd_run(podman_compose, 'logs', 'show logs from services')
def compose_logs(compose, args):
    """Show logs of the selected services (all of them by default,
    unless --latest restricts podman to its most recent container)."""
    container_names_by_service = compose.container_names_by_service
    if not args.services and not args.latest:
        args.services = container_names_by_service.keys()
    targets = []
    for service in args.services:
        if service not in container_names_by_service:
            raise ValueError("unknown service: " + service)
        targets.extend(container_names_by_service[service])
    podman_args = []
    if args.follow:
        podman_args.append('-f')
    if args.latest:
        podman_args.append("-l")
    if args.names:
        podman_args.append('-n')
    if args.since:
        podman_args.extend(['--since', args.since])
    # the default value is to print all logs which is in podman = 0 and not
    # needed to be passed
    if args.tail and args.tail != 'all':
        podman_args.extend(['--tail', args.tail])
    if args.timestamps:
        podman_args.append('-t')
    if args.until:
        podman_args.extend(['--until', args.until])
    podman_args.extend(targets)
    compose.podman.run([], 'logs', podman_args)
|
2019-11-05 08:42:53 +01:00
|
|
|
|
2021-12-30 14:39:35 +01:00
|
|
|
@cmd_run(podman_compose, 'config', "displays the compose file")
def compose_config(compose, args):
    # print the merged (multi-file, already-resolved) compose YAML text
    print(compose.merged_yaml)
|
2021-12-30 14:39:35 +01:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
###################
|
|
|
|
# command arguments parsing
|
|
|
|
###################
|
|
|
|
|
2021-12-29 22:23:24 +01:00
|
|
|
@cmd_parse(podman_compose, 'version')
def compose_version_parse(parser):
    # options for the `version` command (see compose_version)
    parser.add_argument("-f", "--format", choices=['pretty', 'json'], default='pretty',
        help="Format the output")
    parser.add_argument("--short", action='store_true',
        help="Shows only Podman Compose's version number")
|
|
|
|
|
2019-08-14 17:49:21 +02:00
|
|
|
@cmd_parse(podman_compose, 'up')
def compose_up_parse(parser):
    # options for the `up` command, mirroring docker-compose's flags.
    # NOTE(review): some flags (--no-color, --quiet-pull, --remove-orphans,
    # --scale, -V) are parsed here but may not be acted upon by compose_up
    # in this version — verify before relying on them.
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name. Incompatible with --abort-on-container-exit.")
    parser.add_argument("--no-color", action='store_true',
        help="Produce monochrome output.")
    parser.add_argument("--quiet-pull", action='store_true',
        help="Pull without printing progress information.")
    parser.add_argument("--no-deps", action='store_true',
        help="Don't start linked services.")
    parser.add_argument("--force-recreate", action='store_true',
        help="Recreate containers even if their configuration and image haven't changed.")
    parser.add_argument("--always-recreate-deps", action='store_true',
        help="Recreate dependent containers. Incompatible with --no-recreate.")
    parser.add_argument("--no-recreate", action='store_true',
        help="If containers already exist, don't recreate them. Incompatible with --force-recreate and -V.")
    parser.add_argument("--no-build", action='store_true',
        help="Don't build an image, even if it's missing.")
    parser.add_argument("--no-start", action='store_true',
        help="Don't start the services after creating them.")
    parser.add_argument("--build", action='store_true',
        help="Build images before starting containers.")
    parser.add_argument("--abort-on-container-exit", action='store_true',
        help="Stops all containers if any container was stopped. Incompatible with -d.")
    parser.add_argument("-t", "--timeout", type=float, default=10,
        help="Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)")
    parser.add_argument("-V", "--renew-anon-volumes", action='store_true',
        help="Recreate anonymous volumes instead of retrieving data from the previous containers.")
    parser.add_argument("--remove-orphans", action='store_true',
        help="Remove containers for services not defined in the Compose file.")
    parser.add_argument('--scale', metavar="SERVICE=NUM", action='append',
        help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
    parser.add_argument("--exit-code-from", metavar='SERVICE', type=str, default=None,
        help="Return the exit code of the selected service container. Implies --abort-on-container-exit.")
|
|
|
|
|
2021-11-22 19:34:40 +01:00
|
|
|
@cmd_parse(podman_compose, 'down')
def compose_down_parse(parser):
    # options for the `down` command (see compose_down)
    parser.add_argument("-v", "--volumes", action='store_true', default=False,
        help="Remove named volumes declared in the `volumes` section of the Compose file and "
             "anonymous volumes attached to containers.")
|
|
|
|
|
2019-08-10 13:11:28 +02:00
|
|
|
@cmd_parse(podman_compose, 'run')
def compose_run_parse(parser):
    # options for the `run` command (see compose_run)
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name.")
    parser.add_argument("--name", type=str, default=None,
        help="Assign a name to the container")
    parser.add_argument("--entrypoint", type=str, default=None,
        help="Override the entrypoint of the image.")
    parser.add_argument('-e', '--env', metavar="KEY=VAL", action='append',
        help="Set an environment variable (can be used multiple times)")
    parser.add_argument('-l', '--label', metavar="KEY=VAL", action='append',
        help="Add or override a label (can be used multiple times)")
    parser.add_argument("-u", "--user", type=str, default=None,
        help="Run as specified username or uid")
    parser.add_argument("--no-deps", action='store_true',
        help="Don't start linked services")
    parser.add_argument("--rm", action='store_true',
        help="Remove container after run. Ignored in detached mode.")
    parser.add_argument('-p', '--publish', action='append',
        help="Publish a container's port(s) to the host (can be used multiple times)")
    parser.add_argument("--service-ports", action='store_true',
        help="Run command with the service's ports enabled and mapped to the host.")
    parser.add_argument('-v', '--volume', action='append',
        help="Bind mount a volume (can be used multiple times)")
    parser.add_argument("-T", action='store_true',
        help="Disable pseudo-tty allocation. By default `podman-compose run` allocates a TTY.")
    parser.add_argument("-w", "--workdir", type=str, default=None,
        help="Working directory inside the container")
    # positional: service name, then everything after it as the command
    parser.add_argument('service', metavar='service', nargs=None,
        help='service name')
    parser.add_argument('cnt_command', metavar='command', nargs=argparse.REMAINDER,
        help='command and its arguments')
|
2019-08-10 13:11:28 +02:00
|
|
|
|
2021-04-26 13:31:09 +02:00
|
|
|
@cmd_parse(podman_compose, 'exec')
def compose_exec_parse(parser):
    # options for the `exec` command (see compose_exec).
    # fix: renamed from `compose_run_parse`, which shadowed the identically
    # named parser function for the `run` command at module level; the
    # decorator registration (and thus behavior) is unchanged.
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name.")
    parser.add_argument("--privileged", action='store_true', default=False,
        help="Give the process extended Linux capabilities inside the container")
    parser.add_argument("-u", "--user", type=str, default=None,
        help="Run as specified username or uid")
    parser.add_argument("-T", action='store_true',
        help="Disable pseudo-tty allocation. By default `podman-compose run` allocates a TTY.")
    parser.add_argument("--index", type=int, default=1,
        help="Index of the container if there are multiple instances of a service")
    parser.add_argument('-e', '--env', metavar="KEY=VAL", action='append',
        help="Set an environment variable (can be used multiple times)")
    parser.add_argument("-w", "--workdir", type=str, default=None,
        help="Working directory inside the container")
    parser.add_argument('service', metavar='service', nargs=None,
        help='service name')
    parser.add_argument('cnt_command', metavar='command', nargs=argparse.REMAINDER,
        help='command and its arguments')
|
|
|
|
|
|
|
|
|
2020-05-24 16:09:56 +02:00
|
|
|
@cmd_parse(podman_compose, ['down', 'stop', 'restart'])
def compose_parse_timeout(parser):
    """Shared `-t/--timeout` flag for the commands that stop containers."""
    parser.add_argument(
        "-t",
        "--timeout",
        type=int,
        default=10,
        help="Specify a shutdown timeout in seconds. ",
    )
2019-11-05 08:42:53 +01:00
|
|
|
@cmd_parse(podman_compose, ['logs'])
def compose_logs_parse(parser):
    """Register CLI flags for `podman-compose logs`."""
    # Streaming / selection flags.
    parser.add_argument(
        "-f", "--follow", action='store_true',
        help="Follow log output. The default is false")
    parser.add_argument(
        "-l", "--latest", action='store_true',
        help="Act on the latest container podman is aware of")
    parser.add_argument(
        "-n", "--names", action='store_true',
        help="Output the container name in the log")
    # Time-window filters (passed through to `podman logs`).
    parser.add_argument(
        "--since", type=str, default=None,
        help="Show logs since TIMESTAMP")
    parser.add_argument(
        "-t", "--timestamps", action='store_true',
        help="Show timestamps.")
    parser.add_argument(
        "--tail", type=str, default="all",
        help="Number of lines to show from the end of the logs for each container.")
    parser.add_argument(
        "--until", type=str, default=None,
        help="Show logs until TIMESTAMP")
    # Zero or more service names; empty means "all services".
    parser.add_argument(
        'services', metavar='services', nargs='*', default=None,
        help='service names')
2021-08-05 11:24:35 +02:00
|
|
|
@cmd_parse(podman_compose, 'pull')
def compose_pull_parse(parser):
    """Register CLI flags for `podman-compose pull`."""
    parser.add_argument(
        "--force-local",
        action='store_true',
        default=False,
        help="Also pull unprefixed images for services which have a build section",
    )
2019-08-09 15:31:56 +02:00
|
|
|
@cmd_parse(podman_compose, 'push')
def compose_push_parse(parser):
    """Register CLI flags for `podman-compose push`."""
    parser.add_argument(
        "--ignore-push-failures",
        action='store_true',
        help="Push what it can and ignores images with push failures. (not implemented)",
    )
    # Zero or more service names; empty means "push everything".
    parser.add_argument(
        'services',
        metavar='services',
        nargs='*',
        help='services to push',
    )
2019-10-04 21:56:51 +02:00
|
|
|
@cmd_parse(podman_compose, 'ps')
def compose_ps_parse(parser):
    """Register CLI flags for `podman-compose ps`."""
    parser.add_argument(
        "-q",
        "--quiet",
        action='store_true',
        help="Only display container IDs",
    )
2020-05-22 17:06:45 +02:00
|
|
|
@cmd_parse(podman_compose, ['build', 'up'])
def compose_build_parse(parser):
    """Register image-build flags shared by `build` and `up`."""
    parser.add_argument("--pull",
        help="attempt to pull a newer version of the image", action='store_true')
    # Help text previously read "image, Raise an error" — fixed grammar.
    parser.add_argument("--pull-always",
        help="attempt to pull a newer version of the image; raise an error even if the image is present locally.", action='store_true')
    # metavar normalized to KEY=VAL to match -e/--env and -l/--label.
    parser.add_argument("--build-arg", metavar="KEY=VAL", action="append", default=[],
        help="Set build-time variables for services.")
    parser.add_argument("--no-cache",
        help="Do not use cache when building the image.", action='store_true')
2021-12-13 21:19:26 +01:00
|
|
|
@cmd_parse(podman_compose, ['build', 'up', 'down', 'start', 'stop', 'restart'])
def compose_parse_services(parser):
    """Positional `services` list shared by the container-lifecycle commands.

    Renamed from a second `compose_build_parse`, which shadowed the
    build/up option parser defined above; only the decorator registration
    matters, so the duplicate module-level name was never referenced.
    """
    parser.add_argument('services', metavar='services', nargs='*', default=None,
        help='affected services')
2019-08-09 15:31:56 +02:00
|
|
|
def main():
    # Entry point: dispatch to whichever subcommand handler was registered
    # via the @cmd_run/@cmd_parse decorators on the global podman_compose.
    podman_compose.run()
2019-03-23 21:04:07 +01:00
|
|
|
# Standard script entry guard: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()