#! /usr/bin/python3
# -*- coding: utf-8 -*-

# https://docs.docker.com/compose/compose-file/#service-configuration-reference
# https://docs.docker.com/samples/
# https://docs.docker.com/compose/gettingstarted/
# https://docs.docker.com/compose/django/
# https://docs.docker.com/compose/wordpress/

from __future__ import print_function

import sys
import os
import argparse
import subprocess
import textwrap
import time
import re
import hashlib
import random
import json

from threading import Thread

import shlex

try:
    from shlex import quote as cmd_quote
except ImportError:
    from pipes import quote as cmd_quote

# import fnmatch
# fnmatch.fnmatchcase(env, "*_HOST")

import yaml
from dotenv import dotenv_values

__version__ = '0.1.9'

PY3 = sys.version_info[0] == 3
if PY3:
    basestring = str

# helper functions

is_str = lambda s: isinstance(s, basestring)
is_dict = lambda d: isinstance(d, dict)
is_list = lambda l: not is_str(l) and not is_dict(l) and hasattr(l, "__iter__")
# identity filter
filteri = lambda a: filter(lambda i: i, a)

def try_int(i, fallback=None):
    try:
        return int(i)
    except ValueError:
        pass
    except TypeError:
        pass
    return fallback

def try_float(i, fallback=None):
    try:
        return float(i)
    except ValueError:
        pass
    except TypeError:
        pass
    return fallback

dir_re = re.compile(r"^[~/\.]")
propagation_re = re.compile("^(?:z|Z|r?shared|r?slave|r?private)$")
norm_re = re.compile('[^-_a-z0-9]')
num_split_re = re.compile(r'(\d+|\D+)')

PODMAN_CMDS = (
    "pull", "push", "build", "inspect",
    "run", "start", "stop", "rm", "volume",
)

def ver_as_list(a):
    return [try_int(i, i) for i in num_split_re.findall(a)]

def strverscmp_lt(a, b):
    a_ls = ver_as_list(a or '')
    b_ls = ver_as_list(b or '')
    return a_ls < b_ls
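
# Illustrative sketch of the version comparison (values assumed, not from the
# original file): num_split_re splits a version string into alternating
# numeric/non-numeric chunks so numeric parts compare as integers.
#   ver_as_list("3.4.2")             -> [3, ".", 4, ".", 2]
#   strverscmp_lt("3.4.2", "3.10.1") -> True   (4 < 10, unlike plain string comparison)
#   strverscmp_lt(None, "1.0")       -> True   (None is treated as the empty string)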

def parse_short_mount(mount_str, basedir):
    mount_a = mount_str.split(':')
    mount_opt_dict = {}
    mount_opt = None
    if len(mount_a) == 1:
        # Anonymous: just specify a path and let the engine create the volume
        # - /var/lib/mysql
        mount_src, mount_dst = None, mount_str
    elif len(mount_a) == 2:
        mount_src, mount_dst = mount_a
        # dest must start with / like /foo:/var/lib/mysql
        # otherwise it's an option like /var/lib/mysql:rw
        if not mount_dst.startswith('/'):
            mount_dst, mount_opt = mount_a
            mount_src = None
    elif len(mount_a) == 3:
        mount_src, mount_dst, mount_opt = mount_a
    else:
        raise ValueError("could not parse mount "+mount_str)
    if mount_src and dir_re.match(mount_src):
        # Specify an absolute path mapping
        # - /opt/data:/var/lib/mysql
        # Path on the host, relative to the Compose file
        # - ./cache:/tmp/cache
        # User-relative path
        # - ~/configs:/etc/configs/:ro
        mount_type = "bind"
        basedir = os.path.realpath(basedir)
        mount_src = os.path.join(basedir, os.path.expanduser(mount_src))
    else:
        # Named volume
        # - datavolume:/var/lib/mysql
        mount_type = "volume"
    mount_opts = filteri((mount_opt or '').split(','))
    for opt in mount_opts:
        if opt == 'ro': mount_opt_dict["read_only"] = True
        elif opt == 'rw': mount_opt_dict["read_only"] = False
        elif opt in ('consistent', 'delegated', 'cached'):
            mount_opt_dict["consistency"] = opt
        elif propagation_re.match(opt): mount_opt_dict["bind"] = dict(propagation=opt)
        else:
            # TODO: ignore
            raise ValueError("unknown mount option "+opt)
    return dict(type=mount_type, source=mount_src, target=mount_dst, **mount_opt_dict)
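
# Illustrative examples of short mount syntax parsing (paths assumed, not from
# the original file); basedir is the directory of the compose file:
#   parse_short_mount("/opt/data:/var/lib/mysql:ro", "/proj")
#     -> {"type": "bind", "source": "/opt/data", "target": "/var/lib/mysql", "read_only": True}
#   parse_short_mount("datavolume:/var/lib/mysql", "/proj")
#     -> {"type": "volume", "source": "datavolume", "target": "/var/lib/mysql"}
#   parse_short_mount("/var/lib/mysql", "/proj")
#     -> {"type": "volume", "source": None, "target": "/var/lib/mysql"}   # anonymous volume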

# NOTE: if a named volume is used but not defined it
# gives ERROR: Named volume "abc" is used in service "xyz"
# but no declaration was found in the volumes section.
# unless it's an anonymous volume

def fix_mount_dict(compose, mount_dict, proj_name, srv_name):
    """
    in-place fix mount dictionary to:
    - define _vol to be the corresponding top-level volume
    - if the name is missing, derive it from the source prefixed with the project name
    - if there is no source, generate a name
    """
    # if already applied, nothing to do
    if "_vol" in mount_dict: return mount_dict
    if mount_dict["type"] == "volume":
        vols = compose.vols
        source = mount_dict.get("source", None)
        vol = (vols.get(source, None) or {}) if source else {}
        name = vol.get('name', None)
        mount_dict["_vol"] = vol
        # handle anonymous or implied volume
        if not source:
            # missing source
            vol["name"] = "_".join([
                proj_name, srv_name,
                hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
            ])
        elif not name:
            external = vol.get("external", None)
            ext_name = external.get("name", None) if isinstance(external, dict) else None
            vol["name"] = ext_name if ext_name else f"{proj_name}_{source}"
    return mount_dict
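
# Illustrative naming behavior (project/service names assumed): for a named
# volume with source "db_data" in project "myproj" whose top-level definition
# sets no explicit or external name, the derived name is "myproj_db_data"; for
# an anonymous volume (no source) the name is
# "<proj>_<srv>_<sha256 of the target path>", so the same target in the same
# service always maps back to the same volume.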

# docker and docker-compose support a subset of bash variable substitution
# https://docs.docker.com/compose/compose-file/#variable-substitution
# https://docs.docker.com/compose/env-file/
# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
# $VARIABLE
# ${VARIABLE}
# ${VARIABLE:-default} default if not set or empty
# ${VARIABLE-default} default if not set
# ${VARIABLE:?err} raise error if not set or empty
# ${VARIABLE?err} raise error if not set
# $$ means $

var_re = re.compile(r"""
    \$(?:
        (?P<escaped>\$) |
        (?P<named>[_a-zA-Z][_a-zA-Z0-9]*) |
        (?:{
            (?P<braced>[_a-zA-Z][_a-zA-Z0-9]*)
            (?:
                (?::?-(?P<default>[^}]+)) |
                (?::?\?(?P<err>[^}]+))
            )?
        })
    )
""", re.VERBOSE)

def rec_subs(value, subs_dict):
    """
    do bash-like substitution in value; if value is a list or dictionary, recurse
    """
    if is_dict(value):
        value = dict([(k, rec_subs(v, subs_dict)) for k, v in value.items()])
    elif is_str(value):
        def convert(m):
            if m.group("escaped") is not None:
                return "$"
            name = m.group("named") or m.group("braced")
            value = subs_dict.get(name)
            if value is not None:
                return "%s" % value
            if m.group("err") is not None:
                raise RuntimeError(m.group("err"))
            return m.group("default") or ""
        value = var_re.sub(convert, value)
    elif hasattr(value, "__iter__"):
        value = [rec_subs(i, subs_dict) for i in value]
    return value
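
# Illustrative substitutions (environment values assumed, not from the file):
#   rec_subs("image: busybox:${TAG:-latest}", {})            -> "image: busybox:latest"
#   rec_subs({"ports": ["${PORT}:80"]}, {"PORT": 8080})       -> {"ports": ["8080:80"]}
#   rec_subs("$$HOME is kept literal", {})                    -> "$HOME is kept literal"
#   rec_subs("${DB_PASS?database password is required}", {})  -> raises RuntimeError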

def norm_as_list(src):
    """
    given a dictionary {key1: value1, key2: None} or a list,
    return a list of ["key1=value1", "key2"]
    """
    if src is None:
        dst = []
    elif is_dict(src):
        dst = [("{}={}".format(k, v) if v is not None else k) for k, v in src.items()]
    elif is_list(src):
        dst = list(src)
    else:
        dst = [src]
    return dst


def norm_as_dict(src):
    """
    given a list ["key1=value1", "key2"],
    return a dictionary {key1: value1, key2: None}
    """
    if src is None:
        dst = {}
    elif is_dict(src):
        dst = dict(src)
    elif is_list(src):
        dst = [i.split("=", 1) for i in src if i]
        dst = dict([(a if len(a) == 2 else (a[0], None)) for a in dst])
    elif is_str(src):
        key, value = src.split("=", 1) if "=" in src else (src, None)
        dst = {key: value}
    else:
        raise ValueError("dictionary or iterable is expected")
    return dst
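
# Illustrative round-trip (values assumed):
#   norm_as_list({"POSTGRES_DB": "db", "DEBUG": None}) -> ["POSTGRES_DB=db", "DEBUG"]
#   norm_as_dict(["POSTGRES_DB=db", "DEBUG"])          -> {"POSTGRES_DB": "db", "DEBUG": None}
#   norm_as_dict("MODE=prod")                          -> {"MODE": "prod"}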

def norm_ulimit(inner_value):
    if is_dict(inner_value):
        if not inner_value.keys() & {"soft", "hard"}:
            raise ValueError("expected at least one soft or hard limit")
        soft = inner_value.get("soft", inner_value.get("hard", None))
        hard = inner_value.get("hard", inner_value.get("soft", None))
        return "{}:{}".format(soft, hard)
    elif is_list(inner_value): return norm_ulimit(norm_as_dict(inner_value))
    # if int or string return as is
    return inner_value
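
# Illustrative examples (values assumed): norm_ulimit({"soft": 1024, "hard": 2048})
# returns "1024:2048", norm_ulimit({"soft": 1024}) returns "1024:1024", and a bare
# int or string such as 1024 or "1024:2048" is returned unchanged.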

# transformation helpers

def adj_hosts(services, cnt, dst="127.0.0.1"):
    """
    adjust container cnt in-place to add hosts pointing to dst for services
    """
    common_extra_hosts = []
    for srv, cnts in services.items():
        common_extra_hosts.append("{}:{}".format(srv, dst))
        for cnt0 in cnts:
            common_extra_hosts.append("{}:{}".format(cnt0, dst))
    extra_hosts = list(cnt.get("extra_hosts", []))
    extra_hosts.extend(common_extra_hosts)
    # link aliases
    for link in cnt.get("links", []):
        a = link.strip().split(':', 1)
        if len(a) == 2:
            alias = a[1].strip()
            extra_hosts.append("{}:{}".format(alias, dst))
    cnt["extra_hosts"] = extra_hosts
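
# Illustrative effect (names assumed): with services {"web": ["proj_web_1"]} and
# dst "127.0.0.1", a container gains extra_hosts entries "web:127.0.0.1" and
# "proj_web_1:127.0.0.1", plus "<alias>:127.0.0.1" for every "service:alias" link.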

def move_list(dst, containers, key):
    """
    move key (like port forwarding) from containers to dst (a pod or an infra container)
    """
    a = set(dst.get(key, None) or [])
    for cnt in containers:
        a0 = cnt.get(key, None)
        if a0:
            a.update(a0)
            del cnt[key]
    if a:
        dst[key] = list(a)


def move_port_fw(dst, containers):
    """
    move port forwarding from containers to dst (a pod or an infra container)
    """
    move_list(dst, containers, "ports")


def move_extra_hosts(dst, containers):
    """
    move extra_hosts from containers to dst (a pod or an infra container)
    """
    move_list(dst, containers, "extra_hosts")

# transformations

transformations = {}


def trans(func):
    transformations[func.__name__.replace("tr_", "")] = func
    return func


@trans
def tr_identity(project_name, services, given_containers):
    containers = []
    for cnt in given_containers:
        containers.append(dict(cnt))
    return [], containers


@trans
def tr_publishall(project_name, services, given_containers):
    containers = []
    for cnt0 in given_containers:
        cnt = dict(cnt0, publishall=True)
        # adjust hosts to point to the gateway, TODO: adjust host env
        adj_hosts(services, cnt, '10.0.2.2')
        containers.append(cnt)
    return [], containers


@trans
def tr_hostnet(project_name, services, given_containers):
    containers = []
    for cnt0 in given_containers:
        cnt = dict(cnt0, network_mode="host")
        # adjust hosts to point to localhost, TODO: adjust host env
        adj_hosts(services, cnt, '127.0.0.1')
        containers.append(cnt)
    return [], containers


@trans
def tr_cntnet(project_name, services, given_containers):
    containers = []
    infra_name = project_name + "_infra"
    infra = dict(
        name=infra_name,
        image="k8s.gcr.io/pause:3.1",
        _service=None,
        service_name=None
    )
    for cnt0 in given_containers:
        cnt = dict(cnt0, network_mode="container:"+infra_name)
        deps = cnt.get("depends_on", None) or []
        deps.append(infra_name)
        cnt["depends_on"] = deps
        # adjust hosts to point to localhost, TODO: adjust host env
        adj_hosts(services, cnt, '127.0.0.1')
        if "hostname" in cnt:
            del cnt["hostname"]
        containers.append(cnt)
    move_port_fw(infra, containers)
    move_extra_hosts(infra, containers)
    containers.insert(0, infra)
    return [], containers


@trans
def tr_1pod(project_name, services, given_containers):
    """
    project_name:
    services: {service_name: ["container_name1", "..."]}, currently only one is supported
    given_containers: [{}, ...]
    """
    pod = dict(name=project_name)
    containers = []
    for cnt0 in given_containers:
        cnt = dict(cnt0, pod=project_name)
        # services can be accessed as localhost because they are on one pod
        # adjust hosts to point to localhost, TODO: adjust host env
        adj_hosts(services, cnt, '127.0.0.1')
        containers.append(cnt)
    return [pod], containers


@trans
def tr_1podfw(project_name, services, given_containers):
    pods, containers = tr_1pod(project_name, services, given_containers)
    pod = pods[0]
    move_port_fw(pod, containers)
    return pods, containers

def assert_volume(compose, mount_dict):
    """
    inspect volume to get directory
    create volume if needed
    """
    vol = mount_dict.get("_vol", None)
    if mount_dict["type"] != "volume" or not vol or vol.get("external", None) or not vol.get("name", None): return
    proj_name = compose.project_name
    vol_name = vol["name"]
    print("podman volume inspect {vol_name} || podman volume create {vol_name}".format(vol_name=vol_name))
    # TODO: might move to using "volume list"
    # podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE'
    try: out = compose.podman.output([], "volume", ["inspect", vol_name]).decode('utf-8')
    except subprocess.CalledProcessError:
        labels = vol.get("labels", None) or []
        args = [
            "create",
            "--label", "io.podman.compose.project={}".format(proj_name),
            "--label", "com.docker.compose.project={}".format(proj_name),
        ]
        for item in norm_as_list(labels):
            args.extend(["--label", item])
        args.append(vol_name)
        compose.podman.output([], "volume", args)
        out = compose.podman.output([], "volume", ["inspect", vol_name]).decode('utf-8')


def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name):
    mount_type = mount_desc.get("type", None)
    vol = mount_desc.get("_vol", None) if mount_type == "volume" else None
    source = vol["name"] if vol else mount_desc.get("source", None)
    target = mount_desc["target"]
    opts = []
    if mount_desc.get(mount_type, None):
        # TODO: we might need to add mount_dict[mount_type]["propagation"] = "z"
        mount_prop = mount_desc.get(mount_type, {}).get("propagation", None)
        if mount_prop: opts.append("{}-propagation={}".format(mount_type, mount_prop))
    if mount_desc.get("read_only", False): opts.append("ro")
    if mount_type == 'tmpfs':
        tmpfs_opts = mount_desc.get("tmpfs", {})
        tmpfs_size = tmpfs_opts.get("size", None)
        if tmpfs_size:
            opts.append("tmpfs-size={}".format(tmpfs_size))
        tmpfs_mode = tmpfs_opts.get("mode", None)
        if tmpfs_mode:
            opts.append("tmpfs-mode={}".format(tmpfs_mode))
    opts = ",".join(opts)
    if mount_type == 'bind':
        return "type=bind,source={source},destination={target},{opts}".format(
            source=source,
            target=target,
            opts=opts
        ).rstrip(",")
    elif mount_type == 'volume':
        return "type=volume,source={source},destination={target},{opts}".format(
            source=source,
            target=target,
            opts=opts
        ).rstrip(",")
    elif mount_type == 'tmpfs':
        return "type=tmpfs,destination={target},{opts}".format(
            target=target,
            opts=opts
        ).rstrip(",")
    else:
        raise ValueError("unknown mount type:"+mount_type)
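
# Illustrative output (descriptor assumed): a bind mount
# {"type": "bind", "source": "/opt/data", "target": "/var/lib/mysql", "read_only": True}
# is rendered for `podman run --mount` as
# "type=bind,source=/opt/data,destination=/var/lib/mysql,ro".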

def container_to_ulimit_args(cnt, podman_args):
    ulimit = cnt.get('ulimits', [])
    if ulimit is not None:
        # ulimit can be a single value, e.g. ulimit: host
        if is_str(ulimit):
            podman_args.extend(['--ulimit', ulimit])
        # or a dictionary or list:
        else:
            ulimit = norm_as_dict(ulimit)
            ulimit = [ "{}={}".format(ulimit_key, norm_ulimit(inner_value)) for ulimit_key, inner_value in ulimit.items()]
            for i in ulimit:
                podman_args.extend(['--ulimit', i])
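
# Illustrative expansion (values assumed): a service with
#   ulimits: {"nproc": 65535, "nofile": {"soft": 20000, "hard": 40000}}
# adds ["--ulimit", "nproc=65535", "--ulimit", "nofile=20000:40000"] to podman_args.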

def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name):
    mount_type = mount_desc["type"]
    if mount_type != 'bind' and mount_type != 'volume':
        raise ValueError("unknown mount type:"+mount_type)
    vol = mount_desc.get("_vol", None) if mount_type == "volume" else None
    source = vol["name"] if vol else mount_desc.get("source", None)
    if not source:
        raise ValueError(f"missing mount source for {mount_type} on {srv_name}")
    target = mount_desc["target"]
    opts = []

    propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(',')))
    if mount_type != 'bind':
        propagations.update(filteri(mount_desc.get('bind', {}).get("propagation", "").split(',')))
    opts.extend(propagations)
    # --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
    # [rw|ro]
    # [z|Z]
    # [[r]shared|[r]slave|[r]private]
    # [[r]bind]
    # [noexec|exec]
    # [nodev|dev]
    # [nosuid|suid]
    read_only = mount_desc.get("read_only", None)
    if read_only is not None:
        opts.append('ro' if read_only else 'rw')
    args = f'{source}:{target}'
    if opts: args += ':' + ','.join(opts)
    return args
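
# Illustrative output (descriptor assumed): the same bind mount shown above,
# {"type": "bind", "source": "/opt/data", "target": "/var/lib/mysql", "read_only": True},
# is rendered for `podman run -v` as "/opt/data:/var/lib/mysql:ro".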

def get_mount_args(compose, cnt, volume):
    proj_name = compose.project_name
    srv_name = cnt['_service']
    basedir = compose.dirname
    if is_str(volume): volume = parse_short_mount(volume, basedir)
    mount_type = volume["type"]

    assert_volume(compose, fix_mount_dict(compose, volume, proj_name, srv_name))
    if compose._prefer_volume_over_mount:
        if mount_type == 'tmpfs':
            # TODO: --tmpfs /tmp:rw,size=787448k,mode=1777
            args = volume['target']
            tmpfs_opts = volume.get("tmpfs", {})
            opts = []
            size = tmpfs_opts.get("size", None)
            if size: opts.append('size={}'.format(size))
            mode = tmpfs_opts.get("mode", None)
            if mode: opts.append('mode={}'.format(mode))
            if opts: args += ':' + ','.join(opts)
            return ['--tmpfs', args]
        else:
            args = mount_desc_to_volume_args(compose, volume, srv_name, cnt['name'])
            return ['-v', args]
    else:
        args = mount_desc_to_mount_args(compose, volume, srv_name, cnt['name'])
        return ['--mount', args]


def get_secret_args(compose, cnt, secret):
    secret_name = secret if is_str(secret) else secret.get('source', None)
    if not secret_name or secret_name not in compose.declared_secrets.keys():
        raise ValueError(
            'ERROR: undeclared secret: "{}", service: "{}"'
            .format(secret, cnt['_service'])
        )
    declared_secret = compose.declared_secrets[secret_name]

    source_file = declared_secret.get('file', None)
    dest_file = ''
    secret_opts = ''

    target = None if is_str(secret) else secret.get('target', None)
    uid = None if is_str(secret) else secret.get('uid', None)
    gid = None if is_str(secret) else secret.get('gid', None)
    mode = None if is_str(secret) else secret.get('mode', None)

    if source_file:
        if not target:
            dest_file = '/run/secrets/{}'.format(secret_name)
        elif not target.startswith("/"):
            dest_file = '/run/secrets/{}'.format(target if target else secret_name)
        else:
            dest_file = target
        volume_ref = [
            '--volume', '{}:{}:ro,rprivate,rbind'.format(source_file, dest_file)
        ]
        if uid or gid or mode:
            print(
                'WARNING: Service "{}" uses secret "{}" with uid, gid, or mode.'
                .format(cnt['_service'], target if target else secret_name)
                + ' These fields are not supported by this implementation of the Compose file'
            )
        return volume_ref
    # v3.5 and up added the external flag; earlier versions of the spec
    # only required a name to be specified.
    # docker-compose does not support external secrets outside of swarm mode.
    # However, accessing these via podman is trivial
    # since these commands are directly translated to
    # podman-create commands, albeit we can only support a 1:1 mapping
    # at the moment
    if declared_secret.get('external', False) or declared_secret.get('name', None):
        secret_opts += ',uid={}'.format(uid) if uid else ''
        secret_opts += ',gid={}'.format(gid) if gid else ''
        secret_opts += ',mode={}'.format(mode) if mode else ''
        # The target option is only valid for type=env,
        # which in an ideal world would work
        # for type=mount as well.
        # having a custom name for the external secret
        # has the same problem as well
        ext_name = declared_secret.get('name', None)
        err_str = 'ERROR: Custom name/target reference "{}" for mounted external secret "{}" is not supported'
        if ext_name and ext_name != secret_name:
            raise ValueError(err_str.format(secret_name, ext_name))
        elif target and target != secret_name:
            raise ValueError(err_str.format(target, secret_name))
        elif target:
            print('WARNING: Service "{}" uses target: "{}" for secret: "{}".'
                .format(cnt['_service'], target, secret_name)
                + ' That is not supported and is ignored.')
        return [ '--secret', '{}{}'.format(secret_name, secret_opts) ]

    raise ValueError('ERROR: unparseable secret: "{}", service: "{}"'
        .format(secret_name, cnt['_service']))

def container_to_res_args(cnt, podman_args):
    # v2 < https://docs.docker.com/compose/compose-file/compose-file-v2/#cpu-and-other-resources
    cpus_limit_v2 = try_float(cnt.get('cpus', None), None)
    cpu_shares_v2 = try_int(cnt.get('cpu_shares', None), None)
    mem_limit_v2 = cnt.get('mem_limit', None)
    mem_res_v2 = cnt.get('mem_reservation', None)
    # v3 < https://docs.docker.com/compose/compose-file/compose-file-v3/#resources
    # spec < https://github.com/compose-spec/compose-spec/blob/master/deploy.md#resources
    deploy = cnt.get('deploy', None) or {}
    res = deploy.get('resources', None) or {}
    limits = res.get('limits', None) or {}
    cpus_limit_v3 = try_float(limits.get('cpus', None), None)
    mem_limit_v3 = limits.get('memory', None)
    reservations = res.get('reservations', None) or {}
    #cpus_res_v3 = try_float(reservations.get('cpus', None), None)
    mem_res_v3 = reservations.get('memory', None)
    # add args
    cpus = cpus_limit_v3 or cpus_limit_v2
    if cpus:
        podman_args.extend(('--cpus', str(cpus),))
    if cpu_shares_v2:
        podman_args.extend(('--cpu-shares', str(cpu_shares_v2),))
    mem = mem_limit_v3 or mem_limit_v2
    if mem:
        podman_args.extend(('-m', str(mem).lower(),))
    mem_res = mem_res_v3 or mem_res_v2
    if mem_res:
        podman_args.extend(('--memory-reservation', str(mem_res).lower(),))
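
# Illustrative mapping (values assumed): a v3-style
#   deploy: {"resources": {"limits": {"cpus": "0.5", "memory": "512M"}}}
# appends ["--cpus", "0.5", "-m", "512m"] to podman_args; the older v2 keys
# (cpus, cpu_shares, mem_limit, mem_reservation) feed the same flags.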

def port_dict_to_str(port_desc):
    # NOTE: `mode: host|ingress` is ignored
    cnt_port = port_desc.get("target", None)
    published = port_desc.get("published", None) or ""
    host_ip = port_desc.get("host_ip", None)
    protocol = port_desc.get("protocol", None) or "tcp"
    if not cnt_port:
        raise ValueError("target container port must be specified")
    if host_ip:
        ret = f"{host_ip}:{published}:{cnt_port}"
    else:
        ret = f"{published}:{cnt_port}" if published else f"{cnt_port}"
    if protocol != "tcp":
        ret += f"/{protocol}"
    return ret


def norm_ports(ports_in):
    if not ports_in:
        ports_in = []
    if isinstance(ports_in, str):
        ports_in = [ports_in]
    ports_out = []
    for port in ports_in:
        if isinstance(port, dict):
            port = port_dict_to_str(port)
        elif not isinstance(port, str):
            raise TypeError("port should be either string or dict")
        ports_out.append(port)
    return ports_out
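
# Illustrative conversions (values assumed):
#   port_dict_to_str({"target": 80, "published": 8080, "protocol": "udp"}) -> "8080:80/udp"
#   port_dict_to_str({"target": 443, "host_ip": "127.0.0.1"})              -> "127.0.0.1::443"
#   norm_ports("8080:80")                                                  -> ["8080:80"]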

def container_to_args(compose, cnt, detached=True):
    # TODO: double check -e , --add-host, -v, --read-only
    dirname = compose.dirname
    pod = cnt.get('pod', None) or ''
    podman_args = [
        '--name={}'.format(cnt.get('name', None)),
    ]

    if detached:
        podman_args.append("-d")

    if pod:
        podman_args.append('--pod={}'.format(pod))
    sec = norm_as_list(cnt.get("security_opt", None))
    for s in sec:
        podman_args.extend(['--security-opt', s])
    ann = norm_as_list(cnt.get("annotations", None))
    for a in ann:
        podman_args.extend(['--annotation', a])
    if cnt.get('read_only', None):
        podman_args.append('--read-only')
    for i in cnt.get('labels', []):
        podman_args.extend(['--label', i])
    net = cnt.get("network_mode", None)
    if net:
        podman_args.extend(['--network', net])
    for c in cnt.get('cap_add', []):
        podman_args.extend(['--cap-add', c])
    for c in cnt.get('cap_drop', []):
        podman_args.extend(['--cap-drop', c])
    for d in cnt.get('devices', []):
        podman_args.extend(['--device', d])
    env_file = cnt.get('env_file', [])
    if is_str(env_file): env_file = [env_file]
    for i in env_file:
        i = os.path.realpath(os.path.join(dirname, i))
        podman_args.extend(['--env-file', i])
    env = norm_as_list(cnt.get('environment', {}))
    for e in env:
        podman_args.extend(['-e', e])
    tmpfs_ls = cnt.get('tmpfs', [])
    if is_str(tmpfs_ls): tmpfs_ls = [tmpfs_ls]
    for i in tmpfs_ls:
        podman_args.extend(['--tmpfs', i])
    for volume in cnt.get('volumes', []):
        # TODO: should we make it os.path.realpath(os.path.join(, i))?
        podman_args.extend(get_mount_args(compose, cnt, volume))
    log = cnt.get('logging')
    if log is not None:
        podman_args.append(f'--log-driver={log.get("driver", "k8s-file")}')
        log_opts = log.get('options') or {}
        podman_args += [f'--log-opt={name}={value}' for name, value in log_opts.items()]
    for secret in cnt.get('secrets', []):
        podman_args.extend(get_secret_args(compose, cnt, secret))
    for i in cnt.get('extra_hosts', []):
        podman_args.extend(['--add-host', i])
    for i in cnt.get('expose', []):
        podman_args.extend(['--expose', i])
    if cnt.get('publishall', None):
        podman_args.append('-P')
    ports = cnt.get('ports', None) or []
    if isinstance(ports, str):
        ports = [ports]
    for port in ports:
        if isinstance(port, dict):
            port = port_dict_to_str(port)
        elif not isinstance(port, str):
            raise TypeError("port should be either string or dict")
        podman_args.extend(['-p', port])

    user = cnt.get('user', None)
    if user is not None:
        podman_args.extend(['-u', user])
    if cnt.get('working_dir', None) is not None:
        podman_args.extend(['-w', cnt['working_dir']])
    if cnt.get('hostname', None):
        podman_args.extend(['--hostname', cnt['hostname']])
    if cnt.get('shm_size', None):
        podman_args.extend(['--shm-size', '{}'.format(cnt['shm_size'])])
    if cnt.get('stdin_open', None):
        podman_args.append('-i')
    if cnt.get('stop_signal', None):
        podman_args.extend(['--stop-signal', cnt['stop_signal']])
    for i in cnt.get('sysctls', []):
        podman_args.extend(['--sysctl', i])
    if cnt.get('tty', None):
        podman_args.append('--tty')
    if cnt.get('privileged', None):
        podman_args.append('--privileged')
    pull_policy = cnt.get('pull_policy', None)
    if pull_policy is not None and pull_policy != 'build':
        podman_args.extend(['--pull', pull_policy])
    if cnt.get('restart', None) is not None:
        podman_args.extend(['--restart', cnt['restart']])
    container_to_ulimit_args(cnt, podman_args)
    container_to_res_args(cnt, podman_args)
    # currently podman shipped by fedora does not package this
    if cnt.get('init', None):
        podman_args.append('--init')
    if cnt.get('init-path', None):
        podman_args.extend(['--init-path', cnt['init-path']])
    entrypoint = cnt.get('entrypoint', None)
    if entrypoint is not None:
        if is_str(entrypoint):
            entrypoint = shlex.split(entrypoint)
        podman_args.extend(['--entrypoint', json.dumps(entrypoint)])

    # WIP: healthchecks are still work in progress
    healthcheck = cnt.get('healthcheck', None) or {}
    if not is_dict(healthcheck):
        raise ValueError("'healthcheck' must be a key-value mapping")
    healthcheck_test = healthcheck.get('test', None)
    if healthcheck_test:
        # If it's a string, it's equivalent to specifying CMD-SHELL
        if is_str(healthcheck_test):
            # podman does not add a shell to handle commands with whitespace
            podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test))])
        elif is_list(healthcheck_test):
            # If it's a list, the first item is either NONE, CMD or CMD-SHELL.
            healthcheck_type = healthcheck_test.pop(0)
            if healthcheck_type == 'NONE':
                podman_args.append("--no-healthcheck")
            elif healthcheck_type == 'CMD':
                podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(
                    "' '".join([cmd_quote(i) for i in healthcheck_test])
                )])
            elif healthcheck_type == 'CMD-SHELL':
                if len(healthcheck_test) != 1:
                    raise ValueError("'CMD-SHELL' takes a single string after it")
                podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test[0]))])
            else:
                raise ValueError(
                    "unknown healthcheck test type [{}], "
                    "expecting NONE, CMD or CMD-SHELL."
                    .format(healthcheck_type)
                )
        else:
            raise ValueError("'healthcheck.test' must be either a string or a list")

    # interval, timeout and start_period are specified as durations.
    if 'interval' in healthcheck:
        podman_args.extend(['--healthcheck-interval', healthcheck['interval']])
    if 'timeout' in healthcheck:
        podman_args.extend(['--healthcheck-timeout', healthcheck['timeout']])
    if 'start_period' in healthcheck:
        podman_args.extend(['--healthcheck-start-period', healthcheck['start_period']])

    # convert other parameters to string
    if 'retries' in healthcheck:
        podman_args.extend(['--healthcheck-retries', '{}'.format(healthcheck['retries'])])

    podman_args.append(cnt['image'])  # command, ..etc.
    command = cnt.get('command', None)
    if command is not None:
        if is_str(command):
            podman_args.extend(shlex.split(command))
        else:
            podman_args.extend([str(i) for i in command])
    return podman_args

def rec_deps(services, service_name, start_point=None):
    """
    return all dependencies of service_name recursively
    """
    if not start_point:
        start_point = service_name
    deps = services[service_name]["_deps"]
    for dep_name in deps.copy():
        # avoid A depends on A
        if dep_name == service_name:
            continue
        dep_srv = services.get(dep_name, None)
        if not dep_srv:
            continue
        # NOTE: avoid creating loops, A->B->A
        if start_point and start_point in dep_srv["_deps"]:
            continue
        new_deps = rec_deps(services, dep_name, start_point)
        deps.update(new_deps)
    return deps


def flat_deps(services, with_extends=False):
    """
    create dependencies "_deps" or update it recursively for all services
    """
    for name, srv in services.items():
        deps = set()
        srv["_deps"] = deps
        if with_extends:
            ext = srv.get("extends", {}).get("service", None)
            if ext:
                if ext != name: deps.add(ext)
            continue
        deps_ls = srv.get("depends_on", None) or []
        if is_str(deps_ls): deps_ls = [deps_ls]
        elif is_dict(deps_ls): deps_ls = list(deps_ls.keys())
        deps.update(deps_ls)
        # parse link to get service name and remove alias
        links_ls = srv.get("links", None) or []
        if not is_list(links_ls): links_ls = [links_ls]
        deps.update([(c.split(":")[0] if ":" in c else c)
            for c in links_ls])
    for name, srv in services.items():
        rec_deps(services, name)
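
# Illustrative result (service names assumed): given services where "web" has
# depends_on: ["api"] and "api" has links: ["db:database"], flat_deps() sets
# web["_deps"] == {"api", "db"} and api["_deps"] == {"db"}; rec_deps() folds in
# transitive dependencies while skipping self-references and A->B->A loops.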

###################
# podman and compose classes
###################


class Podman:
    def __init__(self, compose, podman_path='podman', dry_run=False):
        self.compose = compose
        self.podman_path = podman_path
        self.dry_run = dry_run

    def output(self, podman_args, cmd='', cmd_args=None):
        cmd_args = cmd_args or []
        xargs = self.compose.get_podman_args(cmd) if cmd else []
        cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args
        print(cmd_ls)
        return subprocess.check_output(cmd_ls)

    def run(self, podman_args, cmd='', cmd_args=None, wait=True, sleep=1, obj=None):
        if obj is not None:
            obj.exit_code = None
        cmd_args = list(map(str, cmd_args or []))
        xargs = self.compose.get_podman_args(cmd) if cmd else []
        cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args
        print(" ".join([str(i) for i in cmd_ls]))
        if self.dry_run:
            return None
        # subprocess.Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None,
        #                  preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None,
        #                  universal_newlines=False, startupinfo=None, creationflags=0)
        p = subprocess.Popen(cmd_ls)
        if wait:
            exit_code = p.wait()
            print("exit code:", exit_code)
            if obj is not None:
                obj.exit_code = exit_code

        if sleep:
            time.sleep(sleep)
        return p

    def volume_inspect_all(self):
        output = self.output(["volume", "inspect", "--all"]).decode('utf-8')
        return json.loads(output)

    def volume_rm(self, name):
        return self.run(["volume", "rm", name])

def normalize_service(service):
    for key in ("env_file", "security_opt"):
        if key not in service: continue
        if is_str(service[key]): service[key] = [service[key]]
    for key in ("environment", "labels"):
        if key not in service: continue
        service[key] = norm_as_dict(service[key])
    if "extends" in service:
        extends = service["extends"]
        if is_str(extends):
            extends = {"service": extends}
            service["extends"] = extends
    return service
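
# Illustrative normalization (values assumed): a service
#   {"env_file": ".env", "environment": ["A=1"], "extends": "base"}
# becomes
#   {"env_file": [".env"], "environment": {"A": "1"}, "extends": {"service": "base"}}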

def normalize(compose):
    """
    normalize the compose dict: convert string or dict values of some service keys
    into their canonical list/dict form
    """
    services = compose.get("services", None) or {}
    for service_name, service in services.items():
        normalize_service(service)
    return compose


def rec_merge_one(target, source):
    """
    update target from source recursively
    """
    done = set()
    for key, value in source.items():
        if key in target: continue
        target[key] = value
        done.add(key)
    for key, value in target.items():
        if key in done: continue
        if key not in source: continue
        value2 = source[key]
        if type(value2) != type(value):
            raise ValueError("can't merge value of {} of type {} and {}".format(key, type(value), type(value2)))
        if is_list(value2):
            if key == 'volumes':
                # clean duplicate mount targets
                pts = set([ v.split(':', 1)[1] for v in value2 if ":" in v ])
                del_ls = [ ix for (ix, v) in enumerate(value) if ":" in v and v.split(':', 1)[1] in pts ]
                for ix in reversed(del_ls):
                    del value[ix]
                value.extend(value2)
            else:
                value.extend(value2)
        elif is_dict(value2):
            rec_merge_one(value, value2)
        else:
            target[key] = value2
    return target


def rec_merge(target, *sources):
    """
    update target recursively from sources
    """
    for source in sources:
        ret = rec_merge_one(target, source)
    return ret
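
# Illustrative merge (values assumed): overriding a base service with
#   rec_merge({}, {"image": "app", "environment": {"A": "1"}},
#                 {"environment": {"B": "2"}, "ports": ["8080:80"]})
# yields {"image": "app", "environment": {"A": "1", "B": "2"}, "ports": ["8080:80"]};
# for the special "volumes" key, entries whose mount target is overridden by a
# later file replace the earlier ones instead of accumulating duplicates.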

def resolve_extends(services, service_names, environ):
    for name in service_names:
        service = services[name]
        ext = service.get("extends", {})
        if is_str(ext): ext = {"service": ext}
        from_service_name = ext.get("service", None)
        if not from_service_name: continue
        filename = ext.get("file", None)
        if filename:
            with open(filename, 'r') as f:
                content = yaml.safe_load(f) or {}
            if "services" in content:
                content = content["services"]
            content = rec_subs(content, environ)
            from_service = content.get(from_service_name, {})
            normalize_service(from_service)
        else:
            from_service = services.get(from_service_name, {}).copy()
            del from_service["_deps"]
            try:
                del from_service["extends"]
            except KeyError:
                pass
        new_service = rec_merge({}, from_service, service)
        services[name] = new_service


def dotenv_to_dict(dotenv_path):
    if not os.path.isfile(dotenv_path):
        return {}
    return dotenv_values(dotenv_path)

class PodmanCompose:
    def __init__(self):
        self.podman_version = None
        self.exit_code = None
        self.commands = {}
        self.global_args = None
        self.project_name = None
        self.dirname = None
        self.pods = None
        self.containers = None
        self.vols = None
        self.declared_secrets = None
        self.container_names_by_service = None
        self.container_by_name = None
        self._prefer_volume_over_mount = True

    def get_podman_args(self, cmd):
        xargs = []
        for args in self.global_args.podman_args:
            xargs.extend(shlex.split(args))
        cmd_norm = cmd if cmd != 'create' else 'run'
        cmd_args = self.global_args.__dict__.get(f"podman_{cmd_norm}_args", None) or []
        for args in cmd_args:
            xargs.extend(shlex.split(args))
        return xargs

    def run(self):
        args = self._parse_args()
        podman_path = args.podman_path
        if podman_path != 'podman':
            if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK):
                podman_path = os.path.realpath(podman_path)
            else:
                # this also works if podman hasn't been installed yet
                if args.dry_run == False:
                    sys.stderr.write("Binary {} has not been found.\n".format(podman_path))
                    exit(1)
        self.podman = Podman(self, podman_path, args.dry_run)
        if not args.dry_run:
            # just to make sure podman is running
            try:
                self.podman_version = self.podman.output(["--version"], '', []).decode('utf-8').strip() or ""
                self.podman_version = (self.podman_version.split() or [""])[-1]
            except subprocess.CalledProcessError:
                self.podman_version = None
            if not self.podman_version:
                sys.stderr.write("it seems that you do not have `podman` installed\n")
                exit(1)
            print("using podman version: "+self.podman_version)
        cmd_name = args.command
        if (cmd_name != "version"):
            self._parse_compose_file()
        cmd = self.commands[cmd_name]
        cmd(self, args)
def _parse_compose_file(self):
|
|
|
|
args = self.global_args
|
|
|
|
cmd = args.command
|
2019-09-08 01:20:48 +02:00
|
|
|
if not args.file:
|
|
|
|
args.file = list(filter(os.path.exists, [
|
2021-05-05 12:24:22 +02:00
|
|
|
"compose.yaml",
|
|
|
|
"compose.yml",
|
|
|
|
"compose.override.yaml",
|
|
|
|
"compose.override.yml",
|
2021-11-11 13:27:30 +01:00
|
|
|
"podman-compose.yaml",
|
|
|
|
"podman-compose.yml",
|
2019-09-08 01:20:48 +02:00
|
|
|
"docker-compose.yml",
|
|
|
|
"docker-compose.yaml",
|
|
|
|
"docker-compose.override.yml",
|
2019-09-20 21:38:03 +02:00
|
|
|
"docker-compose.override.yaml",
|
|
|
|
"container-compose.yml",
|
|
|
|
"container-compose.yaml",
|
|
|
|
"container-compose.override.yml",
|
|
|
|
"container-compose.override.yaml"
|
2019-09-08 01:20:48 +02:00
|
|
|
]))
|
|
|
|
files = args.file
|
|
|
|
if not files:
|
2021-06-16 21:13:25 +02:00
|
|
|
print("no compose.yaml, docker-compose.yml or container-compose.yml file found, pass files with -f")
|
2019-11-07 17:55:49 +01:00
|
|
|
exit(-1)
|
2019-09-08 01:20:48 +02:00
|
|
|
ex = map(os.path.exists, files)
|
|
|
|
missing = [ fn0 for ex0, fn0 in zip(ex, files) if not ex0 ]
|
|
|
|
if missing:
|
|
|
|
print("missing files: ", missing)
|
|
|
|
exit(1)
|
|
|
|
# make absolute
|
2021-03-03 16:30:26 +01:00
|
|
|
relative_files = files
|
2019-09-08 01:20:48 +02:00
|
|
|
files = list(map(os.path.realpath, files))
|
|
|
|
filename = files[0]
|
2019-08-09 15:31:56 +02:00
|
|
|
project_name = args.project_name
|
|
|
|
no_ansi = args.no_ansi
|
|
|
|
no_cleanup = args.no_cleanup
|
|
|
|
dry_run = args.dry_run
|
|
|
|
transform_policy = args.transform_policy
|
|
|
|
host_env = None
|
|
|
|
dirname = os.path.dirname(filename)
|
|
|
|
dir_basename = os.path.basename(dirname)
|
|
|
|
self.dirname = dirname
|
2019-08-10 17:08:21 +02:00
|
|
|
# TODO: remove next line
|
2019-08-10 13:11:28 +02:00
|
|
|
os.chdir(dirname)
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
if not project_name:
|
2021-06-16 21:13:25 +02:00
|
|
|
# More strict then actually needed for simplicity: podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]*
|
2020-12-02 14:31:51 +01:00
|
|
|
project_name = norm_re.sub('', dir_basename.lower())
|
|
|
|
if not project_name:
|
|
|
|
raise RuntimeError("Project name [{}] normalized to empty".format(dir_basename))
|
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
self.project_name = project_name
|
2019-11-17 08:09:41 +01:00
|
|
|
|
2019-08-09 15:31:56 +02:00
|
|
|
|
|
|
|
dotenv_path = os.path.join(dirname, ".env")
|
2021-11-13 22:27:43 +01:00
|
|
|
self.environ = dict(os.environ)
|
2021-12-10 00:01:45 +01:00
|
|
|
self.environ.update(dotenv_to_dict(dotenv_path))
|
2020-11-04 23:08:19 +01:00
|
|
|
# TODO: should read and respect those env variables
|
|
|
|
# see: https://docs.docker.com/compose/reference/envvars/
|
|
|
|
# see: https://docs.docker.com/compose/env-file/
|
2021-11-13 22:27:43 +01:00
|
|
|
self.environ.update({
|
2020-11-04 23:14:57 +01:00
|
|
|
"COMPOSE_FILE": os.path.basename(filename),
|
2020-11-04 23:08:19 +01:00
|
|
|
"COMPOSE_PROJECT_NAME": self.project_name,
|
|
|
|
"COMPOSE_PATH_SEPARATOR": ":",
|
|
|
|
})
compose = {'_dirname': dirname}
for filename in files:
    with open(filename, 'r') as f:
        content = yaml.safe_load(f)
        # print(filename, json.dumps(content, indent=2))
        if not isinstance(content, dict):
            sys.stderr.write("Compose file does not contain a top level object: %s\n" % filename)
            exit(1)
        content = normalize(content)
        # print(filename, json.dumps(content, indent=2))
        content = rec_subs(content, self.environ)
        rec_merge(compose, content)
# debug mode
if len(files) > 1:
    print(" ** merged:\n", json.dumps(compose, indent=2))
ver = compose.get('version', None)
services = compose.get('services', None)
if services is None:
    services = {}
    print("WARNING: No services defined")

# NOTE: maybe add "extends.service" to _deps at this stage
flat_deps(services, with_extends=True)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = [name for _, name in service_names]
resolve_extends(services, service_names, self.environ)
flat_deps(services)
service_names = sorted([(len(srv["_deps"]), name) for name, srv in services.items()])
service_names = [name for _, name in service_names]
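# Dependencies are computed twice: the first flat_deps() pass (with_extends=True)
# appears to also treat "extends.service" as a dependency so resolve_extends() can be
# applied in dependency order; the second pass recomputes _deps after extends are merged.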
# volumes: [...]
self.vols = compose.get('volumes', {})
podman_compose_labels = [
    "io.podman.compose.config-hash=123",
    "io.podman.compose.project=" + project_name,
    "io.podman.compose.version=0.0.1",
    "com.docker.compose.project=" + project_name,
    "com.docker.compose.project.working_dir=" + dirname,
    "com.docker.compose.project.config_files=" + ','.join(relative_files),
]
# other top-levels:
# networks: {driver: ...}
# configs: {...}
self.declared_secrets = compose.get('secrets', {})
given_containers = []
container_names_by_service = {}
self.services = services
for service_name, service_desc in services.items():
    replicas = try_int(service_desc.get('deploy', {}).get('replicas', '1'))
    container_names_by_service[service_name] = []
    for num in range(1, replicas + 1):
        name0 = "{project_name}_{service_name}_{num}".format(
            project_name=project_name,
            service_name=service_name,
            num=num,
        )
        if num == 1:
            name = service_desc.get("container_name", name0)
        else:
            name = name0
        container_names_by_service[service_name].append(name)
        # print(service_name, service_desc)
        cnt = dict(name=name, num=num,
            service_name=service_name, **service_desc)
        if 'image' not in cnt:
            cnt['image'] = "{project_name}_{service_name}".format(
                project_name=project_name,
                service_name=service_name,
            )
        labels = norm_as_list(cnt.get('labels', None))
        cnt["ports"] = norm_ports(cnt.get("ports", None))
        labels.extend(podman_compose_labels)
        labels.extend([
            "com.docker.compose.container-number={}".format(num),
            "com.docker.compose.service=" + service_name,
        ])
        cnt['labels'] = labels
        cnt['_service'] = service_name
        cnt['_project'] = project_name
        given_containers.append(cnt)
self.container_names_by_service = container_names_by_service
container_by_name = dict([(c["name"], c) for c in given_containers])
# print("deps:", [(c["name"], c["_deps"]) for c in given_containers])
given_containers = list(container_by_name.values())
given_containers.sort(key=lambda c: len(c.get('_deps', None) or []))
# print("sorted:", [c["name"] for c in given_containers])
tr = transformations[transform_policy]
pods, containers = tr(
    project_name, container_names_by_service, given_containers)
self.pods = pods
self.containers = containers
self.container_by_name = dict([(c["name"], c) for c in containers])
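# Naming scheme: the first replica may use the service's explicit container_name;
# every other replica is always named <project>_<service>_<num>.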


def _parse_args(self):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter
    )
    self._init_global_parser(parser)
    subparsers = parser.add_subparsers(title='command', dest='command')
    subparser = subparsers.add_parser('help', help='show help')
    for cmd_name, cmd in self.commands.items():
        subparser = subparsers.add_parser(cmd_name, help=cmd._cmd_desc)
        for cmd_parser in cmd._parse_args:
            cmd_parser(subparser)
    self.global_args = parser.parse_args()
    if self.global_args.version:
        self.global_args.command = "version"
    if not self.global_args.command or self.global_args.command == 'help':
        parser.print_help()
        exit(-1)
    return self.global_args


def _init_global_parser(self, parser):
    parser.add_argument("-v", "--version",
        help="show version", action='store_true')
    parser.add_argument("-f", "--file",
        help="Specify an alternate compose file (default: docker-compose.yml)",
        metavar='file', action='append', default=[])
    parser.add_argument("-p", "--project-name",
        help="Specify an alternate project name (default: directory name)",
        type=str, default=None)
    parser.add_argument("--podman-path",
        help="Specify an alternate path to podman (default: use location in $PATH variable)",
        type=str, default="podman")
    parser.add_argument("--podman-args",
        help="custom global arguments to be passed to `podman`",
        metavar='args', action='append', default=[])
    for podman_cmd in PODMAN_CMDS:
        parser.add_argument(f"--podman-{podman_cmd}-args",
            help=f"custom arguments to be passed to `podman {podman_cmd}`",
            metavar='args', action='append', default=[])
    parser.add_argument("--no-ansi",
        help="Do not print ANSI control characters", action='store_true')
    parser.add_argument("--no-cleanup",
        help="Do not stop and remove existing pod & containers", action='store_true')
    parser.add_argument("--dry-run",
        help="No action; perform a simulation of commands", action='store_true')
    parser.add_argument("-t", "--transform_policy",
        help=textwrap.dedent("""\
            how to translate docker compose to podman (default: 1podfw)
            1podfw - create all containers in one pod (inter-container communication is done via localhost), doing port mapping in that pod
            1pod - create all containers in one pod, doing port mapping in each container (does not work)
            identity - no mapping
            hostnet - use host network, and inter-container communication is done via host gateway and published ports
            cntnet - create a container and use it via --network container:name (inter-container communication via localhost)
            publishall - publish all ports to host (using -P) and communicate via gateway
            """),
        choices=['1pod', '1podfw', 'hostnet', 'cntnet', 'publishall', 'identity'], default='1podfw')


podman_compose = PodmanCompose()

###################
# decorators to add commands and parse options
###################

class cmd_run:
    def __init__(self, compose, cmd_name, cmd_desc):
        self.compose = compose
        self.cmd_name = cmd_name
        self.cmd_desc = cmd_desc

    def __call__(self, func):
        def wrapped(*args, **kw):
            return func(*args, **kw)
        wrapped._compose = self.compose
        wrapped._cmd_name = self.cmd_name
        wrapped._cmd_desc = self.cmd_desc
        wrapped._parse_args = []
        self.compose.commands[self.cmd_name] = wrapped
        return wrapped


class cmd_parse:
    def __init__(self, compose, cmd_names):
        self.compose = compose
        self.cmd_names = cmd_names if is_list(cmd_names) else [cmd_names]

    def __call__(self, func):
        def wrapped(*args, **kw):
            return func(*args, **kw)
        for cmd_name in self.cmd_names:
            self.compose.commands[cmd_name]._parse_args.append(wrapped)
        return wrapped
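
# Hypothetical sketch of how these decorators are used (the real commands follow below);
# the names here are made up for illustration only:
#
#   @cmd_run(podman_compose, 'hello', 'print a greeting')
#   def compose_hello(compose, args):
#       print("hello from project", compose.project_name)
#
#   @cmd_parse(podman_compose, 'hello')
#   def compose_hello_parse(parser):
#       parser.add_argument("--shout", action='store_true', help="uppercase the greeting")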

###################
# actual commands
###################

@cmd_run(podman_compose, 'version', 'show version')
def compose_version(compose, args):
    print("podman-compose version", __version__)
    compose.podman.run(["--version"], "", [], sleep=0)


def is_local(container: dict) -> bool:
    """Test if a container is local, i.e. if it is
    * prefixed with localhost/
    * has a build section and is not prefixed
    """
    return (
        "/" not in container["image"]
        if "build" in container
        else container["image"].startswith("localhost/")
    )
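# Examples: image "localhost/myapp" -> local; a service with a build: section and an
# unprefixed image name such as "myapp" -> local; "docker.io/library/nginx" -> not local.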


@cmd_run(podman_compose, "pull", "pull stack images")
def compose_pull(compose, args):
    img_containers = [cnt for cnt in compose.containers if "image" in cnt]
    images = {cnt["image"] for cnt in img_containers}
    if not args.force_local:
        local_images = {cnt["image"] for cnt in img_containers if is_local(cnt)}
        images -= local_images
    for image in images:
        compose.podman.run([], "pull", [image], sleep=0)


@cmd_run(podman_compose, 'push', 'push stack images')
def compose_push(compose, args):
    services = set(args.services)
    for cnt in compose.containers:
        if 'build' not in cnt:
            continue
        if services and cnt['_service'] not in services:
            continue
        compose.podman.run([], "push", [cnt["image"]], sleep=0)


def build_one(compose, args, cnt):
    if 'build' not in cnt:
        return
    if getattr(args, 'if_not_exists', None):
        try:
            img_id = compose.podman.output([], 'inspect', ['-t', 'image', '-f', '{{.Id}}', cnt["image"]])
        except subprocess.CalledProcessError:
            img_id = None
        if img_id:
            return
    build_desc = cnt['build']
    if not hasattr(build_desc, 'items'):
        build_desc = dict(context=build_desc)
    ctx = build_desc.get('context', '.')
    dockerfile = build_desc.get("dockerfile", None)
    if dockerfile:
        dockerfile = os.path.join(ctx, dockerfile)
    else:
        dockerfile_alts = [
            'Containerfile', 'ContainerFile', 'containerfile',
            'Dockerfile', 'DockerFile', 'dockerfile',
        ]
        for dockerfile in dockerfile_alts:
            dockerfile = os.path.join(ctx, dockerfile)
            if os.path.exists(dockerfile):
                break
    if not os.path.exists(dockerfile):
        raise OSError("Dockerfile not found in " + ctx)
    build_args = ["-t", cnt["image"], "-f", dockerfile]
    if "target" in build_desc:
        build_args.extend(["--target", build_desc["target"]])
    container_to_ulimit_args(cnt, build_args)
    if args.no_cache:
        build_args.append("--no-cache")
    if getattr(args, 'pull_always', None):
        build_args.append("--pull-always")
    elif getattr(args, 'pull', None):
        build_args.append("--pull")
    args_list = norm_as_list(build_desc.get('args', {}))
    for build_arg in args_list + args.build_arg:
        build_args.extend(("--build-arg", build_arg,))
    build_args.append(ctx)
    compose.podman.run([], "build", build_args, sleep=0)
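# Rough illustration with a hypothetical service whose build section is
# {context: ./web, dockerfile: Dockerfile.dev, args: {FOO: bar}}: the call above
# amounts to roughly `podman build -t <image> -f ./web/Dockerfile.dev --build-arg FOO=bar ./web`.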


@cmd_run(podman_compose, 'build', 'build stack images')
def compose_build(compose, args):
    if args.services:
        container_names_by_service = compose.container_names_by_service
        for service in args.services:
            try:
                cnt = compose.container_by_name[container_names_by_service[service][0]]
            except (KeyError, IndexError):
                raise ValueError("unknown service: " + service)
            build_one(compose, args, cnt)
    else:
        for cnt in compose.containers:
            build_one(compose, args, cnt)


def create_pods(compose, args):
    for pod in compose.pods:
        podman_args = [
            "create",
            "--name={}".format(pod["name"]),
            "--share", "net",
        ]
        if compose.podman_version and not strverscmp_lt(compose.podman_version, "3.4.0"):
            podman_args.append("--infra-name={}_infra".format(pod["name"]))
        ports = pod.get("ports", None) or []
        if isinstance(ports, str):
            ports = [ports]
        for i in ports:
            podman_args.extend(['-p', str(i)])
        compose.podman.run([], "pod", podman_args)
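# --infra-name is only passed on podman >= 3.4.0; older releases presumably do not
# accept that flag for `podman pod create`.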


def up_specific(compose, args):
    deps = []
    if not args.no_deps:
        for service in args.services:
            deps.extend([])
    # args.always_recreate_deps
    print("services", args.services)
    raise NotImplementedError("starting specific services is not yet implemented")


def get_excluded(compose, args):
    excluded = set()
    if args.services:
        excluded = set(compose.services)
        for service in args.services:
            excluded -= compose.services[service]['_deps']
            excluded.discard(service)
    print("** excluding: ", excluded)
    return excluded
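# When specific services are requested, every other service is excluded except the
# requested ones and the services recorded in their _deps sets.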


@cmd_run(podman_compose, 'up', 'Create and start the entire stack or some of its services')
def compose_up(compose, args):
    excluded = get_excluded(compose, args)
    if not args.no_build:
        # `podman build` does not cache, so don't always build
        build_args = argparse.Namespace(
            if_not_exists=(not args.build),
            **args.__dict__)
        compose.commands['build'](compose, build_args)

    # TODO: implement check hash label for change
    if args.force_recreate:
        compose.commands['down'](compose, args)
    # args.no_recreate disables check for changes (which is not implemented)

    podman_command = 'run' if args.detach and not args.no_start else 'create'

    create_pods(compose, args)
    for cnt in compose.containers:
        if cnt["_service"] in excluded:
            print("** skipping: ", cnt['name'])
            continue
        podman_args = container_to_args(compose, cnt, detached=args.detach)
        subproc = compose.podman.run([], podman_command, podman_args)
        if podman_command == 'run' and subproc and subproc.returncode:
            compose.podman.run([], 'start', [cnt['name']])
    if args.no_start or args.detach or args.dry_run:
        return
    # TODO: handle already existing
    # TODO: if error creating do not enter loop
    # TODO: colors if sys.stdout.isatty()
    exit_code_from = args.__dict__.get('exit_code_from', None)
    if exit_code_from:
        args.abort_on_container_exit = True

    threads = []
    for cnt in compose.containers:
        if cnt["_service"] in excluded:
            print("** skipping: ", cnt['name'])
            continue
        # TODO: remove sleep from podman.run
        obj = compose if exit_code_from == cnt['_service'] else None
        thread = Thread(target=compose.podman.run, args=[[], 'start', ['-a', cnt['name']]],
            kwargs={"obj": obj}, daemon=True, name=cnt['name'])
        thread.start()
        threads.append(thread)
        time.sleep(1)

    while threads:
        for thread in threads:
            thread.join(timeout=1.0)
            if not thread.is_alive():
                threads.remove(thread)
                if args.abort_on_container_exit:
                    time.sleep(1)
                    exit_code = compose.exit_code if compose.exit_code is not None else -1
                    exit(exit_code)
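# Attached mode: each container is started with `podman start -a` in its own thread so
# output from all services streams concurrently. For the --exit-code-from service,
# obj=compose presumably lets podman.run() record that container's exit status as
# compose.exit_code, which the --abort-on-container-exit path above returns.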


@cmd_run(podman_compose, 'down', 'tear down entire stack')
def compose_down(compose, args):
    excluded = get_excluded(compose, args)
    podman_args = []
    timeout = getattr(args, 'timeout', None)
    if timeout is None:
        timeout = 1
    podman_args.extend(['-t', "{}".format(timeout)])
    containers = list(reversed(compose.containers))

    for cnt in containers:
        if cnt["_service"] in excluded:
            continue
        compose.podman.run([], "stop", [*podman_args, cnt["name"]], sleep=0)
    for cnt in containers:
        if cnt["_service"] in excluded:
            continue
        compose.podman.run([], "rm", [cnt["name"]], sleep=0)
    if excluded:
        return
    for pod in compose.pods:
        compose.podman.run([], "pod", ["rm", pod["name"]], sleep=0)
    if args.volumes:
        volumes = compose.podman.volume_inspect_all()
        for volume in volumes:
            project = volume.get("Labels", {}).get("io.podman.compose.project")
            if project == compose.project_name:
                compose.podman.volume_rm(volume["Name"])
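# Containers are stopped and then removed in reverse order. Pods and named volumes are
# only removed when the whole stack is torn down (no service filter), and with
# -v/--volumes only volumes labeled io.podman.compose.project=<this project> are deleted.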


@cmd_run(podman_compose, 'ps', 'show status of containers')
def compose_ps(compose, args):
    proj_name = compose.project_name
    if args.quiet:
        compose.podman.run([], "ps", ["-a", "--format", "{{.ID}}", "--filter", f"label=io.podman.compose.project={proj_name}"])
    else:
        compose.podman.run([], "ps", ["-a", "--filter", f"label=io.podman.compose.project={proj_name}"])


@cmd_run(podman_compose, 'run', 'create a container similar to a service to run a one-off command')
def compose_run(compose, args):
    create_pods(compose, args)
    container_names = compose.container_names_by_service[args.service]
    container_name = container_names[0]
    cnt = compose.container_by_name[container_name]
    deps = cnt["_deps"]
    if not args.no_deps:
        up_args = argparse.Namespace(**dict(args.__dict__,
            detach=True, services=deps,
            # defaults
            no_build=False, build=True, force_recreate=False, no_start=False, no_cache=False, build_arg=[],
        ))
        compose.commands['up'](compose, up_args)
    # adjust one-off container options
    name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536))
    cnt["name"] = args.name or name0
    if args.entrypoint:
        cnt["entrypoint"] = args.entrypoint
    if args.user:
        cnt["user"] = args.user
    if args.workdir:
        cnt["working_dir"] = args.workdir
    env = dict(cnt.get('environment', {}))
    if args.env:
        additional_env_vars = dict(map(lambda each: each.split('=', 1), args.env))
        env.update(additional_env_vars)
    cnt['environment'] = env
    if not args.service_ports:
        for k in ("expose", "publishall", "ports"):
            try:
                del cnt[k]
            except KeyError:
                pass
    if args.volume:
        # TODO: handle volumes
        pass
    cnt['tty'] = not args.T
    if args.cnt_command is not None and len(args.cnt_command) > 0:
        cnt['command'] = args.cnt_command
    # run podman
    podman_args = container_to_args(compose, cnt, args.detach)
    if not args.detach:
        podman_args.insert(1, '-i')
        if args.rm:
            podman_args.insert(1, '--rm')
    compose.podman.run([], 'run', podman_args, sleep=0)
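# The one-off container reuses the first replica's service definition: it gets a unique
# name such as <project>_<service>_tmp12345 unless --name is given, and its
# expose/ports settings are dropped unless --service-ports is passed.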


@cmd_run(podman_compose, 'exec', 'execute a command in a running container')
def compose_exec(compose, args):
    container_names = compose.container_names_by_service[args.service]
    container_name = container_names[args.index - 1]
    cnt = compose.container_by_name[container_name]
    podman_args = ['--interactive']
    if args.privileged:
        podman_args += ['--privileged']
    if args.user:
        podman_args += ['--user', args.user]
    if args.workdir:
        podman_args += ['--workdir', args.workdir]
    if not args.T:
        podman_args += ['--tty']
    env = dict(cnt.get('environment', {}))
    if args.env:
        additional_env_vars = dict(map(lambda each: each.split('=', 1), args.env))
        env.update(additional_env_vars)
    for name, value in env.items():
        podman_args += ['--env', "%s=%s" % (name, value)]
    podman_args += [container_name]
    if args.cnt_command is not None and len(args.cnt_command) > 0:
        podman_args += args.cnt_command
    compose.podman.run([], 'exec', podman_args, sleep=0)


def transfer_service_status(compose, args, action):
    # TODO: handle dependencies, handle creations
    container_names_by_service = compose.container_names_by_service
    targets = []
    for service in args.services:
        if service not in container_names_by_service:
            raise ValueError("unknown service: " + service)
        targets.extend(container_names_by_service[service])
    podman_args = []
    timeout = getattr(args, 'timeout', None)
    if timeout is not None:
        podman_args.extend(['-t', "{}".format(timeout)])
    for target in targets:
        compose.podman.run([], action, podman_args + [target], sleep=0)
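# Shared helper for the start/stop/restart commands below: `action` is passed straight
# through as the podman subcommand and -t/--timeout is forwarded when present.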


@cmd_run(podman_compose, 'start', 'start specific services')
def compose_start(compose, args):
    transfer_service_status(compose, args, 'start')


@cmd_run(podman_compose, 'stop', 'stop specific services')
def compose_stop(compose, args):
    transfer_service_status(compose, args, 'stop')


@cmd_run(podman_compose, 'restart', 'restart specific services')
def compose_restart(compose, args):
    transfer_service_status(compose, args, 'restart')


@cmd_run(podman_compose, 'logs', 'show logs from services')
def compose_logs(compose, args):
    container_names_by_service = compose.container_names_by_service
    target = None
    if args.service not in container_names_by_service:
        raise ValueError("unknown service: " + args.service)
    target = container_names_by_service[args.service]
    podman_args = []
    if args.follow:
        podman_args.append('-f')
    # the default value is to print all logs, which in podman is 0 and does not
    # need to be passed
    if args.tail and args.tail != 'all':
        podman_args.extend(['--tail', args.tail])
    if args.timestamps:
        podman_args.append('-t')
    compose.podman.run([], 'logs', podman_args + target)


###################
# command arguments parsing
###################

@cmd_parse(podman_compose, 'up')
def compose_up_parse(parser):
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name. Incompatible with --abort-on-container-exit.")
    parser.add_argument("--no-color", action='store_true',
        help="Produce monochrome output.")
    parser.add_argument("--quiet-pull", action='store_true',
        help="Pull without printing progress information.")
    parser.add_argument("--no-deps", action='store_true',
        help="Don't start linked services.")
    parser.add_argument("--force-recreate", action='store_true',
        help="Recreate containers even if their configuration and image haven't changed.")
    parser.add_argument("--always-recreate-deps", action='store_true',
        help="Recreate dependent containers. Incompatible with --no-recreate.")
    parser.add_argument("--no-recreate", action='store_true',
        help="If containers already exist, don't recreate them. Incompatible with --force-recreate and -V.")
    parser.add_argument("--no-build", action='store_true',
        help="Don't build an image, even if it's missing.")
    parser.add_argument("--no-start", action='store_true',
        help="Don't start the services after creating them.")
    parser.add_argument("--build", action='store_true',
        help="Build images before starting containers.")
    parser.add_argument("--abort-on-container-exit", action='store_true',
        help="Stops all containers if any container was stopped. Incompatible with -d.")
    parser.add_argument("-t", "--timeout", type=float, default=10,
        help="Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)")
    parser.add_argument("-V", "--renew-anon-volumes", action='store_true',
        help="Recreate anonymous volumes instead of retrieving data from the previous containers.")
    parser.add_argument("--remove-orphans", action='store_true',
        help="Remove containers for services not defined in the Compose file.")
    parser.add_argument('--scale', metavar="SERVICE=NUM", action='append',
        help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
    parser.add_argument("--exit-code-from", metavar='SERVICE', type=str, default=None,
        help="Return the exit code of the selected service container. Implies --abort-on-container-exit.")


@cmd_parse(podman_compose, 'down')
def compose_down_parse(parser):
    parser.add_argument("-v", "--volumes", action='store_true', default=False,
        help="Remove named volumes declared in the `volumes` section of the Compose file and "
             "anonymous volumes attached to containers.")


@cmd_parse(podman_compose, 'run')
def compose_run_parse(parser):
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name.")
    parser.add_argument("--name", type=str, default=None,
        help="Assign a name to the container")
    parser.add_argument("--entrypoint", type=str, default=None,
        help="Override the entrypoint of the image.")
    parser.add_argument('-e', '--env', metavar="KEY=VAL", action='append',
        help="Set an environment variable (can be used multiple times)")
    parser.add_argument('-l', '--label', metavar="KEY=VAL", action='append',
        help="Add or override a label (can be used multiple times)")
    parser.add_argument("-u", "--user", type=str, default=None,
        help="Run as specified username or uid")
    parser.add_argument("--no-deps", action='store_true',
        help="Don't start linked services")
    parser.add_argument("--rm", action='store_true',
        help="Remove container after run. Ignored in detached mode.")
    parser.add_argument('-p', '--publish', action='append',
        help="Publish a container's port(s) to the host (can be used multiple times)")
    parser.add_argument("--service-ports", action='store_true',
        help="Run command with the service's ports enabled and mapped to the host.")
    parser.add_argument('-v', '--volume', action='append',
        help="Bind mount a volume (can be used multiple times)")
    parser.add_argument("-T", action='store_true',
        help="Disable pseudo-tty allocation. By default `podman-compose run` allocates a TTY.")
    parser.add_argument("-w", "--workdir", type=str, default=None,
        help="Working directory inside the container")
    parser.add_argument('service', metavar='service', nargs=None,
        help='service name')
    parser.add_argument('cnt_command', metavar='command', nargs=argparse.REMAINDER,
        help='command and its arguments')


@cmd_parse(podman_compose, 'exec')
def compose_exec_parse(parser):
    parser.add_argument("-d", "--detach", action='store_true',
        help="Detached mode: Run container in the background, print new container name.")
    parser.add_argument("--privileged", action='store_true', default=False,
        help="Give the process extended Linux capabilities inside the container")
    parser.add_argument("-u", "--user", type=str, default=None,
        help="Run as specified username or uid")
    parser.add_argument("-T", action='store_true',
        help="Disable pseudo-tty allocation. By default `podman-compose exec` allocates a TTY.")
    parser.add_argument("--index", type=int, default=1,
        help="Index of the container if there are multiple instances of a service")
    parser.add_argument('-e', '--env', metavar="KEY=VAL", action='append',
        help="Set an environment variable (can be used multiple times)")
    parser.add_argument("-w", "--workdir", type=str, default=None,
        help="Working directory inside the container")
    parser.add_argument('service', metavar='service', nargs=None,
        help='service name')
    parser.add_argument('cnt_command', metavar='command', nargs=argparse.REMAINDER,
        help='command and its arguments')


@cmd_parse(podman_compose, ['down', 'stop', 'restart'])
def compose_parse_timeout(parser):
    parser.add_argument("-t", "--timeout",
        help="Specify a shutdown timeout in seconds.",
        type=int, default=10)


@cmd_parse(podman_compose, ['start', 'stop', 'restart'])
def compose_parse_services(parser):
    parser.add_argument('services', metavar='services', nargs='+',
        help='affected services')


@cmd_parse(podman_compose, ['logs'])
def compose_logs_parse(parser):
    parser.add_argument("-f", "--follow", action='store_true',
        help="Follow log output.")
    parser.add_argument("-t", "--timestamps", action='store_true',
        help="Show timestamps.")
    parser.add_argument("--tail",
        help="Number of lines to show from the end of the logs for each "
             "container.",
        type=str, default="all")
    parser.add_argument('service', metavar='service', nargs=None,
        help='service name')


@cmd_parse(podman_compose, 'pull')
def compose_pull_parse(parser):
    parser.add_argument("--force-local", action='store_true', default=False,
        help="Also pull unprefixed images for services which have a build section")


@cmd_parse(podman_compose, 'push')
def compose_push_parse(parser):
    parser.add_argument("--ignore-push-failures", action='store_true',
        help="Push what it can and ignore images with push failures. (not implemented)")
    parser.add_argument('services', metavar='services', nargs='*',
        help='services to push')


@cmd_parse(podman_compose, 'ps')
def compose_ps_parse(parser):
    parser.add_argument("-q", "--quiet",
        help="Only display container IDs", action='store_true')


@cmd_parse(podman_compose, ['build', 'up'])
def compose_build_parse(parser):
    parser.add_argument("--pull",
        help="Attempt to pull a newer version of the image", action='store_true')
    parser.add_argument("--pull-always",
        help="Attempt to pull a newer version of the image, raise an error even if the image is present locally.", action='store_true')
    parser.add_argument("--build-arg", metavar="key=val", action="append", default=[],
        help="Set build-time variables for services.")
    parser.add_argument("--no-cache",
        help="Do not use cache when building the image.", action='store_true')


@cmd_parse(podman_compose, ['build', 'up', 'down'])
def compose_build_up_down_parse(parser):
    parser.add_argument('services', metavar='services', nargs='*', default=None,
        help='affected services')


def main():
    podman_compose.run()


if __name__ == "__main__":
    main()