podman-compose/podman_compose.py

#! /usr/bin/python3
# -*- coding: utf-8 -*-
# https://docs.docker.com/compose/compose-file/#service-configuration-reference
# https://docs.docker.com/samples/
# https://docs.docker.com/compose/gettingstarted/
# https://docs.docker.com/compose/django/
# https://docs.docker.com/compose/wordpress/
from __future__ import print_function
import sys
import os
import argparse
import subprocess
import time
import re
import hashlib
import random
import json
from threading import Thread
import shlex
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
# import fnmatch
# fnmatch.fnmatchcase(env, "*_HOST")
import yaml
__version__ = '0.1.7dev'
PY3 = sys.version_info[0] == 3
if PY3:
basestring = str
# helper functions
is_str = lambda s: isinstance(s, basestring)
is_dict = lambda d: isinstance(d, dict)
is_list = lambda l: not is_str(l) and not is_dict(l) and hasattr(l, "__iter__")
# identity filter
filteri = lambda a: filter(lambda i: i, a)
def try_int(i, fallback=None):
try:
return int(i)
except ValueError:
pass
except TypeError:
pass
return fallback
dir_re = re.compile(r"^[~/\.]")
propagation_re = re.compile("^(?:z|Z|r?shared|r?slave|r?private)$")
def parse_short_mount(mount_str, basedir):
mount_a = mount_str.split(':')
mount_opt_dict = {}
mount_opt = None
if len(mount_a) == 1:
        # Anonymous: just specify a path and let the engine create the volume
# - /var/lib/mysql
mount_src, mount_dst = None, mount_str
elif len(mount_a) == 2:
mount_src, mount_dst = mount_a
# dest must start with / like /foo:/var/lib/mysql
# otherwise it's option like /var/lib/mysql:rw
if not mount_dst.startswith('/'):
mount_dst, mount_opt = mount_a
mount_src = None
elif len(mount_a) == 3:
mount_src, mount_dst, mount_opt = mount_a
else:
raise ValueError("could not parse mount "+mount_str)
if mount_src and dir_re.match(mount_src):
# Specify an absolute path mapping
# - /opt/data:/var/lib/mysql
# Path on the host, relative to the Compose file
# - ./cache:/tmp/cache
# User-relative path
# - ~/configs:/etc/configs/:ro
mount_type = "bind"
# TODO: should we use os.path.realpath(basedir)?
mount_src = os.path.join(basedir, os.path.expanduser(mount_src))
else:
# Named volume
# - datavolume:/var/lib/mysql
mount_type = "volume"
mount_opts = filteri((mount_opt or '').split(','))
for opt in mount_opts:
if opt == 'ro': mount_opt_dict["read_only"] = True
elif opt == 'rw': mount_opt_dict["read_only"] = False
elif opt in ('consistent', 'delegated', 'cached'):
mount_opt_dict["consistency"] = opt
elif propagation_re.match(opt): mount_opt_dict["bind"] = dict(propagation=opt)
else:
# TODO: ignore
raise ValueError("unknown mount option "+opt)
return dict(type=mount_type, source=mount_src, target=mount_dst, **mount_opt_dict)
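# A rough sketch of how the short syntax maps (illustrative values, assuming basedir="."):
#   parse_short_mount("/var/lib/mysql", ".")         -> {"type": "volume", "source": None, "target": "/var/lib/mysql"}
#   parse_short_mount("./cache:/tmp/cache", ".")     -> {"type": "bind", "source": "./cache", "target": "/tmp/cache"}
#   parse_short_mount("~/conf:/etc/conf:ro,Z", ".")  -> a bind mount with read_only=True and bind={"propagation": "Z"}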
# NOTE: if a named volume is used but not defined it
# gives ERROR: Named volume "abc" is used in service "xyz"
# but no declaration was found in the volumes section.
# unless it's anonymous-volume
def fix_mount_dict(mount_dict, proj_name, srv_name):
"""
in-place fix mount dictionary to:
- add missing source
- prefix source with proj_name
"""
    # if already applied, nothing to do
if "_source" in mount_dict: return mount_dict
if mount_dict["type"] == "volume":
source = mount_dict.get("source", None)
# keep old source
mount_dict["_source"] = source
if not source:
# missing source
mount_dict["source"] = "_".join([
proj_name, srv_name,
hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(),
])
else:
# prefix with proj_name
mount_dict["source"] = proj_name+"_"+source
return mount_dict
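# For example (illustrative, proj_name="myproj"): a named volume
#   {"type": "volume", "source": "db", "target": "/var/lib/mysql"}
# gets source "myproj_db" (the original kept under "_source"), while an anonymous volume
# (no source) gets a generated name "myproj_<srv_name>_<sha256 of target>".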
# docker and docker-compose support subset of bash variable substitution
# https://docs.docker.com/compose/compose-file/#variable-substitution
# https://docs.docker.com/compose/env-file/
# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
# $VARIABLE
# ${VARIABLE}
# ${VARIABLE:-default} default if not set or empty
# ${VARIABLE-default} default if not set
# ${VARIABLE:?err} raise error if not set or empty
# ${VARIABLE?err} raise error if not set
# $$ means $
var_re = re.compile(r'\$(\{(?:[^\s\$:\-\}]+)\}|(?:[^\s\$\{\}]+))')
var_def_re = re.compile(r'\$\{([^\s\$:\-\}]+)(:)?-([^\}]*)\}')
var_err_re = re.compile(r'\$\{([^\s\$:\-\}]+)(:)?\?([^\}]*)\}')
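# Illustrative examples of what each regex above captures:
#   "$HOME" or "${HOME}"         -> var_re      (plain substitution)
#   "${PORT:-8080}"              -> var_def_re  (default when unset; with ':' also when empty)
#   "${TOKEN:?token required}"   -> var_err_re  (raise when unset; with ':' also when empty)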
def dicts_get(dicts, key, fallback='', fallback_empty=False):
"""
get the given key from any dict in dicts, trying them one by one
    if not found in any, then use fallback; if fallback is an Exception, raise it
"""
value = None
for d in dicts:
value = d.get(key, None)
if value is not None: break
if not value:
if fallback_empty or value is None:
value = fallback
if isinstance(value, Exception):
raise value
return value
def rec_subs(value, dicts):
"""
    do bash-like substitution in value; if it is a list or dictionary, do so recursively
"""
if is_dict(value):
value = dict([(k, rec_subs(v, dicts)) for k, v in value.items()])
elif is_str(value):
value = var_re.sub(lambda m: dicts_get(dicts, m.group(1).strip('{}')), value)
sub_def = lambda m: dicts_get(dicts, m.group(1), m.group(3), m.group(2) == ':')
value = var_def_re.sub(sub_def, value)
sub_err = lambda m: dicts_get(dicts, m.group(1), RuntimeError(m.group(3)),
m.group(2) == ':')
value = var_err_re.sub(sub_err, value)
value = value.replace('$$', '$')
elif hasattr(value, "__iter__"):
value = [rec_subs(i, dicts) for i in value]
return value
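# e.g. (illustrative, TAG unset): rec_subs({"image": "nginx:${TAG:-latest}"}, [os.environ, {}])
#   -> {"image": "nginx:latest"}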
def norm_as_list(src):
"""
given a dictionary {key1:value1, key2: None} or list
return a list of ["key1=value1", "key2"]
"""
if src is None:
dst = []
elif is_dict(src):
dst = [("{}={}".format(k, v) if v else k) for k, v in src.items()]
elif is_list(src):
dst = list(src)
else:
dst = [src]
return dst
def norm_as_dict(src):
"""
given a list ["key1=value1", "key2"]
return a dictionary {key1:value1, key2: None}
"""
if src is None:
dst = {}
elif is_dict(src):
dst = dict(src)
elif is_list(src):
dst = [i.split("=", 1) for i in src if i]
dst = dict([(a if len(a) == 2 else (a[0], None)) for a in dst])
elif is_str(src):
key, value = src.split("=", 1) if "=" in src else (src, None)
dst = {key: value}
else:
raise ValueError("dictionary or iterable is expected")
return dst
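# The two helpers above are roughly inverse of each other (illustrative):
#   norm_as_dict(["FOO=bar", "DEBUG"])           -> {"FOO": "bar", "DEBUG": None}
#   norm_as_list({"FOO": "bar", "DEBUG": None})  -> ["FOO=bar", "DEBUG"]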
def norm_ulimit(inner_value):
if is_dict(inner_value):
if not inner_value.keys() & {"soft", "hard"}:
raise ValueError("expected at least one soft or hard limit")
soft = inner_value.get("soft", inner_value.get("hard", None))
hard = inner_value.get("hard", inner_value.get("soft", None))
return "{}:{}".format(soft, hard)
elif is_list(inner_value): return norm_ulimit(norm_as_dict(inner_value))
# if int or string return as is
return inner_value
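# e.g. (illustrative): norm_ulimit({"soft": 1024, "hard": 2048}) -> "1024:2048",
# norm_ulimit({"soft": 1024}) -> "1024:1024"; plain ints and strings are returned unchanged.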
# transformation helpers
def adj_hosts(services, cnt, dst="127.0.0.1"):
"""
adjust container cnt in-place to add hosts pointing to dst for services
"""
common_extra_hosts = []
for srv, cnts in services.items():
common_extra_hosts.append("{}:{}".format(srv, dst))
for cnt0 in cnts:
common_extra_hosts.append("{}:{}".format(cnt0, dst))
extra_hosts = list(cnt.get("extra_hosts", []))
extra_hosts.extend(common_extra_hosts)
# link aliases
for link in cnt.get("links", []):
a = link.strip().split(':', 1)
if len(a) == 2:
alias = a[1].strip()
extra_hosts.append("{}:{}".format(alias, dst))
cnt["extra_hosts"] = extra_hosts
def move_list(dst, containers, key):
"""
    move key (like port forwarding) from containers to dst (a pod or an infra container)
"""
a = set(dst.get(key, None) or [])
2019-03-04 23:39:08 +01:00
for cnt in containers:
a0 = cnt.get(key, None)
if a0:
a.update(a0)
del cnt[key]
if a:
dst[key] = list(a)
def move_port_fw(dst, containers):
"""
    move port forwarding from containers to dst (a pod or an infra container)
"""
move_list(dst, containers, "ports")
def move_extra_hosts(dst, containers):
"""
    move extra_hosts from containers to dst (a pod or an infra container)
"""
move_list(dst, containers, "extra_hosts")
# transformations
transformations = {}
def trans(func):
transformations[func.__name__.replace("tr_", "")] = func
return func
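# @trans registers each tr_* function below under its short name, e.g. tr_1podfw becomes
# transformations["1podfw"], which is what the -t/--transform_policy option selects.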
@trans
def tr_identity(project_name, services, given_containers):
containers = []
for cnt in given_containers:
containers.append(dict(cnt))
return [], containers
@trans
def tr_publishall(project_name, services, given_containers):
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, publishall=True)
# adjust hosts to point to the gateway, TODO: adjust host env
adj_hosts(services, cnt, '10.0.2.2')
containers.append(cnt)
return [], containers
@trans
def tr_hostnet(project_name, services, given_containers):
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, network_mode="host")
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
containers.append(cnt)
return [], containers
@trans
def tr_cntnet(project_name, services, given_containers):
containers = []
infra_name = project_name + "_infra"
infra = dict(
name=infra_name,
image="k8s.gcr.io/pause:3.1",
)
for cnt0 in given_containers:
cnt = dict(cnt0, network_mode="container:"+infra_name)
deps = cnt.get("depends_on", None) or []
deps.append(infra_name)
cnt["depends_on"] = deps
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
if "hostname" in cnt:
del cnt["hostname"]
containers.append(cnt)
move_port_fw(infra, containers)
move_extra_hosts(infra, containers)
containers.insert(0, infra)
return [], containers
@trans
def tr_1pod(project_name, services, given_containers):
"""
project_name:
services: {service_name: ["container_name1", "..."]}, currently only one is supported
given_containers: [{}, ...]
"""
pod = dict(name=project_name)
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, pod=project_name)
# services can be accessed as localhost because they are on one pod
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
containers.append(cnt)
return [pod], containers
@trans
def tr_1podfw(project_name, services, given_containers):
pods, containers = tr_1pod(project_name, services, given_containers)
pod = pods[0]
move_port_fw(pod, containers)
return pods, containers
def assert_volume(compose, mount_dict):
"""
inspect volume to get directory
create volume if needed
"""
if mount_dict["type"] != "volume": return
proj_name = compose.project_name
shared_vols = compose.shared_vols
vol_name_orig = mount_dict.get("_source", None)
vol_name = mount_dict["source"]
print("podman volume inspect {vol_name} || podman volume create {vol_name}".format(vol_name=vol_name))
# TODO: might move to using "volume list"
# podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE'
try: out = compose.podman.output(["volume", "inspect", vol_name]).decode('utf-8')
except subprocess.CalledProcessError:
compose.podman.output(["volume", "create", "--label", "io.podman.compose.project={}".format(proj_name), vol_name])
out = compose.podman.output(["volume", "inspect", vol_name]).decode('utf-8')
def mount_desc_to_mount_args(compose, mount_desc, srv_name, cnt_name):
basedir = compose.dirname
proj_name = compose.project_name
shared_vols = compose.shared_vols
mount_type = mount_desc.get("type", None)
source = mount_desc.get("source", None)
target = mount_desc["target"]
opts = []
if mount_desc.get(mount_type, None):
# TODO: we might need to add mount_dict[mount_type]["propagation"] = "z"
mount_prop = mount_desc.get(mount_type, {}).get("propagation", None)
if mount_prop: opts.append("{}-propagation={}".format(mount_type, mount_prop))
if mount_desc.get("read_only", False): opts.append("ro")
if mount_type == 'tmpfs':
tmpfs_opts = mount_desc.get("tmpfs", {})
tmpfs_size = tmpfs_opts.get("size", None)
if tmpfs_size:
opts.append("tmpfs-size={}".format(tmpfs_size))
tmpfs_mode = tmpfs_opts.get("mode", None)
if tmpfs_mode:
opts.append("tmpfs-mode={}".format(tmpfs_mode))
opts = ",".join(opts)
if mount_type == 'bind':
return "type=bind,source={source},destination={target},{opts}".format(
source=source,
target=target,
opts=opts
).rstrip(",")
elif mount_type == 'volume':
return "type=volume,source={source},destination={target},{opts}".format(
source=source,
target=target,
opts=opts
).rstrip(",")
elif mount_type == 'tmpfs':
return "type=tmpfs,destination={target},{opts}".format(
target=target,
opts=opts
).rstrip(",")
else:
raise ValueError("unknown mount type:"+mount_type)
def container_to_ulimit_args(cnt, podman_args):
ulimit = cnt.get('ulimits', [])
if ulimit is not None:
# ulimit can be a single value, i.e. ulimit: host
if is_str(ulimit):
podman_args.extend(['--ulimit', ulimit])
# or a dictionary or list:
else:
ulimit = norm_as_dict(ulimit)
ulimit = [ "{}={}".format(ulimit_key, norm_ulimit(inner_value)) for ulimit_key, inner_value in ulimit.items()]
for i in ulimit:
podman_args.extend(['--ulimit', i])
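# e.g. (illustrative): `ulimits: {nofile: {soft: 1024, hard: 2048}}` appends
# ['--ulimit', 'nofile=1024:2048'], while the string form `ulimits: host` appends ['--ulimit', 'host'].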
def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name):
basedir = compose.dirname
proj_name = compose.project_name
shared_vols = compose.shared_vols
mount_type = mount_desc["type"]
source = mount_desc.get("source", None)
target = mount_desc["target"]
opts = []
if mount_type != 'bind' and mount_type != 'volume':
raise ValueError("unknown mount type:"+mount_type)
propagations = set(filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(',')))
if mount_type != 'bind':
propagations.update(filteri(mount_desc.get('bind', {}).get("propagation", "").split(',')))
opts.extend(propagations)
# --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
# [rw|ro]
# [z|Z]
# [[r]shared|[r]slave|[r]private]
# [[r]bind]
# [noexec|exec]
# [nodev|dev]
# [nosuid|suid]
read_only = mount_desc.get("read_only", None)
if read_only is not None:
opts.append('ro' if read_only else 'rw')
args = f'{source}:{target}'
if opts: args += ':' + ','.join(opts)
return args
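# e.g. (illustrative): {"type": "bind", "source": "/opt/data", "target": "/var/lib/mysql", "read_only": True}
# becomes "/opt/data:/var/lib/mysql:ro", passed as -v when _prefer_volume_over_mount is set;
# otherwise mount_desc_to_mount_args above builds the equivalent --mount string.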
def get_mount_args(compose, cnt, volume):
proj_name = compose.project_name
srv_name = cnt['_service']
basedir = compose.dirname
if is_str(volume): volume = parse_short_mount(volume, basedir)
mount_type = volume["type"]
assert_volume(compose, fix_mount_dict(volume, proj_name, srv_name))
if compose._prefer_volume_over_mount:
if mount_type == 'tmpfs':
# TODO: --tmpfs /tmp:rw,size=787448k,mode=1777
args = volume['target']
tmpfs_opts = volume.get("tmpfs", {})
opts = []
size = tmpfs_opts.get("size", None)
if size: opts.append('size={}'.format(size))
mode = tmpfs_opts.get("mode", None)
            if mode: opts.append('mode={}'.format(mode))
if opts: args += ':' + ','.join(opts)
return ['--tmpfs', args]
else:
args = mount_desc_to_volume_args(compose, volume, srv_name, cnt['name'])
return ['-v', args]
else:
args = mount_desc_to_mount_args(compose, volume, srv_name, cnt['name'])
return ['--mount', args]
def container_to_args(compose, cnt, detached=True, podman_command='run'):
# TODO: double check -e , --add-host, -v, --read-only
dirname = compose.dirname
shared_vols = compose.shared_vols
pod = cnt.get('pod', None) or ''
podman_args = [
podman_command,
'--name={}'.format(cnt.get('name', None)),
]
if detached:
podman_args.append("-d")
if pod:
podman_args.append('--pod={}'.format(pod))
sec = norm_as_list(cnt.get("security_opt", None))
for s in sec:
podman_args.extend(['--security-opt', s])
if cnt.get('read_only', None):
podman_args.append('--read-only')
for i in cnt.get('labels', []):
podman_args.extend(['--label', i])
net = cnt.get("network_mode", None)
if net:
podman_args.extend(['--network', net])
env = norm_as_list(cnt.get('environment', {}))
for c in cnt.get('cap_add', []):
podman_args.extend(['--cap-add', c])
for c in cnt.get('cap_drop', []):
podman_args.extend(['--cap-drop', c])
for d in cnt.get('devices', []):
podman_args.extend(['--device', d])
for e in env:
podman_args.extend(['-e', e])
for i in cnt.get('env_file', []):
i = os.path.realpath(os.path.join(dirname, i))
podman_args.extend(['--env-file', i])
tmpfs_ls = cnt.get('tmpfs', [])
if is_str(tmpfs_ls): tmpfs_ls = [tmpfs_ls]
for i in tmpfs_ls:
podman_args.extend(['--tmpfs', i])
for volume in cnt.get('volumes', []):
# TODO: should we make it os.path.realpath(os.path.join(, i))?
podman_args.extend(get_mount_args(compose, cnt, volume))
for i in cnt.get('extra_hosts', []):
podman_args.extend(['--add-host', i])
for i in cnt.get('expose', []):
podman_args.extend(['--expose', i])
if cnt.get('publishall', None):
podman_args.append('-P')
for i in cnt.get('ports', []):
podman_args.extend(['-p', i])
user = cnt.get('user', None)
if user is not None:
podman_args.extend(['-u', user])
if cnt.get('working_dir', None) is not None:
podman_args.extend(['-w', cnt['working_dir']])
if cnt.get('hostname', None):
podman_args.extend(['--hostname', cnt['hostname']])
if cnt.get('shm_size', None):
podman_args.extend(['--shm-size', '{}'.format(cnt['shm_size'])])
if cnt.get('stdin_open', None):
podman_args.append('-i')
if cnt.get('tty', None):
podman_args.append('--tty')
if cnt.get('privileged', None):
podman_args.append('--privileged')
if cnt.get('restart', None) is not None:
podman_args.extend(['--restart', cnt['restart']])
container_to_ulimit_args(cnt, podman_args)
# currently podman shipped by fedora does not package this
if cnt.get('init', None):
podman_args.append('--init')
if cnt.get('init-path', None):
podman_args.extend(['--init-path', cnt['init-path']])
entrypoint = cnt.get('entrypoint', None)
if entrypoint is not None:
if is_str(entrypoint):
podman_args.extend(['--entrypoint', entrypoint])
else:
podman_args.extend(['--entrypoint', json.dumps(entrypoint)])
# WIP: healthchecks are still work in progress
healthcheck = cnt.get('healthcheck', None) or {}
if not is_dict(healthcheck):
        raise ValueError("'healthcheck' must be a key-value mapping")
healthcheck_test = healthcheck.get('test', None)
if healthcheck_test:
# If it's a string, it's equivalent to specifying CMD-SHELL
if is_str(healthcheck_test):
# podman does not add shell to handle command with whitespace
podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test))])
elif is_list(healthcheck_test):
# If it's a list, first item is either NONE, CMD or CMD-SHELL.
healthcheck_type = healthcheck_test.pop(0)
if healthcheck_type == 'NONE':
podman_args.append("--no-healthcheck")
elif healthcheck_type == 'CMD':
podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(
"' '".join([cmd_quote(i) for i in healthcheck_test])
)])
elif healthcheck_type == 'CMD-SHELL':
if len(healthcheck_test)!=1:
                    raise ValueError("'CMD-SHELL' takes a single string after it")
podman_args.extend(['--healthcheck-command', '/bin/sh -c {}'.format(cmd_quote(healthcheck_test[0]))])
else:
raise ValueError(
"unknown healthcheck test type [{}],\
expecting NONE, CMD or CMD-SHELL."
.format(healthcheck_type)
)
else:
        raise ValueError("'healthcheck.test' must be either a string or a list")
# interval, timeout and start_period are specified as durations.
if 'interval' in healthcheck:
podman_args.extend(['--healthcheck-interval', healthcheck['interval']])
if 'timeout' in healthcheck:
podman_args.extend(['--healthcheck-timeout', healthcheck['timeout']])
if 'start_period' in healthcheck:
podman_args.extend(['--healthcheck-start-period', healthcheck['start_period']])
# convert other parameters to string
if 'retries' in healthcheck:
podman_args.extend(['--healthcheck-retries', '{}'.format(healthcheck['retries'])])
podman_args.append(cnt['image']) # command, ..etc.
command = cnt.get('command', None)
if command is not None:
if is_str(command):
podman_args.extend(shlex.split(command))
else:
podman_args.extend(command)
return podman_args
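# A rough sketch of the generated arguments for a minimal detached service (names illustrative,
# labels and defaults omitted):
#   ['run', '--name=proj_web_1', '-d', '--pod=proj', '-e', 'FOO=bar', '-p', '8080:80', 'nginx:alpine']
# all flags come before the image name, and any service command follows the image.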
def rec_deps(services, service_name, start_point=None):
"""
return all dependencies of service_name recursively
"""
if not start_point:
start_point = service_name
deps = services[service_name]["_deps"]
for dep_name in deps.copy():
        # avoid A depends on A
if dep_name == service_name:
continue
dep_srv = services.get(dep_name, None)
if not dep_srv:
continue
# NOTE: avoid creating loops, A->B->A
if start_point and start_point in dep_srv["_deps"]:
continue
new_deps = rec_deps(services, dep_name, start_point)
deps.update(new_deps)
return deps
def flat_deps(services, with_extends=False):
"""
    create or update the recursive dependency set "_deps" for all services
"""
for name, srv in services.items():
deps = set()
srv["_deps"] = deps
if with_extends:
ext = srv.get("extends", {}).get("service", None)
if ext:
if ext != name: deps.add(ext)
continue
deps.update(srv.get("depends_on", []))
# parse link to get service name and remove alias
deps.update([(c.split(":")[0] if ":" in c else c)
for c in srv.get("links", [])])
for name, srv in services.items():
rec_deps(services, name)
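# e.g. (illustrative): web: {depends_on: [db], links: ["cache:redis"]} yields
# services["web"]["_deps"] == {"db", "cache"}; rec_deps then folds in the dependencies
# of "db" and "cache" transitively.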
###################
# podman and compose classes
###################
class Podman:
def __init__(self, compose, podman_path='podman', dry_run=False):
self.compose = compose
self.podman_path = podman_path
self.dry_run = dry_run
def output(self, podman_args):
cmd = [self.podman_path]+podman_args
return subprocess.check_output(cmd)
def run(self, podman_args, wait=True, sleep=1):
podman_args_str = [str(arg) for arg in podman_args]
print("podman " + " ".join(podman_args_str))
if self.dry_run:
return None
cmd = [self.podman_path]+podman_args_str
# subprocess.Popen(args, bufsize = 0, executable = None, stdin = None, stdout = None, stderr = None, preexec_fn = None, close_fds = False, shell = False, cwd = None, env = None, universal_newlines = False, startupinfo = None, creationflags = 0)
p = subprocess.Popen(cmd)
if wait:
print(p.wait())
if sleep:
time.sleep(sleep)
return p
def normalize_service(service):
for key in ("env_file", "security_opt"):
if key not in service: continue
if is_str(service[key]): service[key]=[service[key]]
for key in ("environment", "labels"):
if key not in service: continue
service[key] = norm_as_dict(service[key])
if "extends" in service:
extends = service["extends"]
if is_str(extends):
extends = {"service": extends}
service["extends"] = extends
return service
def normalize(compose):
"""
    normalize the services in the compose dict, converting some keys into their canonical list/dict forms
"""
services = compose.get("services", None) or {}
for service_name, service in services.items():
normalize_service(service)
return compose
def rec_merge_one(target, source):
"""
update target from source recursively
"""
done = set()
for key, value in source.items():
if key in target: continue
target[key]=value
done.add(key)
for key, value in target.items():
if key in done: continue
if key not in source: continue
value2 = source[key]
if type(value2)!=type(value):
raise ValueError("can't merge value of {} of type {} and {}".format(key, type(value), type(value2)))
if is_list(value2):
value.extend(value2)
elif is_dict(value2):
rec_merge_one(value, value2)
else:
target[key]=value2
return target
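# e.g. (illustrative): rec_merge_one({"ports": ["80:80"]}, {"ports": ["443:443"], "image": "x"})
#   -> {"ports": ["80:80", "443:443"], "image": "x"}
# lists are extended, nested dicts are merged recursively, and scalar values from source win.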
def rec_merge(target, *sources):
"""
update target recursively from sources
"""
for source in sources:
ret = rec_merge_one(target, source)
return ret
def resolve_extends(services, service_names, dotenv_dict):
for name in service_names:
service = services[name]
ext = service.get("extends", {})
if is_str(ext): ext = {"service": ext}
from_service_name = ext.get("service", None)
if not from_service_name: continue
filename = ext.get("file", None)
if filename:
with open(filename, 'r') as f:
content = yaml.safe_load(f) or {}
if "services" in content:
content = content["services"]
content = rec_subs(content, [os.environ, dotenv_dict])
from_service = content.get(from_service_name, {})
normalize_service(from_service)
else:
from_service = services.get(from_service_name, {}).copy()
del from_service["_deps"]
try:
del from_service["extends"]
except KeyError:
pass
new_service = rec_merge({}, from_service, service)
services[name] = new_service
class PodmanCompose:
def __init__(self):
self.commands = {}
self.global_args = None
self.project_name = None
self.dirname = None
self.pods = None
self.containers = None
self.shared_vols = None
self.container_names_by_service = None
self.container_by_name = None
self._prefer_volume_over_mount = True
def run(self):
args = self._parse_args()
self._parse_compose_file()
podman_path = args.podman_path
if podman_path != 'podman':
if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK):
podman_path = os.path.realpath(podman_path)
else:
                # this also covers the case where podman is not installed at all
                if not args.dry_run:
                    sys.stderr.write("Binary {} has not been found.\n".format(podman_path))
exit(1)
self.podman = Podman(self, podman_path, args.dry_run)
if not args.dry_run:
# just to make sure podman is running
try:
self.podman_version = self.podman.output(["--version"]).decode('utf-8').strip()
except subprocess.CalledProcessError:
self.podman_version = None
if not self.podman_version:
sys.stderr.write("it seems that you do not have `podman` installed\n")
exit(1)
print("using podman version: "+self.podman_version)
cmd_name = args.command
cmd = self.commands[cmd_name]
cmd(self, args)
def _parse_compose_file(self):
args = self.global_args
cmd = args.command
if not args.file:
args.file = list(filter(os.path.exists, [
"docker-compose.yml",
"docker-compose.yaml",
"docker-compose.override.yml",
"docker-compose.override.yaml",
"container-compose.yml",
"container-compose.yaml",
"container-compose.override.yml",
"container-compose.override.yaml"
]))
files = args.file
if not files:
print("no docker-compose.yml or container-compose.yml file found, pass files with -f")
exit(-1)
ex = map(os.path.exists, files)
missing = [ fn0 for ex0, fn0 in zip(ex, files) if not ex0 ]
if missing:
print("missing files: ", missing)
exit(1)
# make absolute
files = list(map(os.path.realpath, files))
filename = files[0]
project_name = args.project_name
no_ansi = args.no_ansi
no_cleanup = args.no_cleanup
dry_run = args.dry_run
transform_policy = args.transform_policy
host_env = None
dirname = os.path.dirname(filename)
dir_basename = os.path.basename(dirname)
self.dirname = dirname
# TODO: remove next line
os.chdir(dirname)
if not project_name:
project_name = dir_basename.lower()
self.project_name = project_name
dotenv_path = os.path.join(dirname, ".env")
if os.path.exists(dotenv_path):
with open(dotenv_path, 'r') as f:
dotenv_ls = [l.strip() for l in f if l.strip() and not l.startswith('#')]
dotenv_dict = dict([l.split("=", 1) for l in dotenv_ls if "=" in l])
else:
dotenv_dict = {}
compose = {'_dirname': dirname}
for filename in files:
with open(filename, 'r') as f:
content = yaml.safe_load(f)
#print(filename, json.dumps(content, indent = 2))
if not isinstance(content, dict):
sys.stderr.write("Compose file does not contain a top level object: %s\n"%filename)
exit(1)
content = normalize(content)
#print(filename, json.dumps(content, indent = 2))
content = rec_subs(content, [os.environ, dotenv_dict])
rec_merge(compose, content)
# debug mode
if len(files)>1:
print(" ** merged:\n", json.dumps(compose, indent = 2))
ver = compose.get('version', None)
services = compose.get('services', None)
if services is None:
services = {}
print("WARNING: No services defined")
# NOTE: maybe add "extends.service" to _deps at this stage
flat_deps(services, with_extends=True)
service_names = sorted([ (len(srv["_deps"]), name) for name, srv in services.items() ])
service_names = [ name for _, name in service_names]
resolve_extends(services, service_names, dotenv_dict)
flat_deps(services)
service_names = sorted([ (len(srv["_deps"]), name) for name, srv in services.items() ])
service_names = [ name for _, name in service_names]
# volumes: [...]
shared_vols = compose.get('volumes', {})
# shared_vols = list(shared_vols.keys())
shared_vols = set(shared_vols.keys())
self.shared_vols = shared_vols
podman_compose_labels = [
"io.podman.compose.config-hash=123",
"io.podman.compose.project=" + project_name,
"io.podman.compose.version=0.0.1",
]
# other top-levels:
# networks: {driver: ...}
# configs: {...}
# secrets: {...}
given_containers = []
container_names_by_service = {}
for service_name, service_desc in services.items():
replicas = try_int(service_desc.get('deploy', {}).get('replicas', '1'))
container_names_by_service[service_name] = []
for num in range(1, replicas+1):
name0 = "{project_name}_{service_name}_{num}".format(
project_name=project_name,
service_name=service_name,
num=num,
)
if num == 1:
name = service_desc.get("container_name", name0)
else:
name = name0
container_names_by_service[service_name].append(name)
# print(service_name,service_desc)
cnt = dict(name=name, num=num,
service_name=service_name, **service_desc)
if 'image' not in cnt:
cnt['image'] = "{project_name}_{service_name}".format(
project_name=project_name,
service_name=service_name,
)
labels = norm_as_list(cnt.get('labels', None))
labels.extend(podman_compose_labels)
labels.extend([
"com.docker.compose.container-number={}".format(num),
"com.docker.compose.service=" + service_name,
])
cnt['labels'] = labels
cnt['_service'] = service_name
cnt['_project'] = project_name
given_containers.append(cnt)
self.container_names_by_service = container_names_by_service
container_by_name = dict([(c["name"], c) for c in given_containers])
#print("deps:", [(c["name"], c["_deps"]) for c in given_containers])
given_containers = list(container_by_name.values())
given_containers.sort(key=lambda c: len(c.get('_deps', None) or []))
#print("sorted:", [c["name"] for c in given_containers])
tr = transformations[transform_policy]
pods, containers = tr(
project_name, container_names_by_service, given_containers)
self.pods = pods
self.containers = containers
self.container_by_name = dict([ (c["name"], c) for c in containers])
def _parse_args(self):
parser = argparse.ArgumentParser()
self._init_global_parser(parser)
subparsers = parser.add_subparsers(title='command', dest='command')
subparser = subparsers.add_parser('help', help='show help')
for cmd_name, cmd in self.commands.items():
subparser = subparsers.add_parser(cmd_name, help=cmd._cmd_desc)
for cmd_parser in cmd._parse_args:
cmd_parser(subparser)
self.global_args = parser.parse_args()
if not self.global_args.command or self.global_args.command=='help':
parser.print_help()
exit(-1)
return self.global_args
def _init_global_parser(self, parser):
parser.add_argument("-f", "--file",
help="Specify an alternate compose file (default: docker-compose.yml)",
metavar='file', action='append', default=[])
parser.add_argument("-p", "--project-name",
help="Specify an alternate project name (default: directory name)",
type=str, default=None)
parser.add_argument("--podman-path",
help="Specify an alternate path to podman (default: use location in $PATH variable)",
type=str, default="podman")
parser.add_argument("--no-ansi",
help="Do not print ANSI control characters", action='store_true')
parser.add_argument("--no-cleanup",
help="Do not stop and remove existing pod & containers", action='store_true')
parser.add_argument("--dry-run",
help="No action; perform a simulation of commands", action='store_true')
parser.add_argument("-t", "--transform_policy",
        help="how to translate docker compose to podman [1pod|1podfw|hostnet|cntnet|publishall|identity]",
choices=['1pod', '1podfw', 'hostnet', 'cntnet', 'publishall', 'identity'], default='1podfw')
podman_compose = PodmanCompose()
###################
# decorators to add commands and parse options
###################
class cmd_run:
def __init__(self, compose, cmd_name, cmd_desc):
self.compose = compose
self.cmd_name = cmd_name
self.cmd_desc = cmd_desc
def __call__(self, func):
def wrapped(*args, **kw):
return func(*args, **kw)
wrapped._compose = self.compose
wrapped._cmd_name = self.cmd_name
wrapped._cmd_desc = self.cmd_desc
wrapped._parse_args = []
self.compose.commands[self.cmd_name] = wrapped
return wrapped
class cmd_parse:
def __init__(self, compose, cmd_names):
self.compose = compose
self.cmd_names = cmd_names if is_list(cmd_names) else [cmd_names]
def __call__(self, func):
def wrapped(*args, **kw):
return func(*args, **kw)
for cmd_name in self.cmd_names:
self.compose.commands[cmd_name]._parse_args.append(wrapped)
return wrapped
###################
# actual commands
###################
@cmd_run(podman_compose, 'version', 'show version')
def compose_version(compose, args):
    print("podman-compose version", __version__)
compose.podman.run(["--version"], sleep=0)
@cmd_run(podman_compose, 'pull', 'pull stack images')
def compose_pull(compose, args):
for cnt in compose.containers:
if cnt.get('build', None): continue
compose.podman.run(["pull", cnt["image"]], sleep=0)
@cmd_run(podman_compose, 'push', 'push stack images')
def compose_push(compose, args):
services = set(args.services)
for cnt in compose.containers:
if 'build' not in cnt: continue
if services and cnt['_service'] not in services: continue
compose.podman.run(["push", cnt["image"]], sleep=0)
def build_one(compose, args, cnt):
if 'build' not in cnt: return
if getattr(args, 'if_not_exists', None):
try: img_id = compose.podman.output(['inspect', '-t', 'image', '-f', '{{.Id}}', cnt["image"]])
except subprocess.CalledProcessError: img_id = None
if img_id: return
build_desc = cnt['build']
if not hasattr(build_desc, 'items'):
build_desc = dict(context=build_desc)
ctx = build_desc.get('context', '.')
dockerfile = os.path.join(ctx, build_desc.get("dockerfile", "Dockerfile"))
if not os.path.exists(dockerfile):
dockerfile = os.path.join(ctx, build_desc.get("dockerfile", "dockerfile"))
if not os.path.exists(dockerfile):
raise OSError("Dockerfile not found in "+ctx)
build_args = [
"build", "-t", cnt["image"],
"-f", dockerfile
]
if "target" in build_desc:
build_args.extend(["--target", build_desc["target"]])
container_to_ulimit_args(cnt, build_args)
if getattr(args, 'pull_always', None): build_args.append("--pull-always")
elif getattr(args, 'pull', None): build_args.append("--pull")
args_list = norm_as_list(build_desc.get('args', {}))
for build_arg in args_list + args.build_arg:
build_args.extend(("--build-arg", build_arg,))
build_args.append(ctx)
compose.podman.run(build_args, sleep=0)
@cmd_run(podman_compose, 'build', 'build stack images')
def compose_build(compose, args):
for cnt in compose.containers:
build_one(compose, args, cnt)
def create_pods(compose, args):
for pod in compose.pods:
podman_args = [
"pod", "create",
"--name={}".format(pod["name"]),
"--share", "net",
]
ports = pod.get("ports", None) or []
for i in ports:
podman_args.extend(['-p', i])
compose.podman.run(podman_args)
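# This issues roughly (illustrative): podman pod create --name=<project> --share net -p 8080:80 ...
# one invocation per entry in compose.pods (a single pod under the default 1podfw policy).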
def up_specific(compose, args):
deps = []
if not args.no_deps:
for service in args.services:
deps.extend([])
# args.always_recreate_deps
print("services", args.services)
raise NotImplementedError("starting specific services is not yet implemented")
@cmd_run(podman_compose, 'up', 'Create and start the entire stack or some of its services')
def compose_up(compose, args):
if args.services:
return up_specific(compose, args)
if not args.no_build:
# `podman build` does not cache, so don't always build
build_args = argparse.Namespace(
if_not_exists=(not args.build),
**args.__dict__)
compose.commands['build'](compose, build_args)
shared_vols = compose.shared_vols
# TODO: implement check hash label for change
if args.force_recreate:
compose.commands['down'](compose, args)
# args.no_recreate disables check for changes (which is not implemented)
podman_command = 'run' if args.detach and not args.no_start else 'create'
create_pods(compose, args)
for cnt in compose.containers:
podman_args = container_to_args(compose, cnt,
detached=args.detach, podman_command=podman_command)
subproc = compose.podman.run(podman_args)
if podman_command == 'run' and subproc.returncode:
compose.podman.run(['start', cnt['name']])
if args.no_start or args.detach or args.dry_run: return
# TODO: handle already existing
# TODO: if error creating do not enter loop
# TODO: colors if sys.stdout.isatty()
threads = []
for cnt in compose.containers:
# TODO: remove sleep from podman.run
thread = Thread(target=compose.podman.run, args=[['start', '-a', cnt['name']]], daemon=True)
thread.start()
threads.append(thread)
time.sleep(1)
while threads:
for thread in threads:
thread.join(timeout=1.0)
if not thread.is_alive():
threads.remove(thread)
if args.abort_on_container_exit:
exit(-1)
@cmd_run(podman_compose, 'down', 'tear down entire stack')
def compose_down(compose, args):
podman_args=[]
timeout=getattr(args, 'timeout', None)
if timeout is None:
timeout = 1
podman_args.extend(['-t', "{}".format(timeout)])
for cnt in compose.containers:
compose.podman.run(["stop", *podman_args, cnt["name"]], sleep=0)
for cnt in compose.containers:
compose.podman.run(["rm", cnt["name"]], sleep=0)
for pod in compose.pods:
compose.podman.run(["pod", "rm", pod["name"]], sleep=0)
@cmd_run(podman_compose, 'ps', 'show status of containers')
def compose_ps(compose, args):
proj_name = compose.project_name
    if args.quiet:
compose.podman.run(["ps", "-a", "--format", "{{.ID}}", "--filter", f"label=io.podman.compose.project={proj_name}"])
else:
compose.podman.run(["ps", "-a", "--filter", f"label=io.podman.compose.project={proj_name}"])
@cmd_run(podman_compose, 'run', 'create a container similar to a service to run a one-off command')
def compose_run(compose, args):
create_pods(compose, args)
print(args)
container_names=compose.container_names_by_service[args.service]
container_name=container_names[0]
cnt = compose.container_by_name[container_name]
deps = cnt["_deps"]
if not args.no_deps:
# TODO: start services in deps
pass
# adjust one-off container options
name0 = "{}_{}_tmp{}".format(compose.project_name, args.service, random.randrange(0, 65536))
cnt["name"] = args.name or name0
if args.entrypoint: cnt["entrypoint"] = args.entrypoint
if args.user: cnt["user"] = args.user
if args.workdir: cnt["working_dir"] = args.workdir
if not args.service_ports:
for k in ("expose", "publishall", "ports"):
try: del cnt[k]
except KeyError: pass
if args.volume:
# TODO: handle volumes
pass
    cnt['tty'] = not args.T
if args.cnt_command is not None and len(args.cnt_command) > 0:
cnt['command']=args.cnt_command
# run podman
podman_args = container_to_args(compose, cnt, args.detach)
if not args.detach:
podman_args.insert(1, '-i')
if args.rm:
podman_args.insert(1, '--rm')
compose.podman.run(podman_args, sleep=0)
def transfer_service_status(compose, args, action):
# TODO: handle dependencies, handle creations
container_names_by_service = compose.container_names_by_service
targets = []
for service in args.services:
if service not in container_names_by_service:
raise ValueError("unknown service: " + service)
targets.extend(container_names_by_service[service])
podman_args=[action]
timeout=getattr(args, 'timeout', None)
if timeout is not None:
podman_args.extend(['-t', "{}".format(timeout)])
for target in targets:
compose.podman.run(podman_args+[target], sleep=0)
@cmd_run(podman_compose, 'start', 'start specific services')
def compose_start(compose, args):
transfer_service_status(compose, args, 'start')
@cmd_run(podman_compose, 'stop', 'stop specific services')
def compose_stop(compose, args):
transfer_service_status(compose, args, 'stop')
@cmd_run(podman_compose, 'restart', 'restart specific services')
def compose_restart(compose, args):
transfer_service_status(compose, args, 'restart')
@cmd_run(podman_compose, 'logs', 'show logs from services')
def compose_logs(compose, args):
container_names_by_service = compose.container_names_by_service
target = None
if args.service not in container_names_by_service:
raise ValueError("unknown service: " + args.service)
target = container_names_by_service[args.service]
podman_args = ['logs']
if args.follow:
podman_args.append('-f')
    # podman's default (0) already prints all logs, so 'all' does not need to be passed
if args.tail and args.tail != 'all':
podman_args.extend(['--tail', args.tail])
if args.timestamps:
podman_args.append('-t')
compose.podman.run(podman_args+target)
###################
# command arguments parsing
###################
@cmd_parse(podman_compose, 'up')
def compose_up_parse(parser):
parser.add_argument("-d", "--detach", action='store_true',
help="Detached mode: Run container in the background, print new container name. Incompatible with --abort-on-container-exit.")
parser.add_argument("--no-color", action='store_true',
help="Produce monochrome output.")
parser.add_argument("--quiet-pull", action='store_true',
help="Pull without printing progress information.")
parser.add_argument("--no-deps", action='store_true',
help="Don't start linked services.")
parser.add_argument("--force-recreate", action='store_true',
help="Recreate containers even if their configuration and image haven't changed.")
parser.add_argument("--always-recreate-deps", action='store_true',
help="Recreate dependent containers. Incompatible with --no-recreate.")
parser.add_argument("--no-recreate", action='store_true',
help="If containers already exist, don't recreate them. Incompatible with --force-recreate and -V.")
parser.add_argument("--no-build", action='store_true',
help="Don't build an image, even if it's missing.")
parser.add_argument("--no-start", action='store_true',
help="Don't start the services after creating them.")
parser.add_argument("--build", action='store_true',
help="Build images before starting containers.")
parser.add_argument("--abort-on-container-exit", action='store_true',
help="Stops all containers if any container was stopped. Incompatible with -d.")
parser.add_argument("-t", "--timeout", type=float, default=10,
help="Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)")
parser.add_argument("-V", "--renew-anon-volumes", action='store_true',
help="Recreate anonymous volumes instead of retrieving data from the previous containers.")
parser.add_argument("--remove-orphans", action='store_true',
help="Remove containers for services not defined in the Compose file.")
parser.add_argument('--scale', metavar="SERVICE=NUM", action='append',
help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.")
parser.add_argument("--exit-code-from", metavar='SERVICE', type=str, default=None,
help="Return the exit code of the selected service container. Implies --abort-on-container-exit.")
parser.add_argument('services', metavar='SERVICES', nargs='*',
help='service names to start')
@cmd_parse(podman_compose, 'run')
def compose_run_parse(parser):
parser.add_argument("-d", "--detach", action='store_true',
help="Detached mode: Run container in the background, print new container name.")
parser.add_argument("--name", type=str, default=None,
help="Assign a name to the container")
parser.add_argument("--entrypoint", type=str, default=None,
help="Override the entrypoint of the image.")
parser.add_argument('-e', metavar="KEY=VAL", action='append',
help="Set an environment variable (can be used multiple times)")
parser.add_argument('-l', '--label', metavar="KEY=VAL", action='append',
help="Add or override a label (can be used multiple times)")
parser.add_argument("-u", "--user", type=str, default=None,
help="Run as specified username or uid")
parser.add_argument("--no-deps", action='store_true',
help="Don't start linked services")
parser.add_argument("--rm", action='store_true',
help="Remove container after run. Ignored in detached mode.")
parser.add_argument('-p', '--publish', action='append',
help="Publish a container's port(s) to the host (can be used multiple times)")
parser.add_argument("--service-ports", action='store_true',
help="Run command with the service's ports enabled and mapped to the host.")
parser.add_argument('-v', '--volume', action='append',
help="Bind mount a volume (can be used multiple times)")
parser.add_argument("-T", action='store_true',
help="Disable pseudo-tty allocation. By default `podman-compose run` allocates a TTY.")
parser.add_argument("-w", "--workdir", type=str, default=None,
help="Working directory inside the container")
parser.add_argument('service', metavar='service', nargs=None,
help='service name')
parser.add_argument('cnt_command', metavar='command', nargs=argparse.REMAINDER,
help='command and its arguments')
@cmd_parse(podman_compose, ['down', 'stop', 'restart'])
def compose_parse_timeout(parser):
parser.add_argument("-t", "--timeout",
help="Specify a shutdown timeout in seconds. ",
type=int, default=10)
@cmd_parse(podman_compose, ['start', 'stop', 'restart'])
def compose_parse_services(parser):
parser.add_argument('services', metavar='services', nargs='+',
help='affected services')
@cmd_parse(podman_compose, ['logs'])
def compose_logs_parse(parser):
parser.add_argument("-f", "--follow", action='store_true',
help="Follow log output.")
parser.add_argument("-t", "--timestamps", action='store_true',
help="Show timestamps.")
parser.add_argument("--tail",
help="Number of lines to show from the end of the logs for each "
"container.",
type=str, default="all")
parser.add_argument('service', metavar='service', nargs=None,
help='service name')
@cmd_parse(podman_compose, 'push')
def compose_push_parse(parser):
parser.add_argument("--ignore-push-failures", action='store_true',
        help="Push what it can and ignore images with push failures. (not implemented)")
parser.add_argument('services', metavar='services', nargs='*',
help='services to push')
@cmd_parse(podman_compose, 'ps')
def compose_ps_parse(parser):
parser.add_argument("-q", "--quiet",
help="Only display container IDs", action='store_true')
@cmd_parse(podman_compose, ['build', 'up'])
def compose_build_parse(parser):
parser.add_argument("--pull",
help="attempt to pull a newer version of the image", action='store_true')
parser.add_argument("--pull-always",
        help="attempt to pull a newer version of the image, raising an error if the pull fails even when the image is present locally.", action='store_true')
parser.add_argument("--build-arg", metavar="key=val", action="append", default=[],
help="Set build-time variables for services.")
def main():
podman_compose.run()
if __name__ == "__main__":
main()