#! /usr/bin/python3
# https://docs.docker.com/compose/compose-file/#service-configuration-reference
# https://docs.docker.com/samples/
# https://docs.docker.com/compose/gettingstarted/
# https://docs.docker.com/compose/django/
# https://docs.docker.com/compose/wordpress/
from __future__ import print_function
import sys
import os
import argparse
import subprocess
import time
import re
import hashlib
# import fnmatch
# fnmatch.fnmatchcase(env, "*_HOST")
import json
import yaml
PY3 = sys.version_info[0] == 3
if PY3:
basestring = str
# helper functions
is_str = lambda s: isinstance(s, basestring)
is_dict = lambda d: isinstance(d, dict)
is_list = lambda l: not is_str(l) and not is_dict(l) and hasattr(l, "__iter__")
def try_int(i, fallback=None):
try:
return int(i)
except ValueError:
pass
except TypeError:
pass
return fallback
dir_re = re.compile("^[~/\.]")
propagation_re=re.compile("^(?:z|Z|r?shared|r?slave|r?private)$")
def parse_short_mount(mount_str, basedir):
mount_a = mount_str.split(':')
mount_opt_dict = {}
mount_opt = None
if len(mount_a)==1:
# Just specify a path and let the Engine create a volume
# - /var/lib/mysql
mount_src, mount_dst=None, mount_str
elif len(mount_a)==2:
mount_src, mount_dst = mount_a
if not mount_dst.startswith('/'):
mount_dst, mount_opt = mount_a
mount_src = None
elif len(mount_a)==3:
mount_src, mount_dst, mount_opt = mount_a
else:
raise ValueError("could not parse mount "+mount_str)
if mount_src and dir_re.match(mount_src):
# Specify an absolute path mapping
# - /opt/data:/var/lib/mysql
# Path on the host, relative to the Compose file
# - ./cache:/tmp/cache
# User-relative path
# - ~/configs:/etc/configs/:ro
mount_type = "bind"
# TODO: should we use os.path.realpath(basedir)?
mount_src = os.path.join(basedir, os.path.expanduser(mount_src))
else:
# Named volume
# - datavolume:/var/lib/mysql
mount_type = "volume"
mount_opts = filter(lambda i:i, (mount_opt or '').split(','))
for opt in mount_opts:
if opt=='ro': mount_opt_dict["read_only"]=True
elif opt=='rw': mount_opt_dict["read_only"]=False
elif propagation_re.match(opt): mount_opt_dict["bind"]=dict(propagation=opt)
else:
# TODO: ignore
raise ValueError("unknown mount option "+opt)
return dict(type=mount_type, source=mount_src, target=mount_dst, **mount_opt_dict)
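# Illustrative examples (not executed) of what parse_short_mount() returns,
# assuming a hypothetical compose file directory "/proj":
#   parse_short_mount("./cache:/tmp/cache:ro", "/proj")
#     -> {"type": "bind", "source": "/proj/./cache", "target": "/tmp/cache", "read_only": True}
#   parse_short_mount("datavolume:/var/lib/mysql", "/proj")
#     -> {"type": "volume", "source": "datavolume", "target": "/var/lib/mysql"}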
def fix_mount_dict(mount_dict, srv_name, cnt_name):
"""
    fix the mount dictionary in place, adding a generated source name for anonymous volumes
"""
if mount_dict["type"]=="volume" and not mount_dict.get("source"):
mount_dict["source"] = "_".join([
srv_name, cnt_name,
hashlib.md5(mount_dict["target"].encode("utf-8")).hexdigest(),
])
return mount_dict
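# For an anonymous volume (no source) the generated name is roughly
# "<service>_<container>_<md5 of target>", e.g. service "db" in container
# "proj_db_1" mounting "/var/lib/mysql"; the hash only keeps multiple targets apart.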
# docker and docker-compose support subset of bash variable substitution
# https://docs.docker.com/compose/compose-file/#variable-substitution
# https://docs.docker.com/compose/env-file/
# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
# $VARIABLE
# ${VARIABLE}
# ${VARIABLE:-default} default if not set or empty
# ${VARIABLE-default} default if not set
# ${VARIABLE:?err} raise error if not set or empty
# ${VARIABLE?err} raise error if not set
# $$ means $
var_re = re.compile(r'\$(\{(?:[^\s\$:\-\}]+)\}|(?:[^\s\$\{\}]+))')
var_def_re = re.compile(r'\$\{([^\s\$:\-\}]+)(:)?-([^\}]+)\}')
var_err_re = re.compile(r'\$\{([^\s\$:\-\}]+)(:)?\?([^\}]+)\}')
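# Rough examples of how values expand (lookups go through dicts_get() below,
# first in os.environ, then in the .env file); TAG and IMAGE are hypothetical:
#   "busybox:${TAG:-latest}" -> "busybox:1.30"   when TAG=1.30 is set
#   "busybox:${TAG:-latest}" -> "busybox:latest" when TAG is unset or empty
#   "$IMAGE"                 -> value of IMAGE, or "" when it is unset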
def dicts_get(dicts, key, fallback='', fallback_empty=False):
"""
get the given key from any dict in dicts, trying them one by one
    if not found in any of them, use fallback; if fallback is an Exception, raise it
"""
value = None
for d in dicts:
value = d.get(key)
if value is not None: break
if not value:
if fallback_empty or value is None:
value = fallback
if isinstance(value, Exception):
raise value
return value
def rec_subs(value, dicts):
"""
    do bash-like substitution in value; if it is a list or a dictionary, recurse into it
"""
if is_dict(value):
value = dict([(k, rec_subs(v, dicts)) for k, v in value.items()])
elif is_str(value):
value = var_re.sub(lambda m: dicts_get(dicts, m.group(1).strip('{}')), value)
sub_def = lambda m: dicts_get(dicts, m.group(1), m.group(3), m.group(2) == ':')
value = var_def_re.sub(sub_def, value)
sub_err = lambda m: dicts_get(dicts, m.group(1), RuntimeError(m.group(3)),
m.group(2) == ':')
value = var_err_re.sub(sub_err, value)
value = value.replace('$$', '$')
elif hasattr(value, "__iter__"):
value = [rec_subs(i, dicts) for i in value]
return value
def norm_as_list(src):
"""
given a dictionary {key1:value1, key2: None} or list
return a list of ["key1=value1", "key2"]
"""
if src is None:
dst = []
elif is_dict(src):
dst = [("{}={}".format(k, v) if v else k) for k, v in src.items()]
elif is_list(src):
dst = list(src)
else:
dst = [src]
return dst
def norm_as_dict(src):
"""
given a list ["key1=value1", "key2"]
return a dictionary {key1:value1, key2: None}
"""
if src is None:
dst = {}
elif is_dict(src):
dst = dict(src)
elif is_list(src):
dst = [i.split("=", 1) for i in src if i]
dst = dict([(a if len(a) == 2 else (a[0], None)) for a in dst])
else:
raise ValueError("dictionary or iterable is expected")
return dst
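# Round-trip examples (illustrative only):
#   norm_as_dict(["MYSQL_USER=app", "DEBUG"]) -> {"MYSQL_USER": "app", "DEBUG": None}
#   norm_as_list({"MYSQL_USER": "app", "DEBUG": None}) -> ["MYSQL_USER=app", "DEBUG"]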
# transformation helpers
def adj_hosts(services, cnt, dst="127.0.0.1"):
"""
adjust container cnt in-place to add hosts pointing to dst for services
"""
common_extra_hosts = []
for srv, cnts in services.items():
common_extra_hosts.append("{}:{}".format(srv, dst))
for cnt0 in cnts:
common_extra_hosts.append("{}:{}".format(cnt0, dst))
extra_hosts = list(cnt.get("extra_hosts", []))
extra_hosts.extend(common_extra_hosts)
# link aliases
for link in cnt.get("links", []):
a = link.strip().split(':', 1)
if len(a) == 2:
alias = a[1].strip()
extra_hosts.append("{}:{}".format(alias, dst))
cnt["extra_hosts"] = extra_hosts
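# Sketch of the effect: with services={"web": ["proj_web_1"]} and dst="127.0.0.1",
# adj_hosts() appends "web:127.0.0.1" and "proj_web_1:127.0.0.1" to
# cnt["extra_hosts"], plus one entry per link alias.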
def move_list(dst, containers, key):
"""
    move key (like port forwarding) from containers to dst (a pod or an infra container)
"""
a = set(dst.get(key) or [])
for cnt in containers:
a0 = cnt.get(key)
if a0:
a.update(a0)
del cnt[key]
if a:
dst[key] = list(a)
def move_port_fw(dst, containers):
"""
    move port forwarding from containers to dst (a pod or an infra container)
"""
move_list(dst, containers, "ports")
def move_extra_hosts(dst, containers):
"""
    move extra hosts from containers to dst (a pod or an infra container)
"""
move_list(dst, containers, "extra_hosts")
# transformations
transformations = {}
def trans(func):
transformations[func.__name__.replace("tr_", "")] = func
return func
@trans
def tr_identity(project_name, services, given_containers):
containers = []
for cnt in given_containers:
containers.append(dict(cnt))
return [], containers
@trans
def tr_publishall(project_name, services, given_containers):
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, publishall=True)
# adjust hosts to point to the gateway, TODO: adjust host env
adj_hosts(services, cnt, '10.0.2.2')
containers.append(cnt)
return [], containers
@trans
def tr_hostnet(project_name, services, given_containers):
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, network_mode="host")
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
containers.append(cnt)
return [], containers
@trans
def tr_cntnet(project_name, services, given_containers):
containers = []
infra_name = project_name + "_infra"
infra = dict(
name=infra_name,
image="k8s.gcr.io/pause:3.1",
)
for cnt0 in given_containers:
cnt = dict(cnt0, network_mode="container:"+infra_name)
deps = cnt.get("depends_on") or []
deps.append(infra_name)
cnt["depends_on"] = deps
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
if "hostname" in cnt:
del cnt["hostname"]
containers.append(cnt)
move_port_fw(infra, containers)
move_extra_hosts(infra, containers)
containers.insert(0, infra)
return [], containers
@trans
def tr_1pod(project_name, services, given_containers):
"""
project_name:
services: {service_name: ["container_name1", "..."]}, currently only one is supported
given_containers: [{}, ...]
"""
pod = dict(name=project_name)
containers = []
for cnt0 in given_containers:
cnt = dict(cnt0, pod=project_name)
# services can be accessed as localhost because they are on one pod
# adjust hosts to point to localhost, TODO: adjust host env
adj_hosts(services, cnt, '127.0.0.1')
containers.append(cnt)
return [pod], containers
@trans
def tr_1podfw(project_name, services, given_containers):
pods, containers = tr_1pod(project_name, services, given_containers)
pod = pods[0]
move_port_fw(pod, containers)
return pods, containers
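# Rough summary of the transformation policies registered above:
#   identity    - containers are kept as-is
#   1pod/1podfw - one pod per project; services reach each other via localhost
#                 (1podfw additionally moves port forwards up to the pod)
#   hostnet     - host networking, hosts entries point at 127.0.0.1
#   cntnet      - all containers join the network of a shared "pause" infra container
#   publishall  - publish all ports (-P), hosts entries point at the gateway 10.0.2.2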
def run_podman(dry_run, podman_path, podman_args, wait=True, sleep=1):
print("podman " + " ".join(podman_args))
if dry_run:
return None
cmd = [podman_path]+podman_args
# subprocess.Popen(args, bufsize = 0, executable = None, stdin = None, stdout = None, stderr = None, preexec_fn = None, close_fds = False, shell = False, cwd = None, env = None, universal_newlines = False, startupinfo = None, creationflags = 0)
p = subprocess.Popen(cmd)
if wait:
print(p.wait())
if sleep:
time.sleep(sleep)
return p
def mount_dict_vol_to_bind(mount_dict, podman_path, proj_name, shared_vols):
"""
inspect volume to get directory
create volume if needed
and return mount_dict as bind of that directory
"""
if mount_dict["type"]!="volume": return mount_dict
vol_name = mount_dict["source"]
print("podman volume inspect {vol_name} || podman volume create {vol_name}".format(vol_name=vol_name))
# podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE'
try: out = subprocess.check_output([podman_path, "volume", "inspect", vol_name])
except subprocess.CalledProcessError:
subprocess.check_output([podman_path, "volume", "create", "-l", "io.podman.compose.project={}".format(proj_name), vol_name])
out = subprocess.check_output([podman_path, "volume", "inspect", vol_name])
src = json.loads(out)[0]["mountPoint"]
ret=dict(mount_dict, type="bind", source=src, _vol=vol_name)
bind_prop=ret.get("bind", {}).get("propagation")
if not bind_prop:
if "bind" not in ret:
ret["bind"]={}
# if in top level volumes then it's shared bind-propagation=z
if vol_name in shared_vols:
ret["bind"]["propagation"]="z"
else:
ret["bind"]["propagation"]="Z"
try: del ret["volume"]
except KeyError: pass
return ret
def mount_desc_to_args(mount_desc, podman_path, basedir, proj_name, srv_name, cnt_name, shared_vols):
if is_str(mount_desc): mount_desc=parse_short_mount(mount_desc, basedir)
mount_desc = mount_dict_vol_to_bind(fix_mount_dict(mount_desc, srv_name, cnt_name), podman_path, proj_name, shared_vols)
mount_type = mount_desc.get("type")
source = mount_desc.get("source")
target = mount_desc["target"]
opts=[]
if mount_desc.get("bind"):
bind_prop=mount_desc["bind"].get("propagation")
if bind_prop: opts.append("bind-propagation={}".format(bind_prop))
if mount_desc.get("read_only", False): opts.append("ro")
if mount_type=='tmpfs':
tmpfs_opts = mount_desc.get("tmpfs", {})
tmpfs_size = tmpfs_opts.get("size")
if tmpfs_size:
opts.append("tmpfs-size={}".format(tmpfs_size))
tmpfs_mode = tmpfs_opts.get("mode")
if tmpfs_mode:
opts.append("tmpfs-mode={}".format(tmpfs_mode))
opts=",".join(opts)
if mount_type=='bind':
return "type=bind,source={source},destination={target},{opts}".format(
source=source,
target=target,
opts=opts
).rstrip(",")
elif mount_type=='tmpfs':
return "type=tmpfs,destination={target},{opts}".format(
target=target,
opts=opts
).rstrip(",")
else:
raise ValueError("unknown mount type:"+mount_type)
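# Roughly, a short bind mount like "./data:/var/lib/mysql:ro" (with the compose
# file in a hypothetical "/proj") becomes a podman --mount argument such as
#   type=bind,source=/proj/./data,destination=/var/lib/mysql,ro
# while a named volume is first created/inspected and rewritten above to a bind
# of its mountPoint with bind-propagation=z (shared) or Z (per-container).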
# pylint: disable=unused-argument
def down(project_name, dirname, pods, containers, dry_run, podman_path):
for cnt in containers:
run_podman(dry_run, podman_path, [
"stop", "-t=1", cnt["name"]], sleep=0)
for cnt in containers:
run_podman(dry_run, podman_path, ["rm", cnt["name"]], sleep=0)
for pod in pods:
run_podman(dry_run, podman_path, ["pod", "rm", pod["name"]], sleep=0)
def container_to_args(cnt, dirname, podman_path, shared_vols):
pod = cnt.get('pod') or ''
args = [
'run',
'--name={}'.format(cnt.get('name')),
'-d'
]
if pod:
args.append('--pod={}'.format(pod))
if cnt.get('read_only'):
args.append('--read-only')
for i in cnt.get('labels', []):
args.extend(['-l', i])
net = cnt.get("network_mode")
if net:
args.extend(['--network', net])
env = norm_as_list(cnt.get('environment', {}))
for e in env:
args.extend(['-e', e])
for i in cnt.get('env_file', []):
i = os.path.realpath(os.path.join(dirname, i))
args.extend(['--env-file', i])
tmpfs_ls = cnt.get('tmpfs', [])
if is_str(tmpfs_ls): tmpfs_ls=[tmpfs_ls]
for i in tmpfs_ls:
args.extend(['--tmpfs', i])
for i in cnt.get('volumes', []):
# TODO: should we make it os.path.realpath(os.path.join(, i))?
mount_args = mount_desc_to_args(
i, podman_path, dirname,
cnt['_project'], cnt['_service'], cnt['name'],
shared_vols
)
args.extend(['--mount', mount_args])
for i in cnt.get('extra_hosts', []):
args.extend(['--add-host', i])
for i in cnt.get('expose', []):
args.extend(['--expose', i])
if cnt.get('publishall'):
args.append('-P')
for i in cnt.get('ports', []):
args.extend(['-p', i])
user = cnt.get('user')
if user is not None:
args.extend(['-u', user])
if cnt.get('working_dir') is not None:
args.extend(['-w', cnt.get('working_dir')])
if cnt.get('hostname'):
args.extend(['--hostname', cnt.get('hostname')])
if cnt.get('shm_size'):
        args.extend(['--shm-size', '{}'.format(cnt.get('shm_size'))])
if cnt.get('stdin_open'):
args.append('-i')
if cnt.get('tty'):
args.append('--tty')
# currently podman shipped by fedora does not package this
# if cnt.get('init'):
# args.append('--init')
entrypoint = cnt.get('entrypoint')
if entrypoint is not None:
if is_str(entrypoint):
args.extend(['--entrypoint', entrypoint])
else:
args.extend(['--entrypoint', json.dumps(entrypoint)])
args.append(cnt.get('image')) # command, ..etc.
command = cnt.get('command')
if command is not None:
# TODO: handle if command is string
args.extend(command)
return args
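# Illustrative result: for a hypothetical minimal container description like
#   {"name": "proj_web_1", "pod": "proj", "image": "busybox", "ports": ["8080:80"]}
# container_to_args() yields roughly
#   ["run", "--name=proj_web_1", "-d", "--pod=proj", "-p", "8080:80", "busybox"]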
def rec_deps(services, container_by_name, cnt, init_service):
deps = cnt["_deps"]
    for dep in list(deps):  # iterate over a copy, deps is updated inside the loop
dep_cnts = services.get(dep)
if not dep_cnts:
continue
dep_cnt = container_by_name.get(dep_cnts[0])
if dep_cnt:
# TODO: avoid creating loops, A->B->A
if init_service and init_service in dep_cnt["_deps"]:
continue
new_deps = rec_deps(services, container_by_name,
dep_cnt, init_service)
deps.update(new_deps)
return deps
def flat_deps(services, container_by_name):
for name, cnt in container_by_name.items():
deps = set([(c.split(":")[0] if ":" in c else c)
for c in cnt.get("links", [])])
deps.update(cnt.get("depends_on", []))
cnt["_deps"] = deps
for name, cnt in container_by_name.items():
rec_deps(services, container_by_name, cnt, cnt.get('_service'))
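# Sketch: if web links/depends_on db and db depends_on redis, after flat_deps()
# web's cnt["_deps"] is {"db", "redis"}; run_compose() later sorts containers by
# the size of this set so that dependencies are started first.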
# pylint: disable=unused-argument
def pull(project_name, dirname, pods, containers, dry_run, podman_path):
for cnt in containers:
if cnt.get('build'): continue
run_podman(dry_run, podman_path, ["pull", cnt["image"]], sleep=0)
def push(project_name, dirname, pods, containers, dry_run, podman_path, cmd_args):
parser = argparse.ArgumentParser()
parser.prog+=' push'
parser.add_argument("--ignore-push-failures", action='store_true',
help="Push what it can and ignores images with push failures. (not implemented)")
parser.add_argument('services', metavar='services', nargs='*',
help='services to push')
args = parser.parse_args(cmd_args)
services = set(args.services)
for cnt in containers:
if 'build' not in cnt: continue
if services and cnt['_service'] not in services: continue
run_podman(dry_run, podman_path, ["push", cnt["image"]], sleep=0)
# pylint: disable=unused-argument
def build(project_name, dirname, pods, containers, dry_run, podman_path, podman_args=[]):
for cnt in containers:
if 'build' not in cnt: continue
build_desc = cnt['build']
if not hasattr(build_desc, 'items'):
build_desc = dict(context=build_desc)
ctx = build_desc.get('context', '.')
dockerfile = os.path.join(ctx, build_desc.get("dockerfile", "Dockerfile"))
if not os.path.exists(dockerfile):
dockerfile = os.path.join(ctx, build_desc.get("dockerfile", "dockerfile"))
if not os.path.exists(dockerfile):
raise OSError("Dockerfile not found in "+ctx)
build_args = [
"build", "-t", cnt["image"],
"-f", dockerfile
]
build_args.extend(podman_args)
args_list = norm_as_list(build_desc.get('args', {}))
for build_arg in args_list:
build_args.extend(("--build-arg", build_arg,))
build_args.append(ctx)
run_podman(dry_run, podman_path, build_args, sleep=0)
def up(project_name, dirname, pods, containers, no_cleanup, dry_run, podman_path, shared_vols):
os.chdir(dirname)
# NOTE: podman does not cache, so don't always build
    # TODO: if the service has a build section and `podman inspect -t image <image_name>` fails, run build
    # no need to remove them if they have the same hash label
if no_cleanup == False:
down(project_name, dirname, pods, containers, dry_run, podman_path)
for pod in pods:
args = [
"pod", "create",
"--name={}".format(pod["name"]),
"--share", "net",
]
ports = pod.get("ports") or []
for i in ports:
args.extend(['-p', i])
run_podman(dry_run, podman_path, args)
for cnt in containers:
# TODO: -e , --add-host, -v, --read-only
args = container_to_args(cnt, dirname, podman_path, shared_vols)
run_podman(dry_run, podman_path, args)
def run_compose(
cmd, cmd_args, filename, project_name,
no_ansi, no_cleanup, dry_run,
transform_policy, podman_path, host_env=None,
):
if not os.path.exists(filename):
alt_path = filename.replace('.yml', '.yaml')
if os.path.exists(alt_path):
filename = alt_path
else:
print("file [{}] not found".format(filename))
exit(-1)
filename = os.path.realpath(filename)
dirname = os.path.dirname(filename)
dir_basename = os.path.basename(dirname)
if podman_path != 'podman':
if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK):
podman_path = os.path.realpath(podman_path)
else:
            # this also works even if podman is not installed yet
if dry_run == False:
raise IOError(
"Binary {} has not been found.".format(podman_path))
if not project_name:
project_name = dir_basename
dotenv_path = os.path.join(dirname, ".env")
if os.path.exists(dotenv_path):
with open(dotenv_path, 'r') as f:
dotenv_ls = [l.strip() for l in f if l.strip() and not l.startswith('#')]
dotenv_dict = dict([l.split("=", 1) for l in dotenv_ls if "=" in l])
else:
dotenv_dict = {}
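    # e.g. a .env line like "TAG=1.30" feeds the ${TAG:-latest} substitutions above;
    # os.environ wins over .env because it comes first in the dicts passed to rec_subs()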
with open(filename, 'r') as f:
compose = rec_subs(yaml.safe_load(f), [os.environ, dotenv_dict])
compose['_dirname']=dirname
# debug mode
#print(json.dumps(compose, indent = 2))
ver = compose.get('version')
services = compose.get('services')
# volumes: [...]
shared_vols = compose.get('volumes', {})
# shared_vols = list(shared_vols.keys())
shared_vols = set(shared_vols.keys())
podman_compose_labels = [
"io.podman.compose.config-hash=123",
"io.podman.compose.project=" + project_name,
"io.podman.compose.version=0.0.1",
]
# other top-levels:
# networks: {driver: ...}
# configs: {...}
# secrets: {...}
given_containers = []
container_names_by_service = {}
for service_name, service_desc in services.items():
replicas = try_int(service_desc.get('deploy', {}).get('replicas', '1'))
container_names_by_service[service_name] = []
for num in range(1, replicas+1):
name0 = "{project_name}_{service_name}_{num}".format(
project_name=project_name,
service_name=service_name,
num=num,
)
if num == 1:
name = service_desc.get("container_name", name0)
else:
name = name0
container_names_by_service[service_name].append(name)
# print(service_name,service_desc)
cnt = dict(name=name, num=num,
service_name=service_name, **service_desc)
if 'image' not in cnt:
cnt['image'] = "{project_name}_{service_name}".format(
project_name=project_name,
service_name=service_name,
)
labels = norm_as_list(cnt.get('labels'))
labels.extend(podman_compose_labels)
labels.extend([
"com.docker.compose.container-number={}".format(num),
"com.docker.compose.service=" + service_name,
])
cnt['labels'] = labels
cnt['_service'] = service_name
cnt['_project'] = project_name
given_containers.append(cnt)
container_by_name = dict([(c["name"], c) for c in given_containers])
flat_deps(container_names_by_service, container_by_name)
#print("deps:", [(c["name"], c["_deps"]) for c in given_containers])
given_containers = list(container_by_name.values())
given_containers.sort(key=lambda c: len(c.get('_deps') or []))
#print("sorted:", [c["name"] for c in given_containers])
tr = transformations[transform_policy]
pods, containers = tr(
project_name, container_names_by_service, given_containers)
if cmd != "build" and cmd != "push" and cmd_args:
raise ValueError("'{}' does not accept any argument".format(cmd))
if cmd == "pull":
pull(project_name, dirname, pods, containers, dry_run, podman_path)
    elif cmd == "push":
push(project_name, dirname, pods, containers, dry_run, podman_path, cmd_args)
elif cmd == "build":
parser = argparse.ArgumentParser()
parser.prog+=' build'
parser.add_argument("--pull",
help="attempt to pull a newer version of the image", action='store_true')
parser.add_argument("--pull-always",
        help="attempt to pull a newer version of the image; raise an error even if the image is present locally.", action='store_true')
args = parser.parse_args(cmd_args)
podman_args = []
if args.pull_always: podman_args.append("--pull-always")
elif args.pull: podman_args.append("--pull")
build(project_name, dirname, pods, containers, dry_run, podman_path, podman_args)
elif cmd == "up":
up(project_name, dirname, pods, containers,
no_cleanup, dry_run, podman_path, shared_vols)
elif cmd == "down":
down(project_name, dirname, pods, containers, dry_run, podman_path)
else:
raise NotImplementedError("command {} is not implemented".format(cmd))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('command', metavar='command',
help='command to run',
choices=['up', 'down', 'build', 'pull', 'push'], nargs=None, default="up")
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument("-f", "--file",
help="Specify an alternate compose file (default: docker-compose.yml)",
type=str, default="docker-compose.yml")
parser.add_argument("-p", "--project-name",
help="Specify an alternate project name (default: directory name)",
type=str, default=None)
parser.add_argument("--podman-path",
help="Specify an alternate path to podman (default: use location in $PATH variable)",
type=str, default="podman")
parser.add_argument("--no-ansi",
help="Do not print ANSI control characters", action='store_true')
parser.add_argument("--no-cleanup",
help="Do not stop and remove existing pod & containers", action='store_true')
parser.add_argument("--dry-run",
help="No action; perform a simulation of commands", action='store_true')
parser.add_argument("-t", "--transform_policy",
        help="how to translate docker compose to podman [1pod|1podfw|hostnet|cntnet|publishall|identity]",
choices=['1pod', '1podfw', 'hostnet', 'cntnet', 'publishall', 'identity'], default='1podfw')
args = parser.parse_args()
run_compose(
cmd=args.command,
cmd_args=args.args,
filename=args.file,
project_name=args.project_name,
no_ansi=args.no_ansi,
no_cleanup=args.no_cleanup,
dry_run=args.dry_run,
transform_policy=args.transform_policy,
podman_path=args.podman_path
)
if __name__ == "__main__":
main()