13 commits, 13 changed files with 675 additions and 34 deletions


@@ -35,7 +35,7 @@ OpenShift/Kubernetes distribution like [OKD](https://www.okd.io/).
## Versions
If you have a legacy version of `podman` (before 3.x) you might need to stick with the legacy `podman-compose` `0.1.x` branch.
If you have a legacy version of `podman` (before 3.1.0) you might need to stick with the legacy `podman-compose` `0.1.x` branch.
The legacy 0.1.x branch uses mappings and workarounds to compensate for rootless limitations.
Modern podman versions (>=3.4) do not have those limitations, so you can use the latest stable 1.x branch.
@@ -100,7 +100,7 @@ which have
When testing the `AWX3` example, if you get errors, just wait for the DB migrations to finish.
There is also an AWX 17.1.0 example.
## Tests

examples/awx17/README.md (new file, 37 lines)

@@ -0,0 +1,37 @@
# AWX Compose
The `roles` directory is taken from [here](https://github.com/ansible/awx/tree/17.1.0/installer/roles/local_docker).
Also see https://github.com/ansible/awx/tree/17.1.0/tools/docker-compose
```
mkdir deploy awx17
ansible localhost \
-e host_port=8080 \
-e awx_secret_key='awx,secret.123' \
-e secret_key='awx,secret.123' \
-e admin_user='admin' \
-e admin_password='admin' \
-e pg_password='awx,123.' \
-e pg_username='awx' \
-e pg_database='awx' \
-e pg_port='5432' \
-e redis_image="docker.io/library/redis:6-alpine" \
-e postgres_data_dir="./data/pg" \
-e compose_start_containers=false \
-e dockerhub_base='docker.io/ansible' \
-e awx_image='docker.io/ansible/awx' \
-e awx_version='17.1.0' \
-e dockerhub_version='17.1.0' \
-e docker_deploy_base_path=$PWD/deploy \
-e docker_compose_dir=$PWD/awx17 \
-e awx_task_hostname=awx \
-e awx_web_hostname=awxweb \
-m include_role -a name=local_docker
cp awx17/docker-compose.yml awx17/docker-compose.yml.orig
sed -i -re "s#- \"$PWD/awx17/(.*):/#- \"./\1:/#" awx17/docker-compose.yml
cd awx17
podman-compose run --rm --service-ports task awx-manage migrate --no-input
podman-compose up -d
```
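The `sed` line above rewrites the absolute `$PWD/awx17/...` bind-mount paths that the role renders into relative `./` paths, so the generated compose file stays portable. A minimal sketch of the same substitution in Python (the `/home/user` path is only a placeholder for `$PWD`):

```python
import re

# hypothetical volume line as rendered by the role when the playbook ran with $PWD=/home/user
line = '      - "/home/user/awx17/redis.conf:/usr/local/etc/redis/redis.conf:ro"'

# same rewrite the sed above performs: absolute $PWD/awx17/ paths become relative ./ paths
print(re.sub(r'- "/home/user/awx17/(.*):/', r'- "./\1:/', line))
#       - "./redis.conf:/usr/local/etc/redis/redis.conf:ro"
```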


@@ -0,0 +1,11 @@
---
dockerhub_version: "{{ lookup('file', playbook_dir + '/../VERSION') }}"
awx_image: "awx"
redis_image: "redis"
postgresql_version: "12"
postgresql_image: "postgres:{{postgresql_version}}"
compose_start_containers: true
upgrade_postgres: false


@@ -0,0 +1,74 @@
---
- name: Create {{ docker_compose_dir }} directory
file:
path: "{{ docker_compose_dir }}"
state: directory
- name: Create Redis socket directory
file:
path: "{{ docker_compose_dir }}/redis_socket"
state: directory
mode: 0777
- name: Create Docker Compose Configuration
template:
src: "{{ item.file }}.j2"
dest: "{{ docker_compose_dir }}/{{ item.file }}"
mode: "{{ item.mode }}"
loop:
- file: environment.sh
mode: "0600"
- file: credentials.py
mode: "0600"
- file: docker-compose.yml
mode: "0600"
- file: nginx.conf
mode: "0600"
- file: redis.conf
mode: "0664"
register: awx_compose_config
- name: Render SECRET_KEY file
copy:
content: "{{ secret_key }}"
dest: "{{ docker_compose_dir }}/SECRET_KEY"
mode: 0600
register: awx_secret_key
- block:
- name: Remove AWX containers before migrating postgres so that the old postgres container does not get used
docker_compose:
project_src: "{{ docker_compose_dir }}"
state: absent
ignore_errors: true
- name: Run migrations in task container
shell: docker-compose run --rm --service-ports task awx-manage migrate --no-input
args:
chdir: "{{ docker_compose_dir }}"
- name: Start the containers
docker_compose:
project_src: "{{ docker_compose_dir }}"
restarted: "{{ awx_compose_config is changed or awx_secret_key is changed }}"
register: awx_compose_start
- name: Update CA trust in awx_web container
command: docker exec awx_web '/usr/bin/update-ca-trust'
when: awx_compose_config.changed or awx_compose_start.changed
- name: Update CA trust in awx_task container
command: docker exec awx_task '/usr/bin/update-ca-trust'
when: awx_compose_config.changed or awx_compose_start.changed
- name: Wait for launch script to create user
wait_for:
timeout: 10
delegate_to: localhost
- name: Create Preload data
command: docker exec awx_task bash -c "/usr/bin/awx-manage create_preload_data"
when: create_preload_data|bool
register: cdo
changed_when: "'added' in cdo.stdout"
when: compose_start_containers|bool


@@ -0,0 +1,15 @@
---
- name: Generate broadcast websocket secret
set_fact:
broadcast_websocket_secret: "{{ lookup('password', '/dev/null length=128') }}"
run_once: true
no_log: true
when: broadcast_websocket_secret is not defined
- import_tasks: upgrade_postgres.yml
when:
- postgres_data_dir is defined
- pg_hostname is not defined
- import_tasks: set_image.yml
- import_tasks: compose.yml


@@ -0,0 +1,46 @@
---
- name: Manage AWX Container Images
block:
- name: Export Docker awx image if it isn't local and there isn't a registry defined
docker_image:
name: "{{ awx_image }}"
tag: "{{ awx_version }}"
archive_path: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar"
when: inventory_hostname != "localhost" and docker_registry is not defined
delegate_to: localhost
- name: Set docker base path
set_fact:
docker_deploy_base_path: "{{ awx_base_path|default('/tmp') }}/docker_deploy"
when: ansible_connection != "local" and docker_registry is not defined
- name: Ensure directory exists
file:
path: "{{ docker_deploy_base_path }}"
state: directory
when: ansible_connection != "local" and docker_registry is not defined
- name: Copy awx image to docker execution
copy:
src: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar"
dest: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar"
when: ansible_connection != "local" and docker_registry is not defined
- name: Load awx image
docker_image:
name: "{{ awx_image }}"
tag: "{{ awx_version }}"
load_path: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar"
timeout: 300
when: ansible_connection != "local" and docker_registry is not defined
- name: Set full image path for local install
set_fact:
awx_docker_actual_image: "{{ awx_image }}:{{ awx_version }}"
when: docker_registry is not defined
when: dockerhub_base is not defined
- name: Set DockerHub Image Paths
set_fact:
awx_docker_actual_image: "{{ dockerhub_base }}/awx:{{ dockerhub_version }}"
when: dockerhub_base is defined


@@ -0,0 +1,64 @@
---
- name: Create {{ postgres_data_dir }} directory
file:
path: "{{ postgres_data_dir }}"
state: directory
- name: Get full path of postgres data dir
shell: "echo {{ postgres_data_dir }}"
register: fq_postgres_data_dir
- name: Register temporary docker container
set_fact:
container_command: "docker run --rm -v '{{ fq_postgres_data_dir.stdout }}:/var/lib/postgresql' centos:8 bash -c "
- name: Check for existing Postgres data (run from inside the container for access to file)
shell:
cmd: |
{{ container_command }} "[[ -f /var/lib/postgresql/10/data/PG_VERSION ]] && echo 'exists'"
register: pg_version_file
ignore_errors: true
- name: Record Postgres version
shell: |
{{ container_command }} "cat /var/lib/postgresql/10/data/PG_VERSION"
register: old_pg_version
when: pg_version_file is defined and pg_version_file.stdout == 'exists'
- name: Determine whether to upgrade postgres
set_fact:
upgrade_postgres: "{{ old_pg_version.stdout == '10' }}"
when: old_pg_version.changed
- name: Set up new postgres paths pre-upgrade
shell: |
{{ container_command }} "mkdir -p /var/lib/postgresql/12/data/"
when: upgrade_postgres | bool
- name: Stop AWX before upgrading postgres
docker_compose:
project_src: "{{ docker_compose_dir }}"
stopped: true
when: upgrade_postgres | bool
- name: Upgrade Postgres
shell: |
docker run --rm \
-v {{ postgres_data_dir }}/10/data:/var/lib/postgresql/10/data \
-v {{ postgres_data_dir }}/12/data:/var/lib/postgresql/12/data \
-e PGUSER={{ pg_username }} -e POSTGRES_INITDB_ARGS="-U {{ pg_username }}" \
tianon/postgres-upgrade:10-to-12 --username={{ pg_username }}
when: upgrade_postgres | bool
- name: Copy old pg_hba.conf
shell: |
{{ container_command }} "cp /var/lib/postgresql/10/data/pg_hba.conf /var/lib/postgresql/12/data/pg_hba.conf"
when: upgrade_postgres | bool
- name: Remove old data directory
shell: |
{{ container_command }} "rm -rf /var/lib/postgresql/10/data"
when:
- upgrade_postgres | bool
- compose_start_containers|bool


@@ -0,0 +1,13 @@
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'django.db.backends.postgresql',
'NAME': "{{ pg_database }}",
'USER': "{{ pg_username }}",
'PASSWORD': "{{ pg_password }}",
'HOST': "{{ pg_hostname | default('postgres') }}",
'PORT': "{{ pg_port }}",
}
}
BROADCAST_WEBSOCKET_SECRET = "{{ broadcast_websocket_secret | b64encode }}"


@@ -0,0 +1,208 @@
#jinja2: lstrip_blocks: True
version: '2'
services:
web:
image: {{ awx_docker_actual_image }}
container_name: awx_web
depends_on:
- redis
{% if pg_hostname is not defined %}
- postgres
{% endif %}
{% if (host_port is defined) or (host_port_ssl is defined) %}
ports:
{% if (host_port_ssl is defined) and (ssl_certificate is defined) %}
- "{{ host_port_ssl }}:8053"
{% endif %}
{% if host_port is defined %}
- "{{ host_port }}:8052"
{% endif %}
{% endif %}
hostname: {{ awx_web_hostname }}
user: root
restart: unless-stopped
{% if (awx_web_container_labels is defined) and (',' in awx_web_container_labels) %}
{% set awx_web_container_labels_list = awx_web_container_labels.split(',') %}
labels:
{% for awx_web_container_label in awx_web_container_labels_list %}
- {{ awx_web_container_label }}
{% endfor %}
{% elif awx_web_container_labels is defined %}
labels:
- {{ awx_web_container_labels }}
{% endif %}
volumes:
- supervisor-socket:/var/run/supervisor
- rsyslog-socket:/var/run/awx-rsyslog/
- rsyslog-config:/var/lib/awx/rsyslog/
- "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY"
- "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh"
- "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py"
- "{{ docker_compose_dir }}/nginx.conf:/etc/nginx/nginx.conf:ro"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if project_data_dir is defined %}
- "{{ project_data_dir +':/var/lib/awx/projects:rw' }}"
{% endif %}
{% if custom_venv_dir is defined %}
- "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}"
{% endif %}
{% if ca_trust_dir is defined %}
- "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}"
{% endif %}
{% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
- "{{ ssl_certificate_key +':/etc/nginx/awxweb_key.pem:ro' }}"
{% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
{% endif %}
{% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %}
{% set awx_container_search_domains_list = awx_container_search_domains.split(',') %}
dns_search:
{% for awx_container_search_domain in awx_container_search_domains_list %}
- {{ awx_container_search_domain }}
{% endfor %}
{% elif awx_container_search_domains is defined %}
dns_search: "{{ awx_container_search_domains }}"
{% endif %}
{% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %}
{% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %}
dns:
{% for awx_alternate_dns_server in awx_alternate_dns_servers_list %}
- {{ awx_alternate_dns_server }}
{% endfor %}
{% elif awx_alternate_dns_servers is defined %}
dns: "{{ awx_alternate_dns_servers }}"
{% endif %}
{% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %}
{% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %}
extra_hosts:
{% for docker_compose_extra_host in docker_compose_extra_hosts_list %}
- "{{ docker_compose_extra_host }}"
{% endfor %}
{% endif %}
environment:
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
task:
image: {{ awx_docker_actual_image }}
container_name: awx_task
depends_on:
- redis
- web
{% if pg_hostname is not defined %}
- postgres
{% endif %}
command: /usr/bin/launch_awx_task.sh
hostname: {{ awx_task_hostname }}
user: root
restart: unless-stopped
volumes:
- supervisor-socket:/var/run/supervisor
- rsyslog-socket:/var/run/awx-rsyslog/
- rsyslog-config:/var/lib/awx/rsyslog/
- "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY"
- "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh"
- "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if project_data_dir is defined %}
- "{{ project_data_dir +':/var/lib/awx/projects:rw' }}"
{% endif %}
{% if custom_venv_dir is defined %}
- "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}"
{% endif %}
{% if ca_trust_dir is defined %}
- "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}"
{% endif %}
{% if ssl_certificate is defined %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
{% endif %}
{% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %}
{% set awx_container_search_domains_list = awx_container_search_domains.split(',') %}
dns_search:
{% for awx_container_search_domain in awx_container_search_domains_list %}
- {{ awx_container_search_domain }}
{% endfor %}
{% elif awx_container_search_domains is defined %}
dns_search: "{{ awx_container_search_domains }}"
{% endif %}
{% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %}
{% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %}
dns:
{% for awx_alternate_dns_server in awx_alternate_dns_servers_list %}
- {{ awx_alternate_dns_server }}
{% endfor %}
{% elif awx_alternate_dns_servers is defined %}
dns: "{{ awx_alternate_dns_servers }}"
{% endif %}
{% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %}
{% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %}
extra_hosts:
{% for docker_compose_extra_host in docker_compose_extra_hosts_list %}
- "{{ docker_compose_extra_host }}"
{% endfor %}
{% endif %}
environment:
AWX_SKIP_MIGRATIONS: "1"
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
SUPERVISOR_WEB_CONFIG_PATH: '/etc/supervisord.conf'
redis:
image: {{ redis_image }}
container_name: awx_redis
restart: unless-stopped
environment:
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
command: ["/usr/local/etc/redis/redis.conf"]
volumes:
- "{{ docker_compose_dir }}/redis.conf:/usr/local/etc/redis/redis.conf:ro"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
{% if pg_hostname is not defined %}
postgres:
image: {{ postgresql_image }}
container_name: awx_postgres
restart: unless-stopped
volumes:
- "{{ postgres_data_dir }}/12/data/:/var/lib/postgresql/data:Z"
environment:
POSTGRES_USER: {{ pg_username }}
POSTGRES_PASSWORD: {{ pg_password }}
POSTGRES_DB: {{ pg_database }}
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
{% endif %}
{% if docker_compose_subnet is defined %}
networks:
default:
driver: bridge
ipam:
driver: default
config:
- subnet: {{ docker_compose_subnet }}
{% endif %}
volumes:
supervisor-socket:
rsyslog-socket:
rsyslog-config:


@@ -0,0 +1,10 @@
DATABASE_USER={{ pg_username|quote }}
DATABASE_NAME={{ pg_database|quote }}
DATABASE_HOST={{ pg_hostname|default('postgres')|quote }}
DATABASE_PORT={{ pg_port|default('5432')|quote }}
DATABASE_PASSWORD={{ pg_password|default('awxpass')|quote }}
{% if pg_admin_password is defined %}
DATABASE_ADMIN_PASSWORD={{ pg_admin_password|quote }}
{% endif %}
AWX_ADMIN_USER={{ admin_user|quote }}
AWX_ADMIN_PASSWORD={{ admin_password|quote }}


@@ -0,0 +1,122 @@
#user awx;
worker_processes 1;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
sendfile on;
#tcp_nopush on;
#gzip on;
upstream uwsgi {
server 127.0.0.1:8050;
}
upstream daphne {
server 127.0.0.1:8051;
}
{% if ssl_certificate is defined %}
server {
listen 8052 default_server;
server_name _;
# Redirect all HTTP links to the matching HTTPS page
return 301 https://$host$request_uri;
}
{%endif %}
server {
{% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb_key.pem;
{% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb.pem;
{% else %}
listen 8052 default_server;
{% endif %}
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
# Protect against click-jacking https://www.owasp.org/index.php/Testing_for_Clickjacking_(OTG-CLIENT-009)
add_header X-Frame-Options "DENY";
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
location /static/ {
alias /var/lib/awx/public/static/;
}
location /favicon.ico { alias /var/lib/awx/public/static/favicon.ico; }
location /websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
{%- if extra_nginx_include is defined %}
include {{ extra_nginx_include }};
{%- endif %}
proxy_set_header X-Forwarded-Port 443;
uwsgi_param HTTP_X_FORWARDED_PORT 443;
}
}
}


@@ -0,0 +1,4 @@
unixsocket /var/run/redis/redis.sock
unixsocketperm 660
port 0
bind 127.0.0.1


@@ -35,7 +35,7 @@ except ImportError:
import yaml
from dotenv import dotenv_values
__version__ = '1.0.2'
__version__ = '1.0.3'
PY3 = sys.version_info[0] == 3
if PY3:
@@ -68,7 +68,7 @@ def try_float(i, fallback=None):
return fallback
dir_re = re.compile("^[~/\.]")
propagation_re = re.compile("^(?:z|Z|r?shared|r?slave|r?private)$")
propagation_re = re.compile("^(?:z|Z|O|U|r?shared|r?slave|r?private|r?unbindable|r?bind|(?:no)?(?:exec|dev|suid))$")
norm_re = re.compile('[^-_a-z0-9]')
num_split_re = re.compile(r'(\d+|\D+)')
@@ -378,11 +378,13 @@ def mount_desc_to_volume_args(compose, mount_desc, srv_name, cnt_name):
# --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]]
# [rw|ro]
# [z|Z]
# [[r]shared|[r]slave|[r]private]
# [[r]shared|[r]slave|[r]private]|[r]unbindable
# [[r]bind]
# [noexec|exec]
# [nodev|dev]
# [nosuid|suid]
# [O]
# [U]
read_only = mount_desc.get("read_only", None)
if read_only is not None:
opts.append('ro' if read_only else 'rw')
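The widened `propagation_re` above is what lets a compose volume spec carry the extra options podman accepts in `-v SRC:DST:OPTIONS` (SELinux relabeling `z`/`Z`, overlay `O`, chown `U`, bind/propagation flags, and the `exec`/`dev`/`suid` toggles). A quick, illustrative check of the new pattern:

```python
import re

# copy of the updated pattern from the hunk above
propagation_re = re.compile(
    "^(?:z|Z|O|U|r?shared|r?slave|r?private|r?unbindable|r?bind|(?:no)?(?:exec|dev|suid))$")

for opt in ["Z", "U", "O", "rbind", "unbindable", "noexec", "dev", "ro"]:
    print(opt, bool(propagation_re.match(opt)))
# everything above matches except "ro", which the code derives from read_only instead
```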
@@ -554,10 +556,14 @@ def assert_cnt_nets(compose, cnt):
cnt_nets = norm_as_list(cnt.get("networks", None) or default_net)
for net in cnt_nets:
net_desc = nets[net] or {}
net_name = net_desc.get("name", None) or f"{proj_name}_{net}"
print(f"podman network exists '{net_name}' || podman network create '{net_name}'")
is_ext = net_desc.get("external", None)
ext_desc = is_ext if is_dict(is_ext) else {}
default_net_name = net if is_ext else f"{proj_name}_{net}"
net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
try: compose.podman.output([], "network", ["exists", net_name])
except subprocess.CalledProcessError:
if is_ext:
raise RuntimeError(f"External network [{net_name}] does not exist")
args = [
"create",
"--label", "io.podman.compose.project={}".format(proj_name),
@@ -567,20 +573,25 @@ def assert_cnt_nets(compose, cnt):
labels = net_desc.get("labels", None) or []
for item in norm_as_list(labels):
args.extend(["--label", item])
if net_desc.get("internal", None):
args.append("--internal")
args.append(net_name)
compose.podman.output([], "network", args)
compose.podman.output([], "network", ["exists", net_name])
def get_net_args(compose, cnt):
service_name = cnt["service_name"]
project_name = compose.project_name
proj_name = compose.project_name
default_net = compose.default_net
nets = compose.networks
cnt_nets = norm_as_list(cnt.get("networks", None) or default_net)
net_names = set()
for net in cnt_nets:
net_desc = nets[net] or {}
net_name = net_desc.get("name", None) or f"{project_name}_{net}"
is_ext = net_desc.get("external", None)
ext_desc = is_ext if is_dict(is_ext) else {}
default_net_name = net if is_ext else f"{proj_name}_{net}"
net_name = ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name
net_names.add(net_name)
net_names_str = ",".join(net_names)
return ["--net", net_names_str, "--network-alias", service_name]
@@ -830,11 +841,15 @@ class Podman:
return p
def volume_inspect_all(self):
output = self.output(["volume", "inspect", "--all"]).decode('utf-8')
output = self.output([], "volume", ["inspect", "--all"]).decode('utf-8')
return json.loads(output)
def volume_rm(self, name):
return self.run(["volume", "rm", name])
def volume_inspect_proj(self, proj=None):
if not proj:
proj = self.compose.project_name
volumes = [(vol.get("Labels", {}), vol) for vol in self.volume_inspect_all()]
volumes = [(labels.get("io.podman.compose.project", None), vol) for labels, vol in volumes]
return [vol for vol_proj, vol in volumes if vol_proj==proj]
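`volume_inspect_proj()` filters the inspected volumes down to those labelled with this project's `io.podman.compose.project` value, which is what `down --volumes` now relies on. A toy illustration of the same filtering over made-up inspect data:

```python
# sample data shaped like podman volume inspect output (not real output)
volumes = [
    {"Name": "proj_db",   "Labels": {"io.podman.compose.project": "proj"}},
    {"Name": "unrelated", "Labels": {}},
]
proj = "proj"
print([v["Name"] for v in volumes
       if v.get("Labels", {}).get("io.podman.compose.project") == proj])  # ['proj_db']
```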
def normalize_service(service):
for key in ("env_file", "security_opt", "volumes"):
@@ -1386,7 +1401,8 @@ def compose_up(compose, args):
# TODO: implement check hash label for change
if args.force_recreate:
compose.commands['down'](compose, args)
down_args = argparse.Namespace(**dict(args.__dict__, volumes=False))
compose.commands['down'](compose, down_args)
# args.no_recreate disables check for changes (which is not implemented)
podman_command = 'run' if args.detach and not args.no_start else 'create'
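With `--force-recreate`, `up` now hands `down` a copy of the parsed arguments in which `volumes` is forced to `False`, presumably so the implicit teardown never removes named volumes and does not depend on whether the `up` namespace defines that flag. The copy idiom in isolation, with example attribute names:

```python
import argparse

# copy the parsed arguments but force volumes=False for the implicit `down`
args = argparse.Namespace(detach=True, volumes=True)
down_args = argparse.Namespace(**dict(args.__dict__, volumes=False))
print(args.volumes, down_args.volumes)  # True False -- the original namespace is untouched
```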
@@ -1452,11 +1468,9 @@ def compose_down(compose, args):
for pod in compose.pods:
compose.podman.run([], "pod", ["rm", pod["name"]], sleep=0)
if args.volumes:
volumes = compose.podman.volume_inspect_all()
for volume in volumes:
project = volume.get("Labels", {}).get("io.podman.compose.project")
if project == compose.project_name:
compose.podman.volume_rm(volume["Name"])
volume_names = [vol["Name"] for vol in compose.podman.volume_inspect_proj()]
for volume_name in volume_names:
compose.podman.run([], "volume", ["rm", volume_name])
@cmd_run(podman_compose, 'ps', 'show status of containers')
def compose_ps(compose, args):
@@ -1471,7 +1485,7 @@ def compose_run(compose, args):
create_pods(compose, args)
container_names=compose.container_names_by_service[args.service]
container_name=container_names[0]
cnt = compose.container_by_name[container_name]
cnt = dict(compose.container_by_name[container_name])
deps = cnt["_deps"]
if not args.no_deps:
up_args = argparse.Namespace(**dict(args.__dict__,
@@ -1502,6 +1516,9 @@ def compose_run(compose, args):
cnt['tty']=False if args.T else True
if args.cnt_command is not None and len(args.cnt_command) > 0:
cnt['command']=args.cnt_command
# can't restart and --rm
if args.rm and 'restart' in cnt:
del cnt['restart']
# run podman
podman_args = container_to_args(compose, cnt, args.detach)
if not args.detach:
@@ -1535,11 +1552,15 @@ def compose_exec(compose, args):
def transfer_service_status(compose, args, action):
# TODO: handle dependencies, handle creations
container_names_by_service = compose.container_names_by_service
if not args.services:
args.services = container_names_by_service.keys()
targets = []
for service in args.services:
if service not in container_names_by_service:
raise ValueError("unknown service: " + service)
targets.extend(container_names_by_service[service])
if action in ['stop', 'restart']:
targets = list(reversed(targets))
podman_args=[]
timeout=getattr(args, 'timeout', None)
if timeout is not None:
@@ -1562,20 +1583,33 @@ def compose_restart(compose, args):
@cmd_run(podman_compose, 'logs', 'show logs from services')
def compose_logs(compose, args):
container_names_by_service = compose.container_names_by_service
target = None
if args.service not in container_names_by_service:
raise ValueError("unknown service: " + args.service)
target = container_names_by_service[args.service]
if not args.services and not args.latest:
args.services = container_names_by_service.keys()
targets = []
for service in args.services:
if service not in container_names_by_service:
raise ValueError("unknown service: " + service)
targets.extend(container_names_by_service[service])
podman_args = []
if args.follow:
podman_args.append('-f')
if args.latest:
podman_args.append("-l")
if args.names:
podman_args.append('-n')
if args.since:
podman_args.extend(['--since', args.since])
# the default is to print all logs, which in podman is 0 and does not need to be passed
if args.tail and args.tail != 'all':
podman_args.extend(['--tail', args.tail])
if args.timestamps:
podman_args.append('-t')
compose.podman.run([], 'logs', podman_args+target)
if args.until:
podman_args.extend(['--until', args.until])
for target in targets:
podman_args.append(target)
compose.podman.run([], 'logs', podman_args)
###################
# command arguments parsing
@@ -1683,23 +1717,26 @@ def compose_parse_timeout(parser):
help="Specify a shutdown timeout in seconds. ",
type=int, default=10)
@cmd_parse(podman_compose, ['start', 'stop', 'restart'])
def compose_parse_services(parser):
parser.add_argument('services', metavar='services', nargs='+',
help='affected services')
@cmd_parse(podman_compose, ['logs'])
def compose_logs_parse(parser):
parser.add_argument("-f", "--follow", action='store_true',
help="Follow log output.")
help="Follow log output. The default is false")
parser.add_argument("-l", "--latest", action='store_true',
help="Act on the latest container podman is aware of")
parser.add_argument("-n", "--names", action='store_true',
help="Output the container name in the log")
parser.add_argument("--since", help="Show logs since TIMESTAMP",
type=str, default=None)
parser.add_argument("-t", "--timestamps", action='store_true',
help="Show timestamps.")
parser.add_argument("--tail",
help="Number of lines to show from the end of the logs for each "
"container.",
type=str, default="all")
parser.add_argument('service', metavar='service', nargs=None,
help='service name')
parser.add_argument("--until", help="Show logs until TIMESTAMP",
type=str, default=None)
parser.add_argument('services', metavar='services', nargs='*', default=None,
help='service names')
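`logs` now takes any number of service names (none means every service, unless `--latest` is given) and forwards the additional podman flags such as `--since`, `--until` and `--tail`. A standalone argparse sketch of the new `services` handling (not the real parser, just the relevant options):

```python
import argparse

parser = argparse.ArgumentParser(prog="podman-compose logs")
parser.add_argument("-f", "--follow", action="store_true")
parser.add_argument("--tail", type=str, default="all")
parser.add_argument("services", metavar="services", nargs="*", default=None)

args = parser.parse_args(["-f", "--tail", "10", "web", "task"])
print(args.follow, args.tail, args.services)  # True 10 ['web', 'task']
```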
@cmd_parse(podman_compose, 'pull')
def compose_pull_parse(parser):
@@ -1729,7 +1766,7 @@ def compose_build_parse(parser):
parser.add_argument("--no-cache",
help="Do not use cache when building the image.", action='store_true')
@cmd_parse(podman_compose, ['build', 'up', 'down'])
@cmd_parse(podman_compose, ['build', 'up', 'down', 'start', 'stop', 'restart'])
def compose_build_parse(parser):
parser.add_argument('services', metavar='services', nargs='*',default=None,
help='affected services')