11 Commits

Author SHA1 Message Date
e9b103eb23 release v0.1.10 2021-12-11 02:02:59 +02:00
bbaa786739 Fixes #199: seccomp:unconfined 2021-12-11 02:02:09 +02:00
d1d0f9e452 FIXES #371: respect COMPOSE_FILE env 2021-12-11 02:02:00 +02:00
d8dba61e08 FIXES #185: creates dirs 2021-12-11 02:01:44 +02:00
3343910763 resolve 2021-12-11 02:01:19 +02:00
34ec4b3cb9 #222: normalize basedir using os.path.realpath 2021-12-11 01:54:22 +02:00
f4a78ae812 FIXES #333: when volumes are merged, remove duplicates 2021-12-11 01:54:09 +02:00
00b9ce1ee4 FIXES #368: parse depends_on of type dict 2021-12-11 01:53:27 +02:00
749d188321 fix AttributeError when running a one-off command
Without this, I get errors when running "podman-compose -p podname run".
2021-12-04 00:47:09 +02:00
e879529976 Remove named volumes during "down -v"
Fixes containers#105

Signed-off-by: Luiz Carvalho <lucarval@redhat.com>
2021-11-23 08:02:10 +02:00
1555417958 FIXES #361: key error _service 2021-11-21 15:55:19 +02:00
77 changed files with 948 additions and 4040 deletions

View File

@ -35,7 +35,7 @@ What is the behavior you actually got and that should not happen.
```
$ podman-compose version
using podman version: 3.4.0
podman-compose version 0.1.7dev
podman-composer version 0.1.7dev
podman --version
podman version 3.4.0

View File

@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

View File

@ -1,41 +0,0 @@
name: Pylint
on:
- push
- pull_request
jobs:
lint-black:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install psf/black requirements
run: |
apt-get update
apt-get install -y python3 python3-venv
- uses: psf/black@stable
with:
options: "--check --verbose"
version: "~= 23.3"
lint-pylint:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
pip install pylint
- name: Analysing the code with pylint
run: |
python -m compileall podman_compose.py
pylint podman_compose.py
# pylint $(git ls-files '*.py')

View File

@ -1,36 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: PyTest
on:
push:
branches: [ devel ]
pull_request:
branches: [ devel ]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
python -m pytest ./pytests

View File

@ -1,29 +0,0 @@
repos:
- repo: https://github.com/psf/black
rev: 23.3.0
hooks:
- id: black
# It is recommended to specify the latest version of Python
# supported by your project here, or alternatively use
# pre-commit's default_language_version, see
# https://pre-commit.com/#top_level-default_language_version
language_version: python3.10
types: [python]
- repo: https://github.com/pycqa/flake8
rev: 6.0.0
hooks:
- id: flake8
types: [python]
- repo: local
hooks:
- id: pylint
name: pylint
entry: pylint
language: system
types: [python]
args:
[
"-rn", # Only display messages
"-sn", # Don't display the score
"--rcfile=.pylintrc", # Link to your config file
]

View File

@ -1,18 +1,13 @@
[MESSAGES CONTROL]
# C0111 missing-docstring: missing-class-docstring, missing-function-docstring, missing-method-docstring, missing-module-docstring
# consider-using-with: we need it for color formatter pipe
disable=too-many-lines,too-many-branches,too-many-locals,too-many-statements,too-many-arguments,too-many-instance-attributes,fixme,multiple-statements,missing-docstring,line-too-long,consider-using-f-string,consider-using-with,unnecessary-lambda-assignment
disable=W0614,C0410,C0321,C0111,I0011,C0103
# allow _ for ignored variables
# allow generic names like a,b,c and i,j,k,l,m,n and x,y,z
# allow k,v for key/value
# allow e for exceptions, it for iterator, ix for index
# allow ip for ip address
# allow e for exceptions, it for iterator
# allow w,h for width, height
# allow op for operation/operator/opcode
# allow t, t0, t1, t2, and t3 for time
# allow dt for delta time
# allow db for database
# allow ls for list
# allow p for pipe
# allow ex for examples, exists ..etc
good-names=_,a,b,c,dt,db,e,f,fn,fd,i,j,k,v,kv,kw,l,m,n,ls,t,t0,t1,t2,t3,w,h,x,y,z,it,ix,ip,op,p,ex
good-names=_,a,b,c,dt,db,e,f,fn,fd,i,j,k,v,kv,kw,l,m,n,ls,t,t0,t1,t2,t3,w,h,x,y,z,it,op

View File

@ -1,49 +1,5 @@
# Contributing to podman-compose
## Who can contribute?
- Users that found a bug
- Users that want to propose new functionalities or enhancements
- Users that want to help other users to troubleshoot their environments
- Developers that want to fix bugs
- Developers that want to implement new functionalities or enhancements
## Branches
Please request your PR to be merged into the `devel` branch.
Changes to the `stable` branch are managed by the repository maintainers.
## Development environment setup
Note: Some steps are OPTIONAL but all are RECOMMENDED.
1. Fork the project repo and clone it
```shell
$ git clone https://github.com/USERNAME/podman-compose.git
$ cd podman-compose
```
2. (OPTIONAL) Create a python virtual environment. Example using [virtualenv wrapper](https://virtualenvwrapper.readthedocs.io/en/latest/):
```shell
mkvirtualenv podman-compose
```
3. Install the project runtime and development requirements
```shell
$ pip install '.[devel]'
```
4. (OPTIONAL) Install `pre-commit` git hook scripts (https://pre-commit.com/#3-install-the-git-hook-scripts)
```shell
$ pre-commit install
```
5. Create a new branch, develop and add tests when possible
6. Run linting & testing before committing code. Ensure all the hooks are passing.
```shell
$ pre-commit run --all-files
```
7. Commit your code to your fork's branch.
- Make sure you include a `Signed-off-by` message in your commits. Read [this guide](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) to learn how to sign your commits
- In the commit message reference the Issue ID that your code fixes and a brief description of the changes. Example: `Fixes #516: allow empty network`
8. Open a PR to `containers/podman-compose:devel` and wait for a maintainer to review your work.
## Adding new commands
To add a command you need to add a function that is decorated
@ -104,11 +60,15 @@ def compose_up(compose, args):
create Create services
events Receive real time events from containers
images List images
kill Kill containers
logs View output from containers
pause Pause services
port Print the public port for a port binding
ps List containers
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
top Display the running processes
unpause Unpause services
version Show the Docker-Compose version information
```
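The quickest way to see the full registration pattern is to look at how the existing subcommands are declared in `podman_compose.py`. A minimal sketch, assuming the decorator is named `cmd_run` as in the current source tree:

```shell
# show existing @cmd_run registrations to copy as a template for a new command
grep -n -B1 -A2 "@cmd_run" podman_compose.py | head -n 30
# after adding your decorated function, confirm the new command shows up
python3 podman_compose.py --help
```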

View File

@ -1,55 +1,44 @@
# Podman Compose
## [![Pylint Test: ](https://github.com/containers/podman-compose/actions/workflows/pylint.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pylint.yml) [![Unit tests PyTest](https://github.com/containers/podman-compose/actions/workflows/pytest.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pytest.yml)
An implementation of `docker-compose` with [Podman](https://podman.io/) backend.
The main objective of this project is to be able to run `docker-compose.yml` unmodified and rootless.
This project is aimed to provide drop-in replacement for `docker-compose`,
and it's very useful for certain cases because:
An implementation of [Compose Spec](https://compose-spec.io/) with [Podman](https://podman.io/) backend.
This project focuses on:
- can run rootless
- no daemon, no setup.
- can be used by developers to run single-machine containerized stacks using single familiar YAML file
* rootless
* daemon-less process model, we directly execute podman, no running daemon.
This project only depends on:
This project only depend on:
* `podman`
* [podman dnsname plugin](https://github.com/containers/dnsname): usually found in the `podman-plugins` or `podman-dnsname` distro packages; those packages are not pulled in by default, so you need to install them (see the install sketch below). This allows containers on the same CNI network to resolve each other.
* Python3
* [PyYAML](https://pyyaml.org/)
* [python-dotenv](https://pypi.org/project/python-dotenv/)
And it's formed as a single Python file script that you can drop into your PATH and run.
And it's formed as a single python file script that you can drop into your PATH and run.
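The dnsname plugin listed above ships as a separate distro package; a minimal install sketch, assuming a Fedora-style system (the package names come from the note above and may differ elsewhere):

```shell
sudo dnf install podman-plugins    # provides the dnsname CNI plugin on Fedora
# on some distributions the package is named podman-dnsname instead
```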
## References:
* [spec.md](https://github.com/compose-spec/compose-spec/blob/master/spec.md)
* [docker-compose compose-file-v3](https://docs.docker.com/compose/compose-file/compose-file-v3/)
* [docker-compose compose-file-v2](https://docs.docker.com/compose/compose-file/compose-file-v2/)
## Alternatives
As in [this article](https://fedoramagazine.org/use-docker-compose-with-podman-to-orchestrate-containers-on-fedora/) you can setup a `podman.socket` and use unmodified `docker-compose` that talks to that socket but in this case you lose the process-model (ex. `docker-compose build` will send a possibly large context tarball to the daemon)
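For reference, a rough sketch of that socket-based alternative in a rootless setup (standard systemd user-session paths):

```shell
systemctl --user enable --now podman.socket
export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
docker-compose up -d    # unmodified docker-compose now talks to the Podman socket
```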
For production-like single-machine containerized environment consider
- [k3s](https://k3s.io) | [k3s github](https://github.com/rancher/k3s)
- [MiniKube](https://minikube.sigs.k8s.io/)
- [MiniShift](https://www.okd.io/minishift/)
For the real thing (multi-node clusters) check any production
OpenShift/Kubernetes distribution like [OKD](https://www.okd.io/).
OpenShift/Kubernetes distribution like [OKD](https://www.okd.io/minishift/).
## Versions
If you have legacy version of `podman` (before 3.1.0) you might need to stick with legacy `podman-compose` `0.1.x` branch.
If you have legacy version of `podman` (before 3.x) you might need to stick with legacy `podman-compose` `0.1.x` branch.
The legacy branch 0.1.x uses mappings and workarounds to compensate for rootless limitations.
Modern podman versions (>=3.4) do not have those limitations, and thus you can use latest and stable 1.x branch.
If you are upgrading from `podman-compose` version `0.1.x` then we no longer have global option `-t` to set mapping type
like `hostnet`. If you desire that behavior, pass it the standard way like `network_mode: host` in the YAML.
Modern podman versions (>=3.4) do not have those limitations and thus you can use latest and stable 1.x branch.
## Installation
Install the latest stable version from PyPI:
Install latest stable version from PyPI:
```
pip3 install podman-compose
@ -63,6 +52,19 @@ Or latest development version from GitHub:
pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz
```
or
```
curl -o /usr/local/bin/podman-compose https://raw.githubusercontent.com/containers/podman-compose/devel/podman_compose.py
chmod +x /usr/local/bin/podman-compose
```
or
```
curl -o ~/.local/bin/podman-compose https://raw.githubusercontent.com/containers/podman-compose/devel/podman_compose.py
chmod +x ~/.local/bin/podman-compose
```
or install from Fedora (starting from f31) repositories:
@ -73,8 +75,6 @@ sudo dnf install podman-compose
## Basic Usage
We have included fully functional sample stacks inside `examples/` directory.
You can get more examples from [awesome-compose](https://github.com/docker/awesome-compose).
A quick example would be
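A minimal sketch of the usual flow with one of the bundled stacks under `examples/` (the directory name below is illustrative):

```shell
cd examples/busybox     # hypothetical example directory; any stack under examples/ works
podman-compose up -d
podman-compose ps
podman-compose logs
podman-compose down
```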
@ -95,21 +95,19 @@ which have
- a django tasks
When testing the `AWX3` example, if you got errors, just wait for db migrations to end.
There is also AWX 17.1.0
When testing the `AWX3` example, if you got errors just wait for db migrations to end.
## Tests
Inside `tests/` directory we have many useless docker-compose stacks
that are meant to test as many cases as we can to make sure we are compatible
that are meant to test as much cases as we can to make sure we are compatible
### Unit tests with pytest
Run pytest with the following command:
## How it works
```shell
python -m pytest pytests
```
The default mapping `1podfw` creates a single pod and attach all containers to
its network namespace so that all containers talk via localhost.
For more information see [docs/Mappings.md](docs/Mappings.md).
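Because the default mapping keeps every service in one pod, the result can be inspected with plain podman commands; a quick sketch (the `pod_<project>` name is the usual convention and may differ):

```shell
podman pod ps      # the whole stack shows up as a single pod, e.g. pod_myproject
podman ps --pod    # containers listed together with the pod they joined
```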
# Contributing guide
If you are running as root, you might use identity mapping.
If you are a user or a developer and want to contribute please check the [CONTRIBUTING](CONTRIBUTING.md) section

View File

@ -1,411 +0,0 @@
# Naming convention:
# * _camelCase for function names
# * snake_case for variable names
# all functions will return 0 if they successfully complete the argument
# (or establish there is no need or no way to complete), and something
# other than 0 if that's not the case
# complete arguments to global options
_completeGlobalOptArgs() {
# arguments to options that take paths as arguments: complete paths
for el in ${path_arg_global_opts}; do
if [[ ${prev} == ${el} ]]; then
COMPREPLY=( $(compgen -f -- ${cur}) )
return 0
fi
done
# arguments to options that take generic arguments: don't complete
for el in ${generic_arg_global_opts}; do
if [[ ${prev} == ${el} ]]; then
return 0
fi
done
return 1
}
# complete root subcommands and options
_completeRoot() {
# if we're completing an option
if [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${global_opts}" -- ${cur}) )
return 0
fi
# complete root commands
COMPREPLY=( $(compgen -W "${root_commands}" -- ${cur}) )
return 0
}
# complete names of Compose services
_completeServiceNames() {
# ideally we should complete service names,
# but parsing the compose spec file in the
# completion script is quite complex
return 0
}
# complete commands to run inside containers
_completeCommand() {
# we would need to complete commands to run inside
# a container
return 0
}
# complete the arguments for `podman-compose up` and return 0
_completeUpArgs() {
up_opts="${help_opts} -d --detach --no-color --quiet-pull --no-deps --force-recreate --always-recreate-deps --no-recreate --no-build --no-start --build --abort-on-container-exit -t --timeout -V --renew-anon-volumes --remove-orphans --scale --exit-code-from --pull --pull-always --build-arg --no-cache"
if [[ ${prev} == "--scale" || ${prev} == "-t" || ${prev} == "--timeout" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${up_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose exec` and return 0
_completeExecArgs() {
exec_opts="${help_opts} -d --detach --privileged -u --user -T --index -e --env -w --workdir"
if [[ ${prev} == "-u" || ${prev} == "--user" || ${prev} == "--index" || ${prev} == "-e" || ${prev} == "--env" || ${prev} == "-w" || ${prev} == "--workdir" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${exec_opts}" -- ${cur}) )
return 0
elif [[ ${comp_cword_adj} -eq 2 ]]; then
# complete service name
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
elif [[ ${comp_cword_adj} -eq 3 ]]; then
_completeCommand
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose down` and return 0
_completeDownArgs() {
down_opts="${help_opts} -v --volumes -t --timeout --remove-orphans"
if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${down_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose build` and return 0
_completeBuildArgs() {
build_opts="${help_opts} --pull --pull-always --build-arg --no-cache"
if [[ ${prev} == "--build-arg" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${build_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose logs` and return 0
_completeLogsArgs() {
logs_opts="${help_opts} -f --follow -l --latest -n --names --since -t --timestamps --tail --until"
if [[ ${prev} == "--since" || ${prev} == "--tail" || ${prev} == "--until" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${logs_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose ps` and return 0
_completePsArgs() {
ps_opts="${help_opts} -q --quiet"
if [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${ps_opts}" -- ${cur}) )
return 0
else
return 0
fi
}
# complete the arguments for `podman-compose pull` and return 0
_completePullArgs() {
pull_opts="${help_opts} --force-local"
if [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${pull_opts}" -- ${cur}) )
return 0
else
return 0
fi
}
# complete the arguments for `podman-compose push` and return 0
_completePushArgs() {
push_opts="${help_opts} --ignore-push-failures"
if [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${push_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose restart` and return 0
_completeRestartArgs() {
restart_opts="${help_opts} -t --timeout"
if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${restart_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose stop` and return 0
_completeStopArgs() {
stop_opts="${help_opts} -t --timeout"
if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${stop_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose start` and return 0
_completeStartArgs() {
start_opts="${help_opts}"
if [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${start_opts}" -- ${cur}) )
return 0
else
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
return 0
fi
}
# complete the arguments for `podman-compose run` and return 0
_completeRunArgs() {
run_opts="${help_opts} -d --detach --privileged -u --user -T --index -e --env -w --workdir"
if [[ ${prev} == "-u" || ${prev} == "--user" || ${prev} == "--index" || ${prev} == "-e" || ${prev} == "--env" || ${prev} == "-w" || ${prev} == "--workdir" ]]; then
return 0
elif [[ ${cur} == -* ]]; then
COMPREPLY=( $(compgen -W "${run_opts}" -- ${cur}) )
return 0
elif [[ ${comp_cword_adj} -eq 2 ]]; then
# complete service name
_completeServiceNames
if [[ $? -eq 0 ]]; then
return 0
fi
elif [[ ${comp_cword_adj} -eq 3 ]]; then
_completeCommand
if [[ $? -eq 0 ]]; then
return 0
fi
fi
}
_podmanCompose() {
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
root_commands="help version pull push build up down ps run exec start stop restart logs"
# options to output help text (used as global and subcommand options)
help_opts="-h --help"
# global options that don't take additional arguments
basic_global_opts="${help_opts} -v --no-ansi --no-cleanup --dry-run"
# global options that take paths as arguments
path_arg_global_opts="-f --file --podman-path"
path_arg_global_opts_array=($path_arg_global_opts)
# global options that take arguments that are not files
generic_arg_global_opts="-p --project-name --podman-path --podman-args --podman-pull-args --podman-push-args --podman-build-args --podman-inspect-args --podman-run-args --podman-start-args --podman-stop-args --podman-rm-args --podman-volume-args"
generic_arg_global_opts_array=($generic_arg_global_opts)
# all global options that take arguments
arg_global_opts="${path_arg_global_opts} ${generic_arg_global_opts}"
arg_global_opts_array=($arg_global_opts)
# all global options
global_opts="${basic_global_opts} ${arg_global_opts}"
chosen_root_command=""
_completeGlobalOptArgs
if [[ $? -eq 0 ]]; then
return 0
fi
# computing comp_cword_adj, which truthfully tells us how deep in the subcommands tree we are
# additionally, set the chosen_root_command if possible
comp_cword_adj=${COMP_CWORD}
if [[ ${COMP_CWORD} -ge 2 ]]; then
skip_next="no"
for el in ${COMP_WORDS[@]}; do
# if the user has asked for help text there's no need to complete further
if [[ ${el} == "-h" || ${el} == "--help" ]]; then
return 0
fi
if [[ ${skip_next} == "yes" ]]; then
let "comp_cword_adj--"
skip_next="no"
continue
fi
if [[ ${el} == -* && ${el} != ${cur} ]]; then
let "comp_cword_adj--"
for opt in ${arg_global_opts_array[@]}; do
if [[ ${el} == ${opt} ]]; then
skip_next="yes"
fi
done
elif [[ ${el} != ${cur} && ${el} != ${COMP_WORDS[0]} && ${chosen_root_command} == "" ]]; then
chosen_root_command=${el}
fi
done
fi
if [[ ${comp_cword_adj} -eq 1 ]]; then
_completeRoot
# Given that we check the value of comp_cword_adj outside
# of it, at the moment _completeRoot should always return
# 0, this is just here in case changes are made. The same
# will apply to similar functions below
if [[ $? -eq 0 ]]; then
return 0
fi
fi
case $chosen_root_command in
up)
_completeUpArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
down)
_completeDownArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
exec)
_completeExecArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
build)
_completeBuildArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
logs)
_completeLogsArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
ps)
_completePsArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
pull)
_completePullArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
push)
_completePushArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
restart)
_completeRestartArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
start)
_completeStartArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
stop)
_completeStopArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
run)
_completeRunArgs
if [[ $? -eq 0 ]]; then
return 0
fi
;;
esac
}
complete -F _podmanCompose podman-compose
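To try the completion script above, source it into the current shell or drop it into the bash-completion directory; a sketch, assuming it is saved as `completions/bash/podman-compose` inside the repo:

```shell
# current shell only
source completions/bash/podman-compose
# or system-wide (the target path varies by distribution)
sudo cp completions/bash/podman-compose /etc/bash_completion.d/podman-compose
```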

View File

@ -1,37 +0,0 @@
# AWX Compose
the directory roles is taken from [here](https://github.com/ansible/awx/tree/17.1.0/installer/roles/local_docker)
also look at https://github.com/ansible/awx/tree/17.1.0/tools/docker-compose
```
mkdir deploy awx17
ansible localhost \
-e host_port=8080 \
-e awx_secret_key='awx,secret.123' \
-e secret_key='awx,secret.123' \
-e admin_user='admin' \
-e admin_password='admin' \
-e pg_password='awx,123.' \
-e pg_username='awx' \
-e pg_database='awx' \
-e pg_port='5432' \
-e redis_image="docker.io/library/redis:6-alpine" \
-e postgres_data_dir="./data/pg" \
-e compose_start_containers=false \
-e dockerhub_base='docker.io/ansible' \
-e awx_image='docker.io/ansible/awx' \
-e awx_version='17.1.0' \
-e dockerhub_version='17.1.0' \
-e docker_deploy_base_path=$PWD/deploy \
-e docker_compose_dir=$PWD/awx17 \
-e awx_task_hostname=awx \
-e awx_web_hostname=awxweb \
-m include_role -a name=local_docker
cp awx17/docker-compose.yml awx17/docker-compose.yml.orig
sed -i -re "s#- \"$PWD/awx17/(.*):/#- \"./\1:/#" awx17/docker-compose.yml
cd awx17
podman-compose run --rm --service-ports task awx-manage migrate --no-input
podman-compose up -d
```
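The AWX stack takes a while to finish its database migrations; a hedged smoke-test sketch, run from inside `awx17/` (the port matches the `host_port=8080` passed above):

```shell
podman-compose ps                                            # wait until awx_web and awx_task are up
curl -fsS http://localhost:8080/ >/dev/null && echo "AWX web is answering"
```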

View File

@ -1,11 +0,0 @@
---
dockerhub_version: "{{ lookup('file', playbook_dir + '/../VERSION') }}"
awx_image: "awx"
redis_image: "redis"
postgresql_version: "12"
postgresql_image: "postgres:{{postgresql_version}}"
compose_start_containers: true
upgrade_postgres: false

View File

@ -1,74 +0,0 @@
---
- name: Create {{ docker_compose_dir }} directory
file:
path: "{{ docker_compose_dir }}"
state: directory
- name: Create Redis socket directory
file:
path: "{{ docker_compose_dir }}/redis_socket"
state: directory
mode: 0777
- name: Create Docker Compose Configuration
template:
src: "{{ item.file }}.j2"
dest: "{{ docker_compose_dir }}/{{ item.file }}"
mode: "{{ item.mode }}"
loop:
- file: environment.sh
mode: "0600"
- file: credentials.py
mode: "0600"
- file: docker-compose.yml
mode: "0600"
- file: nginx.conf
mode: "0600"
- file: redis.conf
mode: "0664"
register: awx_compose_config
- name: Render SECRET_KEY file
copy:
content: "{{ secret_key }}"
dest: "{{ docker_compose_dir }}/SECRET_KEY"
mode: 0600
register: awx_secret_key
- block:
- name: Remove AWX containers before migrating postgres so that the old postgres container does not get used
docker_compose:
project_src: "{{ docker_compose_dir }}"
state: absent
ignore_errors: true
- name: Run migrations in task container
shell: docker-compose run --rm --service-ports task awx-manage migrate --no-input
args:
chdir: "{{ docker_compose_dir }}"
- name: Start the containers
docker_compose:
project_src: "{{ docker_compose_dir }}"
restarted: "{{ awx_compose_config is changed or awx_secret_key is changed }}"
register: awx_compose_start
- name: Update CA trust in awx_web container
command: docker exec awx_web '/usr/bin/update-ca-trust'
when: awx_compose_config.changed or awx_compose_start.changed
- name: Update CA trust in awx_task container
command: docker exec awx_task '/usr/bin/update-ca-trust'
when: awx_compose_config.changed or awx_compose_start.changed
- name: Wait for launch script to create user
wait_for:
timeout: 10
delegate_to: localhost
- name: Create Preload data
command: docker exec awx_task bash -c "/usr/bin/awx-manage create_preload_data"
when: create_preload_data|bool
register: cdo
changed_when: "'added' in cdo.stdout"
when: compose_start_containers|bool

View File

@ -1,15 +0,0 @@
---
- name: Generate broadcast websocket secret
set_fact:
broadcast_websocket_secret: "{{ lookup('password', '/dev/null length=128') }}"
run_once: true
no_log: true
when: broadcast_websocket_secret is not defined
- import_tasks: upgrade_postgres.yml
when:
- postgres_data_dir is defined
- pg_hostname is not defined
- import_tasks: set_image.yml
- import_tasks: compose.yml

View File

@ -1,46 +0,0 @@
---
- name: Manage AWX Container Images
block:
- name: Export Docker awx image if it isnt local and there isnt a registry defined
docker_image:
name: "{{ awx_image }}"
tag: "{{ awx_version }}"
archive_path: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar"
when: inventory_hostname != "localhost" and docker_registry is not defined
delegate_to: localhost
- name: Set docker base path
set_fact:
docker_deploy_base_path: "{{ awx_base_path|default('/tmp') }}/docker_deploy"
when: ansible_connection != "local" and docker_registry is not defined
- name: Ensure directory exists
file:
path: "{{ docker_deploy_base_path }}"
state: directory
when: ansible_connection != "local" and docker_registry is not defined
- name: Copy awx image to docker execution
copy:
src: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar"
dest: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar"
when: ansible_connection != "local" and docker_registry is not defined
- name: Load awx image
docker_image:
name: "{{ awx_image }}"
tag: "{{ awx_version }}"
load_path: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar"
timeout: 300
when: ansible_connection != "local" and docker_registry is not defined
- name: Set full image path for local install
set_fact:
awx_docker_actual_image: "{{ awx_image }}:{{ awx_version }}"
when: docker_registry is not defined
when: dockerhub_base is not defined
- name: Set DockerHub Image Paths
set_fact:
awx_docker_actual_image: "{{ dockerhub_base }}/awx:{{ dockerhub_version }}"
when: dockerhub_base is defined

View File

@ -1,64 +0,0 @@
---
- name: Create {{ postgres_data_dir }} directory
file:
path: "{{ postgres_data_dir }}"
state: directory
- name: Get full path of postgres data dir
shell: "echo {{ postgres_data_dir }}"
register: fq_postgres_data_dir
- name: Register temporary docker container
set_fact:
container_command: "docker run --rm -v '{{ fq_postgres_data_dir.stdout }}:/var/lib/postgresql' centos:8 bash -c "
- name: Check for existing Postgres data (run from inside the container for access to file)
shell:
cmd: |
{{ container_command }} "[[ -f /var/lib/postgresql/10/data/PG_VERSION ]] && echo 'exists'"
register: pg_version_file
ignore_errors: true
- name: Record Postgres version
shell: |
{{ container_command }} "cat /var/lib/postgresql/10/data/PG_VERSION"
register: old_pg_version
when: pg_version_file is defined and pg_version_file.stdout == 'exists'
- name: Determine whether to upgrade postgres
set_fact:
upgrade_postgres: "{{ old_pg_version.stdout == '10' }}"
when: old_pg_version.changed
- name: Set up new postgres paths pre-upgrade
shell: |
{{ container_command }} "mkdir -p /var/lib/postgresql/12/data/"
when: upgrade_postgres | bool
- name: Stop AWX before upgrading postgres
docker_compose:
project_src: "{{ docker_compose_dir }}"
stopped: true
when: upgrade_postgres | bool
- name: Upgrade Postgres
shell: |
docker run --rm \
-v {{ postgres_data_dir }}/10/data:/var/lib/postgresql/10/data \
-v {{ postgres_data_dir }}/12/data:/var/lib/postgresql/12/data \
-e PGUSER={{ pg_username }} -e POSTGRES_INITDB_ARGS="-U {{ pg_username }}" \
tianon/postgres-upgrade:10-to-12 --username={{ pg_username }}
when: upgrade_postgres | bool
- name: Copy old pg_hba.conf
shell: |
{{ container_command }} "cp /var/lib/postgresql/10/data/pg_hba.conf /var/lib/postgresql/12/data/pg_hba.conf"
when: upgrade_postgres | bool
- name: Remove old data directory
shell: |
{{ container_command }} "rm -rf /var/lib/postgresql/10/data"
when:
- upgrade_postgres | bool
- compose_start_containers|bool

View File

@ -1,13 +0,0 @@
DATABASES = {
'default': {
'ATOMIC_REQUESTS': True,
'ENGINE': 'django.db.backends.postgresql',
'NAME': "{{ pg_database }}",
'USER': "{{ pg_username }}",
'PASSWORD': "{{ pg_password }}",
'HOST': "{{ pg_hostname | default('postgres') }}",
'PORT': "{{ pg_port }}",
}
}
BROADCAST_WEBSOCKET_SECRET = "{{ broadcast_websocket_secret | b64encode }}"

View File

@ -1,208 +0,0 @@
#jinja2: lstrip_blocks: True
version: '2'
services:
web:
image: {{ awx_docker_actual_image }}
container_name: awx_web
depends_on:
- redis
{% if pg_hostname is not defined %}
- postgres
{% endif %}
{% if (host_port is defined) or (host_port_ssl is defined) %}
ports:
{% if (host_port_ssl is defined) and (ssl_certificate is defined) %}
- "{{ host_port_ssl }}:8053"
{% endif %}
{% if host_port is defined %}
- "{{ host_port }}:8052"
{% endif %}
{% endif %}
hostname: {{ awx_web_hostname }}
user: root
restart: unless-stopped
{% if (awx_web_container_labels is defined) and (',' in awx_web_container_labels) %}
{% set awx_web_container_labels_list = awx_web_container_labels.split(',') %}
labels:
{% for awx_web_container_label in awx_web_container_labels_list %}
- {{ awx_web_container_label }}
{% endfor %}
{% elif awx_web_container_labels is defined %}
labels:
- {{ awx_web_container_labels }}
{% endif %}
volumes:
- supervisor-socket:/var/run/supervisor
- rsyslog-socket:/var/run/awx-rsyslog/
- rsyslog-config:/var/lib/awx/rsyslog/
- "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY"
- "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh"
- "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py"
- "{{ docker_compose_dir }}/nginx.conf:/etc/nginx/nginx.conf:ro"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if project_data_dir is defined %}
- "{{ project_data_dir +':/var/lib/awx/projects:rw' }}"
{% endif %}
{% if custom_venv_dir is defined %}
- "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}"
{% endif %}
{% if ca_trust_dir is defined %}
- "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}"
{% endif %}
{% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
- "{{ ssl_certificate_key +':/etc/nginx/awxweb_key.pem:ro' }}"
{% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
{% endif %}
{% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %}
{% set awx_container_search_domains_list = awx_container_search_domains.split(',') %}
dns_search:
{% for awx_container_search_domain in awx_container_search_domains_list %}
- {{ awx_container_search_domain }}
{% endfor %}
{% elif awx_container_search_domains is defined %}
dns_search: "{{ awx_container_search_domains }}"
{% endif %}
{% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %}
{% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %}
dns:
{% for awx_alternate_dns_server in awx_alternate_dns_servers_list %}
- {{ awx_alternate_dns_server }}
{% endfor %}
{% elif awx_alternate_dns_servers is defined %}
dns: "{{ awx_alternate_dns_servers }}"
{% endif %}
{% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %}
{% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %}
extra_hosts:
{% for docker_compose_extra_host in docker_compose_extra_hosts_list %}
- "{{ docker_compose_extra_host }}"
{% endfor %}
{% endif %}
environment:
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
task:
image: {{ awx_docker_actual_image }}
container_name: awx_task
depends_on:
- redis
- web
{% if pg_hostname is not defined %}
- postgres
{% endif %}
command: /usr/bin/launch_awx_task.sh
hostname: {{ awx_task_hostname }}
user: root
restart: unless-stopped
volumes:
- supervisor-socket:/var/run/supervisor
- rsyslog-socket:/var/run/awx-rsyslog/
- rsyslog-config:/var/lib/awx/rsyslog/
- "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY"
- "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh"
- "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if project_data_dir is defined %}
- "{{ project_data_dir +':/var/lib/awx/projects:rw' }}"
{% endif %}
{% if custom_venv_dir is defined %}
- "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}"
{% endif %}
{% if ca_trust_dir is defined %}
- "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}"
{% endif %}
{% if ssl_certificate is defined %}
- "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}"
{% endif %}
{% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %}
{% set awx_container_search_domains_list = awx_container_search_domains.split(',') %}
dns_search:
{% for awx_container_search_domain in awx_container_search_domains_list %}
- {{ awx_container_search_domain }}
{% endfor %}
{% elif awx_container_search_domains is defined %}
dns_search: "{{ awx_container_search_domains }}"
{% endif %}
{% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %}
{% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %}
dns:
{% for awx_alternate_dns_server in awx_alternate_dns_servers_list %}
- {{ awx_alternate_dns_server }}
{% endfor %}
{% elif awx_alternate_dns_servers is defined %}
dns: "{{ awx_alternate_dns_servers }}"
{% endif %}
{% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %}
{% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %}
extra_hosts:
{% for docker_compose_extra_host in docker_compose_extra_hosts_list %}
- "{{ docker_compose_extra_host }}"
{% endfor %}
{% endif %}
environment:
AWX_SKIP_MIGRATIONS: "1"
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
SUPERVISOR_WEB_CONFIG_PATH: '/etc/supervisord.conf'
redis:
image: {{ redis_image }}
container_name: awx_redis
restart: unless-stopped
environment:
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
command: ["/usr/local/etc/redis/redis.conf"]
volumes:
- "{{ docker_compose_dir }}/redis.conf:/usr/local/etc/redis/redis.conf:ro"
- "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw"
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
{% if pg_hostname is not defined %}
postgres:
image: {{ postgresql_image }}
container_name: awx_postgres
restart: unless-stopped
volumes:
- "{{ postgres_data_dir }}/12/data/:/var/lib/postgresql/data:Z"
environment:
POSTGRES_USER: {{ pg_username }}
POSTGRES_PASSWORD: {{ pg_password }}
POSTGRES_DB: {{ pg_database }}
http_proxy: {{ http_proxy | default('') }}
https_proxy: {{ https_proxy | default('') }}
no_proxy: {{ no_proxy | default('') }}
{% if docker_logger is defined %}
logging:
driver: {{ docker_logger }}
{% endif %}
{% endif %}
{% if docker_compose_subnet is defined %}
networks:
default:
driver: bridge
ipam:
driver: default
config:
- subnet: {{ docker_compose_subnet }}
{% endif %}
volumes:
supervisor-socket:
rsyslog-socket:
rsyslog-config:

View File

@ -1,10 +0,0 @@
DATABASE_USER={{ pg_username|quote }}
DATABASE_NAME={{ pg_database|quote }}
DATABASE_HOST={{ pg_hostname|default('postgres')|quote }}
DATABASE_PORT={{ pg_port|default('5432')|quote }}
DATABASE_PASSWORD={{ pg_password|default('awxpass')|quote }}
{% if pg_admin_password is defined %}
DATABASE_ADMIN_PASSWORD={{ pg_admin_password|quote }}
{% endif %}
AWX_ADMIN_USER={{ admin_user|quote }}
AWX_ADMIN_PASSWORD={{ admin_password|quote }}

View File

@ -1,122 +0,0 @@
#user awx;
worker_processes 1;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
server_tokens off;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /dev/stdout main;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
sendfile on;
#tcp_nopush on;
#gzip on;
upstream uwsgi {
server 127.0.0.1:8050;
}
upstream daphne {
server 127.0.0.1:8051;
}
{% if ssl_certificate is defined %}
server {
listen 8052 default_server;
server_name _;
# Redirect all HTTP links to the matching HTTPS page
return 301 https://$host$request_uri;
}
{%endif %}
server {
{% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb_key.pem;
{% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %}
listen 8053 ssl;
ssl_certificate /etc/nginx/awxweb.pem;
ssl_certificate_key /etc/nginx/awxweb.pem;
{% else %}
listen 8052 default_server;
{% endif %}
# If you have a domain name, this is where to add it
server_name _;
keepalive_timeout 65;
# HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
add_header Strict-Transport-Security max-age=15768000;
# Protect against click-jacking https://www.owasp.org/index.php/Testing_for_Clickjacking_(OTG-CLIENT-009)
add_header X-Frame-Options "DENY";
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
location /static/ {
alias /var/lib/awx/public/static/;
}
location /favicon.ico { alias /var/lib/awx/public/static/favicon.ico; }
location /websocket {
# Pass request to the upstream alias
proxy_pass http://daphne;
# Require http version 1.1 to allow for upgrade requests
proxy_http_version 1.1;
# We want proxy_buffering off for proxying to websockets.
proxy_buffering off;
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you use HTTPS:
proxy_set_header X-Forwarded-Proto https;
# pass the Host: header from the client for the sake of redirects
proxy_set_header Host $http_host;
# We've set the Host header, so we don't need Nginx to muddle
# about with redirects
proxy_redirect off;
# Depending on the request value, set the Upgrade and
# connection headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
location / {
# Add trailing / if missing
rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent;
uwsgi_read_timeout 120s;
uwsgi_pass uwsgi;
include /etc/nginx/uwsgi_params;
{%- if extra_nginx_include is defined %}
include {{ extra_nginx_include }};
{%- endif %}
proxy_set_header X-Forwarded-Port 443;
uwsgi_param HTTP_X_FORWARDED_PORT 443;
}
}
}

View File

@ -1,4 +0,0 @@
unixsocket /var/run/redis/redis.sock
unixsocketperm 660
port 0
bind 127.0.0.1

View File

@ -1,17 +0,0 @@
# Azure Vote Example
This example has two containers:
* backend: `redis` used as storage
* frontend: having supervisord, nginx, uwsgi/python
```
echo "HOST_PORT=8080" > .env
podman-compose up
```
After running the commands above, open your browser at the host port you picked, e.g.
[http://localhost:8080/](http://localhost:8080/)

View File

@ -1,16 +0,0 @@
---
# from https://github.com/Azure-Samples/azure-voting-app-redis/blob/master/docker-compose.yaml
version: '3'
services:
azure-vote-back:
image: mcr.microsoft.com/oss/bitnami/redis:6.0.8
container_name: azure-vote-back
environment:
ALLOW_EMPTY_PASSWORD: "yes"
azure-vote-front:
image: mcr.microsoft.com/azuredocs/azure-vote-front:v1
environment:
REDIS: azure-vote-back
ports:
- "${HOST_PORT:-8080}:80"

View File

@ -1,31 +0,0 @@
# Echo Service example
```
podman-compose up
```
Test the service with `curl` like this:
```
$ curl -X POST -d "foobar" http://localhost:8080/; echo
CLIENT VALUES:
client_address=10.89.31.2
command=POST
real path=/
query=nil
request_version=1.1
request_uri=http://localhost:8080/
SERVER VALUES:
server_version=nginx: 1.10.0 - lua: 10001
HEADERS RECEIVED:
accept=*/*
content-length=6
content-type=application/x-www-form-urlencoded
host=localhost:8080
user-agent=curl/7.76.1
BODY:
foobar
```

View File

@ -1,8 +0,0 @@
---
version: '3'
services:
web:
image: k8s.gcr.io/echoserver:1.4
ports:
- "${HOST_PORT:-8080}:8080"

View File

@ -1,12 +0,0 @@
# GCR Hello App Redis
A 6-node redis cluster using [Bitnami](https://github.com/bitnami/bitnami-docker-redis-cluster)
with a [simple hit counter](https://github.com/GoogleCloudPlatform/kubernetes-engine-samples/tree/main/hello-app-redis) that persists on that redis cluster
```
podman-compose up
```
then open your browser on [http://localhost:8080/](http://localhost:8080/)
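A short sketch to exercise the hit counter and confirm it survives a restart thanks to the named redis volumes (port taken from the compose file that follows):

```shell
for i in 1 2 3; do curl -s http://localhost:8080/; echo; done   # counter increments
podman-compose down && podman-compose up -d && sleep 30         # named volumes are kept; give the cluster a moment to rejoin
curl -s http://localhost:8080/; echo                            # counter carries on, not reset
```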

View File

@ -1,67 +0,0 @@
---
version: '3'
volumes:
redis-node1-data:
redis-node2-data:
redis-node3-data:
redis-node4-data:
redis-node5-data:
redis-data:
services:
web:
image: gcr.io/google-samples/hello-app-redis:1.0
depends_on:
- redis-cluster
ports:
- "${HOST_PORT:-8080}:8080"
redis-node1:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-node1-data:/bitnami/redis/data
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
redis-node2:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-node2-data:/bitnami/redis/data
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
redis-node3:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-node3-data:/bitnami/redis/data
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
redis-node4:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-node4-data:/bitnami/redis/data
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
redis-node5:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-node5-data:/bitnami/redis/data
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
redis-cluster:
image: docker.io/bitnami/redis-cluster:6.2
volumes:
- redis-data:/bitnami/redis/data
depends_on:
- redis-node1
- redis-node2
- redis-node3
- redis-node4
- redis-node5
environment:
- ALLOW_EMPTY_PASSWORD=yes
- REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster
- REDIS_CLUSTER_CREATOR=yes

View File

@ -1,10 +0,0 @@
# GCR Hello App
A small ~2MB image, type
```
podman-compose up
```
then open your browser on [http://localhost:8080/](http://localhost:8080/)

View File

@ -1,8 +0,0 @@
---
version: '3'
services:
web:
image: gcr.io/google-samples/hello-app:1.0
ports:
- "${HOST_PORT:-8080}:8080"

View File

@ -1,37 +0,0 @@
import os
import asyncio
import aioredis
from aiohttp import web
REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = int(os.environ.get("REDIS_PORT", "6379"))
REDIS_DB = int(os.environ.get("REDIS_DB", "0"))
redis = aioredis.from_url(f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}")
app = web.Application()
routes = web.RouteTableDef()
@routes.get("/")
async def hello(request):
counter = await redis.incr("mycounter")
return web.Response(text=f"counter={counter}")
@routes.get("/hello.json")
async def hello_json(request):
counter = await redis.incr("mycounter")
data = {"counter": counter}
return web.json_response(data)
app.add_routes(routes)
def main():
web.run_app(app, port=8080)
if __name__ == "__main__":
main()

View File

@ -1,12 +0,0 @@
FROM python:3.9-alpine
WORKDIR /usr/src/app
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD [ "python", "-m", "App.web" ]
EXPOSE 8080

View File

@ -1,8 +0,0 @@
# Simple Python Demo
## A Redis counter
```
podman-compose up -d
curl localhost:8080/
curl localhost:8080/hello.json
```

View File

@ -1,21 +0,0 @@
---
version: '3'
volumes:
redis:
services:
redis:
read_only: true
image: docker.io/redis:alpine
command: ["redis-server", "--appendonly", "yes", "--notify-keyspace-events", "Ex"]
volumes:
- redis:/data
web:
read_only: true
build:
context: .
image: hello-py-aioweb
ports:
- 8080:8080
environment:
REDIS_HOST: redis

View File

@ -1,3 +0,0 @@
aiohttp
aioredis
# aioredis[hiredis]

View File

@ -1,71 +0,0 @@
{
"env": {
"node": true,
"es6": true
},
"settings": {
"import/resolver": {
"node": {
"extensions": [".js", ".mjs", ".ts", ".cjs"]
}
}
},
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2020,
"sourceType": "module",
"allowImportExportEverywhere": true
},
"extends": [
"eslint:recommended",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:import/typescript",
"plugin:promise/recommended",
"google",
"plugin:security/recommended"
],
"plugins": ["promise", "security", "import"],
"overrides": [
{
"files": "public/**/*.min.js",
"env": {
"browser": true,
"node": false,
"es6": false
},
"parserOptions": {
"sourceType": "script"
},
"extends": ["plugin:compat/recommended"],
"plugins": [],
"rules": {
"no-var": ["off"]
}
}
],
"rules": {
"security/detect-non-literal-fs-filename":["off"],
"security/detect-object-injection":["off"],
"camelcase": ["off"],
"no-console": ["off"],
"require-jsdoc": ["off"],
"one-var": ["off"],
"guard-for-in": ["off"],
"max-len": [
"warn",
{
"ignoreComments": true,
"ignoreTrailingComments": true,
"ignoreUrls": true,
"code": 200
}
],
"indent": ["warn", 4],
"no-unused-vars": ["warn"],
"no-extra-semi": ["warn"],
"linebreak-style": ["error", "unix"],
"quotes": ["warn", "double"],
"semi": ["error", "always"]
}
}

View File

@ -1,5 +0,0 @@
local.env
.env
*.pid
node_modules

View File

@ -1 +0,0 @@
*

View File

@ -1,16 +0,0 @@
# How to run example
```
cp example.local.env local.env
cp example.env .env
cat local.env
cat .env
echo "UID=$UID" >> .env
cat .env
podman-compose build
podman-compose run --rm --no-deps init
podman-compose up
```
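Once the web service is up, the `/healthz` route defined in `lib/web.js` gives a quick liveness check; a sketch assuming the default `WEB_LISTEN_PORT=3000` from `example.env`:

```shell
curl -s http://localhost:3000/healthz; echo    # prints ok@<timestamp> once the web worker is listening
```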

View File

@ -1,12 +0,0 @@
FROM registry.fedoraproject.org/fedora-minimal:35
ARG NODE_VER=16
# microdnf -y module enable nodejs:${NODE_VER}
RUN \
echo -e "[nodejs]\nname=nodejs\nstream=${NODE_VER}\nprofiles=\nstate=enabled\n" > /etc/dnf/modules.d/nodejs.module && \
microdnf -y install shadow-utils nodejs zopfli findutils busybox && \
microdnf clean all
RUN adduser -d /app app && mkdir -p /app/code/.home && chown app:app -R /app/code && chmod 711 /app /app/code/.home && usermod -d /app/code/.home app
ENV XDG_CONFIG_HOME=/app/code/.home
ENV HOME=/app/code/.home
WORKDIR /app/code

View File

@ -1,48 +0,0 @@
version: '3'
volumes:
redis:
services:
redis:
read_only: true
image: docker.io/redis:alpine
command: ["redis-server", "--appendonly", "yes", "--notify-keyspace-events", "Ex"]
volumes:
- redis:/data
tmpfs:
- /tmp
- /var/run
- /run
init:
read_only: true
#userns_mode: keep-id
user: ${UID:-1000}
build:
context: ./containers/${NODE_IMG:-node16-runtime}
image: ${NODE_IMG:-node16-runtime}
env_file:
- local.env
volumes:
- .:/app/code
command: ["/bin/sh", "-c", "mkdir -p ~/; [ -d ./node_modules ] && echo '** node_modules exists' || npm install"]
tmpfs:
- /tmp
- /run
task:
extends:
service: init
command: ["npm", "run", "cli", "--", "task"]
links:
- redis
depends_on:
- redis
web:
extends:
service: init
command: ["npm", "run", "cli", "--", "web"]
ports:
- ${WEB_LISTEN_PORT:-3000}:3000
depends_on:
- redis
links:
- mongo

View File

@ -1,3 +0,0 @@
WEB_LISTEN_PORT=3000
# pass UID= your IDE user

View File

@ -1,2 +0,0 @@
REDIS_HOST=redis

View File

@ -1,6 +0,0 @@
#! /usr/bin/env node
"use strict";
import {start} from "./lib";
start();

View File

@ -1,14 +0,0 @@
{
"compilerOptions": {
"target": "es2020",
"module": "es2020",
"moduleResolution": "node",
"allowSyntheticDefaultImports": true
},
"files": [
"index.js"
],
"include": [
"lib/**/*.js"
]
}

View File

@ -1,31 +0,0 @@
"use strict";
import {proj} from "../proj";
async function loop() {
    // blpop blocks for up to 5 seconds waiting for the next task on the "queue" list
    const popped = await proj.predis.blpop("queue", 5);
    if (!popped) {
        return; // timed out on an empty queue, let the caller poll again
    }
    const task_desc_s = popped[1];
    let task_desc;
    try {
        task_desc = JSON.parse(task_desc_s);
    } catch (e) {
        console.error(e);
        return;
    }
    console.info("got task "+task_desc.func);
    const func = task_desc.func;
    const args = task_desc.args;
    if (typeof(proj.tasks[func])!="function") {
        console.log(`task ${func} not found`);
        process.exit(-1);
    }
    try {
        await ((proj.tasks[func])(...args));
    } catch (e) {
        console.error(e);
    }
}
export async function start() {
    while(true) {
        await loop();
    }
}

View File

@ -1,21 +0,0 @@
"use strict";
import {proj} from "../proj";
import http from "http";
import express from "express";
export async function start() {
const app = express();
const server = http.createServer(app);
// Routing
app.use(express.static(proj.config.basedir + "/public"));
app.get("/healthz", function(req, res) {
res.send("ok@"+Date.now());
});
server.listen(proj.config.LISTEN_PORT, proj.config.LISTEN_HOST, function() {
console.warn(`listening at port ${proj.config.LISTEN_PORT}`);
});
}

View File

@ -1,24 +0,0 @@
{
"name": "nodeproj",
"version": "0.0.1",
"description": "nodejs example project",
"exports": {
".": "./index.js",
"./lib": "./lib"
},
"main": "index.js",
"type": "module",
"scripts": {
"cli": "nodemon -w lib -w index.js --es-module-specifier-resolution=node ./index.js"
},
"dependencies": {
"express": "~4.16.4",
"redis": "^3.1.2"
},
"private": true,
"author": "",
"license": "proprietary",
"devDependencies": {
"nodemon": "^2.0.14"
}
}

View File

@ -1,18 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Vote</title>
<link rel="stylesheet" href="https://unpkg.com/browse/normalize.css@8.0.1/normalize.css">
<link rel="stylesheet" href="styles.css">
</head>
<body>
<h1>This is a Heading</h1>
<p>This is a paragraph.</p>
</body>
<script type="text/javascript" src="main.css"></script>
<script type="text/javascript">
//<![CDATA[
console.log("loaded");
//]]>
</script>
</html>

View File

@ -1,24 +0,0 @@
---
volumes:
db_data:
services:
wordpress:
image: docker.io/library/wordpress:latest
ports:
- 8080:80
environment:
- WORDPRESS_DB_HOST=db
- WORDPRESS_DB_USER=wordpress
- WORDPRESS_DB_PASSWORD=password
- WORDPRESS_DB_NAME=wordpress
db:
image: docker.io/library/mariadb:10.6.4-focal
command: '--default-authentication-plugin=mysql_native_password'
volumes:
- db_data:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=somewordpress
- MYSQL_DATABASE=wordpress
- MYSQL_USER=wordpress
- MYSQL_PASSWORD=password

File diff suppressed because it is too large.

View File

@ -1,20 +0,0 @@
import pytest
from podman_compose import parse_short_mount
@pytest.fixture
def multi_propagation_mount_str():
return "/foo/bar:/baz:U,Z"
def test_parse_short_mount_multi_propagation(multi_propagation_mount_str):
expected = {
"type": "bind",
"source": "/foo/bar",
"target": "/baz",
"bind": {
"propagation": "U,Z",
},
}
assert parse_short_mount(multi_propagation_mount_str, "/") == expected

View File

@ -2,50 +2,43 @@ import os
from setuptools import setup
try:
readme = open(os.path.join(os.path.dirname(__file__), "README.md")).read()
readme = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
except:
readme = ""
readme = ''
setup(
name="podman-compose",
name='podman-compose',
description="A script to run docker-compose.yml using podman",
long_description=readme,
long_description_content_type="text/markdown",
long_description_content_type='text/markdown',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
keywords="podman, podman-compose",
author="Muayyad Alsadi",
author_email="alsadi@gmail.com",
url="https://github.com/containers/podman-compose",
py_modules=["podman_compose"],
entry_points={"console_scripts": ["podman-compose = podman_compose:main"]},
include_package_data=True,
license="GPL-2.0-only",
install_requires=[
"pyyaml",
"python-dotenv",
],
extras_require={
"devel": [
"flake8",
"black",
"pylint",
"pre-commit",
keywords='podman, podman-compose',
author='Muayyad Alsadi',
author_email='alsadi@gmail.com',
url='https://github.com/containers/podman-compose',
py_modules=['podman_compose'],
entry_points={
'console_scripts': [
'podman-compose = podman_compose:main'
]
}
},
include_package_data=True,
license='GPL-2.0-only',
install_requires=[
'pyyaml',
'python-dotenv',
],
# test_suite='tests',
# tests_require=[
# 'coverage',

View File

@ -6,4 +6,3 @@ coverage
pytest-cov
pytest
tox
black

View File

@ -1,8 +0,0 @@
version: "3"
services:
web:
extends:
file: sub/docker-compose.yml
service: webapp
environment:
- DEBUG=1

View File

@ -1,12 +0,0 @@
version: "3"
services:
webapp:
build:
context: docker/example
dockerfile: Dockerfile
image: localhost/subdir_test:me
ports:
- "8000:8000"
volumes:
- "/data"

View File

@ -1 +0,0 @@
FROM busybox as base

View File

@ -4,7 +4,7 @@ services:
image: busybox
command: busybox httpd -h /var/www/html/ -f -p 8001
volumes:
- ./1.env:/var/www/html/index.txt:z
- ./1.env:/var/www/html/index.txt
env_file: ./1.env
labels:
l1: v1

View File

@ -1,11 +1,10 @@
version: '3'
services:
web1:
image: busybox
env_file: ./12.env
labels:
- l1=v2
- l2=v2
- l1=v2
- l2=v2
environment:
mykey1: myval2
mykey2: myval2
@ -14,6 +13,6 @@ services:
image: busybox
command: busybox httpd -h /var/www/html/ -f -p 8002
volumes:
- ./2.env:/var/www/html/index.txt:z
- ./2.env:/var/www/html/index.txt
env_file: ./2.env

View File

@ -1,7 +0,0 @@
version: '3'
services:
web:
image: busybox
command: httpd -f -p 8123 -h /etc/
network_mode: host

View File

@ -1,16 +0,0 @@
---
# https://github.com/compose-spec/compose-spec/blob/master/spec.md#priority
services:
app:
image: busybox
command: top
networks:
app_net_1:
app_net_2:
priority: 1000
app_net_3:
priority: 100
networks:
app_net_1:
app_net_2:
app_net_3:

View File

@ -1,21 +0,0 @@
version: "3"
services:
web1:
image: busybox
hostname: web1
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
ports:
- 8001:8001
volumes:
- ./test1.txt:/var/www/html/index.txt:ro,z
web2:
image: busybox
hostname: web2
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
ports:
- 8002:8001
volumes:
- ./test2.txt:/var/www/html/index.txt:ro,z

View File

@ -1 +0,0 @@
test1

View File

@ -1 +0,0 @@
test2

View File

@ -1,23 +0,0 @@
version: "3"
networks:
mystack:
services:
web1:
image: busybox
hostname: web1
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
ports:
- 8001:8001
volumes:
- ./test1.txt:/var/www/html/index.txt:ro,z
web2:
image: busybox
hostname: web2
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
ports:
- 8002:8001
volumes:
- ./test2.txt:/var/www/html/index.txt:ro,z

View File

@ -1 +0,0 @@
test1

View File

@ -1 +0,0 @@
test2

View File

@ -1,45 +0,0 @@
version: "3"
networks:
net1:
net2:
services:
web1:
image: busybox
#container_name: web1
hostname: web1
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
networks:
- net1
ports:
- 8001:8001
volumes:
- ./test1.txt:/var/www/html/index.txt:ro,z
web2:
image: busybox
#container_name: web2
hostname: web2
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
networks:
- net1
- net2
ports:
- 8002:8001
volumes:
- ./test2.txt:/var/www/html/index.txt:ro,z
web3:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
networks:
net1:
aliases:
- alias11
- alias12
net2:
aliases:
- alias21
volumes:
- ./test2.txt:/var/www/html/index.txt:ro,z

View File

@ -1 +0,0 @@
test1

View File

@ -1 +0,0 @@
test2

View File

@ -2,34 +2,32 @@ version: "3"
services:
web1:
image: busybox
hostname: web1
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
ports:
- 8001:8001
- 8001:8001
volumes:
- ./test1.txt:/var/www/html/index.txt:ro,z
- ./test1.txt:/var/www/html/index.txt:ro
web2:
image: busybox
hostname: web2
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"]
working_dir: /var/www/html
ports:
- 8002:8002
- target: 8003
host_ip: 127.0.0.1
published: 8003
protocol: udp
- target: 8004
host_ip: 127.0.0.1
published: 8004
protocol: tcp
- target: 8005
published: 8005
- target: 8006
protocol: udp
- target: 8007
host_ip: 127.0.0.1
- 8002:8002
- target: 8003
host_ip: 127.0.0.1
published: 8003
protocol: udp
- target: 8004
host_ip: 127.0.0.1
published: 8004
protocol: tcp
- target: 8005
published: 8005
- target: 8006
protocol: udp
- target: 8007
host_ip: 127.0.0.1
volumes:
- ./test2.txt:/var/www/html/index.txt:ro,z
- ./test2.txt:/var/www/html/index.txt:ro

View File

@ -1,7 +1,3 @@
---
# echo "sec" | podman secret create my_secret -
# echo "sec2" | podman secret create my_secret_2 -
# echo "sec3" | podman secret create my_secret_3 -
version: "3.8"
services:
test:
@ -12,7 +8,7 @@ services:
- /run
- /tmp
volumes:
- ./print_secrets.sh:/tmp/print_secrets.sh:z
- ./print_secrets.sh:/tmp/print_secrets.sh
secrets:
- my_secret
- my_secret_2

View File

@ -4,7 +4,7 @@ services:
image: redis:alpine
command: ["redis-server", "--appendonly yes", "--notify-keyspace-events", "Ex"]
volumes:
- ./data/redis:/data:z
- ./data/redis:/data
tmpfs: /run1
ports:
- "6379"
@ -25,16 +25,16 @@ services:
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
working_dir: /var/www/html
volumes:
- ./data/web:/var/www/html:ro,z
- ./data/web:/var/www/html:ro
web2:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"]
working_dir: /var/www/html
volumes:
- ~/Downloads/www:/var/www/html:ro,z
- ~/Downloads/www:/var/www/html:ro
web3:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8003"]
working_dir: /var/www/html
volumes:
- /var/www/html:/var/www/html:ro,z
- /var/www/html:/var/www/html:ro

View File

@ -1,61 +0,0 @@
from pathlib import Path
import subprocess
def capture(command):
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate()
return out, err, proc.returncode
def test_podman_compose_extends_w_file_subdir():
"""
Test that podman-compose can execute podman-compose -f <file> up with extended File which
includes a build context
:return:
"""
main_path = Path(__file__).parent.parent
command_up = [
"python3",
str(main_path.joinpath("podman_compose.py")),
"-f",
str(main_path.joinpath("tests", "extends_w_file_subdir", "docker-compose.yml")),
"up",
"-d",
]
command_check_container = [
"podman",
"container",
"ps",
"--all",
"--format",
'"{{.Image}}"',
]
command_down = [
"podman",
"rmi",
"--force",
"localhost/subdir_test:me",
"docker.io/library/busybox",
]
out, err, returncode = capture(command_up)
assert 0 == returncode
# check container was created and exists
out, err, returncode = capture(command_check_container)
assert 0 == returncode
assert out == b'"localhost/subdir_test:me"\n'
out, err, returncode = capture(command_down)
# cleanup test image(tags)
assert 0 == returncode
# check that the container no longer exists
out, err, returncode = capture(command_check_container)
assert 0 == returncode
assert out == b""

View File

@ -1,9 +0,0 @@
version: "3"
services:
loop1:
image: busybox
command: ["/bin/sh", "-c", "for i in `seq 1 10000`; do echo \"loop1: $$i\"; sleep 1; done"]
loop2:
image: busybox
command: ["/bin/sh", "-c", "for i in `seq 1 10000`; do echo \"loop2: $$i\"; sleep 3; done"]

View File

@ -4,7 +4,6 @@ services:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8000"]
working_dir: /var/www/html
restart: always
volumes:
- /var/www/html
tmpfs:
@ -13,10 +12,9 @@ services:
web1:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"]
restart: unless-stopped
working_dir: /var/www/html
volumes:
- myvol1:/var/www/html:ro,z
- myvol1:/var/www/html:ro
web2:
image: busybox
command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"]
@ -34,7 +32,6 @@ services:
- data3:/var/www/html_data3
volumes:
myvol1:
myvol2:
labels:
mylabel: myval