Mirror of https://github.com/ChristianLempa/boilerplates.git
Synced 2025-02-25 15:00:46 +01:00

Merge branch 'main' into renovate/public.ecr.aws-gravitational-teleport-distroless-17.x
This commit is contained in: a8d96b48f6

.github/FUNDING.yml (1 line added)
@@ -1,3 +1,4 @@
+---
 # These are supported funding model platforms

 patreon: christianlempa

.github/workflows/lint.yaml (new file, 19 lines):
---
name: Lint

on: # yamllint disable-line rule:truthy
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  lint:
    name: Linters
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - run: yamllint --strict -- $(git ls-files '*.yaml' '*.yml')

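The same check can be run locally before pushing. A minimal sketch, assuming yamllint is installed and the command is run from the repository root, mirroring the CI step above:

    # Lint every tracked YAML file exactly like the workflow does
    yamllint --strict -- $(git ls-files '*.yaml' '*.yml')
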
.yamllint (new file, 7 lines):
---
extends: default

rules:
  line-length:
    max: 160
    level: warning

@@ -20,7 +20,6 @@ If you’d like to contribute to this project, reach out to me on social media o

 - [Dotfiles](https://github.com/christianlempa/dotfiles) - My personal configuration files on macOS
 - [Cheat-Sheets](https://github.com/christianlempa/cheat-sheets) - Command Reference for various tools and technologies
-- [Homelab](https://github.com/christianlempa/homelab) - This is my entire Homelab documentation, and configurations for infrastructure, applications, networking, and more.

 ## Support me

@@ -1,6 +1,7 @@
+---
 name: Kubernetes Deploy

-on:
+on: # yamllint disable-line rule:truthy
   push:
     branches:
       - main

@@ -1,6 +1,7 @@
+---
 name: copy config files to remote machine

-on:
+on: # yamllint disable-line rule:truthy
   push:
     branches:
       - main

@@ -1,6 +1,7 @@
+---
 name: Update Docker Compose File

-on:
+on: # yamllint disable-line rule:truthy
   push:
     branches:
       - main

(deleted file, 25 lines):
---
- name: Install fail2ban and configure sshd
  hosts: "{{ my_hosts | d([]) }}"
  become: true

  tasks:
    - name: Install fail2ban
      ansible.builtin.apt:
        name:
          - fail2ban
        update_cache: true

    - name: Copy fail2ban config file
      ansible.builtin.copy:
        src: configfiles/debian-sshd-default.conf
        dest: /etc/fail2ban/jail.d/debian-sshd-default.conf
        mode: '0644'
        owner: root
        group: root

    - name: Restart fail2ban
      ansible.builtin.systemd_service:
        state: restarted
        daemon_reload: true
        name: fail2ban

(deleted file, 3 lines):
[sshd]
enabled = true
bantime = 3600

ansible/docker/docker-certs-enable.yaml (new file, 52 lines):
---
- name: "Docker Certs enable"
  hosts: "{{ my_hosts | d([]) }}"
  become: true
  vars:
    certs_path: "/root/docker-certs"

  tasks:
    - name: Check if docker certs are existing
      ansible.builtin.stat:
        path: "{{ certs_path }}"
      register: certs_dir

    - name: Fail if docker certs are not existing
      ansible.builtin.fail:
        msg: "Docker certificates are not existing in /root/docker-certs."
      when: not certs_dir.stat.exists

    - name: Get machine's primary internal ip address from eth0 interface
      ansible.builtin.setup:
      register: ip_address

    - name: Set machine's primary internal ip address
      ansible.builtin.set_fact:
        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"

    - name: Check if ip_address is a valid ip address
      ansible.builtin.assert:
        that:
          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
        fail_msg: "ip_address is not a valid ip address."
        success_msg: "ip_address is a valid ip address."

    - name: Change docker daemon to use certs
      ansible.builtin.lineinfile:
        path: /lib/systemd/system/docker.service
        line: >
          ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
          -H tcp://{{ ip_address }}:2376 --tlsverify --tlscacert={{ certs_path }}/ca.pem
          --tlscert={{ certs_path }}/server-cert.pem --tlskey={{ certs_path }}/server-key.pem
        regexp: '^ExecStart='
        state: present

    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true

    - name: Restart docker daemon
      ansible.builtin.systemd:
        name: docker
        state: restarted
        enabled: true

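Once the daemon listens on tcp://&lt;host&gt;:2376 with TLS, a client can connect using the CA plus the client certificate and key produced by the companion docker-certs.yaml playbook below. A minimal sketch, assuming ca.pem, cert.pem, and key.pem have been copied from /root/docker-certs on the host to the local machine (the host name is a placeholder):

    # Verify the remote daemon over mutual TLS
    docker --tlsverify \
      --tlscacert=ca.pem \
      --tlscert=cert.pem \
      --tlskey=key.pem \
      -H tcp://your-docker-host:2376 version
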
ansible/docker/docker-certs.yaml (new file, 158 lines):
---
- name: "Docker Certs"
  hosts: "{{ my_hosts | d([]) }}"
  become: true
  vars:
    certs_path: "/root/docker-certs"
    cert_validity_days: 3650
    cn_domain: "your-domain.tld"

  tasks:
    - name: Check if docker certs are existing
      ansible.builtin.stat:
        path: "{{ certs_path }}"
      register: certs_dir

    - name: Create docker certs directory (if needed)
      ansible.builtin.file:
        path: "{{ certs_path }}"
        state: directory
        mode: '0700'
      when: not certs_dir.stat.exists

    - name: Check if docker certs directory is empty
      ansible.builtin.command: ls -A "{{ certs_path }}"
      register: certs_list
      when: certs_dir.stat.exists
      changed_when: false
      ignore_errors: true

    - name: Fail if docker certs already exist
      ansible.builtin.fail:
        msg: "Docker certificates already exist in /root/docker-certs."
      when: certs_list.stdout | default('') != ''

    - name: Get machine's primary internal ip address from eth0 interface
      ansible.builtin.setup:
      register: ip_address

    - name: Set machine's primary internal ip address
      ansible.builtin.set_fact:
        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"

    - name: Check if ip_address is a valid ip address
      ansible.builtin.assert:
        that:
          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
        fail_msg: "ip_address is not a valid ip address."
        success_msg: "ip_address is a valid ip address."

    - name: Generate CA private key
      ansible.builtin.command:
        cmd: >
          openssl genrsa -out "{{ certs_path }}/ca-key.pem" 4096
      args:
        creates: "{{ certs_path }}/ca-key.pem"

    - name: Generate CA certificate
      ansible.builtin.command:
        cmd: >
          openssl req -sha256 -new -x509
          -subj "/CN={{ cn_domain }}"
          -days "{{ cert_validity_days }}"
          -key "{{ certs_path }}/ca-key.pem"
          -out "{{ certs_path }}/ca.pem"
      args:
        creates: "{{ certs_path }}/ca.pem"

    - name: Generate server private key
      ansible.builtin.command:
        cmd: >
          openssl genrsa -out "{{ certs_path }}/server-key.pem" 4096
        creates: "{{ certs_path }}/server-key.pem"

    - name: Generate server certificate signing request
      ansible.builtin.command:
        cmd: >
          openssl req -sha256 -new
          -subj "/CN={{ inventory_hostname }}"
          -key "{{ certs_path }}/server-key.pem"
          -out "{{ certs_path }}/server.csr"
        creates: "{{ certs_path }}/server.csr"

    - name: Generate server certificate extension file
      ansible.builtin.shell: |
        echo "subjectAltName = DNS:{{ inventory_hostname }},IP:{{ ip_address }},IP:127.0.0.1" >> "{{ certs_path }}/extfile.cnf"
        echo "extendedKeyUsage = serverAuth" >> "{{ certs_path }}/extfile.cnf"
      args:
        creates: "{{ certs_path }}/extfile.cnf"

    - name: Generate server certificate
      ansible.builtin.command:
        cmd: >
          openssl x509 -req -days "{{ cert_validity_days }}" -sha256
          -in "{{ certs_path }}/server.csr"
          -CA "{{ certs_path }}/ca.pem"
          -CAkey "{{ certs_path }}/ca-key.pem"
          -CAcreateserial -out "{{ certs_path }}/server-cert.pem"
          -extfile "{{ certs_path }}/extfile.cnf"
        creates: "{{ certs_path }}/server-cert.pem"

    - name: Generate client private key
      ansible.builtin.command:
        cmd: >
          openssl genrsa -out "{{ certs_path }}/key.pem" 4096
        creates: "{{ certs_path }}/key.pem"

    - name: Generate client certificate signing request
      ansible.builtin.command:
        cmd: >
          openssl req -sha256 -new
          -subj "/CN=client"
          -key "{{ certs_path }}/key.pem"
          -out "{{ certs_path }}/client.csr"
        creates: "{{ certs_path }}/client.csr"

    - name: Generate client certificate extension file
      ansible.builtin.shell: |
        echo "extendedKeyUsage = clientAuth" >> "{{ certs_path }}/client-extfile.cnf"
      args:
        creates: "{{ certs_path }}/client-extfile.cnf"

    - name: Generate client certificate
      ansible.builtin.command:
        cmd: >
          openssl x509 -req -days "{{ cert_validity_days }}"
          -sha256 -in "{{ certs_path }}/client.csr"
          -CA "{{ certs_path }}/ca.pem"
          -CAkey "{{ certs_path }}/ca-key.pem"
          -CAcreateserial -out "{{ certs_path }}/cert.pem"
          -extfile "{{ certs_path }}/client-extfile.cnf"
        creates: "{{ certs_path }}/cert.pem"

    - name: Remove client certificate signing request
      ansible.builtin.file:
        path: "{{ certs_path }}/server.csr"
        state: absent

    - name: Remove client certificate signing request
      ansible.builtin.file:
        path: "{{ certs_path }}/client.csr"
        state: absent

    - name: Remove server certificate extension file
      ansible.builtin.file:
        path: "{{ certs_path }}/extfile.cnf"
        state: absent

    - name: Remove client certificate extension file
      ansible.builtin.file:
        path: "{{ certs_path }}/client-extfile.cnf"
        state: absent

    - name: Set permissions for docker certs
      ansible.builtin.file:
        path: "{{ certs_path }}"
        mode: '0700'
        recurse: true
        follow: true

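A quick way to sanity-check the generated files on the host is to validate the server and client certificates against the fresh CA and inspect the server's subject alternative names. A minimal sketch, run on the Docker host with the paths used by the playbook (the -ext option requires a reasonably recent OpenSSL):

    cd /root/docker-certs
    # Both leaf certificates should chain to the newly created CA
    openssl verify -CAfile ca.pem server-cert.pem cert.pem
    # The SAN should list the hostname, its primary IP, and 127.0.0.1
    openssl x509 -in server-cert.pem -noout -ext subjectAltName
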
@@ -30,9 +30,6 @@
       ansible.builtin.apt:
         name:
           - docker-ce
-          - docker-ce-cli
-          - containerd.io
           - docker-buildx-plugin
-          - docker-scan-plugin
           - docker-compose-plugin
         update_cache: true

(deleted file, 12 lines):
---
- name: Install core packages
  hosts: "{{ my_hosts | d([]) }}"
  become: true

  tasks:
    - name: Install core packages
      ansible.builtin.apt:
        name:
          - prometheus-node-exporter
          - nfs-common
        update_cache: true

(deleted file, 16 lines):
---
- name: Install microk8s
  hosts: "{{ my_hosts | d([]) }}"
  become: true

  tasks:
    - name: Install microk8s
      community.general.snap:
        classic: true
        name: microk8s

    - name: Add user to group microk8s
      ansible.builtin.user:
        name: "{{ lookup('env', 'USER') }}"
        groups: microk8s
        append: true

(deleted file, 25 lines):
---
- name: Check disk space
  hosts: "{{ my_hosts | d([]) }}"

  tasks:
    - name: Check disk space available
      ansible.builtin.shell:
        cmd: |
          set -euo pipefail
          df -Ph / | awk 'NR==2 {print $5}'
        executable: /bin/bash
      changed_when: false
      check_mode: false
      register: disk_usage

    # - name: Send discord message when disk space is over 80%
    #   uri:
    #     url: "your-webhook"
    #     method: POST
    #     body_format: json
    #     body: '{"content": "Disk space on {{ inventory_hostname }} is above 80%!"}'
    #     headers:
    #       Content-Type: application/json
    #     status_code: 204
    #   when: disk_usage.stdout[:-1]|int > 80

ansible/ubuntu/maint-diskspace.yaml (new file, 25 lines):
---
- name: Check disk space
  hosts: "{{ my_hosts | d([]) }}"

  tasks:
    - name: Check disk space available
      ansible.builtin.shell:
        cmd: |
          set -euo pipefail
          df -Ph / | awk 'NR==2 {print $5}'
        executable: /bin/bash
      changed_when: false
      check_mode: false
      register: disk_usage

    # - name: Send discord message when disk space is over 80%
    #   uri:
    #     url: "your-webhook"
    #     method: POST
    #     body_format: json
    #     body: '{"content": "Disk space on {{ inventory_hostname }} is above 80%!"}'
    #     headers:
    #       Content-Type: application/json
    #     status_code: 204
    #   when: disk_usage.stdout[:-1]|int > 80

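The shell step only collects root filesystem usage; the commented-out notification task shows how that value is meant to be consumed. A minimal sketch of what the pipeline returns and how the percent sign is stripped, in plain shell outside Ansible:

    # Prints e.g. "42%" -- the second row, fifth column of df's POSIX output
    usage=$(df -Ph / | awk 'NR==2 {print $5}')
    # Drop the trailing "%" the same way the playbook's stdout[:-1] filter does
    echo "${usage%\%}"
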
ansible/ubuntu/upd-apt.yaml (new file, 14 lines):
---
- name: Update and upgrade apt packages
  hosts: all

  tasks:
    - name: Update packages with apt
      when: ansible_pkg_mgr == 'apt'
      ansible.builtin.apt:
        update_cache: true

    - name: Upgrade packages with apt
      when: ansible_pkg_mgr == 'apt'
      ansible.builtin.apt:
        upgrade: dist

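Because the play targets hosts: all and performs a dist-upgrade, a dry run is a sensible first step. A minimal sketch, assuming an inventory file at inventory.ini (the path is a placeholder); the play does not set become, so privilege escalation is passed on the command line:

    # Show what would change without touching the systems
    ansible-playbook -i inventory.ini ansible/ubuntu/upd-apt.yaml --check --diff
    # Apply for real, elevating with sudo
    ansible-playbook -i inventory.ini ansible/ubuntu/upd-apt.yaml -b --ask-become-pass
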
(deleted file, 1 line):
---

(deleted file, 27 lines):
---
- name: Update and upgrade apt packages
  hosts: all

  tasks:
    - name: Update packages with apt
      when: ansible_pkg_mgr == 'apt'
      ansible.builtin.apt:
        update_cache: true

    - name: Update packages with yum
      when: ansible_pkg_mgr == 'yum'
      ansible.builtin.yum:
        name: '*'
        state: latest # noqa: package-latest

    - name: Upgrade packages with apt
      when: ansible_pkg_mgr == 'apt'
      ansible.builtin.apt:
        upgrade: dist

    - name: Upgrade packages with yum
      when: ansible_pkg_mgr == 'yum'
      ansible.builtin.yum:
        name: '*'
        state: latest # noqa: package-latest
        exclude: kernel*

@@ -4,7 +4,7 @@ volumes:
     driver: local
 services:
   mysql:
-    image: docker.io/library/mysql:8.3
+    image: docker.io/library/mysql:8.4
     hostname: mysql
     volumes:
       - semaphore-mysql:/var/lib/mysql

@@ -16,7 +16,7 @@ services:
     restart: unless-stopped
   semaphore:
     container_name: ansiblesemaphore
-    image: docker.io/semaphoreui/semaphore:v2.10.42
+    image: docker.io/semaphoreui/semaphore:v2.11.2
     user: "${UID}:${GID}"
     ports:
       - 3000:3000

@@ -1,7 +1,7 @@
 ---
 services:
   postgres:
-    image: docker.io/library/postgres:16.5
+    image: docker.io/library/postgres:16.6
     container_name: authentik-db
     environment:
       - POSTGRES_USER=${POSTGRES_USER:-authentik}

@@ -18,7 +18,7 @@ services:
       - postgres_data:/var/lib/postgresql/data
     restart: unless-stopped
   redis:
-    image: docker.io/library/redis:7.4.1
+    image: docker.io/library/redis:7.4.2
     container_name: authentik-redis
     command: --save 60 1 --loglevel warning
     healthcheck:

@@ -31,7 +31,7 @@ services:
       - redis_data:/data
     restart: unless-stopped
   server:
-    image: ghcr.io/goauthentik/server:2024.10.4
+    image: ghcr.io/goauthentik/server:2024.12.2
     container_name: authentik-server
     command: server
     environment:

@@ -65,7 +65,7 @@ services:
       - redis
     restart: unless-stopped
   worker:
-    image: ghcr.io/goauthentik/server:2024.10.4
+    image: ghcr.io/goauthentik/server:2024.12.2
     container_name: authentik-worker
     command: worker
     environment:

docker-compose/clamav/compose.yaml (new file, 20 lines):
---
services:
  clamav:
    image: docker.io/clamav/clamav:1.4.1
    container_name: clamav
    volumes:
      - ./config/clamd.conf:/etc/clamav/clamd.conf:ro
      - ./config/freshclam.conf:/etc/clamav/freshclam.conf:ro
      - clamav-data:/var/lib/clamav
      # --> (Optional) Add a directory to scan
      # - ./scandir:/scandir:rw
      # <--
    # -- Change logging driver here... (required for Wazuh integration)
    logging:
      driver: syslog
      options:
        tag: "clamd"
    restart: unless-stopped
volumes:
  clamav-data:

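With the optional ./scandir mount enabled, an on-demand scan can be triggered against the running daemon. A minimal sketch (the scan path is the placeholder from the compose file, and assumes the clamdscan client shipped in the ClamAV image):

    docker compose up -d
    # Ask the clamd daemon inside the container to scan the mounted directory
    docker exec clamav clamdscan --multiscan --fdpass /scandir
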
docker-compose/clamav/config/clamd.conf (new file, 81 lines):
# -- Change Log settings here...
LogSyslog yes
LogTime yes
# --> (Optional) Enable logging to file, can work together with LogSyslog
# LogFile /var/log/clamav/clamd.log
# LogRotate no
# <--

# -- Change process settings here...
PidFile /tmp/clamd.pid
LocalSocket /run/clamav/clamd.sock

# -- Change TCP port settings here...
TCPSocket 3310

# -- Change user settings here...
User clamav

# -- Change detection settings here...
# DetectPUA no
# HeuristicAlerts yes
# HeuristicScanPrecedence no

# -- Change Heuristic Alerts here...
# AlertBrokenExecutables no
# AlertBrokenMedia no
# AlertEncrypted no
# AlertEncryptedArchive no
# AlertEncryptedDoc no
# AlertOLE2Macros no
# AlertPhishingSSLMismatch no
# AlertPhishingCloak no
# AlertPartitionIntersection no

# -- Change Executable files settings here...
# ScanPE yes
# DisableCertCheck no
# ScanELF yes

# -- Change Documents settings here...
# ScanOLE2 yes
# ScanPDF yes
# ScanSWF yes
# ScanXMLDOCS yes
# ScanHWP3 yes
# ScanOneNote yes

# -- Change other file types settings here...
# ScanImage yes
# ScanImageFuzzyHash yes

# -- Change Mail files settings here...
# ScanMail yes
# ScanPartialMessages no
# PhishingSignatures yes
# PhishingScanURLs yes

# -- Change Data Loss Prevention (DLP) settings here...
# StructuredDataDetection no
# StructuredMinCreditCardCount 3
# StructuredCCOnly no
# StructuredMinSSNCount 3
# StructuredSSNFormatNormal yes
# StructuredSSNFormatStripped no

# -- Change HTML settings here...
# ScanHTML yes

# -- Change Archives settings here...
# ScanArchive yes

# -- Change On-access Scan settings here...
# OnAccessMaxFileSize 5M
# OnAccessMaxThreads 5
# --> (Optional) Set include paths, exclude paths, mount paths, etc...
#OnAccessIncludePath /home
#OnAccessExcludePath /home/user
#OnAccessExtraScanning no
#OnAccessMountPath /
#OnAccessMountPath /home/user
# <--

docker-compose/clamav/config/freshclam.conf (new file, 21 lines):
# -- Change Log settings here...
LogSyslog no
LogTime yes
# --> (Optional) Enable logging to file, can work together with LogSyslog
# UpdateLogFile /var/log/clamav/freshclam.log
# LogRotate no
# <--

# -- Change process settings here...
PidFile /tmp/freshclam.pid

# -- Change database settings here...
DatabaseOwner clamav
DatabaseMirror database.clamav.net

# -- Change update and notification settings here...
ScriptedUpdates yes
NotifyClamd /etc/clamav/clamd.conf

# -- Change custom sources for databases here...
#DatabaseCustomURL http://myserver.example.com/mysigs.ndb

@@ -2,7 +2,7 @@
 services:
   dockge:
     container_name: dockge
-    image: louislam/dockge:1.4.2
+    image: docker.io/louislam/dockge:1.4.2
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock
       - dockge-data:/app/data

@@ -1,7 +1,7 @@
 ---
 services:
   duplicati:
-    image: lscr.io/linuxserver/duplicati:2.0.8
+    image: lscr.io/linuxserver/duplicati:2.1.0
     container_name: duplicati
     environment:
       - PUID=1000

@@ -2,7 +2,7 @@
 services:
   refactr-runner:
     container_name: factory-runnerpool-prod-1
-    image: docker.io/refactr/runner-pool:v0.152.4
+    image: docker.io/refactr/runner-pool:v0.152.6
     user: root
     volumes:
       - /run/docker.sock:/run/docker.sock

docker-compose/gitea/.env.example (new file, 8 lines):
# Environment Variable Example File
# ---
# Add internal database credentials here...
# POSTGRES_HOST = "your-database-host"
# POSTGRES_PORT = "your-database-port"
POSTGRES_DB = "your-database-name"
POSTGRES_USER = "your-database-user"
POSTGRES_PASSWORD = "your-database-password"

docker-compose/gitea/compose.yaml (new file, 90 lines):
---
services:
  server:
    image: gitea/gitea:1.23.1
    container_name: gitea-server
    environment:
      - USER_UID=1000
      - USER_GID=1000
      # -- Change your database settings here...
      # --> PostgreSQL
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}
      - GITEA__database__NAME=${POSTGRES_DB:?POSTGRES_DB not set}
      - GITEA__database__USER=${POSTGRES_USER:?POSTGRES_USER not set}
      - GITEA__database__PASSWD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
      # <--
      # --> OR MySQL
      # - GITEA__database__DB_TYPE=mysql
      # - GITEA__database__HOST=db:3306
      # - GITEA__database__NAME=${MYSQL_DATABASE:?MYSQL_DATABASE not set}
      # - GITEA__database__USER=${MYSQL_USER:?MYSQL_USER not set}
      # - GITEA__database__PASSWD=${MYSQL_PASSWORD:?MYSQL_PASSWORD not set}
      # <--
      # -- (Optional) Change your server settings here...
      - GITEA__server__SSH_PORT=2221 # <-- (Optional) Replace with your desired SSH port
      - GITEA__server__ROOT_URL=http://your-fqdn # <-- Replace with your FQDN
    # --> (Optional) When using traefik...
    # networks:
    #   - frontend
    # <--
    # --> (Optional) When using an internal database...
    #   - backend
    # <--
    volumes:
      - gitea-data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      # --> (Optional) Remove when using traefik...
      - "3000:3000"
      # <--
      - "2221:22" # <-- (Optional) Replace with your desired SSH port
    # --> (Optional) When using internal database...
    # depends_on:
    #   - db
    # <--
    # --> (Optional) When using traefik...
    # labels:
    #   - traefik.enable=true
    #   - traefik.http.services.gitea.loadbalancer.server.port=3000
    #   - traefik.http.services.gitea.loadbalancer.server.scheme=http
    #   - traefik.http.routers.gitea-https.entrypoints=websecure
    #   - traefik.http.routers.gitea-https.rule=Host(`your-fqdn`) # <-- Replace with your FQDN
    #   - traefik.http.routers.gitea-https.tls=true
    #   - traefik.http.routers.gitea-https.tls.certresolver=your-certresolver # <-- Replace with your certresolver
    # <--
    restart: unless-stopped

  # --> When using internal database
  # db:
  #   image: postgres:14
  #   container_name: gitea-db
  #   environment:
  #     - POSTGRES_USER=${POSTGRES_USER:?POSTGRES_USER not set}
  #     - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD not set}
  #     - POSTGRES_DB=${POSTGRES_DB:?POSTGRES_DB not set}
  #   networks:
  #     - backend
  #   volumes:
  #     - gitea-db:/var/lib/postgresql/data
  #   restart: unless-stopped
  # <--

volumes:
  gitea-data:
    driver: local
  # --> When using internal database
  # gitea-db:
  #   driver: local
  # <--

# --> (Optional) When using traefik...
# networks:
#   frontend:
#     external: true
# <--
# --> (Optional) When using an internal database...
#   backend:
#     external: true
# <--

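The ${VAR:?message} references mean the stack refuses to start unless the variables from .env.example are provided. A minimal sketch of verifying the substitutions before deploying, assuming the example file has been copied to .env and filled in:

    cp .env.example .env          # then edit the placeholder values
    # Renders the final compose file; fails fast if a required variable is missing
    docker compose --env-file .env config
    docker compose up -d
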
docker-compose/gitlab/compose.yaml (new file, 52 lines):
---
services:
  gitlab:
    image: gitlab/gitlab-ce:17.7.1-ce.0
    container_name: gitlab
    shm_size: '256m'
    environment: {}
    # --> (Optional) When using traefik...
    # networks:
    #   - frontend
    # <--
    volumes:
      - ./config:/etc/gitlab
      - ./logs:/var/log/gitlab
      - gitlab-data:/var/opt/gitlab
    ports:
      # --> (Optional) Remove when using traefik...
      - "80:80"
      - "443:443"
      # <--
      - '2424:22'
    # --> (Optional) When using traefik...
    # labels:
    #   - traefik.enable=true
    #   - traefik.http.services.gitlab.loadbalancer.server.port=80
    #   - traefik.http.services.gitlab.loadbalancer.server.scheme=http
    #   - traefik.http.routers.gitlab.service=gitlab
    #   - traefik.http.routers.gitlab.rule=Host(`your-gitlab-fqdn`)
    #   - traefik.http.routers.gitlab.entrypoints=websecure
    #   - traefik.http.routers.gitlab.tls=true
    #   - traefik.http.routers.gitlab.tls.certresolver=cloudflare
    # <--
    # --> (Optional) Enable Container Registry settings here...
    #   - traefik.http.services.registry.loadbalancer.server.port=5678
    #   - traefik.http.services.registry.loadbalancer.server.scheme=http
    #   - traefik.http.routers.registry.service=registry
    #   - traefik.http.routers.registry.rule=Host(`your-registry-fqdn`)
    #   - traefik.http.routers.registry.entrypoints=websecure
    #   - traefik.http.routers.registry.tls=true
    #   - traefik.http.routers.registry.tls.certresolver=cloudflare
    # <--
    restart: unless-stopped

volumes:
  gitlab-data:
    driver: local

# --> (Optional) When using traefik...
# networks:
#   frontend:
#     external: true
# <--

docker-compose/gitlab/config/gitlab.rb (new file, 58 lines):
# -- Change GitLab settings here...
external_url 'https://your-gitlab-fqdn' # <-- Replace with your GitLab FQDN

# -- (Optional) Change GitLab Shell settings here...
gitlab_rails['gitlab_shell_ssh_port'] = 2424

# -- Change internal web service settings here...
letsencrypt['enable'] = false
nginx['listen_port'] = 80
nginx['listen_https'] = false

# --> (Optional) Enable Container Registry settings here...
# registry_external_url 'https://your-registry-fqdn' # <-- Replace with your registry FQDN
# gitlab_rails['registry_enabled'] = true
# registry_nginx['listen_https'] = false
# registry_nginx['listen_port'] = 5678 # <-- Replace with your registry port
# <--

# --> (Optional) Add Authentik settings here...
# gitlab_rails['omniauth_auto_link_user'] = ['openid_connect']
# gitlab_rails['omniauth_providers'] = [
#   {
#     name: "openid_connect", # !-- Do not change this parameter
#     label: "Authentik", # <-- (Optional) Change name for login button, defaults to "Openid Connect"
#     icon: "https://avatars.githubusercontent.com/u/82976448?s=200&v=4",
#     args: {
#       name: "openid_connect",
#       scope: ["openid","profile","email"],
#       response_type: "code",
#       issuer: "https://your-authentik-fqdn/application/o/your-gitlab-slug/", # <-- Replace with your Authentik FQDN and GitLab slug
#       discovery: true,
#       client_auth_method: "query",
#       uid_field: "email",
#       send_scope_to_token_endpoint: "false",
#       pkce: true,
#       client_options: {
#         identifier: "your-authentik-provider-client-id", # <-- Replace with your Authentik provider client ID
#         secret: "your-authentik-provider-client-secret", # <-- Replace with your Authentik provider client secret
#         redirect_uri: "https://your-authentik-fqdn/users/auth/openid_connect/callback" # <-- Replace with your Authentik FQDN
#       }
#     }
#   }
# ]
# <--

# --> (Optional) Change SMTP settings here...
# gitlab_rails['smtp_enable'] = true
# gitlab_rails['smtp_address'] = "your-smtp-server-addr" # <-- Replace with your SMTP server address
# gitlab_rails['smtp_port'] = 465
# gitlab_rails['smtp_user_name'] = "your-smtp-username" # <-- Replace with your SMTP username
# gitlab_rails['smtp_password'] = "your-smtp-password" # <-- Replace with your SMTP password
# gitlab_rails['smtp_domain'] = "your-smtp-domain" # <-- Replace with your SMTP domain
# gitlab_rails['smtp_authentication'] = "login"
# gitlab_rails['smtp_ssl'] = true
# gitlab_rails['smtp_force_ssl'] = true
# gitlab_rails['gitlab_email_from'] = 'your-email-from-addr' # <-- Replace with your email from address
# gitlab_rails['gitlab_email_reply_to'] = 'your-email-replyto-addr' # <-- Replace with your email reply-to address
# <--

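Since ./config is mounted to /etc/gitlab, edits to gitlab.rb can be applied to the running container without recreating it. A minimal sketch, using the container name from the compose file above:

    # Re-run the Omnibus configuration after changing config/gitlab.rb
    docker exec -it gitlab gitlab-ctl reconfigure
    # Tail the logs if the reconfigure or startup fails
    docker logs -f gitlab
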
@@ -4,7 +4,7 @@ volumes:
     driver: local
 services:
   grafana:
-    image: docker.io/grafana/grafana-oss:11.3.1
+    image: docker.io/grafana/grafana-oss:11.4.0
     container_name: grafana
     ports:
       - "3000:3000"

@@ -2,7 +2,7 @@
 services:
   homeassistant:
     container_name: homeassistant
-    image: ghcr.io/home-assistant/home-assistant:2024.11.2
+    image: ghcr.io/home-assistant/home-assistant:2025.1.2
     volumes:
       - ./config:/config
       - /etc/localtime:/etc/localtime:ro

@@ -1,7 +1,7 @@
 ---
 services:
   homepage:
-    image: ghcr.io/gethomepage/homepage:v0.9.12
+    image: ghcr.io/gethomepage/homepage:v0.10.9
     container_name: homepage
     environment:
       - LOG_LEVEL=info

@@ -8,7 +8,7 @@ logo: "logo.png"
 # icon: "fas fa-skull-crossbones" # Optional icon

 header: true
-footer: '<p>Created with <span class="has-text-danger">❤</span> with <a href="https://bulma.io/">bulma</a>, <a href="https://vuejs.org/">vuejs</a> & <a href="https://fontawesome.com/">font awesome</a> // Fork me on <a href="https://github.com/bastienwirtz/homer"><i class="fab fa-github-alt"></i></a></p>' # set false if you want to hide it.
+footer: false

 # Optional theme customization
 theme: default

@@ -1,7 +1,7 @@
 ---
 services:
   homer:
-    image: docker.io/b4bz/homer:v24.11.4
+    image: docker.io/b4bz/homer:v24.12.1
     container_name: homer
     ports:
       - "8080:8080"

@@ -8,7 +8,7 @@ volumes:
 services:
   influxdb:
     container_name: influxdb
-    image: docker.io/library/influxdb:2.7.10-alpine
+    image: docker.io/library/influxdb:2.7.11-alpine
     # (Optional) remove this section when using traefik
     ports:
       - '8086:8086'

@@ -7,8 +7,7 @@ volumes:
   mariadb-data:
 services:
   mariadb:
-    # (Recommended) replace "latest" with specific version
-    image: docker.io/library/mariadb:11.5.2
+    image: docker.io/library/mariadb:11.6.2
     # (Optional) remove this section when you don't want to expose
     ports:
       - 3306:3306

@@ -4,7 +4,7 @@ volumes:
   nextcloud-db:
 services:
   nextcloud-app:
-    image: docker.io/library/nextcloud:30.0.2-apache
+    image: docker.io/library/nextcloud:30.0.4-apache
     container_name: nextcloud-app
     ports:
       - 80:80

@@ -5,7 +5,7 @@ volumes:
   nginxproxymanager-db:
 services:
   nginxproxymanager:
-    image: docker.io/jc21/nginx-proxy-manager:2.12.1
+    image: docker.io/jc21/nginx-proxy-manager:2.12.2
     ports:
       - 80:80
       - 81:81

@@ -1,7 +1,7 @@
 ---
 services:
   nvidia_smi_exporter:
-    image: docker.io/utkuozdemir/nvidia_gpu_exporter:1.2.1
+    image: docker.io/utkuozdemir/nvidia_gpu_exporter:1.3.0
     container_name: nvidia_smi_exporter
     runtime: nvidia
     environment:

@@ -17,7 +17,7 @@ services:
     restart: unless-stopped
   passbolt:
     container_name: passbolt-app
-    image: docker.io/passbolt/passbolt:4.9.1-1-ce
+    image: docker.io/passbolt/passbolt:4.10.1-1-ce
     depends_on:
       - passbolt-db
     environment:

@@ -1,7 +1,7 @@
 ---
 services:
   postgres:
-    image: docker.io/library/postgres:17.1
+    image: docker.io/library/postgres:17.2
     container_name: postgres
     environment:
       - POSTGRES_INITDB_ARGS=${POSTGRES_INITDB_ARGS---data-checksums}

@@ -4,7 +4,7 @@ volumes:
     driver: local
 services:
   prometheus:
-    image: docker.io/prom/prometheus:v2.55.1
+    image: docker.io/prom/prometheus:v3.0.0
     container_name: prometheus
     ports:
       - 9090:9090

@@ -1,3 +1,4 @@
+---
 global:
   scrape_interval: 15s # By default, scrape targets every 15 seconds.


@@ -15,7 +15,7 @@ services:
       - /opt/webserver_swag/config/mariadb:/config
     restart: unless-stopped
   swag:
-    image: docker.io/linuxserver/swag:3.0.1
+    image: docker.io/linuxserver/swag:3.1.0
     container_name: swag
     cap_add:
       - NET_ADMIN

@@ -1,3 +1,4 @@
+---
 version: v2
 teleport:
   nodename: your-server-name

|
|||||||
output: text
|
output: text
|
||||||
|
|
||||||
auth_service:
|
auth_service:
|
||||||
enabled: "yes"
|
enabled: true
|
||||||
listen_addr: 0.0.0.0:3025
|
listen_addr: 0.0.0.0:3025
|
||||||
proxy_listener_mode: multiplex
|
proxy_listener_mode: multiplex
|
||||||
cluster_name: your-server-url
|
cluster_name: your-server-url
|
||||||
@@ -26,10 +27,10 @@ auth_service:
   #   api_token_path: /etc/teleport/openai_key

 ssh_service:
-  enabled: "no"
+  enabled: false

 proxy_service:
-  enabled: "yes"
+  enabled: true
   web_listen_addr: 0.0.0.0:3080
   # -- (Optional) when using reverse proxy
   # public_addr: ['your-server-url:443']

@@ -37,7 +38,7 @@ proxy_service:
   acme: {}
   # --(Optional) ACME
   # acme:
-  #   enabled: "yes"
+  #   enabled: true
   #   email: your-email-address
   # -- (Optional) Teleport Assist
   # assist:

|
|||||||
# api_token_path: /etc/teleport/openai_key
|
# api_token_path: /etc/teleport/openai_key
|
||||||
|
|
||||||
app_service:
|
app_service:
|
||||||
enabled: no
|
enabled: false
|
||||||
# -- (Optional) App Service
|
# -- (Optional) App Service
|
||||||
# enabled: yes
|
# enabled: true
|
||||||
# apps:
|
# apps:
|
||||||
# - name: "yourapp"
|
# - name: "yourapp"
|
||||||
# uri: "http://your-app-url"
|
# uri: "http://your-app-url"
|
||||||
|
@@ -1,7 +1,7 @@
 ---
 services:
   traefik:
-    image: docker.io/library/traefik:v3.2.1
+    image: docker.io/library/traefik:v3.3.1
     container_name: traefik
     ports:
       - 80:80

@@ -1,3 +1,4 @@
+---
 http:
   # -- Change Router Configuration here...
   routers:

(new file, 20 lines):
# --> (Optional) Securely expose apps using the Traefik proxy outpost...
# http:
#   middlewares:
#     authentik-middleware:
#       forwardAuth:
#         address: http://your-authentik-outpost-fqdn:9000/outpost.goauthentik.io/auth/traefik
#         trustForwardHeader: true
#         authResponseHeaders:
#           - X-authentik-username
#           - X-authentik-groups
#           - X-authentik-email
#           - X-authentik-name
#           - X-authentik-uid
#           - X-authentik-jwt
#           - X-authentik-meta-jwks
#           - X-authentik-meta-outpost
#           - X-authentik-meta-provider
#           - X-authentik-meta-app
#           - X-authentik-meta-version
# <--

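Once this file-provider middleware is uncommented, it can be attached to any router by name; middlewares defined through Traefik's file provider are referenced with the @file suffix. A minimal sketch using container labels on the Docker provider (the whoami service, hostname, and network are placeholders, not part of these boilerplates):

    docker run -d --name whoami --network frontend \
      -l "traefik.enable=true" \
      -l "traefik.http.routers.whoami.rule=Host(\`whoami.your-fqdn\`)" \
      -l "traefik.http.routers.whoami.middlewares=authentik-middleware@file" \
      docker.io/traefik/whoami
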
(new file, 22 lines):
# --> (Optional) When using Passbolt with Traefik...
# http:
#   middlewares:
#     passbolt-middleware:
#       headers:
#         FrameDeny: true
#         AccessControlAllowMethods: 'GET,OPTIONS,PUT'
#         AccessControlAllowOriginList:
#           - origin-list-or-null
#         AccessControlMaxAge: 100
#         AddVaryHeader: true
#         BrowserXssFilter: true
#         ContentTypeNosniff: true
#         ForceSTSHeader: true
#         STSIncludeSubdomains: true
#         STSPreload: true
#         ContentSecurityPolicy: default-src 'self' 'unsafe-inline'
#         CustomFrameOptionsValue: SAMEORIGIN
#         ReferrerPolicy: same-origin
#         PermissionsPolicy: vibrate 'self'
#         STSSeconds: 315360000
# <--

docker-compose/traefik/config/conf.d/tls.yaml (new file, 18 lines):
---
# -- Change TLS Configuration here...
tls:
  options:
    default:
      minVersion: VersionTLS12
      sniStrict: true
      curvePreferences:
        - CurveP256
        - CurveP384
        - CurveP521
      cipherSuites:
        - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
        - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305

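The effect of minVersion and the cipher list can be checked from any machine that can reach the proxy. A minimal sketch (your-fqdn is a placeholder for a host routed through Traefik, and the -tls1_1 test assumes an OpenSSL build that still offers TLS 1.1):

    # Should be rejected: TLS 1.1 is below the configured minimum
    openssl s_client -connect your-fqdn:443 -tls1_1 </dev/null
    # Should succeed and report the negotiated TLS 1.2/1.3 protocol and cipher
    openssl s_client -connect your-fqdn:443 </dev/null 2>/dev/null | grep -E 'Protocol|Cipher'
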
@@ -1,3 +1,4 @@
+---
 global:
   checkNewVersion: false
   sendAnonymousUsage: false

@@ -53,6 +54,9 @@ certificatesResolvers:
 providers:
   docker:
     exposedByDefault: false # <-- (Optional) Change this to true if you want to expose all services
+    # Specify discovery network - This ensures correct name resolving and possible issues with containers, that are in multiple networks.
+    # E.g. Database container in a separate network and a container in the frontend and database network.
+    network: frontend
   file:
     directory: /etc/traefik
     watch: true

|
@ -7,7 +7,7 @@
|
|||||||
services:
|
services:
|
||||||
twingate_connector:
|
twingate_connector:
|
||||||
container_name: twingate_connector
|
container_name: twingate_connector
|
||||||
image: docker.io/twingate/connector:1.72.0
|
image: docker.io/twingate/connector:1.73.0
|
||||||
environment:
|
environment:
|
||||||
- TWINGATE_NETWORK=your-twingate-network
|
- TWINGATE_NETWORK=your-twingate-network
|
||||||
- TWINGATE_ACCESS_TOKEN=${TWINGATE_ACCESS_TOKEN}
|
- TWINGATE_ACCESS_TOKEN=${TWINGATE_ACCESS_TOKEN}
|
||||||
|
@ -4,7 +4,7 @@ volumes:
|
|||||||
driver: local
|
driver: local
|
||||||
services:
|
services:
|
||||||
uptimekuma:
|
uptimekuma:
|
||||||
image: docker.io/louislam/uptime-kuma:1.23.15
|
image: docker.io/louislam/uptime-kuma:1.23.16
|
||||||
container_name: uptimekuma
|
container_name: uptimekuma
|
||||||
ports:
|
ports:
|
||||||
- 3001:3001
|
- 3001:3001
|
||||||
|
docker-compose/wazuh/.env.example (new file, 6 lines):
INDEXER_USERNAME = "admin"
INDEXER_PASSWORD = "your-admin-password"
DASHBOARD_USERNAME = "kibanaserver"
DASHBOARD_PASSWORD = "your-kibanaserver-password"
API_USERNAME = "wazuh-wui"
API_PASSWORD = "your-wazuh-wui-password"

docker-compose/wazuh/compose.yaml (new file, 174 lines):
---
services:
  wazuh.manager:
    image: docker.io/wazuh/wazuh-manager:4.10.0
    container_name: wazuh-prod-1-manager
    hostname: wazuh.manager
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 655360
        hard: 655360
    ports:
      - "1514:1514"
      - "1515:1515"
      - "514:514/udp"
      - "55000:55000"
    environment:
      - INDEXER_URL=https://wazuh.indexer:9200
      - INDEXER_USERNAME=${INDEXER_USERNAME:?error}
      - INDEXER_PASSWORD=${INDEXER_PASSWORD:?error}
      - FILEBEAT_SSL_VERIFICATION_MODE=full
      - SSL_CERTIFICATE_AUTHORITIES=/etc/ssl/root-ca.pem
      - SSL_CERTIFICATE=/etc/ssl/filebeat.pem
      - SSL_KEY=/etc/ssl/filebeat.key
      - API_USERNAME=${API_USERNAME:?error}
      - API_PASSWORD=${API_PASSWORD:?error}
    volumes:
      - wazuh_api_configuration:/var/ossec/api/configuration
      - wazuh_etc:/var/ossec/etc
      - wazuh_logs:/var/ossec/logs
      - wazuh_queue:/var/ossec/queue
      - wazuh_var_multigroups:/var/ossec/var/multigroups
      - wazuh_integrations:/var/ossec/integrations
      - wazuh_active_response:/var/ossec/active-response/bin
      - wazuh_agentless:/var/ossec/agentless
      - wazuh_wodles:/var/ossec/wodles
      - filebeat_etc:/etc/filebeat
      - filebeat_var:/var/lib/filebeat
      - ./config/wazuh_indexer_ssl_certs/root-ca-manager.pem:/etc/ssl/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager.pem:/etc/ssl/filebeat.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.manager-key.pem:/etc/ssl/filebeat.key
      - ./config/wazuh_cluster/wazuh_manager.conf:/wazuh-config-mount/etc/ossec.conf
      # --> (Optional) For custom rules
      # - ./config/rules/local_rules.xml:/var/ossec/etc/rules/local_rules.xml:ro
      # <--
    # --> (Optional) When using traefik
    # networks:
    #   - frontend
    # <--
    # --> (Optional) When using a separate backend network
    #   - backend
    # <--
    restart: unless-stopped

  wazuh.indexer:
    image: docker.io/wazuh/wazuh-indexer:4.10.0
    container_name: wazuh-prod-1-indexer
    hostname: wazuh.indexer
    ports:
      - "9200:9200"
    environment:
      - "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - wazuh-indexer-data:/var/lib/wazuh-indexer
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-indexer/certs/root-ca.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer-key.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.key
      - ./config/wazuh_indexer_ssl_certs/wazuh.indexer.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.pem
      - ./config/wazuh_indexer_ssl_certs/admin.pem:/usr/share/wazuh-indexer/certs/admin.pem
      - ./config/wazuh_indexer_ssl_certs/admin-key.pem:/usr/share/wazuh-indexer/certs/admin-key.pem
      - ./config/wazuh_indexer/wazuh.indexer.yml:/usr/share/wazuh-indexer/opensearch.yml
      - ./config/wazuh_indexer/internal_users.yml:/usr/share/wazuh-indexer/opensearch-security/internal_users.yml
    # --> (Optional) When using traefik
    # networks:
    #   - frontend
    # <--
    # --> (Optional) When using a separate backend network
    #   - backend
    # <--
    restart: unless-stopped

  wazuh.dashboard:
    image: docker.io/wazuh/wazuh-dashboard:4.10.0
    container_name: wazuh-prod-1-dashboard
    hostname: wazuh.dashboard
    # --> (Optional) Remove the port mapping when using traefik
    ports:
      - 4443:5601
    # <--
    environment:
      - INDEXER_USERNAME=${INDEXER_USERNAME:?error}
      - INDEXER_PASSWORD=${INDEXER_PASSWORD:?error}
      - WAZUH_API_URL=https://wazuh.manager
      - DASHBOARD_USERNAME=${DASHBOARD_USERNAME:?error}
      - DASHBOARD_PASSWORD=${DASHBOARD_PASSWORD:?error}
      - API_USERNAME=${API_USERNAME:?error}
      - API_PASSWORD=${API_PASSWORD:?error}
    volumes:
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem
      - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard-key.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem
      - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-dashboard/certs/root-ca.pem
      - ./config/wazuh_dashboard/opensearch_dashboards.yml:/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
      - ./config/wazuh_dashboard/wazuh.yml:/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml
      - wazuh-dashboard-config:/usr/share/wazuh-dashboard/data/wazuh/config
      - wazuh-dashboard-custom:/usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom
    # --> (Optional) When using traefik
    # labels:
    #   - traefik.enable=true
    #   - traefik.http.routers.wazuh-prod-1-https.entrypoints=websecure
    #   - traefik.http.routers.wazuh-prod-1-https.rule=Host(`wazuh-prod-1.srv-prod-1.home.clcreative.de`)
    #   - traefik.http.routers.wazuh-prod-1-https.tls=true
    #   - traefik.http.routers.wazuh-prod-1-https.tls.certresolver=cloudflare
    #   - traefik.http.services.wazuh-prod-1-service.loadbalancer.server.port=5601
    #   - traefik.http.services.wazuh-prod-1-service.loadbalancer.server.scheme=https
    # networks:
    #   - frontend
    # <--
    # --> (Optional) When using a separate backend network
    #   - backend
    # <--
    depends_on:
      - wazuh.indexer
    restart: unless-stopped

  # --> (Optional) When you need to use an SMTP relay for email notifications, and authentication is required
  # postfix:
  #   image: docker.io/mwader/postfix-relay:1.1.39
  #   environment:
  #     - POSTFIX_myhostname=postfix
  #   volumes:
  #     - ./config/postfix-relay/main.cf:/etc/postfix/main.cf:ro
  #     - ./config/postfix-relay/sasl_passwd:/etc/postfix/sasl_passwd:rw # <-- (Optional) Remove when using inline credentials
  #     - postfix_data:/etc/postfix
  #   networks:
  #     - backend
  #   restart: unless-stopped
  # <--

volumes:
  wazuh_api_configuration:
  wazuh_etc:
  wazuh_logs:
  wazuh_queue:
  wazuh_var_multigroups:
  wazuh_integrations:
  wazuh_active_response:
  wazuh_agentless:
  wazuh_wodles:
  filebeat_etc:
  filebeat_var:
  wazuh-indexer-data:
  wazuh-dashboard-config:
  wazuh-dashboard-custom:
  # --> (Optional) When you need to use an SMTP relay for email notifications, and authentication is required
  # postfix_data:
  # <--

# --> (Optional) When using traefik
# networks:
#   frontend:
#     external: true
# <--
# --> (Optional) When using a separate backend network
#   backend:
#     external: true
# <--

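After the stack is up, the indexer's REST interface is the quickest health signal; it answers on the published port with the credentials from .env.example. A minimal sketch (the certificates are self-signed, hence -k; the password is the placeholder from the env file):

    docker compose up -d
    # Cluster health of the Wazuh indexer (OpenSearch) node
    curl -k -u admin:your-admin-password "https://localhost:9200/_cluster/health?pretty"
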
docker-compose/wazuh/config/postfix-relay/main.cf (new file, 15 lines):
relayhost = [your-smtp-server-addr]:587 ; Replace [your-smtp-server-addr] with your SMTP server address
smtp_sasl_auth_enable = yes
smtp_sasl_security_options = noanonymous
smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt
smtp_use_tls = yes
smtpd_relay_restrictions = permit_mynetworks
mydestination = localhost
myhostname = postfix
mynetworks = 127.0.0.0/8, 172.0.0.0/8, 192.168.0.0/16, 10.0.0.0/8, [::1]/128
smtp_tls_security_level = may
smtpd_tls_security_level = none
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd # <-- (Optional) Remove when using inline credentials
# --> (Optional) When using inline credentials, uncomment the following line and replace the placeholders with your SMTP server address and credentials
# smtp_sasl_password_maps = inline:{ [your-smtp-server-addr]:587=username:password } # <-- Replace [your-smtp-server-addr] with your SMTP server address, and username:password with your SMTP server credentials
# <--

1
docker-compose/wazuh/config/postfix-relay/sasl_passwd
Normal file
@ -0,0 +1 @@
[your-smtp-server-addr]:587 username:password ; Replace [your-smtp-server-addr] with your SMTP server address, and username:password with your SMTP server credentials
12
docker-compose/wazuh/config/rules/local_rules.xml
Normal file
@ -0,0 +1,12 @@
<!-- Custom Rules XML file for Wazuh -->

<!-- (Optional) Fix false-positive reports in Wazuh ClamAV
<group name="clamd,freshclam,">
  <rule id="52502" level="8" overwrite="yes">
    <if_sid>52500</if_sid>
    <match>FOUND$</match>
    <description>ClamAV: Virus detected</description>
    <group>virus,pci_dss_5.1,pci_dss_5.2,pci_dss_11.4,gpg13_4.2,gdpr_IV_35.7.d,nist_800_53_SI.3,nist_800_53_SI.4,tsc_A1.2,tsc_CC6.1,tsc_CC6.8,tsc_CC7.2,tsc_CC7.3,</group>
  </rule>
</group>
-->
308
docker-compose/wazuh/config/wazuh_cluster/wazuh_manager.conf
Normal file
@ -0,0 +1,308 @@
<ossec_config>
  <global>
    <jsonout_output>yes</jsonout_output>
    <alerts_log>yes</alerts_log>
    <logall>no</logall>
    <logall_json>no</logall_json>
    <email_notification>no</email_notification> <!-- (Optional) When you want to use email notifications -->
    <smtp_server>postfix</smtp_server> <!-- (Optional) When you need to use an SMTP relay for email notifications, and authentication is required -->
    <email_from>your-from-email</email_from> <!-- (Optional) Replace with your email, when you want to use email notifications -->
    <email_to>your-to-email</email_to> <!-- (Optional) Replace with your email, when you want to use email notifications -->
    <email_maxperhour>12</email_maxperhour>
    <email_log_source>alerts.log</email_log_source>
    <agents_disconnection_time>10m</agents_disconnection_time>
    <agents_disconnection_alert_time>0</agents_disconnection_alert_time>
  </global>

  <alerts>
    <log_alert_level>3</log_alert_level>
    <email_alert_level>12</email_alert_level>
  </alerts>

  <!-- Choose between "plain", "json", or "plain,json" for the format of internal logs -->
  <logging>
    <log_format>plain</log_format>
  </logging>

  <remote>
    <connection>secure</connection>
    <port>1514</port>
    <protocol>tcp</protocol>
    <queue_size>131072</queue_size>
  </remote>

  <!-- Policy monitoring -->
  <rootcheck>
    <disabled>no</disabled>
    <check_files>yes</check_files>
    <check_trojans>yes</check_trojans>
    <check_dev>yes</check_dev>
    <check_sys>yes</check_sys>
    <check_pids>yes</check_pids>
    <check_ports>yes</check_ports>
    <check_if>yes</check_if>

    <!-- Frequency that rootcheck is executed - every 12 hours -->
    <frequency>43200</frequency>

    <rootkit_files>etc/rootcheck/rootkit_files.txt</rootkit_files>
    <rootkit_trojans>etc/rootcheck/rootkit_trojans.txt</rootkit_trojans>

    <skip_nfs>yes</skip_nfs>
  </rootcheck>

  <wodle name="cis-cat">
    <disabled>yes</disabled>
    <timeout>1800</timeout>
    <interval>1d</interval>
    <scan-on-start>yes</scan-on-start>

    <java_path>wodles/java</java_path>
    <ciscat_path>wodles/ciscat</ciscat_path>
  </wodle>

  <!-- Osquery integration -->
  <wodle name="osquery">
    <disabled>yes</disabled>
    <run_daemon>yes</run_daemon>
    <log_path>/var/log/osquery/osqueryd.results.log</log_path>
    <config_path>/etc/osquery/osquery.conf</config_path>
    <add_labels>yes</add_labels>
  </wodle>

  <!-- System inventory -->
  <wodle name="syscollector">
    <disabled>no</disabled>
    <interval>1h</interval>
    <scan_on_start>yes</scan_on_start>
    <hardware>yes</hardware>
    <os>yes</os>
    <network>yes</network>
    <packages>yes</packages>
    <ports all="no">yes</ports>
    <processes>yes</processes>

    <!-- Database synchronization settings -->
    <synchronization>
      <max_eps>10</max_eps>
    </synchronization>
  </wodle>

  <sca>
    <enabled>yes</enabled>
    <scan_on_start>yes</scan_on_start>
    <interval>12h</interval>
    <skip_nfs>yes</skip_nfs>
  </sca>

  <vulnerability-detection>
    <enabled>yes</enabled>
    <index-status>yes</index-status>
    <feed-update-interval>60m</feed-update-interval>
  </vulnerability-detection>

  <indexer>
    <enabled>yes</enabled>
    <hosts>
      <host>https://wazuh.indexer:9200</host>
    </hosts>
    <ssl>
      <certificate_authorities>
        <ca>/etc/ssl/root-ca.pem</ca>
      </certificate_authorities>
      <certificate>/etc/ssl/filebeat.pem</certificate>
      <key>/etc/ssl/filebeat.key</key>
    </ssl>
  </indexer>

  <!-- File integrity monitoring -->
  <syscheck>
    <disabled>no</disabled>

    <!-- Frequency that syscheck is executed default every 12 hours -->
    <frequency>43200</frequency>

    <scan_on_start>yes</scan_on_start>

    <!-- Generate alert when new file detected -->
    <alert_new_files>yes</alert_new_files>

    <!-- Don't ignore files that change more than 'frequency' times -->
    <auto_ignore frequency="10" timeframe="3600">no</auto_ignore>

    <!-- Directories to check (perform all possible verifications) -->
    <directories>/etc,/usr/bin,/usr/sbin</directories>
    <directories>/bin,/sbin,/boot</directories>

    <!-- Files/directories to ignore -->
    <ignore>/etc/mtab</ignore>
    <ignore>/etc/hosts.deny</ignore>
    <ignore>/etc/mail/statistics</ignore>
    <ignore>/etc/random-seed</ignore>
    <ignore>/etc/random.seed</ignore>
    <ignore>/etc/adjtime</ignore>
    <ignore>/etc/httpd/logs</ignore>
    <ignore>/etc/utmpx</ignore>
    <ignore>/etc/wtmpx</ignore>
    <ignore>/etc/cups/certs</ignore>
    <ignore>/etc/dumpdates</ignore>
    <ignore>/etc/svc/volatile</ignore>

    <!-- File types to ignore -->
    <ignore type="sregex">.log$|.swp$</ignore>

    <!-- Check the file, but never compute the diff -->
    <nodiff>/etc/ssl/private.key</nodiff>

    <skip_nfs>yes</skip_nfs>
    <skip_dev>yes</skip_dev>
    <skip_proc>yes</skip_proc>
    <skip_sys>yes</skip_sys>

    <!-- Nice value for Syscheck process -->
    <process_priority>10</process_priority>

    <!-- Maximum output throughput -->
    <max_eps>100</max_eps>

    <!-- Database synchronization settings -->
    <synchronization>
      <enabled>yes</enabled>
      <interval>5m</interval>
      <max_interval>1h</max_interval>
      <max_eps>10</max_eps>
    </synchronization>
  </syscheck>

  <!-- Active response -->
  <global>
    <white_list>127.0.0.1</white_list>
    <white_list>^localhost.localdomain$</white_list>
  </global>

  <command>
    <name>disable-account</name>
    <executable>disable-account</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <command>
    <name>restart-wazuh</name>
    <executable>restart-wazuh</executable>
  </command>

  <command>
    <name>firewall-drop</name>
    <executable>firewall-drop</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <command>
    <name>host-deny</name>
    <executable>host-deny</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <command>
    <name>route-null</name>
    <executable>route-null</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <command>
    <name>win_route-null</name>
    <executable>route-null.exe</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <command>
    <name>netsh</name>
    <executable>netsh.exe</executable>
    <timeout_allowed>yes</timeout_allowed>
  </command>

  <!--
  <active-response>
    active-response options here
  </active-response>
  -->

  <!-- Log analysis -->
  <localfile>
    <log_format>command</log_format>
    <command>df -P</command>
    <frequency>360</frequency>
  </localfile>

  <localfile>
    <log_format>full_command</log_format>
    <command>netstat -tulpn | sed 's/\([[:alnum:]]\+\)\ \+[[:digit:]]\+\ \+[[:digit:]]\+\ \+\(.*\):\([[:digit:]]*\)\ \+\([0-9\.\:\*]\+\).\+\ \([[:digit:]]*\/[[:alnum:]\-]*\).*/\1 \2 == \3 == \4 \5/' | sort -k 4 -g | sed 's/ == \(.*\) ==/:\1/' | sed 1,2d</command>
    <alias>netstat listening ports</alias>
    <frequency>360</frequency>
  </localfile>

  <localfile>
    <log_format>full_command</log_format>
    <command>last -n 20</command>
    <frequency>360</frequency>
  </localfile>

  <ruleset>
    <!-- Default ruleset -->
    <decoder_dir>ruleset/decoders</decoder_dir>
    <rule_dir>ruleset/rules</rule_dir>
    <rule_exclude>0215-policy_rules.xml</rule_exclude>
    <list>etc/lists/audit-keys</list>
    <list>etc/lists/amazon/aws-eventnames</list>
    <list>etc/lists/security-eventchannel</list>

    <!-- User-defined ruleset -->
    <decoder_dir>etc/decoders</decoder_dir>
    <rule_dir>etc/rules</rule_dir>
  </ruleset>

  <rule_test>
    <enabled>yes</enabled>
    <threads>1</threads>
    <max_sessions>64</max_sessions>
    <session_timeout>15m</session_timeout>
  </rule_test>

  <!-- Configuration for wazuh-authd -->
  <auth>
    <disabled>no</disabled>
    <port>1515</port>
    <use_source_ip>no</use_source_ip>
    <purge>yes</purge>
    <use_password>no</use_password>
    <ciphers>HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH</ciphers>
    <!-- <ssl_agent_ca></ssl_agent_ca> -->
    <ssl_verify_host>no</ssl_verify_host>
    <ssl_manager_cert>etc/sslmanager.cert</ssl_manager_cert>
    <ssl_manager_key>etc/sslmanager.key</ssl_manager_key>
    <ssl_auto_negotiate>no</ssl_auto_negotiate>
  </auth>

  <cluster>
    <name>wazuh</name>
    <node_name>node01</node_name>
    <node_type>master</node_type>
    <key>aa093264ef885029653eea20dfcf51ae</key>
    <port>1516</port>
    <bind_addr>0.0.0.0</bind_addr>
    <nodes>
      <node>wazuh.manager</node>
    </nodes>
    <hidden>no</hidden>
    <disabled>yes</disabled>
  </cluster>

</ossec_config>

<ossec_config>
  <localfile>
    <log_format>syslog</log_format>
    <location>/var/ossec/logs/active-responses.log</location>
  </localfile>

</ossec_config>
@ -0,0 +1,17 @@
---
server.host: 0.0.0.0
server.port: 5601
opensearch.hosts: https://wazuh.indexer:9200
opensearch.ssl.verificationMode: certificate
opensearch.requestHeadersWhitelist:
  - "securitytenant"
  - "Authorization"
opensearch_security.multitenancy.enabled: false
opensearch_security.readonly_mode.roles:
  - "kibana_read_only"
server.ssl.enabled: true
server.ssl.key: "/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem"
server.ssl.certificate: "/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem"
opensearch.ssl.certificateAuthorities:
  - "/usr/share/wazuh-dashboard/certs/root-ca.pem"
uiSettings.overrides.defaultRoute: /app/wz-home
11
docker-compose/wazuh/config/wazuh_dashboard/wazuh.yml
Normal file
@ -0,0 +1,11 @@
---
hosts:
  - 1513629884013:
      url: "https://wazuh.manager"
      port: 55000
      username: wazuh-wui
      password: "your-wazuh-wui-password"
      run_as: false

enrollment.dns: "your-enrollment-dns-server"
alerts.sample.prefix: "wazuh-alerts-"
56
docker-compose/wazuh/config/wazuh_indexer/internal_users.yml
Normal file
@ -0,0 +1,56 @@
---
# This is the internal user database
# The hash value is a bcrypt hash and can be generated with plugin/tools/hash.sh

_meta:
  type: "internalusers"
  config_version: 2

# Define your internal users here

## Demo users

admin:
  hash: "$2y$12$y85PV5Ob2lqeR30Rcm/F9..8JMgLT5ALZGMtzTo7c.p1vPpR394ki"
  reserved: true
  backend_roles:
    - admin
  description: "Demo admin user"

kibanaserver:
  hash: "$2y$12$b9G5KNitghhTt1V5asLQd.nDOjd7O8h.30vkZVfroWT/HFq0y51TO"
  reserved: true
  description: "Demo kibanaserver user"

kibanaro:
  hash: "$2a$12$JJSXNfTowz7Uu5ttXfeYpeYE0arACvcwlPBStB1F.MI7f0U9Z4DGC"
  reserved: false
  backend_roles:
    - kibanauser
    - readall
  attributes:
    attribute1: "value1"
    attribute2: "value2"
    attribute3: "value3"
  description: "Demo kibanaro user"

logstash:
  hash: "$2a$12$u1ShR4l4uBS3Uv59Pa2y5.1uQuZBrZtmNfqB3iM/.jL0XoV9sghS2"
  reserved: false
  backend_roles:
    - logstash
  description: "Demo logstash user"

readall:
  hash: "$2a$12$ae4ycwzwvLtZxwZ82RmiEunBbIPiAmGZduBAjKN0TXdwQFtCwARz2"
  reserved: false
  backend_roles:
    - readall
  description: "Demo readall user"

snapshotrestore:
  hash: "$2y$12$DpwmetHKwgYnorbgdvORCenv4NAK8cPUg8AI6pxLCuWf/ALc0.v7W"
  reserved: false
  backend_roles:
    - snapshotrestore
  description: "Demo snapshotrestore user"
43
docker-compose/wazuh/config/wazuh_indexer/wazuh.indexer.yml
Normal file
@ -0,0 +1,43 @@
---
network.host: "0.0.0.0"
node.name: "wazuh.indexer"
path.data: /var/lib/wazuh-indexer
path.logs: /var/log/wazuh-indexer
discovery.type: single-node
http.port: 9200-9299
transport.tcp.port: 9300-9399
compatibility.override_main_response_version: true
plugins.security.ssl.http.pemcert_filepath: /usr/share/wazuh-indexer/certs/wazuh.indexer.pem
plugins.security.ssl.http.pemkey_filepath: /usr/share/wazuh-indexer/certs/wazuh.indexer.key
plugins.security.ssl.http.pemtrustedcas_filepath: /usr/share/wazuh-indexer/certs/root-ca.pem
plugins.security.ssl.transport.pemcert_filepath: /usr/share/wazuh-indexer/certs/wazuh.indexer.pem
plugins.security.ssl.transport.pemkey_filepath: /usr/share/wazuh-indexer/certs/wazuh.indexer.key
plugins.security.ssl.transport.pemtrustedcas_filepath: /usr/share/wazuh-indexer/certs/root-ca.pem
plugins.security.ssl.http.enabled: true
plugins.security.ssl.transport.enforce_hostname_verification: false
plugins.security.ssl.transport.resolve_hostname: false
plugins.security.authcz.admin_dn:
  - "CN=admin,OU=Wazuh,O=Wazuh,L=California,C=US"
plugins.security.check_snapshot_restore_write_privileges: true
plugins.security.enable_snapshot_restore_privilege: true
plugins.security.nodes_dn:
  - "CN=wazuh.indexer,OU=Wazuh,O=Wazuh,L=California,C=US"
plugins.security.restapi.roles_enabled:
  - "all_access"
  - "security_rest_api_access"
plugins.security.system_indices.enabled: true
plugins.security.system_indices.indices:
  - ".opendistro-alerting-config"
  - ".opendistro-alerting-alert*"
  - ".opendistro-anomaly-results*"
  - ".opendistro-anomaly-detector*"
  - ".opendistro-anomaly-checkpoints"
  - ".opendistro-anomaly-detection-state"
  - ".opendistro-reports-*"
  - ".opendistro-notifications-*"
  - ".opendistro-notebooks"
  - ".opensearch-observability"
  - ".opendistro-asynchronous-search-response*"
  - ".replication-metadata-store"
plugins.security.allow_default_init_securityindex: true
cluster.routing.allocation.disk.threshold_enabled: false
8
docker-compose/wazuh/generate-certs.yaml
Normal file
@ -0,0 +1,8 @@
---
services:
  generator:
    image: wazuh/wazuh-certs-generator:0.0.2
    hostname: wazuh-certs-generator
    volumes:
      - ./config/wazuh_indexer_ssl_certs/:/certificates/
      - ./config/certs.yml:/config/certs.yml
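The generator mounts a ./config/certs.yml file that is not shown in this diff. Based on the upstream wazuh-certs-generator tool, that file usually lists the nodes to issue certificates for; the sketch below is an assumption derived from the service hostnames used in the compose file above (wazuh.manager, wazuh.indexer, wazuh.dashboard), not the repository's actual file.

# Hypothetical sketch of config/certs.yml consumed by wazuh-certs-generator:
nodes:
  indexer:
    - name: wazuh.indexer
      ip: wazuh.indexer
  server:
    - name: wazuh.manager
      ip: wazuh.manager
  dashboard:
    - name: wazuh.dashboard
      ip: wazuh.dashboard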
@ -1,19 +0,0 @@
image:
  repository: traefik
  version: v3.2.1
  pullPolicy: IfNotPresent

# --> (Optional) Change log settings here...
# logs:
  # general:
    # level: ERROR
  # access:
    # enabled: false
# <--

# --> (Optional) Redirect HTTP to HTTPs by default
# ports:
  # web:
    # redirectTo:
      # port: websecure
# <--
36
kestra/ansible/ansible-playbook-git.yaml
Normal file
@ -0,0 +1,36 @@
---
# Kestra ansible-playbook Template
# ---
#
# Run an ansible playbook cloned from a Git Repository
#
id: ansible_playbook_git
namespace: your_namespace # <-- Replace with your namespace...
tasks:
  - id: ansible_job
    type: io.kestra.plugin.core.flow.WorkingDirectory
    inputFiles:
      id_rsa: "{{ secret('RSA_SSH_KEY') }}" # <-- (Required) Replace with your secret key...
      # id_ed25519: "{{ secret('ED25519_SSH_KEY') }}" # <-- (Optional) Replace with your secret key, when using ED25519...
    tasks:
      - id: git_clone
        type: io.kestra.plugin.git.Clone
        url: your-git-repository-url # <-- Replace with your Git repository URL...
        directory: ansible
        branch: main # <-- (Optional) Replace with your Git branch...
        # --> (Optional) If Git repository is private, add your Git token...
        # username: xcad
        # password: "{{ secret('GITOKEN') }}"
        # <--
      - id: ansible_playbook
        type: io.kestra.plugin.ansible.cli.AnsibleCLI
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
          image: docker.io/cytopia/ansible:latest-tools
          user: "1000" # <-- (Required) Replace with your user id...
        env:
          "ANSIBLE_HOST_KEY_CHECKING": "false"
          "ANSIBLE_REMOTE_USER": "your-remote-user" # <-- (Required) Replace with your remote user...
        commands:
          - ansible-playbook -i ansible/inventory --key-file id_rsa ansible/your-playbook.yaml
          # - ansible-playbook -i ansible/inventory --key-file id_ed25519 ansible/your-playbook.yaml # <-- (Optional) when using ED25519...
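The template above only defines tasks, so the playbook runs on demand. If it should also run on a schedule, a Kestra Schedule trigger could be appended to the same flow; this is a minimal sketch, not part of the commit, and the trigger id and cron expression are placeholders to adjust.

# Hypothetical addition to the flow above:
triggers:
  - id: daily_schedule  # <-- placeholder id
    type: io.kestra.plugin.core.trigger.Schedule
    cron: "0 4 * * *"  # <-- run every day at 04:00, adjust as needed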
38
kestra/ansible/ansible-playbook-inline.yaml
Normal file
@ -0,0 +1,38 @@
---
# Kestra ansible-playbook Template
# ---
#
# Run an ansible playbook defined inline in the kestra flow.
#
id: ansible_playbook_inline
namespace: your_namespace # <-- Replace with your namespace...
tasks:
  - id: ansible_job
    type: io.kestra.plugin.core.flow.WorkingDirectory
    inputFiles:
      inventory.ini: | # <-- Replace with your inventory file content...
        srv-demo-1.home.clcreative.de
      myplaybook.yaml: | # <-- Replace with your playbook file content...
        ---
        - hosts: srv-demo-1.home.clcreative.de
          tasks:
            - name: upgrade apt packages
              become: true
              ansible.builtin.apt:
                upgrade: true
                update_cache: true
      id_rsa: "{{ secret('RSA_SSH_KEY') }}" # <-- (Required) Replace with your secret key...
      # id_ed25519: "{{ secret('ED25519_SSH_KEY') }}" # <-- (Optional) Replace with your secret key, when using ED25519...
    tasks:
      - id: ansible_playbook
        type: io.kestra.plugin.ansible.cli.AnsibleCLI
        taskRunner:
          type: io.kestra.plugin.scripts.runner.docker.Docker
          image: docker.io/cytopia/ansible:latest-tools
          user: "1000" # <-- (Required) Replace with your user id...
        env:
          "ANSIBLE_HOST_KEY_CHECKING": "false"
          "ANSIBLE_REMOTE_USER": "your-remote-user" # <-- (Required) Replace with your remote user...
        commands:
          - ansible-playbook -i inventory.ini --key-file id_rsa myplaybook.yaml
          # - ansible-playbook -i inventory.ini --key-file id_ed25519 myplaybook.yaml # <-- (Optional) when using ED25519...
@ -1,42 +0,0 @@
---
# Kestra ansible-playbook Template
# ---
#
# Run an ansible playbook defined inline the kestra flow.
#
id: ansible_job
namespace: # your-namespace

tasks:
  - id: ansible
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: local_files
        type: io.kestra.core.tasks.storages.LocalFiles
        inputs:
          inventory.ini: |
            srv-demo-1.home.clcreative.de
          # --> replace with your playbook
          myplaybook.yaml: |
            ---
            - hosts: srv-demo-1.home.clcreative.de
              tasks:
                - name: upgrade apt packages
                  become: true
                  ansible.builtin.apt:
                    upgrade: true
                    update_cache: true
          # <--
          id_rsa: "{{ secret('SSH_KEY') }}"
      - id: ansible_task
        type: io.kestra.plugin.ansible.cli.AnsibleCLI
        docker:
          image: docker.io/cytopia/ansible:latest-tools
          user: "1000" # required to set ssh key permissions
        env:
          "ANSIBLE_HOST_KEY_CHECKING": "false"
          # --> (optional) when using a different remote user
          # "ANSIBLE_REMOTE_USER": "your-remote-user"
          # <--
        commands:
          - ansible-playbook -i inventory.ini --key-file id_rsa myplaybook.yaml
@ -1,33 +0,0 @@
---
# Kestra ansible-playbook Template
# ---
#
# Run an ansible playbook which has been uploaded to the server.
#
id: ansible_job
namespace: # your-namespace

tasks:
  - id: ansible
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: ansible_task
        namespaceFiles:
          enabled: true
          # --> upload your files to the kestra data directory for the namespace in
          # <docker volume for kestra-data>/<namespace>/_files/
          include:
            - inventory.ini
            - myplaybook.yaml
          # <--
        type: io.kestra.plugin.ansible.cli.AnsibleCLI
        docker:
          image: docker.io/cytopia/ansible:latest-tools
        env:
          "ANSIBLE_HOST_KEY_CHECKING": "false"
          # --> (optional) when using a different remote user
          # "ANSIBLE_REMOTE_USER": "your-remote-user"
          # <--
        commands:
          - apk add sshpass # only required if use ssh passwords.
          - ansible-playbook -i inventory.ini myplaybook.yaml
@ -1,38 +0,0 @@
---
# Kestra ansible-playbook Template
# ---
#
# Run an ansible playbook which has been uploaded to the server, using
# ssh key authentication.
#
id: ansible_job
namespace: # your-namespace

tasks:
  - id: ansible
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: load_ssh_key
        type: io.kestra.core.tasks.storages.LocalFiles
        inputs:
          id_rsa: "{{ secret('SSH_KEY') }}"
      - id: ansible_task
        namespaceFiles:
          enabled: true
          # --> upload your files to the kestra data directory for the namespace in
          # <docker volume for kestra-data>/<namespace>/_files/
          include:
            - inventory.ini
            - myplaybook.yaml
          # <--
        type: io.kestra.plugin.ansible.cli.AnsibleCLI
        docker:
          image: docker.io/cytopia/ansible:latest-tools
          user: "1000" # required to set ssh key permissions
        env:
          "ANSIBLE_HOST_KEY_CHECKING": "false"
          # --> (optional) when using a different remote user
          # "ANSIBLE_REMOTE_USER": "your-remote-user"
          # <--
        commands:
          - ansible-playbook -i inventory.ini --key-file id_rsa myplaybook.yaml
31
kestra/docker/docker-build-git.yaml
Normal file
@ -0,0 +1,31 @@
---
# Kestra Docker Git Build Template
# ---
#
# Build a Docker image from a Git repository.
#
id: docker_build_git
namespace: your_namespace # <- Replace with your namespace...
tasks:
  - id: docker_job
    type: io.kestra.plugin.core.flow.WorkingDirectory
    tasks:
      - id: git_clone
        type: io.kestra.plugin.git.Clone
        url: your-git-repository-url # <-- Replace with your Git repository URL...
        directory: docker
        branch: main # <-- (Optional) Replace with your Git branch...
        # --> (Optional) If Git repository is private, add your Git token...
        # username: xcad
        # password: "{{ secret('GITOKEN') }}"
        # <--
      - id: docker_build
        type: io.kestra.plugin.docker.Build
        dockerfile: "docker/src/Dockerfile" # <- Replace with your Dockerfile path...
        tags:
          - your-username/your-repository:your-tag # <- Replace with your Docker image tag...
        push: true
        credentials:
          registry: https://index.docker.io/v1/
          username: "{{ secret('YOUR_USERNAME') }}" # <- Replace with your Docker Hub username...
          password: "{{ secret('YOUR_PASSWORD') }}" # <- Replace with your Docker Hub password...
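The build templates reference secret('YOUR_USERNAME') and secret('YOUR_PASSWORD'). In the open-source Kestra distribution, such secrets are typically provided to the Kestra container as base64-encoded environment variables prefixed with SECRET_; the sketch below assumes a docker-compose service named "kestra" and uses placeholder values, so adapt it to your own deployment.

# Hypothetical snippet for the compose file that runs Kestra:
services:
  kestra:  # <-- assumed service name for the Kestra container
    environment:
      # values must be base64-encoded, e.g. the output of: echo -n "my-user" | base64
      SECRET_YOUR_USERNAME: bXktdXNlcg==      # <-- placeholder
      SECRET_YOUR_PASSWORD: bXktcGFzc3dvcmQ=  # <-- placeholder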
33
kestra/docker/docker-build-inline.yaml
Normal file
@ -0,0 +1,33 @@
---
# Kestra Docker File Build Template
# ---
#
# Build a Docker image from a File.
#
id: docker_build_inline
namespace: your_namespace # <- Replace with your namespace...
tasks:
  - id: docker_job
    type: io.kestra.plugin.core.flow.WorkingDirectory
    inputFiles:
      Dockerfile: | # <- Replace with your Dockerfile content...
        FROM alpine:latest
        WORKDIR /app
        COPY . /app
        RUN apk add --update python3
        CMD [ "python", "main.py"]
      main.py: | # <- Replace with your Python script content...
        if __name__ == "__main__":
            print("Hello from Docker!")
            exit(0)
    tasks:
      - id: docker_build
        type: io.kestra.plugin.docker.Build
        dockerfile: "src/Dockerfile" # <- Replace with your Dockerfile path...
        tags:
          - your-username/your-repository:your-tag # <- Replace with your Docker image tag...
        push: true
        credentials:
          registry: https://index.docker.io/v1/
          username: "{{ secret('YOUR_USERNAME') }}" # <- Replace with your Docker Hub username...
          password: "{{ secret('YOUR_PASSWORD') }}" # <- Replace with your Docker Hub password...
@ -1,39 +0,0 @@
---
# Kestra Docker File Build Template
# ---
#
# Build a Docker image from a File.
#

id: docker-file-build
namespace: # your-namespace

tasks:

  - id: file
    type: io.kestra.core.tasks.flows.WorkingDirectory
    tasks:
      - id: createFiles
        type: io.kestra.core.tasks.storages.LocalFiles
        inputs:
          Dockerfile: |
            FROM alpine:latest
            WORKDIR /app
            COPY . /app
            RUN apk add --update python3
            CMD [ "python", "main.py"]
          main.py: |
            if __name__ == "__main__":
                print("Hello from Docker!")
                exit(0)

      - id: build
        type: io.kestra.plugin.docker.Build
        dockerfile: "src/Dockerfile"
        tags:
          - your-username/your-repository:your-tag
        push: true
        credentials:
          registry: https://index.docker.io/v1/
          username: "{{ secret('YOUR_USERNAME') }}"
          password: "{{ secret('YOUR_PASSWORD') }}"
@ -1,30 +0,0 @@
---
# Kestra Docker Git Build Template
# ---
#
# Build a Docker image from a Git repository.
#

id: docker-git-build
namespace: # your-namespace

tasks:

  - id: git
    type: io.kestra.core.tasks.flows.WorkingDirectory
    tasks:
      - id: clone
        type: io.kestra.plugin.git.Clone
        url: https://your-git-repo-url
        branch: your-branch

      - id: build
        type: io.kestra.plugin.docker.Build
        dockerfile: "src/Dockerfile"
        tags:
          - your-username/your-repository:your-tag
        push: true
        credentials:
          registry: https://index.docker.io/v1/
          username: "{{ secret('YOUR_USERNAME') }}"
          password: "{{ secret('YOUR_PASSWORD') }}"
@ -5,61 +5,61 @@
# Inputs is a list of dynamic values passed to the flow at runtime.
#

-id: inputs
+id: inputs # <- Replace with your task id...
-namespace: # your-namespace
+namespace: your-namespace # <- Replace with your namespace...

inputs:
-  - id: string
+  - id: string # <- Replace with your input name...
    type: STRING

-  - id: optional
+  - id: optional # <- Replace with your input name...
    type: STRING
    required: false

-  - id: int
+  - id: int # <- Replace with your input name...
    type: INT

-  - id: bool
+  - id: bool # <- Replace with your input name...
    type: BOOLEAN

-  - id: float
+  - id: float # <- Replace with your input name...
    type: FLOAT

-  - id: instant
+  - id: instant # <- Replace with your input name...
    type: DATETIME

-  - id: date
+  - id: date # <- Replace with your input name...
    type: DATE

-  - id: time
+  - id: time # <- Replace with your input name...
    type: TIME

-  - id: duration
+  - id: duration # <- Replace with your input name...
    type: DURATION

-  - id: file
+  - id: file # <- Replace with your input name...
    type: FILE

-  - id: optionalFile
+  - id: optionalFile # <- Replace with your input name...
    type: FILE

-  - id: instantDefaults
+  - id: instantDefaults # <- Replace with your input name...
    type: DATETIME
-    defaults: "2013-08-09T14:19:00Z"
+    defaults: "2013-08-09T14:19:00Z" # <- Replace with your default value...

-  - id: json
+  - id: json # <- Replace with your input name...
    type: JSON

-  - id: uri
+  - id: uri # <- Replace with your input name...
    type: URI

-  - id: secret
+  - id: secret # <- Replace with your input name...
    type: SECRET

-  - id: nested.string
+  - id: nested.string # <- Replace with your input name...
    type: STRING

tasks:
  - id: using_inputs
-    type: io.kestra.core.tasks.log.Log
+    type: io.kestra.plugin.core.log.Log
    message: "{{ inputs.string }}"
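The updated flow above declares a dotted nested.string input, but the example task only logs inputs.string; nested input ids are referenced with the same dotted path in expressions. A minimal sketch of an extra task that could be appended under tasks (the task id is a placeholder, not part of the commit):

  - id: log_nested_input  # <-- placeholder id
    type: io.kestra.plugin.core.log.Log
    message: "nested value: {{ inputs.nested.string }}"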
@ -7,14 +7,13 @@
# usage:
# make sure the Kestra instance can access the /app/scripts/your-python-script.py file
# if you're running Kestra in Docker, use a volume to mount the file/directory.
+#
-id: python-command
+id: python_command
-namespace: # your-namespace
+namespace: your_namespace # <-- Replace with your namespace...

tasks:
-  - id: python_command
+  - id: python_job
    type: io.kestra.plugin.scripts.python.Commands
    commands:
      - python /app/scripts/your-python-script.py
-    runner: PROCESS # or DOCKER (might be deprecated in the future) use TaskRunner instead
+    taskRunner:
+      type: io.kestra.plugin.core.runner.Process
@ -4,15 +4,13 @@
#
# This template is a simple Python script that can be used to make a request to a website and log the status code.
#
-id: python-script
-namespace: # your-namespace
+id: python_script
+namespace: your_namespace # <-- Replace with your namespace...

tasks:
-  - id: python_script
+  - id: python_job
    type: io.kestra.plugin.scripts.python.Script
-    runner: DOCKER # (might be deprecated in the future) use TaskRunner instead
+    taskRunner:
+      type: io.kestra.plugin.core.runner.Process
    script: |
      from kestra import Kestra
      import requests
@ -21,9 +19,6 @@ tasks:
      print(response.status_code)

      Kestra.outputs({'status': response.status_code, 'text': response.text})
-    beforeCommands:
-      - pip install requests kestra

  - id: log
-    type: io.kestra.core.tasks.log.Log
+    type: io.kestra.plugin.core.log.Log
    message: "StatusCode: {{outputs.pythonscript.vars.status}}"
@ -2,16 +2,16 @@
# Kestra Variable Template
# ---
#
-#
+# Variables is a list of static values passed to the flow at runtime.
#

-id: variables
+id: variables # <- Replace with your task id...
-namespace: # your-namespace
+namespace: your-namespace # <- Replace with your namespace...

variables:
-  variable-name: "variable-value"
+  variable-name: "variable-value" # <- Replace with your variable name and value...

tasks:
  - id: using_variables
-    type: io.kestra.core.tasks.log.Log
+    type: io.kestra.plugin.core.log.Log
    message: "{{ vars.variable-name }}"
@ -6,14 +6,15 @@
#
# usage:
# curl http://your-kestra-instance/api/v1/executions/webhook/your-namespace/your-task-id/your-secret-key
+#

-id: webhook
+id: webhook # <- Replace with your task id...
-namespace: # your-namespace
+namespace: your-namespace # <- Replace with your namespace...

tasks:
-  # - your-tasks
+  # -- Add your tasks here...

triggers:
  - id: webhook
-    type: io.kestra.core.models.triggers.types.Webhook
+    type: io.kestra.plugin.core.trigger.Webhook
-    key: # your-secret-key, keep this secret!
+    key: your-secret-key # <- Replace with your secret key...
@ -21,7 +21,7 @@ image:
      tag: "v1.7.2"
    supportBundleKit:
      repository: "longhornio/support-bundle-kit"
-      tag: "v0.0.45"
+      tag: "v0.0.47"
  csi:
    attacher:
      repository: "longhornio/csi-attacher"
@ -1,7 +1,7 @@
---
image:
  repository: portainer/portainer-ce
-  tag: 2.24.0
+  tag: 2.25.0
  pullPolicy: IfNotPresent

service:
Some files were not shown because too many files have changed in this diff.