17 Commits
master ... zato

Author SHA1 Message Date
Boris Gallet
120e4aed17 fix: [zato] restart load-balancer after updating the .pem cert 2025-09-16 10:36:46 +02:00
Boris Gallet
1048cb4ba6 new: [zato] auto schedule crt renew from letsencrypt certs 2025-02-06 11:53:41 +01:00
Boris Gallet
30ff0a39b6 fix: [keycloak-elabore] turn sql database to recommended, sometimes it’s external 2024-10-24 16:32:28 +02:00
Boris Gallet
e9eadfc968 chg: [zato] upd README 2024-10-22 17:13:39 +02:00
Boris Gallet
06c91d245f new: [zato] Add actions to start, stop and restart Zato 2024-10-22 16:23:27 +02:00
default
74c28a1bb6 chg: [zato] handle load_balancer config for https 2024-10-22 15:47:55 +02:00
Boris Gallet
68448a4243 Merge branch 'keycloak' into zato 2024-09-10 16:30:39 +02:00
Boris Gallet
fa65793593 add: [zato] new data-resources for 2024-08-28 12:20:32 +02:00
Stéphan Sainléger
2e996df297 fix: [zato] comment useless charm-resources config in metadata.yml 2024-08-28 10:30:57 +02:00
Boris Gallet
2eeca100e7 add: [zato] load-balancer config persistence 2024-08-20 11:34:59 +02:00
Boris Gallet
9bf9d41cbb new: [zatodoc] basic publish-dir charm for zato documentation 2024-08-13 17:01:58 +02:00
Boris Gallet
0ec2ad81bc add: [zato] hooks web_proxy-relation-joined 2024-08-12 18:28:38 +02:00
Boris Gallet
596ef1eb51 new: [edit] posgres-database for zato charm 2024-08-12 15:30:00 +02:00
Boris Gallet
7ad2f0f103 new: [add] new charm zato 2024-07-10 11:24:09 +02:00
default
ec1eb83814 new: [add] new charm keycloak 2024-06-26 12:31:00 +02:00
default
647075a110 new: [add] new charm keycloak 2024-05-28 14:49:23 +02:00
default
b24dc346bb new: [outline] add new charm 2024-05-21 11:04:48 +02:00
33 changed files with 1148 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
Description
===========
Using ``keycloak`` version 24.0.
Dev info: https://www.keycloak.org/server/containers
Usage
=====
To start with ``keycloak``, just put this service in your
``compose.yml``::
    keycloak:
      docker-compose:
        image: docker.0k.io/keycloak24.0.4-elabore:1.0.0
      options:
        admin-password: CHANGEME
      relations:
        web-proxy:
          frontend:
            domain: id.mydomain.fr
Customize theme
===============
You can customize the theme by putting your theme in
``/srv/datastore/data/keycloak/opt/keycloak/themes``.
For example, copy the ``material`` folder from
https://github.com/MAXIMUS-DeltaWare/material-keycloak-theme and
restart ``keycloak``.
Then go to your admin console, log in, and open the realm's themes
settings to select your new theme.
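A minimal sketch of these steps (the container name ``keycloak`` and the
``docker restart`` call are assumptions; adapt them to your setup)::

    git clone https://github.com/MAXIMUS-DeltaWare/material-keycloak-theme /tmp/material-keycloak-theme
    cp -r /tmp/material-keycloak-theme/material \
          /srv/datastore/data/keycloak/opt/keycloak/themes/
    docker restart keycloak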

View File

@@ -0,0 +1,3 @@
#!/bin/bash

View File

@@ -0,0 +1,15 @@
#FROM keycloak/keycloak:24.0.4 as builder
#
#ENV KC_METRICS_ENABLED=true
#ENV KC_FEATURES=token-exchange
#ENV KC_DB=postgres
#
#WORKDIR /opt/keycloak
## for demonstration purposes only, please make sure to use proper certificates in production instead
#RUN /opt/keycloak/bin/kc.sh build
FROM keycloak/keycloak:24.0.4
#COPY --from=builder /opt/keycloak/ /opt/keycloak/
WORKDIR /opt/keycloak
ENV KC_LOG_LEVEL=INFO
ENTRYPOINT ["/opt/keycloak/bin/kc.sh", "start", "--optimized"]

View File

@@ -0,0 +1,16 @@
FROM docker.0k.io/keycloak:17.0.1 as builder
ENV KC_METRICS_ENABLED=true
ENV KC_FEATURES=token-exchange
ENV KC_DB=postgres
RUN /opt/keycloak/bin/kc.sh build
FROM builder as inspector
ENTRYPOINT ["ls", "-l", "/opt/keycloak/lib/"]
#FROM docker.0k.io/keycloak:17.0.0
#COPY --from=builder /opt/keycloak/lib/quarkus/ /opt/keycloak/lib/quarkus/
#WORKDIR /opt/keycloak
#ENV KC_LOG_LEVEL=INFO
#ENTRYPOINT ["/opt/keycloak/bin/kc.sh", "start"]

12
keycloak-elabore/hooks/init Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -e
admin_password=$(options-get admin-password) || exit 1
init-config-add "\
$MASTER_BASE_SERVICE_NAME:
environment:
KEYCLOAK_ADMIN: \"admin\"
KEYCLOAK_ADMIN_PASSWORD: \"$admin_password\"
"

View File

@@ -0,0 +1,17 @@
#!/bin/bash
set -e
PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
KC_DB_URL: \"jdbc:postgresql://$MASTER_TARGET_SERVICE_NAME:5432/$DBNAME\"
KC_DB_USERNAME: \"$USER\"
KC_DB_PASSWORD: \"$PASSWORD\"
KC_DB: \"postgres\"
"

View File

@@ -0,0 +1,21 @@
#!/bin/bash
. lib/common
DOMAIN=$(relation-get domain) || exit 1
#IP_HOST=$(hostname -I | awk '{print $1}')
set -e
keycloak:generate-key-if-not-exist "$DOMAIN"
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
KC_HOSTNAME: \"$DOMAIN\"
KC_PROXY: edge
KC_HTTP_ENABLED: \"true\"
KC_HOSTNAME_STRICT: \"false\"
"

View File

@@ -0,0 +1,46 @@
# -*- mode: bash -*-
KEYCLOAK_DIR=/opt/keycloak
DATASTORE_KEYCLOAK_DIR="$SERVICE_DATASTORE$KEYCLOAK_DIR"
HOST_DATASTORE_KEYCLOAK_DIR="$HOST_DATASTORE/$SERVICE_NAME$KEYCLOAK_DIR"
keycloak:generate-key-if-not-exist() {
local domain="$1" ip_host
[ -d "$DATASTORE_KEYCLOAK_DIR" ] && return 0
ip_host=$(set -o pipefail; getent ahostsv4 "$domain" | head -n 1 | cut -f 1 -d " ") || {
err "Couldn't resolve to ipv4 domain name '$domain'."
return 1
}
info "Resolved successfully '$domain' to ip '$ip_host'."
debug "DOCKER_BASE_IMAGE: $DOCKER_BASE_IMAGE"
debug "HOST_DATASTORE_KEYCLOAK_DIR:: $HOST_DATASTORE_KEYCLOAK_DIR"
mkdir -p "$DATASTORE_KEYCLOAK_DIR/conf" || return 0
docker_image_export_dir "$DOCKER_BASE_IMAGE" "/opt/keycloak" "$SERVICE_DATASTORE/opt" || return 1
uid=$(docker_get_uid "$SERVICE_NAME" "keycloak") || return 1
chown "$uid" "$DATASTORE_KEYCLOAK_DIR" -R
debug "DATASTORE_KEYCLOAK_DIR_LS:: $(ls $DATASTORE_KEYCLOAK_DIR)"
docker run -w /opt/keycloak \
-v "$HOST_DATASTORE_KEYCLOAK_DIR":"/opt/keycloak" \
--entrypoint bash \
"$DOCKER_BASE_IMAGE" -c "
export KC_METRICS_ENABLED=true
export KC_FEATURES=token-exchange
export KC_DB=postgres
keytool -genkeypair -storepass password \
-storetype PKCS12 -keyalg RSA \
-keysize 2048 -dname 'CN=$domain' \
-alias server -ext 'SAN:c=DNS:$domain,IP:$ip_host' \
-keystore conf/server.keystore || exit 1
echo 'Generated key'
/opt/keycloak/bin/kc.sh build
" || {
rmdir "$DATASTORE_KEYCLOAK_DIR/conf" 2>/dev/null
rmdir "$DATASTORE_KEYCLOAK_DIR" 2>/dev/null
return 1
}
}

View File

@@ -0,0 +1,21 @@
default-options:

uses:
  web-proxy:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: required
    auto: pair
    solves:
      proxy: "Public access"
    default-options:
      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8080
  postgres-database:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: recommended
    auto: summon
    solves:
      database: "main storage"
    default-options:
113
outline/README.org Normal file
View File

@@ -0,0 +1,113 @@
# -*- ispell-local-dictionary: "english" -*-
* Info
From: https://docs.getoutline.com/s/hosting/doc/docker-7pfeLP5a8t
* Usage
Config info: https://github.com/outline/outline/blob/main/.env.sample
Odoo config: if you configure the Odoo OIDC connector, the callback
URL should look like https://<YOUR_OUTLINE>:443/auth/oidc.callback
Requires an =smtp-server= provider to be functional; you can use the
=smtp-stub= charm to provide the information of an externally managed
=SMTP= server. Typical Keycloak values for the =oidc-*= options are
sketched after the configuration example below.
#+begin_src yaml
outline:
  options:
    sender-email:        # the sender email (beware the conf of your SMTP server)
    oidc-client-id:      # the client id of your OIDC provider
    oidc-client-secret:  # the client secret of your OIDC provider
    oidc-auth-uri:       # the authorization endpoint of your OIDC provider
    oidc-token-uri:      # the token uri of your OIDC provider
    oidc-user-info-uri:  # the user info uri of your OIDC provider
    oidc-logout-uri:     # the logout uri of your OIDC provider
smtp-stub:
  options:
    host: smtp.myhost.com
    port: 465
    connection-security: "ssl/tls"
    auth-method: password  # IMPORTANT: if not present, login/password auth doesn't work
    login: myuser
    password: myp4ssw0rd
#+end_src
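For instance, when the provider is the =keycloak-elabore= charm from this
repository, the OIDC endpoints typically look like this (the domain, the
realm name =myrealm= and the client id are assumptions; adapt them to
your provider):
#+begin_src yaml
outline:
  options:
    oidc-client-id: outline
    oidc-client-secret: <SECRET_FROM_THE_KEYCLOAK_CLIENT>
    oidc-auth-uri: https://id.mydomain.fr/realms/myrealm/protocol/openid-connect/auth
    oidc-token-uri: https://id.mydomain.fr/realms/myrealm/protocol/openid-connect/token
    oidc-user-info-uri: https://id.mydomain.fr/realms/myrealm/protocol/openid-connect/userinfo
    oidc-logout-uri: https://id.mydomain.fr/realms/myrealm/protocol/openid-connect/logout
#+end_src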
* Building a new image
We use the official image with added patches due to 2 bugs:
- https://github.com/outline/outline/issues/6859
- the second one has not been reported upstream yet
Note that a PR with a fix for the first bug was pushed upstream, but it
has not been tested yet.
These fixes apply to version 0.76.0.
** First fix
We need to add "url.port = '';" in ~build/server/middlewares/passport.js~
to remove the port. Note that this is a crude fix, but it works for our
setup.
#+begin_src bash
IMAGE=docker.0k.io/outline:0.76.0-elabore
echo 'apk add patch bash' | dupd -u "$IMAGE" -- -u 1
cat <<'EOF1' | dupd -u "$IMAGE" -- -u 0
patch -p 1 <<'EOF2'
--- a/build/server/middlewares/passport.js
+++ b/build/server/middlewares/passport.js
@@ -40,6 +40,7 @@
const requestHost = ctx.get("host");
const url = new URL("".concat(reqProtocol, "://").concat(requestHost).concat(redirectUrl));
url.host = host;
+ url.port = '';
return ctx.redirect("".concat(url.toString()).concat(hasQueryString ? "&" : "?", "notice=").concat(notice));
}
if (_env.default.isDevelopment) {
EOF2
EOF1
#+end_src
** Second fix
When the "/oidc" URL is called, outline returns a "Set-Cookie" header
with an incorrect "domain" value (still the inner docker domain
"outline" instead of the outer proxy domain from the frontend).
Fortunately we can simply drop the "domain" value from the cookie by
commenting out 2 lines in ~build/server/utils/passport.js~.
The patches change the "build/" files, so this is a very temporary and
brittle fix.
#+begin_src bash
IMAGE=docker.0k.io/outline:0.76.0-elabore
cat <<'EOF1' | dupd -u "$IMAGE" -- -u 0
patch -p 1 <<'EOF2'
--- a/build/server/utils/passport.js.orig
+++ b/build/server/utils/passport.js
@@ -37,7 +37,7 @@
const state = buildState(host, token, client);
ctx.cookies.set(this.key, state, {
expires: (0, _dateFns.addMinutes)(new Date(), 10),
- domain: (0, _domains.getCookieDomain)(ctx.hostname, _env.default.isCloudHosted)
+ //domain: (0, _domains.getCookieDomain)(ctx.hostname, _env.default.isCloudHosted)
});
callback(null, token);
});
@@ -53,7 +53,7 @@
// Destroy the one-time pad token and ensure it matches
ctx.cookies.set(this.key, "", {
expires: (0, _dateFns.subMinutes)(new Date(), 1),
- domain: (0, _domains.getCookieDomain)(ctx.hostname, _env.default.isCloudHosted)
+ //domain: (0, _domains.getCookieDomain)(ctx.hostname, _env.default.isCloudHosted)
});
if (!token || token !== providedToken) {
return callback((0, _errors.OAuthStateMismatchError)(), false, token);
EOF2
EOF1
#+end_src

73
outline/hooks/init Executable file
View File

@@ -0,0 +1,73 @@
#!/bin/bash
## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.
## Accessible variables are:
## - SERVICE_NAME Name of current service
## - DOCKER_BASE_IMAGE Base image from which this service might be built if any
## - SERVICE_DATASTORE Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
set -e
PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/secret-key
UTILS_SECRET="$SERVICE_DATASTORE"/.compose/password/utils-secret
if ! [ -f "$UTILS_SECRET" ]; then
info "Generating secret password"
mkdir -p "${UTILS_SECRET%/*}"
umask 077
openssl rand -hex 32 > "$UTILS_SECRET"
else
info "Using existing utils-secret"
fi
if ! [ -f "$PASSWORD_FILE" ]; then
info "Generating secret password"
mkdir -p "${PASSWORD_FILE%/*}"
umask 077
openssl rand -hex 32 > "$PASSWORD_FILE"
else
info "Using existing secret password"
fi
secret_password=$(cat "$PASSWORD_FILE")
utils_secret=$(cat "$UTILS_SECRET")
sender=$(options-get sender-email) || exit 1
oidc_client_id=$(options-get oidc-client-id) || exit 1
oidc_client_secret=$(options-get oidc-client-secret) || exit 1
oidc_auth_uri=$(options-get oidc-auth-uri) || exit 1
oidc_token_uri=$(options-get oidc-token-uri) || exit 1
oidc_user_info_uri=$(options-get oidc-user-info-uri) || exit 1
oidc_logout_uri=$(options-get oidc-logout-uri) || exit 1
init-config-add "
$SERVICE_NAME:
volumes:
- $SERVICE_DATASTORE:/var/lib/outline/data
environment:
SMTP_FROM_EMAIL: \"$sender\"
DEFAULT_LANGUAGE: \"fr_FR\"
SECRET_KEY: \"$secret_password\"
UTILS_SECRET: \"$utils_secret\"
OIDC_CLIENT_ID: \"$oidc_client_id\"
OIDC_CLIENT_SECRET: \"$oidc_client_secret\"
OIDC_AUTH_URI: \"$oidc_auth_uri\"
OIDC_TOKEN_URI: \"$oidc_token_uri\"
OIDC_USERINFO_URI: \"$oidc_user_info_uri\"
OIDC_LOGOUT_URI: \"$oidc_logout_uri\"
OIDC_SCOPES: \"openid\"
OIDC_USERNAME_CLAIM: \"preferred_username\"
OIDC_DISPLAY_NAME: \"OpenID Connect\"
NODE_ENV: \"production\"
LOG_LEVEL: \"debug\"
FORCE_HTTPS: \"false\"
#DEVELOPMENT_UNSAFE_INLINE_CSP: \"true\"
DEBUG: \"http\"
"

View File

@@ -0,0 +1,18 @@
#!/bin/bash
set -e
PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
DATABASE_URL: postgres://$USER:$PASSWORD@$TARGET_SERVICE_NAME:5432/$DBNAME
PGSSLMODE: disable
"
info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."

View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -e
# USER="$(relation-get user)"
# DBNAME="$(relation-get dbname)"
# PASSWORD=$(relation-get password) || {
# err "Can't get password for '$SERVICE_NAME' from '$TARGET_SERVICE_NAME'."
# exit 1
# }
PASSWORD=$(relation-get password) || {
err "Can't get password for '$SERVICE_NAME' from '$TARGET_SERVICE_NAME'."
exit 1
}
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
REDIS_URL: redis://:$PASSWORD@$TARGET_SERVICE_NAME:6379
"
info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."

View File

@@ -0,0 +1,21 @@
#!/bin/bash
set -e
host=$(relation-get host) || exit 1
port=$(relation-get port) || exit 1
user=$(relation-get login) || exit 1
password="$(relation-get password)" || exit 1
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
SMTP_USERNAME: \"$user\"
SMTP_PASS: \"${password//\$/\$\$}\"
SMTP_HOST: \"$host\"
SMTP_PORT: \"$port\"
#SMTP_SECURE: \"false\"
"

View File

@@ -0,0 +1,48 @@
#!/bin/bash
set -e
URL=$(relation-get url) || {
echo "Failed to query for 'url' value"
exit 1
}
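# Split the URL into protocol, domain, optional port and path components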
DOMAIN_PATH="${URL#*://}"
if [[ "$DOMAIN_PATH" == *"/"* ]]; then
DOMAIN="${DOMAIN_PATH%%/*}"
UPATH="/${DOMAIN_PATH#*/}"
else
DOMAIN="${DOMAIN_PATH}"
UPATH=""
fi
PROTO="${URL%:*}"
if [[ "$DOMAIN" == *":"* ]]; then
PORT="${DOMAIN#*:}"
DOMAIN="${DOMAIN%%:*}"
else
case "$PROTO" in
http)
PORT=80
;;
https)
PORT=443
;;
*)
echo "Unknown portocol '$PROTO' in url '$URL'."
exit 1
;;
esac
fi
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
URL: \"${PROTO}://${DOMAIN}:${PORT}${UPATH}\"
"

38
outline/metadata.yml Normal file
View File

@@ -0,0 +1,38 @@
docker-image: docker.0k.io/outline:0.76.0-elabore

uses:
  postgres-database:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: required
    auto: summon
    solves:
      database: "main storage"
    default-options:
      extensions:
        - uuid-ossp
  redis-database:
    constraint: required
    auto: summon
    solves:
      database: "short time storage"
  smtp-server:
    constraint: required
    auto: summon
    solves:
      proxy: "Public access"
  web-proxy:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: recommended
    auto: pair
    solves:
      proxy: "Public access"
    default-options:
      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:3000
  backup:
    constraint: recommended
    auto: pair
    solves:
      backup: "Automatic regular backup"
    default-options:

32
zato/README.org Normal file
View File

@@ -0,0 +1,32 @@
# -*- ispell-local-dictionary: "english" -*-
* Info
From Zato 3.2: https://zato.io/en/docs/3.2/tutorial/01.html
* Usage
Launching with =web-proxy= requires a connected frontend.
Service deployments should be mounted as volumes into the docker container.
Warning: if using keycloak, the correct =keycloak_public_key.pem= has to
be placed manually in the =resources= folder (a sketch is given after
the example below).
#+begin_src yaml
zato:
  docker-compose:
    volumes:
      - <PROJECT_FOLDER>/schemas:/opt/zato/current/extlib/schemas:rw
      - <PROJECT_FOLDER>/models:/opt/zato/current/extlib/models:rw
      - <PROJECT_FOLDER>/services:/opt/hot-deploy/services:rw
      - <PROJECT_FOLDER>/enmasse:/opt/hot-deploy/enmasse:rw
      - <PROJECT_FOLDER>/resources/keycloak_public_key.pem:/opt/hot-deploy/keycloak_public_key.pem:rw
  relations:
    web-proxy:
      frontend:
        domain: zato.<mondomain>.coop
#+end_src
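For example, a sketch of placing the key, assuming the realm public key
was copied from Keycloak's admin console (Realm Settings -> Keys) as a
single base64 line saved in =realm_public_key.b64=:
#+begin_src bash
# Assumption: PROJECT_FOLDER points at the project folder used in the
# volumes above, and realm_public_key.b64 holds the base64 public key
# copied from the Keycloak admin console.
mkdir -p "$PROJECT_FOLDER"/resources
{
  echo "-----BEGIN PUBLIC KEY-----"
  fold -w 64 realm_public_key.b64
  echo "-----END PUBLIC KEY-----"
} > "$PROJECT_FOLDER"/resources/keycloak_public_key.pem
#+end_src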

BIN
zato/actions/.renew_crt.swp Normal file

Binary file not shown.

23
zato/actions/renew_crt Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/bash
# compose: no-hooks
## Merge the letsencrypt certificates for the load_balancer in zato
. "$CHARM_PATH/lib/common"
DOMAIN=$(relation:get "$SERVICE_NAME":web-proxy domain)
merge_crt_letsencrypt "$DOMAIN" || exit 1
zato_commands="
cd /opt/zato &&
./restart-load-balancer.sh
"
if ! exec_as_zato_in_container "$zato_commands"; then
printf "Error: failed to execute 'restart-load-balancer' in container '%s'.\n" "$CONTAINER_NAME" >&2
exit 1
fi
echo "load balancer restarted"

25
zato/actions/restart-zato Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
if [ -z "$SERVICE_DATASTORE" ]; then
echo "This script is meant to be run through 'compose' to work properly." >&2
exit 1
fi
. "$CHARM_PATH/lib/common"
# Combined commands to be run as zato user
zato_commands="
cd /opt/zato/env/qs-1 &&
./start-server-fg.sh &
"
# Execute commands as zato user
exec_as_zato_in_container "/opt/zato/current/bin/zato stop /opt/zato/env/qs-1/server1/"
sleep 3
if ! exec_as_zato_in_container "$zato_commands"; then
printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
exit 1
fi
printf "Zato restarted successfully in container '%s'.\n" "$CONTAINER_NAME" >&2

24
zato/actions/start-zato Executable file
View File

@@ -0,0 +1,24 @@
#!/bin/bash
if [ -z "$SERVICE_DATASTORE" ]; then
echo "This script is meant to be run through 'compose' to work properly." >&2
exit 1
fi
. "$CHARM_PATH/lib/common"
# Combined commands to be run as zato user
zato_commands="
cd /opt/zato/env/qs-1 &&
./start-server-fg.sh &
"
# Execute commands as zato user
if ! exec_as_zato_in_container "$zato_commands"; then
printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
exit 1
fi
printf "Zato started successfully in container '%s'.\n" "$CONTAINER_NAME" >&2

21
zato/actions/stop-zato Executable file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
if [ -z "$SERVICE_DATASTORE" ]; then
echo "This script is meant to be run through 'compose' to work properly." >&2
exit 1
fi
. "$CHARM_PATH/lib/common"
# Combined commands to be run as zato user
zato_commands="/opt/zato/current/bin/zato stop /opt/zato/env/qs-1/server1/"
# Execute commands as zato user
if ! exec_as_zato_in_container "$zato_commands"; then
printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
exit 1
fi
printf "Zato stopped successfully in container '%s'.\n" "$CONTAINER_NAME" >&2

BIN
zato/hooks/.init.swp Normal file

Binary file not shown.

48
zato/hooks/init Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.
## Accessible variables are:
## - SERVICE_NAME Name of current service
## - DOCKER_BASE_IMAGE Base image from which this service might be built if any
## - SERVICE_DATASTORE Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
set -e
. lib/common
ZATO_DIR="/opt/hot-deploy"
DATASTORE_ZATO_DIR="$SERVICE_NAME$ZATO_DIR"
SSH_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/ssh-password
DASHBOARD_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/dashboard-password
IDE_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/ide-password
## The load-balancer script is not in /opt/zato/env/qs-1 because this folder is created after launch
RESTART_LOADBALANCER_FILE=/opt/zato/restart-load-balancer.sh
ssh_password=$(generate_or_get_secret "$SSH_PASSWORD_FILE")
dashboard_password=$(generate_or_get_secret "$DASHBOARD_PASSWORD_FILE")
ide_password=$(generate_or_get_secret "$IDE_PASSWORD_FILE")
init-config-add "
$SERVICE_NAME:
environment:
Zato_Log_Env_Details: \"True\"
Zato_Dashboard_Debug_Enabled: \"True\"
Zato_SSH_Password: \"$ssh_password\"
Zato_Dashboard_Password: \"$dashboard_password\"
Zato_IDE_Password: \"$ide_password\"
volumes:
- /srv/charm-store/elabore-charms/zato/resources$RESTART_LOADBALANCER_FILE:$RESTART_LOADBALANCER_FILE
"
# uid=$(docker_get_uid "$SERVICE_NAME" "zato")
# mkdir -p "$DATASTORE_ZATO_DIR"
# chown "$uid" "$DATASTORE_ZATO_DIR"

View File

@@ -0,0 +1,14 @@
#!/bin/bash
set -e
user=$(relation-get user) || exit 1
password="$(relation-get password)" || exit 1
dbname="$(relation-get dbname)" || exit 1
COMPOSE_DIR="$SERVICE_DATASTORE/.compose"
echo "
user:${user}
dbname:${dbname}
password:${password}
" > $COMPOSE_DIR/psql_id

View File

@@ -0,0 +1,22 @@
#!/bin/bash
. lib/common
DOMAIN=$(relation-get domain) || exit 1
CUSTOM_CREATE_LB_PATH="/opt/zato/3.2.0/code/zato-cli/src/zato/cli/create_lb.py"
set -e
merge_crt_letsencrypt "$DOMAIN"
# adding custom config file to handle https in load_balancer with letsencrypt-fullchain certificate
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
volumes:
- $BASE_CHARM_PATH/resources/$CUSTOM_CREATE_LB_PATH:$CUSTOM_CREATE_LB_PATH
- $DEST_LETSENCRYPT_FULLCHAIN:/opt/zato/letsencrypt-fullchain.pem
"
info "Configured $SERVICE_NAME load_balancer with HTTPS support."

51
zato/lib/common Normal file
View File

@@ -0,0 +1,51 @@
#!/bin/bash
generate_or_get_secret() {
local secret_file="$1"
local secret_value
if ! [ -f "$secret_file" ]; then
info "Generating secret password for ${secret_file##*/}"
mkdir -p "${secret_file%/*}"
umask 077
secret_value=$(openssl rand -hex 32)
echo "$secret_value" > "$secret_file"
else
info "Using existing secret from ${secret_file##*/}"
secret_value=$(cat "$secret_file")
fi
echo "$secret_value"
}
get_container_name(){
containers="$(get_running_containers_for_service "$SERVICE_NAME")"
if [ -z "$containers" ]; then
error "No running containers found for service $SERVICE_NAME"
exit 1
fi
container="$(echo "$containers" | head -n 1)"
echo "$container"
}
# Function to execute all commands sequentially as the zato user inside the Docker container
exec_as_zato_in_container() {
CONTAINER_NAME=$(get_container_name)
local cmd="$1"
if ! docker exec -i "$CONTAINER_NAME" bash -c "su - zato -c '$cmd'"; then
printf "Error: Failed to execute command '%s' as zato user in container '%s'\n" "$cmd" "$CONTAINER_NAME" >&2
return 1
fi
}
## Merge certificates for Zato's HAProxy to handle https API calls
merge_crt_letsencrypt(){
local DOMAIN="$1"
DEST_LETSENCRYPT_FULLCHAIN="$SERVICE_DATASTORE/opt/zato/letsencrypt-fullchain.pem"
mkdir -p "${DEST_LETSENCRYPT_FULLCHAIN%/*}"
cat "$DATASTORE/letsencrypt/etc/letsencrypt/live/$DOMAIN"/{fullchain,privkey}.pem > "$DEST_LETSENCRYPT_FULLCHAIN" || return 1
info "Letsencrypt {fullchain,privkey}.pem have been concatenated into /opt/zato/letsencrypt-fullchain.pem for the Zato HAProxy conf"
}

35
zato/metadata.yml Normal file
View File

@@ -0,0 +1,35 @@
docker-image: docker.0k.io/zato-3.2-quickstart

docker-compose:
  ports:
    - "21223:21223"

uses:
  web-proxy:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: recommended
    auto: pair
    solves:
      proxy: "Public access"
    default-options:
      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8183
  postgres-database:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: recommended
    auto: summon
    solves:
      database: "main storage"
  schedule-command:
    constraint: required
    auto: pair
    solves:
      maintenance: "Auto renew crt for HAProxy in zato"
    default-options: !var-expand
      (35 3 * * 7) {-D -p 10} compose renew_crt "$BASE_SERVICE_NAME"
  backup:
    constraint: recommended
    auto: pair
    solves:
      backup: "Automatic regular backup"
    default-options:

View File

@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Zato Source s.r.o. https://zato.io
Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
"""
# stdlib
import os, uuid
# Zato
from zato.cli import is_arg_given, ZatoCommand
from zato.common.defaults import http_plain_server_port
from zato.common.util.open_ import open_w
config_template = """{{
"haproxy_command": "haproxy",
"host": "localhost",
"port": 20151,
"is_tls_enabled": false,
"keyfile": "./zato-lba-priv-key.pem",
"certfile": "./zato-lba-cert.pem",
"ca_certs": "./zato-lba-ca-certs.pem",
"work_dir": "../",
"verify_fields": {{}},
"log_config": "./logging.conf",
"pid_file": "zato-lb-agent.pid"
}}
"""
zato_config_template = """
# ##############################################################################
global
log 127.0.0.1:514 local0 debug # ZATO global:log
stats socket {stats_socket} # ZATO global:stats_socket
# ##############################################################################
defaults
log global
option httpclose
stats uri /zato-lb-stats # ZATO defaults:stats uri
timeout connect 15000 # ZATO defaults:timeout connect
timeout client 15000 # ZATO defaults:timeout client
timeout server 15000 # ZATO defaults:timeout server
errorfile 503 {http_503_path}
stats enable
stats realm Haproxy\ Statistics
# Note: The password below is a UUID4 written in plain-text.
stats auth admin1:{stats_password}
stats refresh 5s
# ##############################################################################
backend bck_http_plain
mode http
balance roundrobin
# ZATO begin backend bck_http_plain
{default_backend}
# ZATO end backend bck_http_plain
# ##############################################################################
frontend front_http_plain
mode http
default_backend bck_http_plain
option forwardfor
option httplog # ZATO frontend front_http_plain:option log-http-requests
bind 0.0.0.0:11223 # ZATO frontend front_http_plain:bind
maxconn 200 # ZATO frontend front_http_plain:maxconn
monitor-uri /zato-lb-alive # ZATO frontend front_http_plain:monitor-uri
# ##############################################################################
frontend front_tls_no_client_certs
mode http
default_backend bck_http_plain
option forwardfor
reqadd X-Forwarded-Proto:\ https
acl has_x_forwarded_proto req.fhdr(X-Forwarded-Proto) -m found
http-request deny if has_x_forwarded_proto
bind 0.0.0.0:21223 ssl crt /opt/zato/letsencrypt-fullchain.pem
""" # noqa
default_backend = """
server http_plain--server1 127.0.0.1:{server01_port} check inter 2s rise 2 fall 2 # ZATO backend bck_http_plain:server--server1
"""
http_503 = """HTTP/1.0 503 Service Unavailable
Cache-Control: no-cache
Connection: close
Content-Type: application/json
{"zato_env":
{"details": "No server is available to handle the request",
"result": "ZATO_ERROR",
"cid": "K012345678901234567890123456"}
}
"""
class Create(ZatoCommand):
    """Creates a new Zato load-balancer"""

    opts = []
    opts.append(
        {
            "name": "--pub-key-path",
            "help": "Path to the load-balancer agent's public key in PEM",
        }
    )
    opts.append(
        {
            "name": "--priv-key-path",
            "help": "Path to the load-balancer agent's private key in PEM",
        }
    )
    opts.append(
        {
            "name": "--cert-path",
            "help": "Path to the load-balancer agent's certificate in PEM",
        }
    )
    opts.append(
        {
            "name": "--ca-certs-path",
            "help": "Path to a PEM list of certificates the load-balancer's agent will trust",
        }
    )

    needs_empty_dir = True

    def __init__(self, args):
        super(Create, self).__init__(args)
        self.target_dir = os.path.abspath(args.path)  # noqa

    def execute(
        self,
        args,
        use_default_backend=False,
        server02_port=None,
        show_output=True,
    ):
        # Zato
        from zato.common.util.logging_ import get_logging_conf_contents

        os.mkdir(os.path.join(self.target_dir, "config"))  # noqa
        os.mkdir(os.path.join(self.target_dir, "logs"))  # noqa

        repo_dir = os.path.join(self.target_dir, "config", "repo")  # noqa
        os.mkdir(repo_dir)  # noqa

        log_path = os.path.abspath(
            os.path.join(repo_dir, "..", "..", "logs", "lb-agent.log")
        )  # noqa

        stats_socket = os.path.join(self.target_dir, "haproxy-stat.sock")  # noqa

        is_tls_enabled = is_arg_given(args, "priv_key_path")

        config = config_template.format(
            **{
                "is_tls_enabled": is_tls_enabled,
            }
        )

        logging_conf_contents = get_logging_conf_contents()

        open_w(os.path.join(repo_dir, "lb-agent.conf")).write(config)  # noqa
        open_w(os.path.join(repo_dir, "logging.conf")).write(
            logging_conf_contents
        )  # noqa

        if use_default_backend:
            backend = default_backend.format(
                server01_port=http_plain_server_port,
                server02_port=server02_port,
            )
        else:
            backend = "\n# ZATO default_backend_empty"

        zato_config = zato_config_template.format(
            stats_socket=stats_socket,
            stats_password=uuid.uuid4().hex,
            default_backend=backend,
            http_503_path=os.path.join(repo_dir, "503.http"),
        )  # noqa

        open_w(os.path.join(repo_dir, "zato.config")).write(zato_config)  # noqa
        open_w(os.path.join(repo_dir, "503.http")).write(http_503)  # noqa

        self.copy_lb_crypto(repo_dir, args)

        # Initial info
        self.store_initial_info(
            self.target_dir, self.COMPONENTS.LOAD_BALANCER.code
        )

        if show_output:
            if self.verbose:
                msg = "Successfully created a load-balancer's agent in {}".format(
                    self.target_dir
                )
                self.logger.debug(msg)
            else:
                self.logger.info("OK")

View File

@@ -0,0 +1,68 @@
# ##############################################################################
global
log 127.0.0.1:514 local0 debug # ZATO global:log
stats socket /opt/zato/env/qs-1/load-balancer/haproxy-stat.sock # ZATO global:stats_socket
# ##############################################################################
defaults
log global
option httpclose
stats uri /zato-lb-stats # ZATO defaults:stats uri
timeout connect 15000 # ZATO defaults:timeout connect
timeout client 15000 # ZATO defaults:timeout client
timeout server 15000 # ZATO defaults:timeout server
errorfile 503 /opt/zato/env/qs-1/load-balancer/config/repo/503.http
stats enable
stats realm Haproxy\ Statistics
# Note: The password below is a UUID4 written in plain-text.
stats auth admin1:8ecbddd3bebe474b93ae43b353a917ff
stats refresh 5s
# ##############################################################################
backend bck_http_plain
mode http
balance roundrobin
# ZATO begin backend bck_http_plain
server http_plain--server1 127.0.0.1:17010 check inter 2s rise 2 fall 2 # ZATO backend bck_http_plain:server--server1
# ZATO end backend bck_http_plain
# ##############################################################################
frontend front_http_plain
mode http
default_backend bck_http_plain
option forwardfor
option httplog # ZATO frontend front_http_plain:option log-http-requests
bind 0.0.0.0:11223 # ZATO frontend front_http_plain:bind
maxconn 200 # ZATO frontend front_http_plain:maxconn
monitor-uri /zato-lb-alive # ZATO frontend front_http_plain:monitor-uri
frontend front_tls_no_client_certs
mode http
default_backend bck_http_plain
option forwardfor
reqadd X-Forwarded-Proto:\ https
acl has_x_forwarded_proto req.fhdr(X-Forwarded-Proto) -m found
http-request deny if has_x_forwarded_proto
bind 0.0.0.0:21223 ssl crt /opt/hot-deploy/cert/letsencrypt-fullchain.pem

View File

@@ -0,0 +1,14 @@
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:~/current/bin
export PYTHONPATH=:/opt/zato/current/extlib
export ZATO_PYTHON_REQS=/opt/hot-deploy/python-reqs/requirements.txt
export ZATO_HOT_DEPLOY_DIR=/opt/hot-deploy/services:
export ZATO_USER_CONF_DIR=/opt/hot-deploy/user-conf:/tmp/zato-user-conf
export ZATO_HOT_DEPLOY_PREFER_SNAPSHOTS=True
export Zato_Is_Quickstart=
export Zato_Log_Env_Details=True
export Zato_TLS_Verify=
export Zato_Is_Docker=True
~/current/bin/zato stop /opt/zato/env/qs-1/load-balancer
kill $(ps -aux | grep zato.agent.load_balancer.main | grep -v grep | grep -v /bin/sh | awk '{ print $2 }')
~/current/bin/zato start /opt/zato/env/qs-1/load-balancer --env-file /opt/hot-deploy/enmasse/env.ini

5
zatodoc/actions/popo Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/bash
## compose: no-hooks
echo youpla

23
zatodoc/metadata.yml Normal file
View File

@@ -0,0 +1,23 @@
description: Zato Doc

subordinate: true

requires:
  web-publishing-directory:
    interface: publish-dir
    scope: container

data-resources:
  - /opt/zatodoc/

uses:
  publish-dir:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    scope: container
    constraint: required
    auto: summon
    solves:
      container: "main running server"
    default-options:
      location: !var-expand "$DATASTORE/$BASE_SERVICE_NAME/opt/zatodoc"