Compare commits
8 Commits
- c5e45fa28b
- 79dd0a785a
- 49e2f398c5
- 69002abb5c
- 406e01a095
- f6fd85266f
- 0fa527a98e
- 02755e516c
clickhouse/README.org (Normal file, 7 lines)
@@ -0,0 +1,7 @@
+# -*- ispell-local-dictionary: "english" -*-
+
+* Info
+
+This charm is provided to work with the plausible charm.
+
+* Usage
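The pairing can also be made explicit in a deployment file. This is a sketch only, with hypothetical service names; by default the event-db relation declared in plausible/metadata.yml further down uses auto: summon, so the pairing is resolved automatically:

#+begin_src yaml
# Hypothetical compose.yml fragment pairing plausible with this clickhouse charm
# through the event-db relation; with auto: summon this is normally implicit.
plausible:
  relations:
    event-db:
      clickhouse:
#+end_src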
clickhouse/hooks/init (Executable file, 21 lines)
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+## Init is run on host
+## For now it is run every time the script is launched, but
+## it should be launched only once after build.
+
+## Accessible variables are:
+## - SERVICE_NAME        Name of current service
+## - DOCKER_BASE_IMAGE   Base image from which this service might be built if any
+## - SERVICE_DATASTORE   Location on host of the DATASTORE of this service
+## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
+
+set -e
+
+init-config-add "
+$SERVICE_NAME:
+  environment:
+    CLICKHOUSE_SKIP_USER_SETUP: 1
+  healthcheck:
+    test: [ \"CMD-SHELL\", \"wget --no-verbose --tries=1 -O - http://127.0.0.1:8123/ping || exit 1\" ]
+"
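For reference, the fragment merged by init-config-add above resolves to roughly the following, assuming $SERVICE_NAME expands to a service named clickhouse:

#+begin_src yaml
# Resolved sketch of the init snippet above ($SERVICE_NAME assumed to be "clickhouse").
clickhouse:
  environment:
    CLICKHOUSE_SKIP_USER_SETUP: 1
  healthcheck:
    test: [ "CMD-SHELL", "wget --no-verbose --tries=1 -O - http://127.0.0.1:8123/ping || exit 1" ]
#+end_src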
clickhouse/metadata.yml (Normal file, 21 lines)
@@ -0,0 +1,21 @@
+docker-image: docker.0k.io/clickhouse:24.12-alpine
+#docker-image: clickhouse/clickhouse-server:24.12-alpine
+
+data-resources:
+  - /var/lib/clickhouse
+  - /var/log/clickhouse-server
+
+charm-resources:
+  - /etc/clickhouse-server/config.d/logs.xml
+  - /etc/clickhouse-server/config.d/ipv4-only.xml
+  - /etc/clickhouse-server/config.d/low-resources.xml
+
+provides:
+  event-db:
+
+uses:
+  log-rotate:
+    constraint: recommended
+    auto: pair
+    solves:
+      disk-leak: "/var/log/clickhouse-server"
@@ -0,0 +1,3 @@
+<clickhouse>
+    <listen_host>0.0.0.0</listen_host>
+</clickhouse>
clickhouse/resources/etc/clickhouse-server/config.d/logs.xml (Normal file, 28 lines)
@@ -0,0 +1,28 @@
+<clickhouse>
+    <logger>
+        <level>warning</level>
+        <console>true</console>
+    </logger>
+
+    <query_log replace="1">
+        <database>system</database>
+        <table>query_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <engine>
+            ENGINE = MergeTree
+            PARTITION BY event_date
+            ORDER BY (event_time)
+            TTL event_date + interval 30 day
+            SETTINGS ttl_only_drop_parts=1
+        </engine>
+    </query_log>
+
+    <!-- Stops unnecessary logging -->
+    <metric_log remove="remove" />
+    <asynchronous_metric_log remove="remove" />
+    <query_thread_log remove="remove" />
+    <text_log remove="remove" />
+    <trace_log remove="remove" />
+    <session_log remove="remove" />
+    <part_log remove="remove" />
+</clickhouse>
@@ -0,0 +1,23 @@
+<!-- https://clickhouse.com/docs/en/operations/tips#using-less-than-16gb-of-ram -->
+<clickhouse>
+    <!--
+        https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#mark_cache_size -->
+    <mark_cache_size>524288000</mark_cache_size>
+
+    <profile>
+        <default>
+            <!-- https://clickhouse.com/docs/en/operations/settings/settings#max_threads -->
+            <max_threads>1</max_threads>
+            <!-- https://clickhouse.com/docs/en/operations/settings/settings#max_block_size -->
+            <max_block_size>8192</max_block_size>
+            <!-- https://clickhouse.com/docs/en/operations/settings/settings#max_download_threads -->
+            <max_download_threads>1</max_download_threads>
+            <!--
+                https://clickhouse.com/docs/en/operations/settings/settings#input_format_parallel_parsing -->
+            <input_format_parallel_parsing>0</input_format_parallel_parsing>
+            <!--
+                https://clickhouse.com/docs/en/operations/settings/settings#output_format_parallel_formatting -->
+            <output_format_parallel_formatting>0</output_format_parallel_formatting>
+        </default>
+    </profile>
+</clickhouse>
@@ -1,34 +0,0 @@
-Description
-===========
-
-Using ``keycloak`` version 24.0
-DEV info : https://www.keycloak.org/server/containers
-
-Usage
-=====
-
-To start with ``keycloak``, just put this service in your
-``compose.yml``::
-
-  keycloak:
-    docker-compose:
-      image: docker.0k.io/keycloak24.0.4-elabore:1.0.0
-    options:
-      admin-password: CHANGEME
-    relations:
-      web-proxy:
-        frontend:
-          domain: id.mydomain.fr
-
-Customize theme
-===============
-
-You can customize theme by putting your theme in
-``/srv/datastore/data/keycloak/opt/keycloak/themes``
-
-For example copy the material folder from
-https://github.com/MAXIMUS-DeltaWare/material-keycloak-theme and
-restart ``keycloak``.
-
-Then go to your admin console, log in and go to the realm/themes part
-to choose you new theme
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-
@@ -1,15 +0,0 @@
-#FROM keycloak/keycloak:24.0.4 as builder
-#
-#ENV KC_METRICS_ENABLED=true
-#ENV KC_FEATURES=token-exchange
-#ENV KC_DB=postgres
-#
-#WORKDIR /opt/keycloak
-## for demonstration purposes only, please make sure to use proper certificates in production instead
-#RUN /opt/keycloak/bin/kc.sh build
-
-FROM keycloak/keycloak:24.0.4
-#COPY --from=builder /opt/keycloak/ /opt/keycloak/
-WORKDIR /opt/keycloak
-ENV KC_LOG_LEVEL=INFO
-ENTRYPOINT ["/opt/keycloak/bin/kc.sh", "start", "--optimized"]
@@ -1,16 +0,0 @@
-FROM docker.0k.io/keycloak:17.0.1 as builder
-
-ENV KC_METRICS_ENABLED=true
-ENV KC_FEATURES=token-exchange
-ENV KC_DB=postgres
-RUN /opt/keycloak/bin/kc.sh build
-
-FROM builder as inspector
-ENTRYPOINT ["ls", "-l", "/opt/keycloak/lib/"]
-
-
-#FROM docker.0k.io/keycloak:17.0.0
-#COPY --from=builder /opt/keycloak/lib/quarkus/ /opt/keycloak/lib/quarkus/
-#WORKDIR /opt/keycloak
-#ENV KC_LOG_LEVEL=INFO
-#ENTRYPOINT ["/opt/keycloak/bin/kc.sh", "start"]
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -e
-
-admin_password=$(options-get admin-password) || exit 1
-
-init-config-add "\
-$MASTER_BASE_SERVICE_NAME:
-  environment:
-    KEYCLOAK_ADMIN: \"admin\"
-    KEYCLOAK_ADMIN_PASSWORD: \"$admin_password\"
-"
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-. lib/common
-
-DOMAIN=$(relation-get domain) || exit 1
-#IP_HOST=$(hostname -I | awk '{print $1}')
-
-set -e
-keycloak:generate-key-if-not-exist "$DOMAIN"
-
-
-config-add "\
-services:
-  $MASTER_BASE_SERVICE_NAME:
-    environment:
-      KC_HOSTNAME: \"$DOMAIN\"
-      KC_PROXY: edge
-      KC_HTTP_ENABLED: \"true\"
-      KC_HOSTNAME_STRICT: \"false\"
-"
-
@@ -1,46 +0,0 @@
-# -*- mode: bash -*-
-
-KEYCLOAK_DIR=/opt/keycloak
-DATASTORE_KEYCLOAK_DIR="$SERVICE_DATASTORE$KEYCLOAK_DIR"
-HOST_DATASTORE_KEYCLOAK_DIR="$HOST_DATASTORE/$SERVICE_NAME$KEYCLOAK_DIR"
-
-keycloak:generate-key-if-not-exist() {
-    local domain="$1" ip_host
-
-    [ -d "$DATASTORE_KEYCLOAK_DIR" ] && return 0
-
-    ip_host=$(set -o pipefail; getent ahostsv4 "$domain" | head -n 1 | cut -f 1 -d " ") || {
-        err "Couldn't resolve to ipv4 domain name '$domain'."
-        return 1
-    }
-    info "Resolved successfully '$domain' to ip '$ip_host'."
-    debug "DOCKER_BASE_IMAGE: $DOCKER_BASE_IMAGE"
-    debug "HOST_DATASTORE_KEYCLOAK_DIR:: $HOST_DATASTORE_KEYCLOAK_DIR"
-    mkdir -p "$DATASTORE_KEYCLOAK_DIR/conf" || return 0
-    docker_image_export_dir "$DOCKER_BASE_IMAGE" "/opt/keycloak" "$SERVICE_DATASTORE/opt" || return 1
-    uid=$(docker_get_uid "$SERVICE_NAME" "keycloak") || return 1
-    chown "$uid" "$DATASTORE_KEYCLOAK_DIR" -R
-    debug "DATASTORE_KEYCLOAK_DIR_LS:: $(ls $DATASTORE_KEYCLOAK_DIR)"
-    docker run -w /opt/keycloak \
-        -v "$HOST_DATASTORE_KEYCLOAK_DIR":"/opt/keycloak" \
-        --entrypoint bash \
-        "$DOCKER_BASE_IMAGE" -c "
-    export KC_METRICS_ENABLED=true
-    export KC_FEATURES=token-exchange
-    export KC_DB=postgres
-    keytool -genkeypair -storepass password \
-        -storetype PKCS12 -keyalg RSA \
-        -keysize 2048 -dname 'CN=$domain' \
-        -alias server -ext 'SAN:c=DNS:$domain,IP:$ip_host' \
-        -keystore conf/server.keystore || exit 1
-    echo 'Generated key'
-    /opt/keycloak/bin/kc.sh build
-
-    " || {
-
-        rmdir "$DATASTORE_KEYCLOAK_DIR/conf" 2>/dev/null
-        rmdir "$DATASTORE_KEYCLOAK_DIR" 2>/dev/null
-        return 1
-    }
-
-}
@@ -1,21 +0,0 @@
-
-default-options:
-
-uses:
-  web-proxy:
-    #constraint: required | recommended | optional
-    #auto: pair | summon | none ## default: pair
-    constraint: required
-    auto: pair
-    solves:
-      proxy: "Public access"
-    default-options:
-      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8080
-  postgres-database:
-    #constraint: required | recommended | optional
-    #auto: pair | summon | none ## default: pair
-    constraint: recommended
-    auto: summon
-    solves:
-      database: "main storage"
-    default-options:
n8n/hooks/web_proxy-relation-joined (Executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+DOMAIN=$(relation-get domain) || {
+    echo "Failed to get domain"
+    exit 1
+}
+
+config-add "\
+services:
+  $MASTER_BASE_SERVICE_NAME:
+    environment:
+      N8N_HOST: \"${DOMAIN}\"
+      WEBHOOK_URL: \"https:\/\/${DOMAIN}\"
+"
+
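With a hypothetical relation domain of n8n.example.com and a master service named n8n, the config-add call above would contribute roughly this fragment (the \/ escapes only protect the slashes through the shell quoting):

#+begin_src yaml
# Sketch of the merged fragment; domain and service name are assumptions.
services:
  n8n:
    environment:
      N8N_HOST: "n8n.example.com"
      WEBHOOK_URL: "https://n8n.example.com"
#+end_src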
@@ -1,4 +1,4 @@
-docker-image: docker.n8n.io/n8nio/n8n:1.23.0
+docker-image: docker.0k.io/n8n:1.45.1
 
 uses:
   postgres-database:
@@ -22,6 +22,15 @@ uses:
       proxy: "Public access"
     default-options:
       target: !var-expand ${MASTER_BASE_SERVICE_NAME}:5678
+      apache-custom-rules:
+        - !var-expand |
+
+          ## Use RewriteEngine to handle WebSocket connection upgrades
+          RewriteEngine On
+          RewriteCond %{HTTP:Upgrade} ^websocket$ [NC]
+          RewriteCond %{HTTP:Connection} Upgrade [NC]
+          RewriteRule /(.*)\$ ws://${MASTER_BASE_SERVICE_NAME}:5678/\$1 [P,L]
+
   backup:
     constraint: recommended
     auto: pair
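The \$ escapes in the RewriteRule keep the backreference out of the !var-expand pass; assuming a master service named n8n, the rule that should reach Apache after expansion looks like:

#+begin_src yaml
# Post-expansion sketch of the added apache-custom-rules entry (service name assumed).
apache-custom-rules:
  - |
    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} ^websocket$ [NC]
    RewriteCond %{HTTP:Connection} Upgrade [NC]
    RewriteRule /(.*)$ ws://n8n:5678/$1 [P,L]
#+end_src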
@@ -12,8 +12,9 @@ Config info: https://github.com/outline/outline/blob/main/.env.sample
 Odoo config: if you configure odoo OIDC connector, the callback url
 should be like this : https://<YOUR_OUTLINE>:443/auth/oidc.callback
 
-Requires a =smtp-server= provider to be functional, you can use
-=smtp-stub= charm to provide information to externally managed =SMTP=.
+#Requires a =smtp-server= provider to be functional, you can use
+#=smtp-stub= charm to provide information to externally managed =SMTP=.
+
 #+begin_src yaml
 outline:
@@ -26,16 +27,19 @@ outline:
   oidc-user-info-uri: #the user info uri of your OIDC provider
   oidc-logout-uri: #the login uri of your OIDC provider
 
-smtp-stub:
-  options:
-    host: smtp.myhost.com
-    port: 465
-    connection-security: "ssl/tls"
-    auth-method: password #IMPORTANT: if not present login password doesn’t work
-    login: myuser
-    password: myp4ssw0rd
-#+end_src
+#smtp-stub:
+#  options:
+#    host: smtp.myhost.com
+#    port: 465
+#    connection-security: "ssl/tls"
+#    auth-method: password #IMPORTANT: if not present login password doesn’t work
+#    login: myuser
+#    password: myp4ssw0rd
+##+end_src
 
+** Odoo 14
+
+We monkey-patch odoo to make it work; be sure to use the latest 14.0 version of the galicea openIDConnection module.
+
 * Building a new image
 
@@ -45,33 +49,9 @@ We use the official image with an added patch due to 2 bugs:
 
 Note that a PR was pushed with a fix on the first bug. But this was not yet tested.
 
-These fix are on 0.76.0
+The fix is on 0.83.0
 
-** First fix
+** Fix
 
-We need to add "url.port = '';" in ~build/server/middlewares/passport.js~ to remove the port. Note that this is a bad fix but works for our setup.
-
-#+begin_src bash
-IMAGE=docker.0k.io/outline:0.76.0-elabore
-
-echo 'apk add patch bash' | dupd -u "$IMAGE" -- -u 1
-cat <<'EOF1' | dupd -u "$IMAGE" -- -u 0
-patch -p 1 <<'EOF2'
---- a/build/server/middlewares/passport.js
-+++ b/build/server/middlewares/passport.js
-@@ -40,6 +40,7 @@
-       const requestHost = ctx.get("host");
-       const url = new URL("".concat(reqProtocol, "://").concat(requestHost).concat(redirectUrl));
-       url.host = host;
-+      url.port = '';
-       return ctx.redirect("".concat(url.toString()).concat(hasQueryString ? "&" : "?", "notice=").concat(notice));
-     }
-     if (_env.default.isDevelopment) {
-EOF2
-EOF1
-#+end_src
-
-** Second fix
-
 Upon calling "/oidc" url, outline will return "Set-Cookie" header
 with a "domain:" value that is incorrect (still the inner docker
@@ -84,8 +64,9 @@ The patches will change the "build/" files, so this is a very temporary and brit
 
 
 #+begin_src bash
-IMAGE=docker.0k.io/outline:0.76.0-elabore
+IMAGE=docker.0k.io/outline:0.83.0-elabore
 
+echo 'apt update && apt install patch' | dupd -u "$IMAGE" -- -u 0
 cat <<'EOF1' | dupd -u "$IMAGE" -- -u 0
 patch -p 1 <<'EOF2'
 --- a/build/server/utils/passport.js.orig
@@ -62,12 +62,12 @@ $SERVICE_NAME:
     OIDC_LOGOUT_URI: \"$oidc_logout_uri\"
     OIDC_SCOPES: \"openid\"
     OIDC_USERNAME_CLAIM: \"preferred_username\"
-    OIDC_DISPLAY_NAME: \"OpenID Connect\"
     NODE_ENV: \"production\"
     LOG_LEVEL: \"debug\"
     FORCE_HTTPS: \"false\"
+    FILE_STORAGE: \"local\"
     #DEVELOPMENT_UNSAFE_INLINE_CSP: \"true\"
-    DEBUG: \"http\"
+    #DEBUG: \"http\"
 "
 
 
@@ -13,9 +13,9 @@ services:
   $MASTER_BASE_SERVICE_NAME:
     environment:
       SMTP_USERNAME: \"$user\"
-      SMTP_PASS: \"${password//\$/\$\$}\"
+      SMTP_PASSWORD: \"${password//\$/\$\$}\"
       SMTP_HOST: \"$host\"
       SMTP_PORT: \"$port\"
-      #SMTP_SECURE: \"false\"
+      SMTP_FROM_EMAIL: \"$user\"
 "
 
@@ -1,4 +1,4 @@
-docker-image: docker.0k.io/outline:0.76.0-elabore
+docker-image: docker.0k.io/outline:0.83.0-elabore
 
 uses:
   postgres-database:
@@ -11,6 +11,8 @@ uses:
     default-options:
       extensions:
         - uuid-ossp
+        - unaccent
+        - pg_trm
   redis-database:
     constraint: required
     auto: summon
@@ -30,6 +32,15 @@ uses:
       proxy: "Public access"
     default-options:
       target: !var-expand ${MASTER_BASE_SERVICE_NAME}:3000
+      apache-custom-rules:
+        - !var-expand |
+
+          ## Use RewriteEngine to handle WebSocket connection upgrades
+          RewriteEngine On
+          RewriteCond %{HTTP:Connection} Upgrade [NC]
+          RewriteCond %{HTTP:Upgrade} websocket [NC]
+          RewriteRule /(.*)\$ ws://${MASTER_BASE_SERVICE_NAME}:3000/\$1 [P,L]
+
   backup:
     constraint: recommended
     auto: pair
plausible/README.org (Normal file, 7 lines)
@@ -0,0 +1,7 @@
+# -*- ispell-local-dictionary: "english" -*-
+
+* Info
+
+From: https://github.com/plausible/community-edition/
+
+* Usage
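The Usage section is still empty at this point in the changeset. As a sketch only, based on the relations declared in plausible/metadata.yml further down (the domain and proxy service names are placeholders), a minimal deployment could look like:

#+begin_src yaml
# Hypothetical compose.yml: clickhouse, postgres and smtp are summoned
# automatically (auto: summon); only the public domain has to be chosen.
plausible:
  relations:
    web-proxy:
      frontend:
        domain: stats.example.com
#+end_src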
plausible/hooks/event_db-relation-joined (Executable file, 14 lines)
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+
+
+config-add "\
+services:
+  $MASTER_BASE_SERVICE_NAME:
+    environment:
+      CLICKHOUSE_DATABASE_URL: http://$TARGET_SERVICE_NAME:8123/$TARGET_SERVICE_NAME
+"
+
+info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."
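For an event-db target hypothetically named clickhouse and a master service named plausible, the fragment above resolves to:

#+begin_src yaml
# Resolved sketch; both service names are assumptions.
services:
  plausible:
    environment:
      CLICKHOUSE_DATABASE_URL: http://clickhouse:8123/clickhouse
#+end_src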
plausible/hooks/init (Executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+SECRET_KEY_BASE="$SERVICE_DATASTORE"/secret-key
+
+SHARE_DIR="$SERVICE_DATASTORE"/var/lib/plausible
+mkdir -p $SHARE_DIR
+
+uid=$(docker_get_uid "$SERVICE_NAME" "plausible")
+
+if ! [ -f "$SECRET_KEY_BASE" ]; then
+    info "Generating secret key"
+    mkdir -p "${SECRET_KEY_BASE%/*}"
+    umask 077
+    openssl rand -base64 64 > "$SECRET_KEY_BASE"
+else
+    info "Using existing secret key"
+fi
+
+secret_key_base=$(cat "$SECRET_KEY_BASE")
+
+init-config-add "
+$SERVICE_NAME:
+  environment:
+    SECRET_KEY_BASE: \"$secret_key_base\"
+"
+
+chown -v "$uid" "$SHARE_DIR"
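The secret is generated once, persisted under $SERVICE_DATASTORE/secret-key, and read back on every later run, so the merged fragment stays stable across restarts. Schematically (value and service name are placeholders):

#+begin_src yaml
# Sketch only: SECRET_KEY_BASE is the persisted value read back from the datastore.
plausible:
  environment:
    SECRET_KEY_BASE: "<base64 value generated once and reused>"
#+end_src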
@@ -6,12 +6,12 @@ PASSWORD="$(relation-get password)"
 USER="$(relation-get user)"
 DBNAME="$(relation-get dbname)"
 
 
 config-add "\
 services:
   $MASTER_BASE_SERVICE_NAME:
     environment:
-      KC_DB_URL: \"jdbc:postgresql://$MASTER_TARGET_SERVICE_NAME:5432/$DBNAME\"
-      KC_DB_USERNAME: \"$USER\"
-      KC_DB_PASSWORD: \"$PASSWORD\"
-      KC_DB: \"postgres\"
+      DATABASE_URL: postgres://$USER:$PASSWORD@$TARGET_SERVICE_NAME:5432/$DBNAME
 "
 
+info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."
plausible/hooks/smtp_server-relation-joined (Executable file, 22 lines)
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e
+
+host=$(relation-get host) || exit 1
+port=$(relation-get port) || exit 1
+user=$(relation-get login) || exit 1
+password="$(relation-get password)" || exit 1
+
+
+config-add "\
+services:
+  $MASTER_BASE_SERVICE_NAME:
+    environment:
+      SMTP_USER_NAME: \"$user\"
+      SMTP_USER_PWD: \"${password//\$/\$\$}\"
+      SMTP_HOST_ADDR: \"$host\"
+      SMTP_HOST_PORT: \"$port\"
+      SMTP_HOST_SSL_ENABLE: \"true\"
+      MAILER_EMAIL: \"$user\"
+"
+
plausible/hooks/web_proxy-relation-joined (Executable file, 16 lines)
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+DOMAIN=$(relation-get domain) || {
+    echo "Failed to get domain"
+    exit 1
+}
+
+config-add "\
+services:
+  $MASTER_BASE_SERVICE_NAME:
+    environment:
+      BASE_URL: \"https:\/\/${DOMAIN}\"
+"
+
plausible/metadata.yml (Normal file, 58 lines)
@@ -0,0 +1,58 @@
+docker-image: docker.0k.io/plausible:3.0.1
+#docker-image: ghcr.io/plausible/community-edition:v3.0.1
+
+data-resources:
+  - /var/lib/plausible
+
+docker-compose:
+  entrypoint: sh -c "/entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
+  #entrypoint: sh -c "/entrypoint.sh run"
+uses:
+  event-db:
+    #constraint: required | recommended | optional
+    #auto: pair | summon | none ## default: pair
+    constraint: required
+    auto: summon
+    solves:
+      database: "event db"
+  postgres-database:
+    #constraint: required | recommended | optional
+    #auto: pair | summon | none ## default: pair
+    constraint: required
+    auto: summon
+    solves:
+      database: "main storage"
+    default-options:
+      extensions:
+        - citext
+  smtp-server:
+    constraint: required
+    auto: summon
+    solves:
+      proxy: "Public access"
+  web-proxy:
+    #constraint: required | recommended | optional
+    #auto: pair | summon | none ## default: pair
+    constraint: recommended
+    auto: pair
+    solves:
+      proxy: "Public access"
+    default-options:
+      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8000
+      apache-custom-rules:
+        - !var-expand |
+          ProxyPreserveHost On
+
+          #Set web sockets
+          RewriteEngine On
+          RewriteCond %{HTTP:Upgrade} =websocket [NC]
+          RewriteCond %{HTTP:Connection} upgrade [NC]
+          RewriteRule ^/(live/websocket)$ ws://${MASTER_BASE_SERVICE_NAME}:8000/\$1 [P,L]
+
+
+  backup:
+    constraint: recommended
+    auto: pair
+    solves:
+      backup: "Automatic regular backup"
+    default-options:
@@ -1,32 +0,0 @@
-# -*- ispell-local-dictionary: "english" -*-
-
-* Info
-
-From Zato 3.2 : https://zato.io/en/docs/3.2/tutorial/01.html
-
-
-* Usage
-
-Lauching with web-proxy need to have a frontend connected
-Deployments of services : they should be mounted as volume for the docker
-
-Warning : if using keycloak the correct keycloak_public_key have to be manually placed in reso
-
-#+begin_src yaml
-
-zato:
-  docker-compose:
-    volumes:
-      - <PROJECT_FOLDER>/schemas:/opt/zato/current/extlib/schemas:rw
-      - <PROJECT_FOLDER>/models:/opt/zato/current/extlib/models:rw
-      - <PROJECT_FOLDER>/services:/opt/hot-deploy/services:rw
-      - <PROJECT_FOLDER>/enmasse:/opt/hot-deploy/enmasse:rw
-      - <PROJECT_FOLDER>/resources/keycloak_public_key.pem:/opt/hot-deploy/keycloak_public_key.pem:rw
-  relations:
-    web-proxy:
-      frontend:
-        domain: zato.<mondomain>.coop
-
-#+end_src
-
-
Binary file not shown.
@@ -1,23 +0,0 @@
-#!/bin/bash
-# compose: no-hooks
-
-## Merged letsencrypt certificate for load_balancer in zato
-
-. $CHARM_PATH/lib/common
-
-DOMAIN=$(relation:get "$SERVICE_NAME":web-proxy domain)
-
-
-merge_crt_letsencrypt "$DOMAIN" || exit 1
-
-zato_commands="
-cd /opt/zato &&
-./restart-load-balancer.sh
-"
-
-if ! exec_as_zato_in_container "$zato_commands"; then
-    printf "Error: failed to execute 'restart-load-balancer' in container '%s'.\n" "$CONTAINER_NAME" >&2
-    return 1
-fi
-
-echo "load balancer restarted"
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-
-if [ -z "$SERVICE_DATASTORE" ]; then
-    echo "This script is meant to be run through 'compose' to work properly." >&2
-    exit 1
-fi
-
-. "$CHARM_PATH/lib/common"
-
-# Combined commands to be run as zato user
-zato_commands="
-cd /opt/zato/env/qs-1 &&
-./start-server-fg.sh &
-"
-
-# Execute commands as zato user
-exec_as_zato_in_container "/opt/zato/current/bin/zato stop /opt/zato/env/qs-1/server1/"
-sleep 3
-if ! exec_as_zato_in_container "$zato_commands"; then
-    printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
-    exit 1
-fi
-
-printf "Zato restarted successfully in container '%s'.\n" "$CONTAINER_NAME" >&2
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-
-if [ -z "$SERVICE_DATASTORE" ]; then
-    echo "This script is meant to be run through 'compose' to work properly." >&2
-    exit 1
-fi
-
-. "$CHARM_PATH/lib/common"
-
-# Combined commands to be run as zato user
-zato_commands="
-cd /opt/zato/env/qs-1 &&
-./start-server-fg.sh &
-"
-
-# Execute commands as zato user
-if ! exec_as_zato_in_container "$zato_commands"; then
-    printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
-    return 1
-fi
-
-printf "Zato started successfully in container '%s'.\n" "$CONTAINER_NAME" >&2
-
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-
-if [ -z "$SERVICE_DATASTORE" ]; then
-    echo "This script is meant to be run through 'compose' to work properly." >&2
-    exit 1
-fi
-
-. "$CHARM_PATH/lib/common"
-
-# Combined commands to be run as zato user
-zato_commands="/opt/zato/current/bin/zato stop /opt/zato/env/qs-1/server1/"
-
-# Execute commands as zato user
-if ! exec_as_zato_in_container "$zato_commands"; then
-    printf "Error: Failed to execute zato commands in container '%s'.\n" "$CONTAINER_NAME" >&2
-    return 1
-fi
-
-printf "Zato stopped successfully in container '%s'.\n" "$CONTAINER_NAME" >&2
-
Binary file not shown.
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-## Init is run on host
-## For now it is run every time the script is launched, but
-## it should be launched only once after build.
-
-## Accessible variables are:
-## - SERVICE_NAME        Name of current service
-## - DOCKER_BASE_IMAGE   Base image from which this service might be built if any
-## - SERVICE_DATASTORE   Location on host of the DATASTORE of this service
-## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
-
-
-set -e
-
-. lib/common
-
-ZATO_DIR="/opt/hot-deploy"
-DATASTORE_ZATO_DIR="$SERVICE_NAME$ZATO_DIR"
-
-
-SSH_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/ssh-password
-DASHBOARD_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/dashboard-password
-IDE_PASSWORD_FILE="$SERVICE_DATASTORE"/.compose/password/ide-password
-## Load balancer script in not in /opt/zato/env/qs-1 because this folder is created after launch
-RESTART_LOADBALANCER_FILE=/opt/zato/restart-load-balancer.sh
-
-ssh_password=$(generate_or_get_secret "$SSH_PASSWORD_FILE")
-dashboard_password=$(generate_or_get_secret "$DASHBOARD_PASSWORD_FILE")
-ide_password=$(generate_or_get_secret "$IDE_PASSWORD_FILE")
-
-
-init-config-add "
-$SERVICE_NAME:
-  environment:
-    Zato_Log_Env_Details: \"True\"
-    Zato_Dashboard_Debug_Enabled: \"True\"
-    Zato_SSH_Password: \"$ssh_password\"
-    Zato_Dashboard_Password: \"$dashboard_password\"
-    Zato_IDE_Password: \"$ide_password\"
-  volumes:
-    - /srv/charm-store/elabore-charms/zato/resources$RESTART_LOADBALANCER_FILE:$RESTART_LOADBALANCER_FILE
-"
-
-# uid=$(docker_get_uid "$SERVICE_NAME" "zato")
-# mkdir -p "$DATASTORE_ZATO_DIR"
-# chown "$uid" "$DATASTORE_ZATO_DIR"
-
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-set -e
-
-user=$(relation-get user) || exit 1
-password="$(relation-get password)" || exit 1
-dbname="$(relation-get dbname)" || exit 1
-COMPOSE_DIR="$SERVICE_DATASTORE/.compose"
-
-echo "
-user:${user}
-dbname:${dbname}
-password:${password}
-" > $COMPOSE_DIR/psql_id
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-. lib/common
-
-DOMAIN=$(relation-get domain) || exit 1
-CUSTOM_CREATE_LB_PATH="/opt/zato/3.2.0/code/zato-cli/src/zato/cli/create_lb.py"
-
-set -e
-
-merge_crt_letsencrypt "$DOMAIN"
-
-# adding custom config file to handle https in load_balancer with letsencrypt-fullchain certificate
-
-config-add "\
-services:
-  $MASTER_BASE_SERVICE_NAME:
-    volumes:
-      - $BASE_CHARM_PATH/resources/$CUSTOM_CREATE_LB_PATH:$CUSTOM_CREATE_LB_PATH
-      - $DEST_LETSENCRYPT_FULLCHAIN:/opt/zato/letsencrypt-fullchain.pem
-"
-
-info "Configured $SERVICE_NAME load_balancer with HTTPS support."
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-generate_or_get_secret() {
-    local secret_file="$1"
-    local secret_value
-
-    if ! [ -f "$secret_file" ]; then
-        info "Generating secret password for ${secret_file##*/}"
-        mkdir -p "${secret_file%/*}"
-        umask 077
-        secret_value=$(openssl rand -hex 32)
-        echo "$secret_value" > "$secret_file"
-    else
-        info "Using existing secret from ${secret_file##*/}"
-        secret_value=$(cat "$secret_file")
-    fi
-
-    echo "$secret_value"
-}
-
-get_container_name(){
-    containers="$(get_running_containers_for_service "$SERVICE_NAME")"
-    if [ -z "$containers" ]; then
-        error "No running containers found for service $SERVICE_NAME"
-        exit 1
-    fi
-    container="$(echo "$containers" | head -n 1)"
-    echo "$container"
-}
-
-# Function to execute all commands sequentially as the zato user inside the Docker container
-exec_as_zato_in_container() {
-    CONTAINER_NAME=$(get_container_name)
-    local cmd="$1"
-    if ! docker exec -i "$CONTAINER_NAME" bash -c "su - zato -c '$cmd'"; then
-        printf "Error: Failed to execute command '%s' as zato user in container '%s'\n" "$cmd" "$CONTAINER_NAME" >&2
-        return 1
-    fi
-}
-
-## merge certificate for zato HapProxy to handle https API calls
-merge_crt_letsencrypt(){
-    local DOMAIN="$1"
-
-    DEST_LETSENCRYPT_FULLCHAIN="$SERVICE_DATASTORE/opt/zato/letsencrypt-fullchain.pem"
-    mkdir -p "${DEST_LETSENCRYPT_FULLCHAIN%/*}"
-    cat $DATASTORE/letsencrypt/etc/letsencrypt/live/$DOMAIN/{fullchain,privkey}.pem > "$DEST_LETSENCRYPT_FULLCHAIN" || return 1
-    info "Letsencrypt {fullchain,privkey}.pem have been concat to /opt/zato/letsencrypt-fullchain.pem for zato hapProxy conf"
-}
-
-
@@ -1,35 +0,0 @@
-docker-image: docker.0k.io/zato-3.2-quickstart
-docker-compose:
-  ports:
-    - "21223:21223"
-
-uses:
-  web-proxy:
-    #constraint: required | recommended | optional
-    #auto: pair | summon | none ## default: pair
-    constraint: recommended
-    auto: pair
-    solves:
-      proxy: "Public access"
-    default-options:
-      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8183
-  postgres-database:
-    #constraint: required | recommended | optional
-    #auto: pair | summon | none ## default: pair
-    constraint: recommended
-    auto: summon
-    solves:
-      database: "main storage"
-  schedule-command:
-    constraint: required
-    auto: pair
-    solves:
-      maintenance: "Auto renew crt for hapProxy in zato"
-    default-options: !var-expand
-      (35 3 * * 7) {-D -p 10} compose renew_crt "$BASE_SERVICE_NAME"
-  backup:
-    constraint: recommended
-    auto: pair
-    solves:
-      backup: "Automatic regular backup"
-    default-options:
@@ -1,222 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-Copyright (C) 2021, Zato Source s.r.o. https://zato.io
-
-Licensed under AGPLv3, see LICENSE.txt for terms and conditions.
-"""
-
-# stdlib
-import os, uuid
-
-# Zato
-from zato.cli import is_arg_given, ZatoCommand
-from zato.common.defaults import http_plain_server_port
-from zato.common.util.open_ import open_w
-
-config_template = """{{
-  "haproxy_command": "haproxy",
-  "host": "localhost",
-  "port": 20151,
-  "is_tls_enabled": false,
-  "keyfile": "./zato-lba-priv-key.pem",
-  "certfile": "./zato-lba-cert.pem",
-  "ca_certs": "./zato-lba-ca-certs.pem",
-  "work_dir": "../",
-  "verify_fields": {{}},
-  "log_config": "./logging.conf",
-  "pid_file": "zato-lb-agent.pid"
-}}
-"""
-
-zato_config_template = """
-# ##############################################################################
-
-global
-    log 127.0.0.1:514 local0 debug # ZATO global:log
-    stats socket {stats_socket} # ZATO global:stats_socket
-
-# ##############################################################################
-
-defaults
-    log global
-    option httpclose
-
-    stats uri /zato-lb-stats # ZATO defaults:stats uri
-
-    timeout connect 15000 # ZATO defaults:timeout connect
-    timeout client 15000 # ZATO defaults:timeout client
-    timeout server 15000 # ZATO defaults:timeout server
-
-    errorfile 503 {http_503_path}
-
-    stats enable
-    stats realm Haproxy\ Statistics
-
-    # Note: The password below is a UUID4 written in plain-text.
-    stats auth admin1:{stats_password}
-
-    stats refresh 5s
-
-# ##############################################################################
-
-backend bck_http_plain
-    mode http
-    balance roundrobin
-
-    # ZATO begin backend bck_http_plain
-
-    {default_backend}
-
-    # ZATO end backend bck_http_plain
-
-# ##############################################################################
-
-frontend front_http_plain
-
-    mode http
-    default_backend bck_http_plain
-
-    option forwardfor
-    option httplog # ZATO frontend front_http_plain:option log-http-requests
-    bind 0.0.0.0:11223 # ZATO frontend front_http_plain:bind
-    maxconn 200 # ZATO frontend front_http_plain:maxconn
-
-    monitor-uri /zato-lb-alive # ZATO frontend front_http_plain:monitor-uri
-
-# ##############################################################################
-
-frontend front_tls_no_client_certs
-
-    mode http
-    default_backend bck_http_plain
-    option forwardfor
-    reqadd X-Forwarded-Proto:\ https
-
-    acl has_x_forwarded_proto req.fhdr(X-Forwarded-Proto) -m found
-    http-request deny if has_x_forwarded_proto
-
-    bind 0.0.0.0:21223 ssl crt /opt/zato/letsencrypt-fullchain.pem
-
-""" # noqa
-
-default_backend = """
-server http_plain--server1 127.0.0.1:{server01_port} check inter 2s rise 2 fall 2 # ZATO backend bck_http_plain:server--server1
-"""
-
-http_503 = """HTTP/1.0 503 Service Unavailable
-Cache-Control: no-cache
-Connection: close
-Content-Type: application/json
-
-{"zato_env":
-  {"details": "No server is available to handle the request",
-   "result": "ZATO_ERROR",
-   "cid": "K012345678901234567890123456"}
-}
-"""
-
-
-class Create(ZatoCommand):
-    """Creates a new Zato load-balancer"""
-
-    opts = []
-    opts.append(
-        {
-            "name": "--pub-key-path",
-            "help": "Path to the load-balancer agent's public key in PEM",
-        }
-    )
-    opts.append(
-        {
-            "name": "--priv-key-path",
-            "help": "Path to the load-balancer agent's private key in PEM",
-        }
-    )
-    opts.append(
-        {
-            "name": "--cert-path",
-            "help": "Path to the load-balancer agent's certificate in PEM",
-        }
-    )
-    opts.append(
-        {
-            "name": "--ca-certs-path",
-            "help": "Path to the a PEM list of certificates the load-balancer's agent will trust",
-        }
-    )
-
-    needs_empty_dir = True
-
-    def __init__(self, args):
-        super(Create, self).__init__(args)
-        self.target_dir = os.path.abspath(args.path) # noqa
-
-    def execute(
-        self,
-        args,
-        use_default_backend=False,
-        server02_port=None,
-        show_output=True,
-    ):
-        # Zato
-        from zato.common.util.logging_ import get_logging_conf_contents
-
-        os.mkdir(os.path.join(self.target_dir, "config")) # noqa
-        os.mkdir(os.path.join(self.target_dir, "logs")) # noqa
-
-        repo_dir = os.path.join(self.target_dir, "config", "repo") # noqa
-        os.mkdir(repo_dir) # noqa
-
-        log_path = os.path.abspath(
-            os.path.join(repo_dir, "..", "..", "logs", "lb-agent.log")
-        ) # noqa
-        stats_socket = os.path.join(self.target_dir, "haproxy-stat.sock") # noqa
-
-        is_tls_enabled = is_arg_given(args, "priv_key_path")
-        config = config_template.format(
-            **{
-                "is_tls_enabled": is_tls_enabled,
-            }
-        )
-
-        logging_conf_contents = get_logging_conf_contents()
-
-        open_w(os.path.join(repo_dir, "lb-agent.conf")).write(config) # noqa
-        open_w(os.path.join(repo_dir, "logging.conf")).write(
-            logging_conf_contents
-        ) # noqa
-
-        if use_default_backend:
-            backend = default_backend.format(
-                server01_port=http_plain_server_port,
-                server02_port=server02_port,
-            )
-        else:
-            backend = "\n# ZATO default_backend_empty"
-
-        zato_config = zato_config_template.format(
-            stats_socket=stats_socket,
-            stats_password=uuid.uuid4().hex,
-            default_backend=backend,
-            http_503_path=os.path.join(repo_dir, "503.http"),
-        ) # noqa
-
-        open_w(os.path.join(repo_dir, "zato.config")).write(zato_config) # noqa
-        open_w(os.path.join(repo_dir, "503.http")).write(http_503) # noqa
-
-        self.copy_lb_crypto(repo_dir, args)
-
-        # Initial info
-        self.store_initial_info(
-            self.target_dir, self.COMPONENTS.LOAD_BALANCER.code
-        )
-
-        if show_output:
-            if self.verbose:
-                msg = "Successfully created a load-balancer's agent in {}".format(
-                    self.target_dir
-                )
-                self.logger.debug(msg)
-            else:
-                self.logger.info("OK")
@@ -1,68 +0,0 @@
-
-# ##############################################################################
-
-global
-    log 127.0.0.1:514 local0 debug # ZATO global:log
-    stats socket /opt/zato/env/qs-1/load-balancer/haproxy-stat.sock # ZATO global:stats_socket
-
-# ##############################################################################
-
-defaults
-    log global
-    option httpclose
-
-    stats uri /zato-lb-stats # ZATO defaults:stats uri
-
-    timeout connect 15000 # ZATO defaults:timeout connect
-    timeout client 15000 # ZATO defaults:timeout client
-    timeout server 15000 # ZATO defaults:timeout server
-
-    errorfile 503 /opt/zato/env/qs-1/load-balancer/config/repo/503.http
-
-    stats enable
-    stats realm Haproxy\ Statistics
-
-    # Note: The password below is a UUID4 written in plain-text.
-    stats auth admin1:8ecbddd3bebe474b93ae43b353a917ff
-
-    stats refresh 5s
-
-# ##############################################################################
-
-backend bck_http_plain
-    mode http
-    balance roundrobin
-
-    # ZATO begin backend bck_http_plain
-
-
-    server http_plain--server1 127.0.0.1:17010 check inter 2s rise 2 fall 2 # ZATO backend bck_http_plain:server--server1
-
-
-    # ZATO end backend bck_http_plain
-
-# ##############################################################################
-
-frontend front_http_plain
-
-    mode http
-    default_backend bck_http_plain
-
-    option forwardfor
-    option httplog # ZATO frontend front_http_plain:option log-http-requests
-    bind 0.0.0.0:11223 # ZATO frontend front_http_plain:bind
-    maxconn 200 # ZATO frontend front_http_plain:maxconn
-
-    monitor-uri /zato-lb-alive # ZATO frontend front_http_plain:monitor-uri
-
-frontend front_tls_no_client_certs
-
-    mode http
-    default_backend bck_http_plain
-    option forwardfor
-    reqadd X-Forwarded-Proto:\ https
-
-    acl has_x_forwarded_proto req.fhdr(X-Forwarded-Proto) -m found
-    http-request deny if has_x_forwarded_proto
-
-    bind 0.0.0.0:21223 ssl crt /opt/hot-deploy/cert/letsencrypt-fullchain.pem
@@ -1,14 +0,0 @@
-export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:~/current/bin
-export PYTHONPATH=:/opt/zato/current/extlib
-export ZATO_PYTHON_REQS=/opt/hot-deploy/python-reqs/requirements.txt
-export ZATO_HOT_DEPLOY_DIR=/opt/hot-deploy/services:
-export ZATO_USER_CONF_DIR=/opt/hot-deploy/user-conf:/tmp/zato-user-conf
-export ZATO_HOT_DEPLOY_PREFER_SNAPSHOTS=True
-export Zato_Is_Quickstart=
-export Zato_Log_Env_Details=True
-export Zato_TLS_Verify=
-export Zato_Is_Docker=True
-
-~/current/bin/zato stop /opt/zato/env/qs-1/load-balancer
-kill $(ps -aux | grep zato.agent.load_balancer.main | grep -v grep | grep -v /bin/sh | awk '{ print $2 }')
-~/current/bin/zato start /opt/zato/env/qs-1/load-balancer --env-file /opt/hot-deploy/enmasse/env.ini
@@ -1,5 +0,0 @@
-#!/bin/bash
-## compose: no-hooks
-
-
-echo youpla
@@ -1,23 +0,0 @@
-description: Zato Doc
-subordinate: true
-requires:
-  web-publishing-directory:
-    interface: publish-dir
-    scope: container
-
-data-resources:
-  - /opt/zatodoc/
-
-
-uses:
-  publish-dir:
-    #constraint: required | recommended | optional
-    #auto: pair | summon | none ## default: pair
-    scope: container
-    constraint: required
-    auto: summon
-    solves:
-      container: "main running server"
-    default-options:
-      location: !var-expand "$DATASTORE/$BASE_SERVICE_NAME/opt/zatodoc"
-