Merge branch 'main' into update_readme_for_installation

This commit is contained in:
Croft 2023-03-22 09:56:11 +01:00
commit 8deafe2023
17 changed files with 365 additions and 33 deletions

View File

@ -25,10 +25,11 @@ This repository is the starting point for any information and tools you will nee
- [File structure](#file-structure)
4. [Things you should know](#things-you-should-know)
- [Auto-Updates](#auto-updates)
- [Auto-Backups](#auto-backups)
- [Non-Linux OS](#non-linux-os)
5. [Troubleshooting](#troubleshooting)
- [Docker Daemon Proxy Configuration](#docker-daemon-proxy-configuration)
- [Monitoring](#monitoring)
- [Docker Daemon Proxy Configuration](#docker-Daemon-Proxy-Configuration)
6. [License](#license)
## Requirements
@ -249,6 +250,20 @@ Your Bridgehead will automatically and regularly check for updates. Whenever som
If you would like to understand what exactly happens and when, please check the systemd units deployed during the [installation](#base-installation) via `systemctl cat bridgehead-update@<PROJECT>.service` and `systemctl cat bridgehead-update@<PROJECT>.timer`.
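For example, to inspect the update unit for the `ccp` project and see when its timer will fire next (commands shown for illustration; substitute your project name):
```
systemctl cat bridgehead-update@ccp.service
systemctl list-timers 'bridgehead-update@*'
```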
### Auto-Backups
Some of the components in the Bridgehead store persistent data. For those components, we have integrated an automated backup solution into the Bridgehead updates. It automatically saves the backup in multiple files:
1) Last-XX, where XX represents a weekday, to allow re-import of at least one version of the database for each of the past seven days.
2) Year-KW-XX, where XX represents the calendar week, to allow re-import of at least one version per calendar week.
3) Year-Month, to allow re-import of at least one version per month.
To enable the Auto-Backup feature, please set the variable `BACKUP_DIRECTORY` in your site's configuration.
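For illustration, a minimal sketch of the resulting layout, assuming a hypothetical backup path `/var/backups/bridgehead` and the patient list database container from the id-management module:
```
# in your site configuration repository under /etc/bridgehead
BACKUP_DIRECTORY=/var/backups/bridgehead

# after the next update, per database container:
/var/backups/bridgehead/bridgehead-patientlist-db/Last-Wednesday.sql   # one file per weekday
/var/backups/bridgehead/bridgehead-patientlist-db/2023-KW12.sql        # one file per calendar week
/var/backups/bridgehead/bridgehead-patientlist-db/2023-03.sql          # one file per month
```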
### Development Installation
By using `./bridgehead dev-install <projectname>` instead of `install`, you can install a developer Bridgehead. The difference is that you can provide an arbitrary configuration repository during the installation, meaning it does not have to adhere to the usual naming scheme. This allows for better decoupling between development and production configurations.
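A minimal sketch of the flow, assuming the `ccp` project and a hypothetical development configuration repository (run with root privileges):
```
./bridgehead dev-install ccp
# during system preparation you are prompted:
#   Please enter your config repository URL: https://gitlab.example.org/my-dev-site-config.git
# the repository is cloned to /etc/bridgehead and the installation continues as usual
```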
### Non-Linux OS
The installation procedures described above have only been tested under Linux.
@ -268,6 +283,11 @@ Installation under WSL ought to work, but we have not tested this.
## Troubleshooting
### Docker Daemon Proxy Configuration
Docker runs a background daemon that is responsible for downloading images and starting containers. Sometimes, your system's proxy configuration will not carry over to this daemon and it will fail to download images. In that case, configure the proxy for the daemon as described in the [official documentation](https://docs.docker.com).
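If you follow the systemd approach from the official documentation, a sketch of such a drop-in could look like this (the proxy address is a placeholder; adjust it to your environment):
```
sudo mkdir -p /etc/systemd/system/docker.service.d
cat <<'EOF' | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://proxy.example.de:3128"
Environment="HTTPS_PROXY=http://proxy.example.de:3128"
Environment="NO_PROXY=localhost,127.0.0.1"
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
```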
### Monitoring
To keep all Bridgeheads up and working and detect any errors before a user does, a central monitoring
@ -277,10 +297,6 @@ To keep all Bridgeheads up and working and detect any errors before a user does,
In all monitoring cases, no sensitive information is transmitted; in particular, no patient-related data. Aggregated data, e.g. the total number of datasets, may be transmitted for diagnostic purposes.
### Docker Daemon Proxy Configuration
Docker has a background daemon, responsible for downloading images and starting them. Sometimes, proxy configuration from your system won't carry over and it will fail to download images. In that case, configure the proxy for this daemon as described in the [official documentation](https://docs.docker.com).
## License
Copyright 2019 - 2022 The Samply Community

View File

@ -3,7 +3,7 @@ version: "3.7"
services:
traefik:
container_name: bridgehead-traefik
image: traefik:latest
image: docker.verbis.dkfz.de/cache/traefik:latest
command:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
@ -32,7 +32,7 @@ services:
forward_proxy:
container_name: bridgehead-forward-proxy
image: samply/bridgehead-forward-proxy:latest
image: docker.verbis.dkfz.de/cache/samply/bridgehead-forward-proxy:latest
environment:
HTTPS_PROXY: ${HTTPS_PROXY_URL}
USERNAME: ${HTTPS_PROXY_USERNAME}
@ -42,7 +42,7 @@ services:
landing:
container_name: bridgehead-landingpage
image: samply/bridgehead-landingpage:master
image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:master
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
@ -54,7 +54,7 @@ services:
SITE_NAME: ${SITE_NAME}
blaze:
image: "samply/blaze:0.18"
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-bbmri-blaze
environment:
BASE_URL: "http://bridgehead-bbmri-blaze:8080"
@ -72,7 +72,7 @@ services:
- "traefik.http.routers.blaze_ccp.tls=true"
spot:
image: samply/spot:latest
image: docker.verbis.dkfz.de/cache/samply/spot:latest
container_name: bridgehead-spot
environment:
SECRET: ${SPOT_BEAM_SECRET_LONG}
@ -85,7 +85,7 @@ services:
- "blaze"
beam-proxy:
image: "samply/beam-proxy:develop"
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}

View File

@ -70,14 +70,25 @@ case "$ACTION" in
;;
stop)
loadVars
# HACK: This is temporary, to properly shut down wrongly named bridgehead instances (bridgehead-ccp instead of ccp)
$COMPOSE -p bridgehead-$PROJECT -f ./$PROJECT/docker-compose.yml $OVERRIDE down
exec $COMPOSE -f ./$PROJECT/docker-compose.yml $OVERRIDE down
;;
is-running)
bk_is_running
exit $?
;;
update)
loadVars
exec ./lib/update-bridgehead.sh $PROJECT
;;
install)
source ./lib/prepare-system.sh
source ./lib/prepare-system.sh NODEV
loadVars
exec ./lib/install-bridgehead.sh $PROJECT
;;
dev-install)
exec ./lib/prepare-system.sh DEV
loadVars
exec ./lib/install-bridgehead.sh $PROJECT
;;

View File

@ -3,7 +3,7 @@ version: "3.7"
services:
traefik:
container_name: bridgehead-traefik
image: traefik:latest
image: docker.verbis.dkfz.de/cache/traefik:latest
command:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
@ -32,7 +32,7 @@ services:
forward_proxy:
container_name: bridgehead-forward-proxy
image: samply/bridgehead-forward-proxy:latest
image: docker.verbis.dkfz.de/cache/samply/bridgehead-forward-proxy:latest
environment:
HTTPS_PROXY: ${HTTPS_PROXY_URL}
USERNAME: ${HTTPS_PROXY_USERNAME}
@ -42,7 +42,7 @@ services:
landing:
container_name: bridgehead-landingpage
image: samply/bridgehead-landingpage:master
image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:master
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
@ -54,7 +54,7 @@ services:
SITE_NAME: ${SITE_NAME}
blaze:
image: "samply/blaze:0.18"
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-ccp-blaze
environment:
BASE_URL: "http://bridgehead-ccp-blaze:8080"
@ -72,7 +72,7 @@ services:
- "traefik.http.routers.blaze_ccp.tls=true"
spot:
image: samply/spot:latest
image: docker.verbis.dkfz.de/cache/samply/spot:latest
container_name: bridgehead-spot
environment:
SECRET: ${SPOT_BEAM_SECRET_LONG}
@ -85,7 +85,7 @@ services:
- "blaze"
beam-proxy:
image: "samply/beam-proxy:develop"
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}

View File

@ -2,7 +2,7 @@ version: "3.7"
services:
exliquid-task-store:
image: "samply/blaze:0.18"
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-exliquid-task-store
environment:
BASE_URL: "http://bridgehead-exliquid-task-store:8080"
@ -13,7 +13,7 @@ services:
- "traefik.enable=false"
exliquid-report-hub:
image: "samply/report-hub:latest"
image: docker.verbis.dkfz.de/cache/samply/report-hub:latest
container_name: bridgehead-exliquid-report-hub
environment:
SPRING_WEBFLUX_BASE_PATH: "/exliquid"

View File

@ -2,7 +2,7 @@
function exliquidSetup() {
case ${SITE_ID} in
berlin|dresden|essen|frankfurt|freiburg|luebeck|mainz|muenchen-lmu|muenchen-tu|mannheim|tuebingen)
berlin|dresden|essen|frankfurt|freiburg|luebeck|mainz|muenchen-lmu|muenchen-tum|mannheim|tuebingen)
EXLIQUID=1
;;
dktk-test)

View File

@ -0,0 +1,57 @@
version: "3.7"
services:
id-manager:
image: docker.verbis.dkfz.de/bridgehead/magicpl
container_name: bridgehead-id-manager
environment:
TOMCAT_REVERSEPROXY_FQDN: ${HOST}
MAGICPL_SITE: ${IDMANAGEMENT_FRIENDLY_ID}
MAGICPL_ALLOWED_ORIGINS: https://${HOST}
MAGICPL_LOCAL_PATIENTLIST_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
MAGICPL_CENTRAXX_APIKEY: ${IDMANAGER_UPLOAD_APIKEY}
MAGICPL_CONNECTOR_APIKEY: ${IDMANAGER_READ_APIKEY}
MAGICPL_CENTRAL_PATIENTLIST_APIKEY: ${IDMANAGER_CENTRAL_PATIENTLIST_APIKEY}
MAGICPL_CONTROLNUMBERGENERATOR_APIKEY: ${IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY}
MAGICPL_OIDC_CLIENT_ID: ${IDMANAGER_AUTH_CLIENT_ID}
MAGICPL_OIDC_CLIENT_SECRET: ${IDMANAGER_AUTH_CLIENT_SECRET}
depends_on:
- patientlist
labels:
- "traefik.enable=true"
- "traefik.http.routers.id-manager.rule=PathPrefix(`/id-manager`)"
- "traefik.http.services.id-manager.loadbalancer.server.port=8080"
- "traefik.http.routers.id-manager.tls=true"
patientlist:
image: docker.verbis.dkfz.de/bridgehead/mainzelliste
container_name: bridgehead-patientlist
environment:
- TOMCAT_REVERSEPROXY_FQDN=${HOST}
- ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
- ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
- ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
- ML_UPLOAD_API_KEY=${IDMANAGER_UPLOAD_APIKEY}
# Add Variables from /etc/patientlist-id-generators.env
- PATIENTLIST_SEEDS_TRANSFORMED
labels:
- "traefik.enable=true"
- "traefik.http.routers.patientlist.rule=PathPrefix(`/patientlist`)"
- "traefik.http.services.patientlist.loadbalancer.server.port=8080"
- "traefik.http.routers.patientlist.tls=true"
depends_on:
- patientlist-db
patientlist-db:
image: docker.verbis.dkfz.de/cache/postgres:15.1-alpine
container_name: bridgehead-patientlist-db
environment:
POSTGRES_USER: "mainzelliste"
POSTGRES_DB: "mainzelliste"
POSTGRES_PASSWORD: ${PATIENTLIST_POSTGRES_PASSWORD}
volumes:
- "patientlist-db-data:/var/lib/postgresql/data"
# NOTE: Add backups here. This is only imported if /var/lib/bridgehead/data/patientlist/ is empty!!!
- "/tmp/bridgehead/patientlist/:/docker-entrypoint-initdb.d/"
volumes:
patientlist-db-data:

View File

@ -0,0 +1,52 @@
#!/bin/bash
function idManagementSetup() {
if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log INFO "id-management setup detected -- will start id-management (mainzelliste & magicpl)."
OVERRIDE+=" -f ./$PROJECT/modules/id-management-compose.yml"
# Auto Generate local Passwords
PATIENTLIST_POSTGRES_PASSWORD="$(echo \"id-management-module-db-password-salt\" | openssl rsautl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
IDMANAGER_LOCAL_PATIENTLIST_APIKEY="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
# Transform Seeds Configuration to pass it to the Mainzelliste Container
PATIENTLIST_SEEDS_TRANSFORMED="$(declare -p PATIENTLIST_SEEDS | tr -d '\"' | sed 's/\[/\[\"/g' | sed 's/\]/\"\]/g')"
# Ensure old ids are working !!!
export IDMANAGEMENT_FRIENDLY_ID=$(legacyIdMapping "$SITE_ID")
fi
}
# Transform into single string array, e.g. 'dktk-test' to 'dktk test'
# Usage: transformToSingleStringArray 'dktk-test' -> 'dktk test'
function transformToSingleStringArray() {
echo "${1//-/ }";
}
# Ensure all Words are Uppercase
# Usage: transformToUppercase 'dktk test' -> 'Dktk Test'
function transformToUppercase() {
result="";
for word in $1; do
result+=" ${word^}";
done
echo "$result";
}
# Handle all exceptions from the norm (e.g. LMU, TUM)
# Usage: applySpecialCases 'Muenchen Lmu Test' -> 'Muenchen LMU Test'
function applySpecialCases() {
result="$1";
result="${result/Lmu/LMU}";
result="${result/Tum/TUM}";
echo "$result";
}
# Transform current siteids to legacy version
# Usage: legacyIdMapping "dktk-test" -> "DktkTest"
function legacyIdMapping() {
single_string_array=$(transformToSingleStringArray "$1");
uppercase_string=$(transformToUppercase "$single_string_array");
normalized_string=$(applySpecialCases "$uppercase_string");
echo "$normalized_string" | tr -d ' '
}
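# Illustration only (not part of the script): tracing the helpers above for the site id "muenchen-lmu"
#   transformToSingleStringArray "muenchen-lmu"  -> "muenchen lmu"
#   transformToUppercase "muenchen lmu"          -> " Muenchen Lmu"
#   applySpecialCases " Muenchen Lmu"            -> " Muenchen LMU"
#   legacyIdMapping "muenchen-lmu"               -> "MuenchenLMU" (spaces removed)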

View File

@ -0,0 +1,66 @@
# Module: Id-Management
This module provides integration with the CCP Pseudonymization Service. To learn more about the background of this service, you can refer to the [CCP Data Protection Concept](https://dktk.dkfz.de/klinische-plattformen/documents-download).
## Getting Started
The following configuration variables are added to your site configuration repository:
```
IDMANAGER_UPLOAD_APIKEY="<random-string>"
IDMANAGER_READ_APIKEY="<random-string>"
IDMANAGER_CENTRAL_PATIENTLIST_APIKEY="<given-to-you-by-ccp-it>"
IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY="<given-to-you-by-ccp-it>"
IDMANAGER_AUTH_CLIENT_ID="<given-to-you-by-ccp-it>"
IDMANAGER_AUTH_CLIENT_SECRET="<given-to-you-by-ccp-it>"
IDMANAGER_SEEDS_BK="<three-numbers>"
IDMANAGER_SEEDS_MDS="<three-numbers>"
IDMANAGER_SEEDS_DKTK000001985="<three-numbers>"
```
> NOTE: Additionally, the CCP-IT adds lines declaring the `PATIENTLIST_SEEDS` array in your site configuration. This will contain the seeds for the different id-generators used in all projects.
Once your Bridgehead is updated and restarted, you're all set!
## Additional information you may want to know
### Services
Upon configuration, the Bridgehead will spawn the following services:
- The `bridgehead-id-manager` at https://bridgehead.local/id-manager provides a common interface for creating pseudonyms in the Bridgehead.
- The `bridgehead-patientlist` at https://bridgehead.local/patientlist is a local instance of the open-source software [Mainzelliste](https://mainzelliste.de). Its primary task is to map patients' IDAT to the pseudonyms identifying them across the different CCP projects.
- The `bridgehead-patientlist-db` is only accessible within the Bridgehead itself. It is a local PostgreSQL instance storing the database for `bridgehead-patientlist`. The data is persisted as a named volume `patientlist-db-data`.
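Once the module is active, a quick (illustrative) way to check that these containers came up, using the container names defined in the compose file above:
```
docker ps --format '{{.Names}}' | grep -E 'bridgehead-id-manager|bridgehead-patientlist'
```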
### How to import an existing database (e.g. from the legacy Windows Bridgehead or from backups)
First, you must shut down your local Bridgehead instance:
```
systemctl stop bridgehead@ccp
```
Next, you need to remove the current patient list database:
```
docker volume rm patientlist-db-data;
```
Third, you need to place your postgres dump in the import directory `/tmp/bridgehead/patientlist/some-dump.sql`. It will only be imported if the volume `patientlist-db-data` was removed beforehand, as described above.
> NOTE: Please create the postgres dump with the options "--no-owner" and "--no-privileges". Additionally, ensure the dump is created in the plain format (SQL).
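For example, a plain-format dump matching these requirements could be created like this (a sketch; host, user and database name depend on the database you are exporting from):
```
pg_dump --no-owner --no-privileges --format=p -h <your-old-db-host> -U mainzelliste mainzelliste \
  > /tmp/bridgehead/patientlist/some-dump.sql
```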
After this, you can restart your bridgehead and the dump will be imported:
```
systemctl start bridgehead@ccp
```
### How to connect your local data-management
Typically, sites connect their local data management to the id-management in the Bridgehead for pseudonym creation. The following two sections describe where to change the respective configuration:
#### Sites using CentraXX
On your CentraXX server, you need to change the following settings in the "centraxx-dev.properties" file.
```
dktk.idmanagement.url=https://<your-linux-bk-host>/id-manager/translator/getId
dktk.idmanagement.apiKey=<your-setting-for-IDMANAGER_UPLOAD_APIKEY>
```
They typically already exist, but need to be changed to the new values!
#### Sites using ADT2FHIR
@Pierre
### How to connect the legacy windows bridgehead
You need to change the configuration file "..." of your Windows Bridgehead. TODO...

ccp/mtba-compose.yml (new file, 36 lines)
View File

@ -0,0 +1,36 @@
version: "3.7"
services:
mtba:
image: docker.verbis.dkfz.de/cache/samply/mtba:develop
container_name: bridgehead-mtba
environment:
BLAZE_STORE_URL: http://blaze:8080
# NOTE: Currently uses the same permissions as MagicPL!!!
# TODO: Add separate ApiKey to MagicPL only for MTBA!
ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
ID_MANAGER_URL: http://id-manager:8080/id-manager
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER}
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER}
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER}
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER}
CBIOPORTAL_URL: http://cbioportal:8080
FILE_CHARSET: ${MTBA_FILE_CHARSET}
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE}
CSV_DELIMITER: ${MTBA_CSV_DELIMITER}
labels:
- "traefik.enable=true"
- "traefik.http.routers.mtba.rule=PathPrefix(`/`)"
- "traefik.http.services.mtba.loadbalancer.server.port=80"
- "traefik.http.routers.mtba.tls=true"
volumes:
- /tmp/bridgehead/mtba/input:/app/input
- /tmp/bridgehead/mtba/persist:/app/persist
# TODO: Include CBioPortal in Deployment ...
# NOTE: CBioPortal can't load data while the system is running, so after importing data the Bridgehead needs to be restarted!
# TODO: Find a trigger to let mtba signal a restart for CBioPortal
volumes:
mtba-data:

View File

@ -18,7 +18,7 @@ services:
- "traefik.http.routers.connector.tls=true"
connector_db:
image: postgres:9.5-alpine
image: docker.verbis.dkfz.de/cache/postgres:9.5-alpine
container_name: bridgehead-ccp-connector-db
volumes:
- "connector_db_data:/var/lib/postgresql/data"

View File

@ -7,3 +7,15 @@ function nngmSetup() {
fi
CONNECTOR_POSTGRES_PASSWORD="$(echo \"This is a salt string to generate one consistent password. It is not required to be secret.\" | openssl rsautl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
}
function mtbaSetup() {
# TODO: Check if ID-Management Module is activated!
if [ -n "$ENABLE_MTBA" ];then
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Detected MTBA Module configuration but ID-Management Module seems not to be configured!"
exit 1;
fi
OVERRIDE+=" -f ./$PROJECT/mtba-compose.yml"
fi
}

View File

@ -8,8 +8,12 @@ REPORTHUB_BEAM_SECRET_LONG="ApiKey report-hub.${PROXY_ID} ${REPORTHUB_BEAM_SECRE
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
# This will load id-management setup. Effective only if id-management configuration is defined.
source $PROJECT/modules/id-management-setup.sh
idManagementSetup
# This will load nngm setup. Effective only if nngm configuration is defined.
source $PROJECT/nngm-setup.sh
nngmSetup
source $PROJECT/exliquid-setup.sh
exliquidSetup
mtbaSetup

View File

@ -11,7 +11,7 @@ detectCompose() {
getLdmPassword() {
if [ -n "$LDM_PASSWORD" ]; then
docker run --rm httpd:alpine htpasswd -nb $PROJECT $LDM_PASSWORD | tr -d '\n' | tr -d '\r'
docker run --rm docker.verbis.dkfz.de/cache/httpd:alpine htpasswd -nb $PROJECT $LDM_PASSWORD | tr -d '\n' | tr -d '\r'
else
echo -n ""
fi
@ -34,7 +34,7 @@ checkOwner(){
}
printUsage() {
echo "Usage: bridgehead start|stop|update|install|uninstall|enroll PROJECTNAME"
echo "Usage: bridgehead start|stop|is-running|update|install|uninstall|enroll PROJECTNAME"
echo "PROJECTNAME should be one of ccp|bbmri"
}
@ -131,11 +131,22 @@ fail_and_report() {
setHostname() {
if [ -z "$HOST" ]; then
export HOST=$(hostname -f)
export HOST=$(hostname -f | tr "[:upper:]" "[:lower:]")
log DEBUG "Using auto-detected hostname $HOST."
fi
}
# Takes 1) the backup directory path and 2) the name of the service to be backed up
# Creates 3 backups: 1) for the past seven days, 2) for the current month, and 3) for each calendar week
createEncryptedPostgresBackup(){
docker exec "$2" bash -c 'pg_dump -U $POSTGRES_USER $POSTGRES_DB --format=p --no-owner --no-privileges' | \
# TODO: Encrypt using /etc/bridgehead/pki/${SITE_ID}.priv.pem | \
tee "$1/$2/$(date +Last-%A).sql" | \
tee "$1/$2/$(date +%Y-%m).sql" > \
"$1/$2/$(date +%Y-KW%V).sql"
}
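# Illustration only (not part of the script): a hypothetical invocation
#   createEncryptedPostgresBackup /var/backups/bridgehead bridgehead-patientlist-db
# writes three plain-format dumps (e.g. Last-Wednesday.sql, 2023-KW12.sql, 2023-03.sql)
# into /var/backups/bridgehead/bridgehead-patientlist-db/ -- the encryption itself is still a TODO above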
# from: https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746
# ex. use: retry 5 /bin/false
function retry {
@ -158,6 +169,17 @@ function retry {
return 0
}
function bk_is_running {
detectCompose
RUNNING="$($COMPOSE -p $PROJECT -f ./$PROJECT/docker-compose.yml $OVERRIDE ps -q)"
NUMBEROFRUNNING=$(echo "$RUNNING" | wc -l)
if [ $NUMBEROFRUNNING -ge 2 ]; then
return 0
else
return 1
fi
}
##Setting Network properties
# currently not needed
#export HOSTIP=$(MSYS_NO_PATHCONV=1 docker run --rm --add-host=host.docker.internal:host-gateway ubuntu cat /etc/hosts | grep 'host.docker.internal' | awk '{print $1}');

View File

@ -1,10 +1,21 @@
#!/bin/bash -e
DEV_MODE="${1:-NODEV}"
source lib/log.sh
source lib/functions.sh
log "INFO" "Preparing your system for bridgehead installation ..."
# Check, if running in WSL
if [[ $(grep -i Microsoft /proc/version) ]]; then
# Check, if systemd is available
if [ "$(systemctl is-system-running)" = "offline" ]; then
log "ERROR" "It seems you have no active systemd environment in your WSL environment. Please follow the guide in https://devblogs.microsoft.com/commandline/systemd-support-is-now-available-in-wsl/"
exit 1
fi
fi
# Create the bridgehead user
if id bridgehead &>/dev/null; then
log "INFO" "Existing user with id $(id -u bridgehead) will be used by the bridgehead system units."
@ -14,7 +25,12 @@ else
fi
# Clone the OpenSource repository of bridgehead
set +e
bridgehead_repository_url=$(git remote get-url origin)
if [ $? -ne 0 ]; then
bridgehead_repository_url="https://github.com/samply/bridgehead.git"
fi
set -e
if [ -d "/srv/docker/bridgehead" ]; then
current_owner=$(stat -c '%U' /srv/docker/bridgehead)
if [ "$(su -c 'git -C /srv/docker/bridgehead remote get-url origin' $current_owner)" == "$bridgehead_repository_url" ]; then
@ -26,7 +42,7 @@ if [ -d "/srv/docker/bridgehead" ]; then
else
log "INFO" "Cloning $bridgehead_repository_url to /srv/docker/bridgehead"
mkdir -p /srv/docker/
git clone bridgehead_repository_url /srv/docker/bridgehead
git clone $bridgehead_repository_url /srv/docker/bridgehead
fi
case "$PROJECT" in
@ -50,7 +66,7 @@ if [ -d /etc/bridgehead ]; then
else
log "WARN" "Your site configuration repository in /etc/bridgehead seems to have another origin than git.verbis.dkfz.de. Please check if the repository is correctly cloned!"
fi
else
elif [[ "$DEV_MODE" == "NODEV" ]]; then
log "INFO" "Now cloning your site configuration repository for you."
read -p "Please enter your site: " site
read -s -p "Please enter the bridgehead's access token for your site configuration repository (will not be echoed): " access_token
@ -59,9 +75,13 @@ else
if [ $? -gt 0 ]; then
log "ERROR" "Unable to clone your configuration repository. Please obtain correct access data and try again."
fi
elif [[ "$DEV_MODE" == "DEV" ]]; then
log "INFO" "Now cloning your developer configuration repository for you."
read -p "Please enter your config repository URL: " url
git clone "$url" /etc/bridgehead
fi
chown -R bridgehead /etc/bridgehead /srv/docker/bridgehead
log INFO "System preparation is completed and private key is present."
log INFO "System preparation is completed and configuration is present."

View File

@ -4,10 +4,15 @@ source lib/functions.sh
AUTO_HOUSEKEEPING=${AUTO_HOUSEKEEPING:-true}
if [ "$AUTO_HOUSEKEEPING" == "true" ]; then
A="Performing automatic maintenance: Cleaning docker images."
A="Performing automatic maintenance: "
if bk_is_running; then
A="$A Cleaning docker images."
docker system prune -a -f
else
A="$A Not cleaning docker images since BK is not running."
fi
hc_send log "$A"
log INFO "$A"
docker system prune -a -f
else
log WARN "Automatic housekeeping disabled (variable AUTO_HOUSEKEEPING != \"true\")"
fi
@ -81,7 +86,7 @@ done
# Check docker updates
log "INFO" "Checking for updates to running docker images ..."
docker_updated="false"
for IMAGE in $(cat $PROJECT/docker-compose.yml | grep -v "^#" | grep "image:" | sed -e 's_^.*image: \(.*\).*$_\1_g; s_\"__g'); do
for IMAGE in $(cat $PROJECT/docker-compose.yml ${OVERRIDE//-f/} | grep -v "^#" | grep "image:" | sed -e 's_^.*image: \(.*\).*$_\1_g; s_\"__g'); do
log "INFO" "Checking for Updates of Image: $IMAGE"
if docker pull $IMAGE | grep "Downloaded newer image"; then
CHANGE="Image $IMAGE updated."
@ -103,6 +108,37 @@ else
hc_send log "$RES"
fi
if [ -n "${BACKUP_DIRECTORY}" ]; then
if [ ! -d "$BACKUP_DIRECTORY" ]; then
message="Performing automatic maintenance: Attempting to create backup directory $BACKUP_DIRECTORY."
hc_send log "$message"
log INFO "$message"
mkdir -p "$BACKUP_DIRECTORY"
chown -R "$BACKUP_DIRECTORY" bridgehead;
fi
checkOwner "$BACKUP_DIRECTORY" bridgehead || fail_and_report 1 "Automatic maintenance failed: Wrong permissions for backup directory $(pwd)"
# Collect all container names that contain '-db'
BACKUP_SERVICES="$(docker ps --filter name=-db --format "{{.Names}}" | tr "\n" "\ ")"
log INFO "Performing automatic maintenance: Creating Backups for $BACKUP_SERVICES";
for service in $BACKUP_SERVICES; do
if [ ! -d "$BACKUP_DIRECTORY/$service" ]; then
message="Performing automatic maintenance: Attempting to create backup directory for $service in $BACKUP_DIRECTORY."
hc_send log "$message"
log INFO "$message"
mkdir -p "$BACKUP_DIRECTORY/$service"
fi
if createEncryptedPostgresBackup "$BACKUP_DIRECTORY" "$service"; then
message="Performing automatic maintenance: Stored encrypted backup for $service in $BACKUP_DIRECTORY."
hc_send log "$message"
log INFO "$message"
else
fail_and_report 5 "Failed to create encrypted backup for $service"
fi
done
else
log WARN "Automated backups are disabled (variable AUTO_BACKUPS != \"true\")"
fi
exit 0
# TODO: Print the last commit explicitly