Mirror of https://github.com/samply/bridgehead.git (synced 2025-06-17 21:10:14 +02:00)

refactor: put all modules in a common directory to remove redundancy

@@ -1,32 +0,0 @@
version: "3.7"

services:
  blaze-secondary:
    image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
    container_name: bridgehead-ccp-blaze-secondary
    environment:
      BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080"
      JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
      DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
      DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
      ENFORCE_REFERENTIAL_INTEGRITY: "false"
    volumes:
      - "blaze-secondary-data:/app/data"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.blaze-secondary_ccp.rule=PathPrefix(`/ccp-localdatamanagement-secondary`)"
      - "traefik.http.middlewares.ccp_b-secondary_strip.stripprefix.prefixes=/ccp-localdatamanagement-secondary"
      - "traefik.http.services.blaze-secondary_ccp.loadbalancer.server.port=8080"
      - "traefik.http.routers.blaze-secondary_ccp.middlewares=ccp_b-secondary_strip,auth"
      - "traefik.http.routers.blaze-secondary_ccp.tls=true"

  obds2fhir-rest:
    environment:
      STORE_PATH: ${STORE_PATH:-http://blaze:8080/fhir}

  exporter:
    environment:
      BLAZE_HOST: "blaze-secondary"

volumes:
  blaze-secondary-data:

@@ -1,11 +0,0 @@
#!/bin/bash

function blazeSecondarySetup() {
    if [ -n "$ENABLE_SECONDARY_BLAZE" ]; then
        log INFO "Secondary Blaze setup detected -- will start second blaze."
        OVERRIDE+=" -f ./$PROJECT/modules/blaze-secondary-compose.yml"
        # make oBDS2FHIR ignore ID-Management and replace target Blaze
        PATIENTLIST_URL=" "
        STORE_PATH="http://blaze-secondary:8080/fhir"
    fi
}

@@ -1,171 +0,0 @@
version: "3.7"

services:
  rstudio:
    container_name: bridgehead-rstudio
    image: docker.verbis.dkfz.de/ccp/dktk-rstudio:latest
    environment:
      #DEFAULT_USER: "rstudio" # This line is kept for informational purposes
      PASSWORD: "${RSTUDIO_ADMIN_PASSWORD}" # It is required, even if the authentication is disabled
      DISABLE_AUTH: "true" # https://rocker-project.org/images/versioned/rstudio.html#how-to-use
      HTTP_RELATIVE_PATH: "/rstudio"
      ALL_PROXY: "http://forward_proxy:3128" # https://rocker-project.org/use/networking.html
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.rstudio_ccp.rule=PathPrefix(`/rstudio`)"
      - "traefik.http.services.rstudio_ccp.loadbalancer.server.port=8787"
      - "traefik.http.middlewares.rstudio_ccp_strip.stripprefix.prefixes=/rstudio"
      - "traefik.http.routers.rstudio_ccp.tls=true"
      - "traefik.http.routers.rstudio_ccp.middlewares=oidcAuth,rstudio_ccp_strip"
    networks:
      - rstudio

  opal:
    container_name: bridgehead-opal
    image: docker.verbis.dkfz.de/ccp/dktk-opal:latest
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.opal_ccp.rule=PathPrefix(`/opal`)"
      - "traefik.http.services.opal_ccp.loadbalancer.server.port=8080"
      - "traefik.http.routers.opal_ccp.tls=true"
    links:
      - opal-rserver
      - opal-db
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC -Dhttps.proxyHost=forward_proxy -Dhttps.proxyPort=3128"
      # OPAL_ADMINISTRATOR_USER: "administrator" # This line is kept for informational purposes
      OPAL_ADMINISTRATOR_PASSWORD: "${OPAL_ADMIN_PASSWORD}"
      POSTGRESDATA_HOST: "opal-db"
      POSTGRESDATA_DATABASE: "opal"
      POSTGRESDATA_USER: "opal"
      POSTGRESDATA_PASSWORD: "${OPAL_DB_PASSWORD}"
      ROCK_HOSTS: "opal-rserver:8085"
      APP_URL: "https://${HOST}/opal"
      APP_CONTEXT_PATH: "/opal"
      OPAL_PRIVATE_KEY: "/run/secrets/opal-key.pem"
      OPAL_CERTIFICATE: "/run/secrets/opal-cert.pem"
      OIDC_URL: "${OIDC_URL}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
      OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
      OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
      TOKEN_MANAGER_PASSWORD: "${TOKEN_MANAGER_OPAL_PASSWORD}"
      EXPORTER_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
      BEAM_APP_ID: token-manager.${PROXY_ID}
      BEAM_SECRET: ${TOKEN_MANAGER_SECRET}
      BEAM_DATASHIELD_PROXY: request-manager
    volumes:
      - "/var/cache/bridgehead/ccp/opal-metadata-db:/srv" # Opal metadata
    secrets:
      - opal-cert.pem
      - opal-key.pem

  opal-db:
    container_name: bridgehead-opal-db
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    environment:
      POSTGRES_PASSWORD: "${OPAL_DB_PASSWORD}" # Set in datashield-setup.sh
      POSTGRES_USER: "opal"
      POSTGRES_DB: "opal"
    volumes:
      - "/var/cache/bridgehead/ccp/opal-db:/var/lib/postgresql/data" # Opal project data (imported from exporter)

  opal-rserver:
    container_name: bridgehead-opal-rserver
    image: docker.verbis.dkfz.de/ccp/dktk-rserver # datashield/rock-base + dsCCPhos
    tmpfs:
      - /srv

  beam-connect:
    image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
    container_name: bridgehead-datashield-connect
    environment:
      PROXY_URL: "http://beam-proxy:8081"
      TLS_CA_CERTIFICATES_DIR: /run/secrets
      APP_ID: datashield-connect.${SITE_ID}.${BROKER_ID}
      PROXY_APIKEY: ${DATASHIELD_CONNECT_SECRET}
      DISCOVERY_URL: "./map/central.json"
      LOCAL_TARGETS_FILE: "./map/local.json"
      NO_AUTH: "true"
    secrets:
      - opal-cert.pem
    depends_on:
      - beam-proxy
    volumes:
      - /tmp/bridgehead/opal-map/:/map/:ro
    networks:
      - default
      - rstudio

  traefik:
    labels:
      - "traefik.http.middlewares.oidcAuth.forwardAuth.address=http://oauth2-proxy:4180/"
      - "traefik.http.middlewares.oidcAuth.forwardAuth.trustForwardHeader=true"
      - "traefik.http.middlewares.oidcAuth.forwardAuth.authResponseHeaders=X-Auth-Request-Access-Token,Authorization"
    networks:
      - default
      - rstudio

  forward_proxy:
    networks:
      - default
      - rstudio

  beam-proxy:
    environment:
      APP_datashield-connect_KEY: ${DATASHIELD_CONNECT_SECRET}
      APP_token-manager_KEY: ${TOKEN_MANAGER_SECRET}

  # TODO: Allow users of group /DataSHIELD and OIDC_USER_GROUP at the same time:
  # Maybe a solution would be (https://oauth2-proxy.github.io/oauth2-proxy/configuration/oauth_provider):
  # --allowed-groups=/DataSHIELD,OIDC_USER_GROUP
  oauth2-proxy:
    image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
    container_name: bridgehead-oauth2proxy
    command: >-
      --allowed-group=DataSHIELD
      --oidc-groups-claim=${OIDC_GROUP_CLAIM}
      --auth-logging=true
      --whitelist-domain=${HOST}
      --http-address="0.0.0.0:4180"
      --reverse-proxy=true
      --upstream="static://202"
      --email-domain="*"
      --cookie-name="_BRIDGEHEAD_oauth2"
      --cookie-secret="${OAUTH2_PROXY_SECRET}"
      --cookie-expire="12h"
      --cookie-secure="true"
      --cookie-httponly="true"
      #OIDC settings
      --provider="keycloak-oidc"
      --provider-display-name="VerbIS Login"
      --client-id="${OIDC_PRIVATE_CLIENT_ID}"
      --client-secret="${OIDC_CLIENT_SECRET}"
      --redirect-url="https://${HOST}${OAUTH2_CALLBACK}"
      --oidc-issuer-url="${OIDC_ISSUER_URL}"
      --scope="openid email profile"
      --code-challenge-method="S256"
      --skip-provider-button=true
      #X-Forwarded-Header settings - true/false depending on your needs
      --pass-basic-auth=true
      --pass-user-headers=false
      --pass-access-token=false
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.oauth2_proxy.rule=Host(`${HOST}`) && PathPrefix(`/oauth2`)"
      - "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
      - "traefik.http.routers.oauth2_proxy.tls=true"
    environment:
      http_proxy: "http://forward_proxy:3128"
      https_proxy: "http://forward_proxy:3128"
    depends_on:
      forward_proxy:
        condition: service_healthy

secrets:
  opal-cert.pem:
    file: /tmp/bridgehead/opal-cert.pem
  opal-key.pem:
    file: /tmp/bridgehead/opal-key.pem

networks:
  rstudio:

@@ -1,44 +0,0 @@
#!/bin/bash -e

if [ "$ENABLE_DATASHIELD" == true ]; then
    # HACK: This only works because exporter-setup.sh and teiler-setup.sh are sourced after datashield-setup.sh
    if [ -z "${ENABLE_EXPORTER}" ] || [ "${ENABLE_EXPORTER}" != "true" ]; then
        log WARN "The ENABLE_EXPORTER variable is either not set or not set to 'true'."
    fi
    OAUTH2_CALLBACK=/oauth2/callback
    OAUTH2_PROXY_SECRET="$(echo \"This is a salt string to generate one consistent encryption key for the oauth2_proxy. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 32)"
    add_private_oidc_redirect_url "${OAUTH2_CALLBACK}"

    log INFO "DataSHIELD setup detected -- will start DataSHIELD services."
    OVERRIDE+=" -f ./$PROJECT/modules/datashield-compose.yml"
    EXPORTER_OPAL_PASSWORD="$(generate_password \"exporter in Opal\")"
    TOKEN_MANAGER_OPAL_PASSWORD="$(generate_password \"Token Manager in Opal\")"
    OPAL_DB_PASSWORD="$(echo \"Opal DB\" | generate_simple_password)"
    OPAL_ADMIN_PASSWORD="$(generate_password \"admin password for Opal\")"
    RSTUDIO_ADMIN_PASSWORD="$(generate_password \"admin password for R-Studio\")"
    DATASHIELD_CONNECT_SECRET="$(echo \"DataShield Connect\" | generate_simple_password)"
    TOKEN_MANAGER_SECRET="$(echo \"Token Manager\" | generate_simple_password)"
    if [ ! -e /tmp/bridgehead/opal-cert.pem ]; then
        mkdir -p /tmp/bridgehead/
        openssl req -x509 -newkey rsa:4096 -nodes -keyout /tmp/bridgehead/opal-key.pem -out /tmp/bridgehead/opal-cert.pem -days 3650 -subj "/CN=opal/C=DE"
    fi
    mkdir -p /tmp/bridgehead/opal-map
    sites="$(cat ./$PROJECT/modules/datashield-sites.json)"
    echo "$sites" | docker_jq -n --args '{"sites": input | map({
        "name": .,
        "id": .,
        "virtualhost": "\(.):443",
        "beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
    })}' $sites >/tmp/bridgehead/opal-map/central.json
    echo "$sites" | docker_jq -n --args '[{
        "external": "'"$SITE_ID"':443",
        "internal": "opal:8443",
        "allowed": input | map("\(.).'"$BROKER_ID"'")
    }]' >/tmp/bridgehead/opal-map/local.json
    if [ "$USER" == "root" ]; then
        chown -R bridgehead:docker /tmp/bridgehead
        chmod g+wr /tmp/bridgehead/opal-map/*
        chmod g+r /tmp/bridgehead/opal-key.pem
    fi
    add_private_oidc_redirect_url "/opal/*"
fi

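For illustration, running the same jq filter as in the setup script above against a hypothetical `datashield-sites.json` shows the shape of the generated `central.json` (a sketch using plain `jq` instead of the repository's `docker_jq` helper; the site names and broker ID are made up):

```bash
BROKER_ID=broker.example.de    # hypothetical broker ID
echo '["site-a","site-b"]' | jq -n '{"sites": input | map({
  "name": .,
  "id": .,
  "virtualhost": "\(.):443",
  "beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
})}'
# Produces: {"sites":[{"name":"site-a","id":"site-a","virtualhost":"site-a:443",
#            "beamconnect":"datashield-connect.site-a.broker.example.de"}, ... ]}
```
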
@@ -1,28 +0,0 @@
# DataSHIELD
This module provides the infrastructure to run DataSHIELD within the bridgehead.
For more information about DataSHIELD, please visit https://www.datashield.org/

## R-Studio
To connect to the different bridgeheads of the CCP through DataSHIELD, you can use your own R-Studio environment.
However, the R-Studio provided by this module already has the DataSHIELD libraries installed and is integrated into the bridgehead,
which saves you the extra configuration of your own R-Studio environment.

## Opal
This is the core of DataSHIELD. It is made up of Opal, a Postgres database and an R-server.
For more information about Opal, please visit https://opaldoc.obiba.org

### Opal
Opal is OBiBa's core database application for biobanks.

### Opal-DB
Opal requires a database to import the data for DataSHIELD. We use a Postgres instance as that database.
The data is imported within the bridgehead through the exporter.

### Opal-R-Server
An R-server that executes R scripts in DataSHIELD.

## Beam
### Beam-Connect
Beam-Connect is used to route http(s) traffic through Beam so that R-Studio can access data from other bridgeheads that have DataSHIELD enabled.
### Beam-Proxy
The usual Beam proxy used for communication.

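As a quick sanity check after enabling the module, you can look for the containers defined in datashield-compose.yml above (a sketch; adjust to your Docker setup):

```bash
# List the DataSHIELD-related containers and their status.
docker ps --format '{{.Names}}\t{{.Status}}' \
  | grep -E 'bridgehead-(rstudio|opal|opal-db|opal-rserver|datashield-connect|oauth2proxy)'
```
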
@@ -1,39 +0,0 @@
version: "3.7"

services:
  beam-proxy:
    environment:
      APP_dnpm-connect_KEY: ${DNPM_BEAM_SECRET_SHORT}
  dnpm-beam-connect:
    depends_on: [ beam-proxy ]
    image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
    container_name: bridgehead-dnpm-beam-connect
    environment:
      PROXY_URL: http://beam-proxy:8081
      PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT}
      APP_ID: dnpm-connect.${PROXY_ID}
      DISCOVERY_URL: "./conf/central_targets.json"
      LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
      HTTP_PROXY: "http://forward_proxy:3128"
      HTTPS_PROXY: "http://forward_proxy:3128"
      NO_PROXY: beam-proxy,dnpm-backend,host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
      RUST_LOG: ${RUST_LOG:-info}
      NO_AUTH: "true"
      TLS_CA_CERTIFICATES_DIR: ./conf/trusted-ca-certs
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
      - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
      - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)"
      - "traefik.http.middlewares.dnpm-connect-strip.stripprefix.prefixes=/dnpm-connect"
      - "traefik.http.routers.dnpm-connect.middlewares=dnpm-connect-strip"
      - "traefik.http.services.dnpm-connect.loadbalancer.server.port=8062"
      - "traefik.http.routers.dnpm-connect.tls=true"

  dnpm-echo:
    image: docker.verbis.dkfz.de/cache/samply/bridgehead-echo:latest
    container_name: bridgehead-dnpm-echo

@@ -1,34 +0,0 @@
version: "3.7"

services:
  dnpm-backend:
    image: ghcr.io/kohlbacherlab/bwhc-backend:1.0-snapshot-broker-connector
    container_name: bridgehead-dnpm-backend
    environment:
      - ZPM_SITE=${ZPM_SITE}
      - N_RANDOM_FILES=${DNPM_SYNTH_NUM}
    volumes:
      - /etc/bridgehead/dnpm:/bwhc_config:ro
      - ${DNPM_DATA_DIR}:/bwhc_data
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.bwhc-backend.rule=PathPrefix(`/bwhc`)"
      - "traefik.http.services.bwhc-backend.loadbalancer.server.port=9000"
      - "traefik.http.routers.bwhc-backend.tls=true"

  dnpm-frontend:
    image: ghcr.io/kohlbacherlab/bwhc-frontend:2209
    container_name: bridgehead-dnpm-frontend
    links:
      - dnpm-backend
    environment:
      - NUXT_HOST=0.0.0.0
      - NUXT_PORT=8080
      - BACKEND_PROTOCOL=https
      - BACKEND_HOSTNAME=$HOST
      - BACKEND_PORT=443
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.bwhc-frontend.rule=PathPrefix(`/`)"
      - "traefik.http.services.bwhc-frontend.loadbalancer.server.port=8080"
      - "traefik.http.routers.bwhc-frontend.tls=true"

@@ -1,28 +0,0 @@
#!/bin/bash

if [ -n "${ENABLE_DNPM_NODE}" ]; then
    log INFO "DNPM setup detected (BwHC Node) -- will start BwHC node."
    OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"

    # Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
    DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
    if [ -z "${ZPM_SITE+x}" ]; then
        log ERROR "Mandatory variable ZPM_SITE not defined!"
        exit 1
    fi
    if [ -z "${DNPM_DATA_DIR+x}" ]; then
        log ERROR "Mandatory variable DNPM_DATA_DIR not defined!"
        exit 1
    fi
    DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:-0}
    if grep -q 'traefik.http.routers.landing.rule=PathPrefix(`/landing`)' /srv/docker/bridgehead/minimal/docker-compose.override.yml 2>/dev/null; then
        echo "Override of landing page url already in place"
    else
        echo "Adding override of landing page url"
        if [ -f /srv/docker/bridgehead/minimal/docker-compose.override.yml ]; then
            echo -e '  landing:\n    labels:\n      - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
        else
            echo -e 'version: "3.7"\nservices:\n  landing:\n    labels:\n      - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
        fi
    fi
fi

@@ -1,15 +0,0 @@
#!/bin/bash -e

if [ -n "${ENABLE_DNPM}" ]; then
    log INFO "DNPM setup detected (Beam.Connect) -- will start Beam.Connect for DNPM."
    OVERRIDE+=" -f ./$PROJECT/modules/dnpm-compose.yml"

    # Set variables required for Beam-Connect
    DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
    # If the DNPM_NO_PROXY variable is set, prefix it with a comma (as it gets added to a comma separated list)
    if [ -n "${DNPM_NO_PROXY}" ]; then
        DNPM_ADDITIONAL_NO_PROXY=",${DNPM_NO_PROXY}"
    else
        DNPM_ADDITIONAL_NO_PROXY=""
    fi
fi

@@ -1,67 +0,0 @@
version: "3.7"

services:
  exporter:
    image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
    container_name: bridgehead-ccp-exporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      CROSS_ORIGINS: "https://${HOST}"
      EXPORTER_DB_USER: "exporter"
      EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
      HTTP_RELATIVE_PATH: "/ccp-exporter"
      SITE: "${SITE_ID}"
      HTTP_SERVLET_REQUEST_SCHEME: "https"
      OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
      - "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
      - "traefik.http.routers.exporter_ccp.tls=true"
      - "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
      - "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
    volumes:
      - "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"

  exporter-db:
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    container_name: bridgehead-ccp-exporter-db
    environment:
      POSTGRES_USER: "exporter"
      POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      POSTGRES_DB: "exporter"
    volumes:
      # Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
      - "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"

  reporter:
    image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
    container_name: bridgehead-ccp-reporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      CROSS_ORIGINS: "https://${HOST}"
      HTTP_RELATIVE_PATH: "/ccp-reporter"
      SITE: "${SITE_ID}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      EXPORTER_URL: "http://exporter:8092"
      LOG_FHIR_VALIDATION: "false"
      HTTP_SERVLET_REQUEST_SCHEME: "https"

    # In this initial development state of the bridgehead, we are trying to use as few volumes as possible.
    # However, in the first executions at the CCP sites, this volume has proven to be very important. A report is
    # a process that can take several hours, because it depends on the exporter.
    # There is a risk that the bridgehead restarts, losing the already created export.

    volumes:
      - "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
      - "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
      - "traefik.http.routers.reporter_ccp.tls=true"
      - "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
      - "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"

@@ -1,8 +0,0 @@
#!/bin/bash -e

if [ "$ENABLE_EXPORTER" == true ]; then
    log INFO "Exporter setup detected -- will start Exporter service."
    OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
    EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
    EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

@@ -1,15 +0,0 @@
# Exporter and Reporter


## Exporter
The exporter is a REST API that exports the data of the bridgehead's different databases as a set of tables.
It supports several output formats, such as CSV, Excel, JSON and XML, and it can also export data into Opal.

## Exporter-DB
The Exporter-DB is a database that stores queries for execution by the exporter.
The exporter also uses this database to manage the different executions of the same query.

## Reporter
This component is a plugin of the exporter that creates more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy and Thymeleaf.
It is well suited for generating documents such as our traditional CCP quality report.

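As a minimal smoke test, the Traefik routes defined in exporter-compose.yml above can be probed from outside (a sketch; `HOST` is a placeholder, and the exact endpoints and authentication of the exporter API itself are application-specific and not shown here):

```bash
HOST=bridgehead.example.org                 # hypothetical host name
curl -k -I "https://${HOST}/ccp-exporter"   # exporter route
curl -k -I "https://${HOST}/ccp-reporter"   # reporter route
```
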
@@ -1,25 +0,0 @@
version: "3.7"

services:
  fhir2sql:
    depends_on:
      - "dashboard-db"
      - "blaze"
    image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
    container_name: bridgehead-ccp-dashboard-fhir2sql
    environment:
      BLAZE_BASE_URL: "http://bridgehead-ccp-blaze:8080"
      PG_HOST: "dashboard-db"
      PG_USERNAME: "dashboard"
      PG_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
      PG_DBNAME: "dashboard"

  dashboard-db:
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    container_name: bridgehead-ccp-dashboard-db
    environment:
      POSTGRES_USER: "dashboard"
      POSTGRES_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
      POSTGRES_DB: "dashboard"
    volumes:
      - "/var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data"

@@ -1,7 +0,0 @@
#!/bin/bash -e

if [ "$ENABLE_FHIR2SQL" == true ]; then
    log INFO "Dashboard setup detected -- will start Dashboard backend and FHIR2SQL service."
    OVERRIDE+=" -f ./$PROJECT/modules/fhir2sql-compose.yml"
    DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')"
fi

@@ -1,36 +0,0 @@
# fhir2sql
fhir2sql connects to Blaze, retrieves data, and syncs it with a PostgreSQL database. The application is designed to run continuously, syncing data at regular intervals.
The Dashboard module is an optional component of the Bridgehead CCP setup. When enabled, it starts two Docker services: **fhir2sql** and **dashboard-db**. Data held in PostgreSQL is only stored temporarily; Blaze is considered the 'leading system' or 'source of truth'.

## Services
### fhir2sql
* Image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
* Container name: bridgehead-ccp-dashboard-fhir2sql
* Depends on: dashboard-db and blaze
* Environment variables:
  - BLAZE_BASE_URL: The base URL of the Blaze FHIR server (set to http://bridgehead-ccp-blaze:8080)
  - PG_HOST: The hostname of the PostgreSQL database (set to dashboard-db)
  - PG_USERNAME: The username for the PostgreSQL database (set to dashboard)
  - PG_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
  - PG_DBNAME: The name of the PostgreSQL database (set to dashboard)

### dashboard-db

* Image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
* Container name: bridgehead-ccp-dashboard-db
* Environment variables:
  - POSTGRES_USER: The username for the PostgreSQL database (set to dashboard)
  - POSTGRES_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
  - POSTGRES_DB: The name of the PostgreSQL database (set to dashboard)
* Volumes:
  - /var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data

The volume used by dashboard-db can be removed safely; it is restored to a working state by re-importing the data from Blaze.

### Environment Variables
* DASHBOARD_DB_PASSWORD: A generated password for the PostgreSQL database, created using a salt string and the SHA1 hash function.
* POSTGRES_TAG: The tag of the PostgreSQL image to use (not set in this module, but required by the dashboard-db service).


### Setup
To enable the Dashboard module, set the ENABLE_FHIR2SQL environment variable to true. The dashboard-setup.sh script will then start the fhir2sql and dashboard-db services, using the environment variables and volumes defined above.

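A minimal sketch of enabling the module, assuming the flag lives in /etc/bridgehead/ccp.conf (where your site flags are actually configured may differ) and that the bridgehead runs as the systemd unit used elsewhere in this repository:

```bash
echo 'ENABLE_FHIR2SQL=true' | sudo tee -a /etc/bridgehead/ccp.conf   # assumed config location
sudo systemctl restart bridgehead@ccp
```
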
@@ -1,96 +0,0 @@
version: "3.7"

services:
  id-manager:
    image: docker.verbis.dkfz.de/bridgehead/magicpl
    container_name: bridgehead-id-manager
    environment:
      TOMCAT_REVERSEPROXY_FQDN: ${HOST}
      TOMCAT_REVERSEPROXY_SSL: "true"
      MAGICPL_SITE: ${IDMANAGEMENT_FRIENDLY_ID}
      MAGICPL_ALLOWED_ORIGINS: https://${HOST}
      MAGICPL_LOCAL_PATIENTLIST_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
      MAGICPL_CENTRAXX_APIKEY: ${IDMANAGER_UPLOAD_APIKEY}
      MAGICPL_CONNECTOR_APIKEY: ${IDMANAGER_READ_APIKEY}
      MAGICPL_CENTRAL_PATIENTLIST_APIKEY: ${IDMANAGER_CENTRAL_PATIENTLIST_APIKEY}
      MAGICPL_CONTROLNUMBERGENERATOR_APIKEY: ${IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY}
    depends_on:
      - patientlist
      - traefik-forward-auth
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.id-manager.rule=PathPrefix(`/id-manager`)"
      - "traefik.http.services.id-manager.loadbalancer.server.port=8080"
      - "traefik.http.routers.id-manager.tls=true"
      - "traefik.http.routers.id-manager.middlewares=traefik-forward-auth-idm"

  patientlist:
    image: docker.verbis.dkfz.de/bridgehead/mainzelliste
    container_name: bridgehead-patientlist
    environment:
      - TOMCAT_REVERSEPROXY_FQDN=${HOST}
      - TOMCAT_REVERSEPROXY_SSL=true
      - ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
      - ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
      - ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
      - ML_UPLOAD_API_KEY=${IDMANAGER_UPLOAD_APIKEY}
      # Add Variables from /etc/patientlist-id-generators.env
      - PATIENTLIST_SEEDS_TRANSFORMED
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.patientlist.rule=PathPrefix(`/patientlist`)"
      - "traefik.http.services.patientlist.loadbalancer.server.port=8080"
      - "traefik.http.routers.patientlist.tls=true"
    depends_on:
      - patientlist-db

  patientlist-db:
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    container_name: bridgehead-patientlist-db
    environment:
      POSTGRES_USER: "mainzelliste"
      POSTGRES_DB: "mainzelliste"
      POSTGRES_PASSWORD: ${PATIENTLIST_POSTGRES_PASSWORD}
    volumes:
      - "patientlist-db-data:/var/lib/postgresql/data"
      # NOTE: Add backups here. This is only imported if /var/lib/bridgehead/data/patientlist/ is empty!!!
      - "/tmp/bridgehead/patientlist/:/docker-entrypoint-initdb.d/"

  traefik-forward-auth:
    image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:v7.6.0
    environment:
      - http_proxy=http://forward_proxy:3128
      - https_proxy=http://forward_proxy:3128
      - OAUTH2_PROXY_PROVIDER=oidc
      - OAUTH2_PROXY_SKIP_PROVIDER_BUTTON=true
      - OAUTH2_PROXY_OIDC_ISSUER_URL=https://login.verbis.dkfz.de/realms/master
      - OAUTH2_PROXY_CLIENT_ID=bridgehead-${SITE_ID}
      - OAUTH2_PROXY_CLIENT_SECRET=${IDMANAGER_AUTH_CLIENT_SECRET}
      - OAUTH2_PROXY_COOKIE_SECRET=${IDMANAGER_AUTH_COOKIE_SECRET}
      - OAUTH2_PROXY_COOKIE_DOMAINS=.${HOST}
      - OAUTH2_PROXY_HTTP_ADDRESS=:4180
      - OAUTH2_PROXY_REVERSE_PROXY=true
      - OAUTH2_PROXY_WHITELIST_DOMAINS=.${HOST}
      - OAUTH2_PROXY_UPSTREAMS=static://202
      - OAUTH2_PROXY_EMAIL_DOMAINS=*
      - OAUTH2_PROXY_SCOPE=openid profile email
      # Pass Authorization Header and some user information to backend services
      - OAUTH2_PROXY_SET_AUTHORIZATION_HEADER=true
      - OAUTH2_PROXY_SET_XAUTHREQUEST=true
      # Keycloak has an expiration time of 60s therefore oauth2-proxy needs to refresh after that
      - OAUTH2_PROXY_COOKIE_REFRESH=60s
      - OAUTH2_PROXY_ALLOWED_GROUPS=DKTK-CCP-PPSN
      - OAUTH2_PROXY_PROXY_PREFIX=/oauth2-idm
    labels:
      - "traefik.enable=true"
      - "traefik.http.services.traefik-forward-auth.loadbalancer.server.port=4180"
      - "traefik.http.routers.traefik-forward-auth.rule=Host(`${HOST}`) && PathPrefix(`/oauth2-idm`)"
      - "traefik.http.routers.traefik-forward-auth.tls=true"
      - "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.address=http://traefik-forward-auth:4180"
      - "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.authResponseHeaders=Authorization"
    depends_on:
      forward_proxy:
        condition: service_healthy

volumes:
  patientlist-db-data:

@@ -1,53 +0,0 @@
#!/bin/bash -e

function idManagementSetup() {
    if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
        log INFO "id-management setup detected -- will start id-management (mainzelliste & magicpl)."
        OVERRIDE+=" -f ./ccp/modules/id-management-compose.yml"

        # Auto Generate local Passwords
        PATIENTLIST_POSTGRES_PASSWORD="$(echo \"id-management-module-db-password-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
        IDMANAGER_LOCAL_PATIENTLIST_APIKEY="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"

        # Transform Seeds Configuration to pass it to the Mainzelliste Container
        PATIENTLIST_SEEDS_TRANSFORMED="$(declare -p PATIENTLIST_SEEDS | tr -d '\"' | sed 's/\[/\[\"/g' | sed 's/\]/\"\]/g')"

        # Ensure old ids are working !!!
        export IDMANAGEMENT_FRIENDLY_ID=$(legacyIdMapping "$SITE_ID")
    fi
}

# Transform into single string array, e.g. 'dktk-test' to 'dktk test'
# Usage: transformToSingleStringArray 'dktk-test' -> 'dktk test'
function transformToSingleStringArray() {
    echo "${1//-/ }";
}

# Ensure all Words are Uppercase
# Usage: transformToUppercase 'dktk test' -> 'Dktk Test'
function transformToUppercase() {
    result="";
    for word in $1; do
        result+=" ${word^}";
    done
    echo "$result";
}

# Handle all exceptions from the norm (e.g. LMU, TUM)
# Usage: applySpecialCases 'Muenchen Lmu Test' -> 'Muenchen LMU Test'
function applySpecialCases() {
    result="$1";
    result="${result/Lmu/LMU}";
    result="${result/Tum/TUM}";
    result="${result/Dktk Test/Teststandort}";
    echo "$result";
}

# Transform current siteids to legacy version
# Usage: legacyIdMapping "dktk-test" -> "DktkTest"
function legacyIdMapping() {
    single_string_array=$(transformToSingleStringArray "$1");
    uppercase_string=$(transformToUppercase "$single_string_array");
    normalized_string=$(applySpecialCases "$uppercase_string");
    echo "$normalized_string" | tr -d ' '
}

@@ -1,66 +0,0 @@
# Module: Id-Management
This module provides integration with the CCP-Pseudonymization Service. To learn more about the background of this service, you can refer to the [CCP Data Protection Concept](https://dktk.dkfz.de/klinische-plattformen/documents-download).

## Getting Started
The following configuration variables are added to your sites-configuration repository:

```
IDMANAGER_UPLOAD_APIKEY="<random-string>"
IDMANAGER_READ_APIKEY="<random-string>"
IDMANAGER_CENTRAL_PATIENTLIST_APIKEY="<given-to-you-by-ccp-it>"
IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY="<given-to-you-by-ccp-it>"
IDMANAGER_AUTH_CLIENT_ID="<given-to-you-by-ccp-it>"
IDMANAGER_AUTH_CLIENT_SECRET="<given-to-you-by-ccp-it>"

IDMANAGER_SEEDS_BK="<three-numbers>"
IDMANAGER_SEEDS_MDS="<three-numbers>"
IDMANAGER_SEEDS_DKTK000001985="<three-numbers>"
```
> NOTE: Additionally, the CCP-IT adds lines declaring the `PATIENTLIST_SEEDS` array in your site configuration. It contains the seeds for the different id-generators used across all projects.

Once your Bridgehead is updated and restarted, you're all set!

## Additional information you may want to know

### Services

Upon configuration, the Bridgehead will spawn the following services:

- The `bridgehead-id-manager` at https://bridgehead.local/id-manager provides a common interface for creating pseudonyms in the bridgehead.
- The `bridgehead-patientlist` at https://bridgehead.local/patientlist is a local instance of the open-source software [Mainzelliste](https://mainzelliste.de). Its primary task is to map patient IDAT to the pseudonyms identifying them across the different CCP projects.
- The `bridgehead-patientlist-db` is only accessible within the Bridgehead itself. It is a local PostgreSQL instance storing the database for `bridgehead-patientlist`. The data is persisted as a named volume `patientlist-db-data`.

### How to import an existing database (e.g. from a legacy Windows installation or from backups)
First, you must shut down your local bridgehead instance:
```
systemctl stop bridgehead@ccp
```

Next, you need to remove the current patientlist database:
```
docker volume rm patientlist-db-data;
```

Third, you need to place your postgres dump in the import directory `/tmp/bridgehead/patientlist/some-dump.sql`. It will only be imported if the volume `patientlist-db-data` was removed beforehand, as described above.
> NOTE: Please create the postgres dump with the options "--no-owner" and "--no-privileges". Additionally, ensure the dump is created in the plain format (SQL).

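For example, a compatible dump of an existing patient list database could be created roughly like this (a sketch; the source host is a placeholder, while the database and user name `mainzelliste` are taken from the compose file above):

```bash
# Plain-format SQL dump without ownership/privilege statements, as required above.
pg_dump --no-owner --no-privileges --format=plain \
  --host=<old-patientlist-db-host> --username=mainzelliste mainzelliste \
  > /tmp/bridgehead/patientlist/mainzelliste-dump.sql
```
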

After this, you can restart your bridgehead and the dump will be imported:
```
systemctl start bridgehead@ccp
```

### How to connect your local data management
Typically, sites connect their local data management to the id-management in the bridgehead for pseudonym creation. The following two sections describe where to change the configuration:
#### Sites using CentraXX
On your CentraXX server, you need to change the following settings in the "centraxx-dev.properties" file.
```
dktk.idmanagement.url=https://<your-linux-bk-host>/id-manager/translator/getId
dktk.idmanagement.apiKey=<your-setting-for-IDMANAGER_UPLOAD_APIKEY>
```
They typically already exist, but need to be changed to the new values!
#### Sites using ADT2FHIR
@Pierre


### How to connect the legacy windows bridgehead
You need to change the configuration file "..." of your Windows Bridgehead. TODO...

@@ -1,44 +0,0 @@
version: "3.7"

services:
  mtba:
    image: docker.verbis.dkfz.de/cache/samply/mtba:develop
    container_name: bridgehead-mtba
    environment:
      BLAZE_STORE_URL: http://blaze:8080
      # NOTE: Currently uses the same permissions as MagicPL!!!
      # TODO: Add separate ApiKey to MagicPL only for MTBA!
      ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
      ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
      ID_MANAGER_URL: http://id-manager:8080/id-manager
      PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER:-FIRST_NAME}
      PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER:-LAST_NAME}
      PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER:-GENDER}
      PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER:-BIRTHDAY}
      CBIOPORTAL_URL: http://cbioportal:8080
      FILE_CHARSET: ${MTBA_FILE_CHARSET:-UTF-8}
      FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE:-LF}
      CSV_DELIMITER: ${MTBA_CSV_DELIMITER:-TAB}
      HTTP_RELATIVE_PATH: "/mtba"
      OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
      OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
      OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_URL: "${OIDC_URL}"

    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mtba_ccp.rule=PathPrefix(`/mtba`)"
      - "traefik.http.services.mtba_ccp.loadbalancer.server.port=8480"
      - "traefik.http.routers.mtba_ccp.tls=true"

    volumes:
      - /var/cache/bridgehead/ccp/mtba/input:/app/input
      - /var/cache/bridgehead/ccp/mtba/persist:/app/persist

# TODO: Include CBioPortal in Deployment ...
# NOTE: CBioPortal can't load data while the system is running. So after import of data bridgehead needs to be restarted!
# TODO: Find a trigger to let mtba signal a restart for CBioPortal

volumes:
  mtba-data:

@@ -1,12 +0,0 @@
#!/bin/bash -e

function mtbaSetup() {
    if [ -n "$ENABLE_MTBA" ]; then
        log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
        if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
            log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
        fi
        OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
        add_private_oidc_redirect_url "/mtba/*"
    fi
}

@@ -1,6 +0,0 @@
# Molecular Tumor Board Alliance (MTBA)

In this module, the genetic data to be imported is placed in a directory (/tmp/bridgehead/mtba/input). A process
regularly checks whether there are files in this directory. The files are pseudonymized once the IDAT is provided,
combined with the clinical data from Blaze, and imported into cBioPortal. The files are also imported into Blaze.

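As a sketch of the import flow, a file can be dropped into the watched input directory. The host path below is the one mounted as /app/input in mtba-compose.yml above (the /tmp path mentioned in this README may refer to an older layout), and the file name is made up:

```bash
sudo cp example-variants.tsv /var/cache/bridgehead/ccp/mtba/input/
```
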
@@ -1,27 +0,0 @@
version: "3.7"

volumes:
  nngm-rest:

services:
  connector:
    container_name: bridgehead-connector
    image: docker.verbis.dkfz.de/ccp/nngm-rest:main
    environment:
      CTS_MAGICPL_API_KEY: ${NNGM_MAGICPL_APIKEY}
      CTS_API_KEY: ${NNGM_CTS_APIKEY}
      CRYPT_KEY: ${NNGM_CRYPTKEY}
      #CTS_MAGICPL_SITE: ${SITE_ID}TODO
    restart: always
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)"
      - "traefik.http.middlewares.connector_strip.stripprefix.prefixes=/nngm-connector"
      - "traefik.http.services.connector.loadbalancer.server.port=8080"
      - "traefik.http.routers.connector.tls=true"
      - "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
    volumes:
      - nngm-rest:/var/log
  traefik:
    labels:
      - "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"

@@ -1,6 +0,0 @@
#!/bin/bash -e

if [ -n "$NNGM_CTS_APIKEY" ]; then
    log INFO "nNGM setup detected -- will start nNGM Connector."
    OVERRIDE+=" -f ./$PROJECT/modules/nngm-compose.yml"
fi

@@ -1,20 +0,0 @@
version: "3.7"

services:
  obds2fhir-rest:
    container_name: bridgehead-obds2fhir-rest
    image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
    environment:
      IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
      MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
      SALT: ${LOCAL_SALT}
      KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
      MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
    restart: always
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
      - "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
      - "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
      - "traefik.http.routers.obds2fhir-rest.tls=true"
      - "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"

@@ -1,13 +0,0 @@
#!/bin/bash

function obds2fhirRestSetup() {
    if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
        log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
        if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
            log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
            PATIENTLIST_URL=" "
        fi
        OVERRIDE+=" -f ./ccp/modules/obds2fhir-rest-compose.yml"
        LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
    fi
}

@@ -1,81 +0,0 @@
version: "3.7"

services:

  teiler-orchestrator:
    image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
    container_name: bridgehead-teiler-orchestrator
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
      - "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
      - "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
      - "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
      - "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
    environment:
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
      HTTP_RELATIVE_PATH: "/ccp-teiler"

  teiler-dashboard:
    image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
    container_name: bridgehead-teiler-dashboard
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
      - "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
      - "traefik.http.routers.teiler_dashboard_ccp.tls=true"
      - "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
      - "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
    environment:
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      OIDC_URL: "${OIDC_URL}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
      OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
      TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
      TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
      TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
      TEILER_PROJECT: "${PROJECT}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_USER: "${OIDC_USER_GROUP}"
      TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
      REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
      EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"


  teiler-backend:
    image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
    container_name: bridgehead-teiler-backend
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
      - "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
      - "traefik.http.routers.teiler_backend_ccp.tls=true"
      - "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
      - "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
    environment:
      LOG_LEVEL: "INFO"
      APPLICATION_PORT: "8085"
      APPLICATION_ADDRESS: "${HOST}"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
      TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
      CENTRAX_URL: "${CENTRAXX_URL}"
      HTTP_PROXY: "http://forward_proxy:3128"
      ENABLE_MTBA: "${ENABLE_MTBA}"
      ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
    secrets:
      - ccp.conf

secrets:
  ccp.conf:
    file: /etc/bridgehead/ccp.conf

@@ -1,9 +0,0 @@
#!/bin/bash -e

if [ "$ENABLE_TEILER" == true ]; then
    log INFO "Teiler setup detected -- will start Teiler services."
    OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
    TEILER_DEFAULT_LANGUAGE=DE
    TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
    add_public_oidc_redirect_url "/ccp-teiler/*"
fi

@@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single-page application.

## Teiler Orchestrator
A Single-SPA component consisting of the root HTML page of the single-page application and the JavaScript code that
fetches the information about the microfrontends from the Teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.

The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run on their own, but they need a Single-SPA extension (https://single-spa.js.org/docs/ecosystem) to be used within the Teiler.
There are also three templates (Angular, Vue, React) available that can be extended and used directly in the Teiler.

## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The username and password are set in ccp.local.conf.

## Teiler Backend
In this component, the microfrontends are configured.
