mirror of https://github.com/samply/bridgehead.git
Merge branch 'main' into main_directory_sync_extra_attributes_and_star_model
Commit 3f8bb158bc
@ -1,7 +1,7 @@
##Ignore site configuration
.gitmodules
site-config/*

.idea
## Ignore site configuration
*/docker-compose.override.yml

@ -200,7 +200,7 @@ sudo systemctl [enable|disable] bridgehead@<PROJECT>.service
After starting the Bridgehead, you can watch the initialization process with the following command:

```shell
journalctl -u bridgehead@bbmri -f
/srv/docker/bridgehead/bridgehead logs <project> -f
```

If this exits with something similar to the following:

@ -220,8 +220,9 @@ docker ps
There should be 6 - 10 Docker processes. If there are fewer, then you know that something has gone wrong. To see what is going on, run:

```shell
journalctl -u bridgehead@bbmri -f
/srv/docker/bridgehead/bridgehead logs <Project> -f
```
This translates to a journalctl command, so all the regular journalctl flags can be used.
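For example, the usual journalctl filters can be passed straight through the wrapper (project name and time range below are illustrative):

```shell
# show only the last hour of logs for the bbmri project
/srv/docker/bridgehead/bridgehead logs bbmri --since "1 hour ago"
```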
Once the Bridgehead has passed these checks, take a look at the landing page:

@ -235,7 +236,7 @@ You can either do this in a browser or with curl. If you visit the URL in the br
curl -k https://localhost
```

If you get errors when you do this, you need to use ```docker logs``` to examine your landing page container in order to determine what is going wrong.
Should the landing page not show anything, you can inspect the logs of the containers to determine what is going wrong. To do this, you can use `./bridgehead docker-logs <Project> -f` to follow the logs of a container. This translates to a docker compose logs command, meaning all the usual docker logs flags work.
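As with the systemd logs, the usual flags are passed through to docker compose logs (service name and flag below are illustrative):

```shell
# tail only the last 100 lines of the Blaze container
./bridgehead docker-logs ccp --tail 100 blaze
```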
If you have chosen to take part in our monitoring program (by setting the ```MONITOR_APIKEY``` variable in the configuration), you will be informed by email when problems are detected in your Bridgehead.
|
||||
|
||||
|
|
|
@ -4,12 +4,13 @@ version: "3.7"
|
|||
|
||||
services:
|
||||
blaze:
|
||||
image: docker.verbis.dkfz.de/cache/samply/blaze:latest
|
||||
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
|
||||
container_name: bridgehead-bbmri-blaze
|
||||
environment:
|
||||
BASE_URL: "http://bridgehead-bbmri-blaze:8080"
|
||||
JAVA_TOOL_OPTIONS: "-Xmx4g"
|
||||
LOG_LEVEL: "debug"
|
||||
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
|
||||
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
|
||||
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
|
||||
ENFORCE_REFERENTIAL_INTEGRITY: "false"
|
||||
volumes:
|
||||
- "blaze-data:/app/data"
|
||||
|
|
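With this change, Blaze's heap and resource cache are driven by variables instead of hard-coded values (and the bridgehead script's new optimizeBlazeMemoryUsage step can derive them automatically). A sketch of a manual override, assuming a site pins the values in its /etc/bridgehead/bbmri.local.conf:

```shell
# illustrative values: cap the Blaze heap at 8 GiB (value is in megabytes) and the resource cache at 5 million resources
BLAZE_MEMORY_CAP=8192
BLAZE_RESOURCE_CACHE_CAP=5000000
```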
|
@ -1,3 +1,5 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
directory_sync_service:
|
||||
image: "docker.verbis.dkfz.de/cache/samply/directory_sync_service"
|
||||
|
|
|
@ -4,7 +4,7 @@
# Makes only sense for German Biobanks
: ${ENABLE_GBN:=false}

FOCUS_RETRY_COUNT=32
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem

for module in $PROJECT/modules/*.sh
|
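Since the vars file now uses a default expansion, sites can raise the retry count from their local configuration, which loadVars sources before the vars file; the value below is illustrative:

```shell
# e.g. in /etc/bridgehead/bbmri.local.conf; without an override the default of 64 applies
FOCUS_RETRY_COUNT=128
```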
bridgehead (11 changes)
|
@ -50,6 +50,8 @@ loadVars() {
|
|||
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
|
||||
fi
|
||||
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
|
||||
setHostname
|
||||
optimizeBlazeMemoryUsage
|
||||
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
|
||||
set +a
|
||||
|
||||
|
@ -64,7 +66,6 @@ loadVars() {
|
|||
OVERRIDE+=" -f ./$PROJECT/docker-compose.override.yml"
|
||||
fi
|
||||
detectCompose
|
||||
setHostname
|
||||
setupProxy
|
||||
|
||||
# Set some project-independent default values
|
||||
|
@ -89,11 +90,14 @@ case "$ACTION" in
|
|||
loadVars
|
||||
hc_send log "Bridgehead $PROJECT startup: Checking requirements ..."
|
||||
checkRequirements
|
||||
sync_secrets
|
||||
hc_send log "Bridgehead $PROJECT startup: Requirements checked out. Now starting bridgehead ..."
|
||||
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE up --abort-on-container-exit
|
||||
;;
|
||||
stop)
|
||||
loadVars
|
||||
# Kill stale secret-sync instances if present
|
||||
docker kill $(docker ps -q --filter ancestor=docker.verbis.dkfz.de/cache/samply/secret-sync-local) 2>/dev/null || true
|
||||
# HACK: This is temporary, to properly shut down misnamed bridgehead instances (bridgehead-ccp instead of ccp)
|
||||
$COMPOSE -p bridgehead-$PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
|
||||
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
|
||||
|
@ -103,6 +107,11 @@ case "$ACTION" in
|
|||
exit $?
|
||||
;;
|
||||
logs)
|
||||
loadVars
|
||||
shift 2
|
||||
exec journalctl -u bridgehead@$PROJECT -u bridgehead-update@$PROJECT -a $@
|
||||
;;
|
||||
docker-logs)
|
||||
loadVars
|
||||
shift 2
|
||||
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE logs -f $@
|
||||
|
|
|
@ -2,11 +2,13 @@ version: "3.7"
|
|||
|
||||
services:
|
||||
blaze:
|
||||
image: docker.verbis.dkfz.de/cache/samply/blaze:latest
|
||||
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
|
||||
container_name: bridgehead-ccp-blaze
|
||||
environment:
|
||||
BASE_URL: "http://bridgehead-ccp-blaze:8080"
|
||||
JAVA_TOOL_OPTIONS: "-Xmx4g"
|
||||
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
|
||||
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
|
||||
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
|
||||
ENFORCE_REFERENTIAL_INTEGRITY: "false"
|
||||
volumes:
|
||||
- "blaze-data:/app/data"
|
||||
|
@ -19,7 +21,7 @@ services:
|
|||
- "traefik.http.routers.blaze_ccp.tls=true"
|
||||
|
||||
focus:
|
||||
image: docker.verbis.dkfz.de/cache/samply/focus:0.4.0
|
||||
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
|
||||
container_name: bridgehead-focus
|
||||
environment:
|
||||
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
adt2fhir-rest:
|
||||
container_name: bridgehead-adt2fhir-rest
|
||||
image: docker.verbis.dkfz.de/ccp/adt2fhir-rest:main
|
||||
environment:
|
||||
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
|
||||
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
|
||||
SALT: ${LOCAL_SALT}
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.adt2fhir-rest.rule=PathPrefix(`/adt2fhir-rest`)"
|
||||
- "traefik.http.middlewares.adt2fhir-rest_strip.stripprefix.prefixes=/adt2fhir-rest"
|
||||
- "traefik.http.services.adt2fhir-rest.loadbalancer.server.port=8080"
|
||||
- "traefik.http.routers.adt2fhir-rest.tls=true"
|
||||
- "traefik.http.routers.adt2fhir-rest.middlewares=adt2fhir-rest_strip,auth"
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
function adt2fhirRestSetup() {
|
||||
if [ -n "$ENABLE_ADT2FHIR_REST" ]; then
|
||||
log INFO "ADT2FHIR-REST setup detected -- will start adt2fhir-rest API."
|
||||
if [ ! -n "$IDMANAGER_LOCAL_PATIENTLIST_APIKEY" ]; then
|
||||
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
|
||||
exit 1;
|
||||
fi
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/adt2fhir-rest-compose.yml"
|
||||
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
|
||||
fi
|
||||
}
|
|
@ -0,0 +1,32 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
blaze-secondary:
|
||||
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
|
||||
container_name: bridgehead-ccp-blaze-secondary
|
||||
environment:
|
||||
BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080"
|
||||
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
|
||||
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
|
||||
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
|
||||
ENFORCE_REFERENTIAL_INTEGRITY: "false"
|
||||
volumes:
|
||||
- "blaze-secondary-data:/app/data"
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.blaze-secondary_ccp.rule=PathPrefix(`/ccp-localdatamanagement-secondary`)"
|
||||
- "traefik.http.middlewares.ccp_b-secondary_strip.stripprefix.prefixes=/ccp-localdatamanagement-secondary"
|
||||
- "traefik.http.services.blaze-secondary_ccp.loadbalancer.server.port=8080"
|
||||
- "traefik.http.routers.blaze-secondary_ccp.middlewares=ccp_b-secondary_strip,auth"
|
||||
- "traefik.http.routers.blaze-secondary_ccp.tls=true"
|
||||
|
||||
obds2fhir-rest:
|
||||
environment:
|
||||
STORE_PATH: ${STORE_PATH:-http://blaze:8080/fhir}
|
||||
|
||||
exporter:
|
||||
environment:
|
||||
BLAZE_HOST: "blaze-secondary"
|
||||
|
||||
volumes:
|
||||
blaze-secondary-data:
|
|
@ -0,0 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
function blazeSecondarySetup() {
|
||||
if [ -n "$ENABLE_SECONDARY_BLAZE" ]; then
|
||||
log INFO "Secondary Blaze setup detected -- will start second blaze."
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/blaze-secondary-compose.yml"
|
||||
#make oBDS2FHIR ignore ID-Management and replace target Blaze
|
||||
PATIENTLIST_URL=" "
|
||||
STORE_PATH="http://blaze-secondary:8080/fhir"
|
||||
fi
|
||||
}
|
|
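A minimal sketch of switching the new module on, assuming the usual local override file:

```shell
# illustrative /etc/bridgehead/ccp.local.conf entry; blaze-secondary-setup.sh then appends the extra compose file
ENABLE_SECONDARY_BLAZE=true
```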
@ -0,0 +1,171 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
rstudio:
|
||||
container_name: bridgehead-rstudio
|
||||
image: docker.verbis.dkfz.de/ccp/dktk-rstudio:latest
|
||||
environment:
|
||||
#DEFAULT_USER: "rstudio" # This line is kept for informational purposes
|
||||
PASSWORD: "${RSTUDIO_ADMIN_PASSWORD}" # It is required, even if the authentication is disabled
|
||||
DISABLE_AUTH: "true" # https://rocker-project.org/images/versioned/rstudio.html#how-to-use
|
||||
HTTP_RELATIVE_PATH: "/rstudio"
|
||||
ALL_PROXY: "http://forward_proxy:3128" # https://rocker-project.org/use/networking.html
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.rstudio_ccp.rule=PathPrefix(`/rstudio`)"
|
||||
- "traefik.http.services.rstudio_ccp.loadbalancer.server.port=8787"
|
||||
- "traefik.http.middlewares.rstudio_ccp_strip.stripprefix.prefixes=/rstudio"
|
||||
- "traefik.http.routers.rstudio_ccp.tls=true"
|
||||
- "traefik.http.routers.rstudio_ccp.middlewares=oidcAuth,rstudio_ccp_strip"
|
||||
networks:
|
||||
- rstudio
|
||||
|
||||
opal:
|
||||
container_name: bridgehead-opal
|
||||
image: docker.verbis.dkfz.de/ccp/dktk-opal:latest
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.opal_ccp.rule=PathPrefix(`/opal`)"
|
||||
- "traefik.http.services.opal_ccp.loadbalancer.server.port=8080"
|
||||
- "traefik.http.routers.opal_ccp.tls=true"
|
||||
links:
|
||||
- opal-rserver
|
||||
- opal-db
|
||||
environment:
|
||||
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC -Dhttps.proxyHost=forward_proxy -Dhttps.proxyPort=3128"
|
||||
# OPAL_ADMINISTRATOR_USER: "administrator" # This line is kept for informational purposes
|
||||
OPAL_ADMINISTRATOR_PASSWORD: "${OPAL_ADMIN_PASSWORD}"
|
||||
POSTGRESDATA_HOST: "opal-db"
|
||||
POSTGRESDATA_DATABASE: "opal"
|
||||
POSTGRESDATA_USER: "opal"
|
||||
POSTGRESDATA_PASSWORD: "${OPAL_DB_PASSWORD}"
|
||||
ROCK_HOSTS: "opal-rserver:8085"
|
||||
APP_URL: "https://${HOST}/opal"
|
||||
APP_CONTEXT_PATH: "/opal"
|
||||
OPAL_PRIVATE_KEY: "/run/secrets/opal-key.pem"
|
||||
OPAL_CERTIFICATE: "/run/secrets/opal-cert.pem"
|
||||
OIDC_URL: "${OIDC_URL}"
|
||||
OIDC_REALM: "${OIDC_REALM}"
|
||||
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
|
||||
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
|
||||
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
|
||||
TOKEN_MANAGER_PASSWORD: "${TOKEN_MANAGER_OPAL_PASSWORD}"
|
||||
EXPORTER_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
|
||||
BEAM_APP_ID: token-manager.${PROXY_ID}
|
||||
BEAM_SECRET: ${TOKEN_MANAGER_SECRET}
|
||||
BEAM_DATASHIELD_PROXY: request-manager
|
||||
volumes:
|
||||
- "/var/cache/bridgehead/ccp/opal-metadata-db:/srv" # Opal metadata
|
||||
secrets:
|
||||
- opal-cert.pem
|
||||
- opal-key.pem
|
||||
|
||||
opal-db:
|
||||
container_name: bridgehead-opal-db
|
||||
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
|
||||
environment:
|
||||
POSTGRES_PASSWORD: "${OPAL_DB_PASSWORD}" # Set in datashield-setup.sh
|
||||
POSTGRES_USER: "opal"
|
||||
POSTGRES_DB: "opal"
|
||||
volumes:
|
||||
- "/var/cache/bridgehead/ccp/opal-db:/var/lib/postgresql/data" # Opal project data (imported from exporter)
|
||||
|
||||
opal-rserver:
|
||||
container_name: bridgehead-opal-rserver
|
||||
image: docker.verbis.dkfz.de/ccp/dktk-rserver # datashield/rock-base + dsCCPhos
|
||||
tmpfs:
|
||||
- /srv
|
||||
|
||||
beam-connect:
|
||||
image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
|
||||
container_name: bridgehead-datashield-connect
|
||||
environment:
|
||||
PROXY_URL: "http://beam-proxy:8081"
|
||||
TLS_CA_CERTIFICATES_DIR: /run/secrets
|
||||
APP_ID: datashield-connect.${SITE_ID}.${BROKER_ID}
|
||||
PROXY_APIKEY: ${DATASHIELD_CONNECT_SECRET}
|
||||
DISCOVERY_URL: "./map/central.json"
|
||||
LOCAL_TARGETS_FILE: "./map/local.json"
|
||||
NO_AUTH: "true"
|
||||
secrets:
|
||||
- opal-cert.pem
|
||||
depends_on:
|
||||
- beam-proxy
|
||||
volumes:
|
||||
- /tmp/bridgehead/opal-map/:/map/:ro
|
||||
networks:
|
||||
- default
|
||||
- rstudio
|
||||
|
||||
traefik:
|
||||
labels:
|
||||
- "traefik.http.middlewares.oidcAuth.forwardAuth.address=http://oauth2-proxy:4180/"
|
||||
- "traefik.http.middlewares.oidcAuth.forwardAuth.trustForwardHeader=true"
|
||||
- "traefik.http.middlewares.oidcAuth.forwardAuth.authResponseHeaders=X-Auth-Request-Access-Token,Authorization"
|
||||
networks:
|
||||
- default
|
||||
- rstudio
|
||||
forward_proxy:
|
||||
networks:
|
||||
- default
|
||||
- rstudio
|
||||
|
||||
beam-proxy:
|
||||
environment:
|
||||
APP_datashield-connect_KEY: ${DATASHIELD_CONNECT_SECRET}
|
||||
APP_token-manager_KEY: ${TOKEN_MANAGER_SECRET}
|
||||
|
||||
# TODO: Allow users of group /DataSHIELD and OIDC_USER_GROUP at the same time:
|
||||
# Maybe a solution would be (https://oauth2-proxy.github.io/oauth2-proxy/configuration/oauth_provider):
|
||||
# --allowed-groups=/DataSHIELD,OIDC_USER_GROUP
|
||||
oauth2-proxy:
|
||||
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
|
||||
container_name: bridgehead-oauth2proxy
|
||||
command: >-
|
||||
--allowed-group=DataSHIELD
|
||||
--oidc-groups-claim=${OIDC_GROUP_CLAIM}
|
||||
--auth-logging=true
|
||||
--whitelist-domain=${HOST}
|
||||
--http-address="0.0.0.0:4180"
|
||||
--reverse-proxy=true
|
||||
--upstream="static://202"
|
||||
--email-domain="*"
|
||||
--cookie-name="_BRIDGEHEAD_oauth2"
|
||||
--cookie-secret="${OAUTH2_PROXY_SECRET}"
|
||||
--cookie-expire="12h"
|
||||
--cookie-secure="true"
|
||||
--cookie-httponly="true"
|
||||
#OIDC settings
|
||||
--provider="keycloak-oidc"
|
||||
--provider-display-name="VerbIS Login"
|
||||
--client-id="${OIDC_PRIVATE_CLIENT_ID}"
|
||||
--client-secret="${OIDC_CLIENT_SECRET}"
|
||||
--redirect-url="https://${HOST}${OAUTH2_CALLBACK}"
|
||||
--oidc-issuer-url="${OIDC_ISSUER_URL}"
|
||||
--scope="openid email profile"
|
||||
--code-challenge-method="S256"
|
||||
--skip-provider-button=true
|
||||
#X-Forwarded-Header settings - true/false depending on your needs
|
||||
--pass-basic-auth=true
|
||||
--pass-user-headers=false
|
||||
--pass-access-token=false
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.oauth2_proxy.rule=Host(`${HOST}`) && PathPrefix(`/oauth2`)"
|
||||
- "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
|
||||
- "traefik.http.routers.oauth2_proxy.tls=true"
|
||||
environment:
|
||||
http_proxy: "http://forward_proxy:3128"
|
||||
https_proxy: "http://forward_proxy:3128"
|
||||
depends_on:
|
||||
forward_proxy:
|
||||
condition: service_healthy
|
||||
|
||||
secrets:
|
||||
opal-cert.pem:
|
||||
file: /tmp/bridgehead/opal-cert.pem
|
||||
opal-key.pem:
|
||||
file: /tmp/bridgehead/opal-key.pem
|
||||
|
||||
networks:
|
||||
rstudio:
|
|
@ -0,0 +1,157 @@
|
|||
<template id="opal-ccp" source-id="blaze-store" opal-project="ccp-demo" target-id="opal" >
|
||||
|
||||
<container csv-filename="Patient-${TIMESTAMP}.csv" opal-table="patient" opal-entity-type="Patient">
|
||||
<attribute csv-column="patient-id" opal-value-type="text" primary-key="true" val-fhir-path="Patient.id.value" anonym="Pat" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="dktk-id-global" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Global').value.value"/>
|
||||
<attribute csv-column="dktk-id-lokal" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Lokal').value.value" />
|
||||
<attribute csv-column="geburtsdatum" opal-value-type="date" val-fhir-path="Patient.birthDate.value"/>
|
||||
<attribute csv-column="geschlecht" opal-value-type="text" val-fhir-path="Patient.gender.value" />
|
||||
<attribute csv-column="datum_des_letztbekannten_vitalstatus" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '75186-7').effective.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
|
||||
<attribute csv-column="vitalstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '75186-7').value.coding.code.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
|
||||
<!--fehlt in ADT2FHIR--><attribute csv-column="tod_tumorbedingt" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://fhir.de/CodeSystem/bfarm/icd-10-gm').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
|
||||
<!--fehlt in ADT2FHIR--><attribute csv-column="todesursachen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/JNUCS').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
|
||||
</container>
|
||||
|
||||
<container csv-filename="Diagnosis-${TIMESTAMP}.csv" opal-table="diagnosis" opal-entity-type="Diagnosis">
|
||||
<attribute csv-column="diagnosis-id" primary-key="true" opal-value-type="text" val-fhir-path="Condition.id.value" anonym="Dia" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Condition.subject.reference.value" anonym="Pat"/>
|
||||
<attribute csv-column="primaerdiagnose" opal-value-type="text" val-fhir-path="Condition.code.coding.code.value"/>
|
||||
<attribute csv-column="tumor_diagnosedatum" opal-value-type="date" val-fhir-path="Condition.onset.value"/>
|
||||
<attribute csv-column="primaertumor_diagnosetext" opal-value-type="text" val-fhir-path="Condition.code.text.value"/>
|
||||
<attribute csv-column="version_des_icd-10_katalogs" opal-value-type="integer" val-fhir-path="Condition.code.coding.version.value"/>
|
||||
<attribute csv-column="lokalisation" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').code.value"/>
|
||||
<attribute csv-column="icd-o_katalog_topographie_version" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').version.value"/>
|
||||
<attribute csv-column="seitenlokalisation_nach_adt-gekid" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/SeitenlokalisationCS').code.value"/>
|
||||
</container>
|
||||
|
||||
<container csv-filename="Progress-${TIMESTAMP}.csv" opal-table="progress" opal-entity-type="Progress">
|
||||
<!--it would be better to generate an ID, instead of extracting the ClinicalImpression id-->
|
||||
<attribute csv-column="progress-id" primary-key="true" opal-value-type="text" val-fhir-path="ClinicalImpression.id.value" anonym="Pro" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="ClinicalImpression.problem.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="ClinicalImpression.subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="untersuchungs-_befunddatum_im_verlauf" opal-value-type="date" val-fhir-path="ClinicalImpression.effective.value" />
|
||||
<!-- just for evaluation: redundant to Untersuchungs-, Befunddatum im Verlauf-->
|
||||
<attribute csv-column="datum_lokales_oder_regionaeres_rezidiv" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').effective.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
|
||||
<attribute csv-column="gesamtbeurteilung_tumorstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21976-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
|
||||
<attribute csv-column="lokales_oder_regionaeres_rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
|
||||
<attribute csv-column="lymphknoten-rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4370-8').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
|
||||
<attribute csv-column="fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4226-2').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
|
||||
</container>
|
||||
|
||||
<container csv-filename="Histology-${TIMESTAMP}.csv" opal-table="histology" opal-entity-type="Histology" >
|
||||
<attribute csv-column="histology-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').id" anonym="His" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').focus.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="histologie_datum" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '59847-4').effective.value"/>
|
||||
<attribute csv-column="icd-o_katalog_morphologie_version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.version.value" />
|
||||
<attribute csv-column="morphologie" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.code.value"/>
|
||||
<attribute csv-column="morphologie-freitext" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.text.value"/>
|
||||
<attribute csv-column="grading" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59542-1').value.coding.code.value" join-fhir-path="Observation.where(code.coding.code = '59847-4').hasMember.reference.value"/>
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="Metastasis-${TIMESTAMP}.csv" opal-table="metastasis" opal-entity-type="Metastasis" >
|
||||
<attribute csv-column="metastasis-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').id" anonym="Met" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').focus.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="datum_fernmetastasen" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21907-1').effective.value"/>
|
||||
<attribute csv-column="fernmetastasen_vorhanden" opal-value-type="boolean" val-fhir-path="Observation.where(code.coding.code = '21907-1').value.coding.code.value"/>
|
||||
<attribute csv-column="lokalisation_fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').bodySite.coding.code.value"/>
|
||||
</container>
|
||||
|
||||
<container csv-filename="TNM-${TIMESTAMP}.csv" opal-table="tnm" opal-entity-type="TNM">
|
||||
<attribute csv-column="tnm-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').id" anonym="TNM" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').focus.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="datum_der_tnm_dokumentation_datum_befund" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').effective.value"/>
|
||||
<attribute csv-column="uicc_stadium" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').value.coding.code.value"/>
|
||||
<attribute csv-column="c_p_u_preefix_t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
|
||||
<attribute csv-column="c_p_u_preefix_n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
|
||||
<attribute csv-column="c_p_u_preefix_m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-y-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '59479-6' or code.coding.code = '59479-6').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-r-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21983-2' or code.coding.code = '21983-2').value.coding.code.value"/>
|
||||
<attribute csv-column="tnm-m-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '42030-7' or code.coding.code = '42030-7').value.coding.code.value"/>
|
||||
<!--nur bei UICC, nicht in ADT2FHIR--><attribute csv-column="tnm-version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.version.value"/>
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="System-Therapy-${TIMESTAMP}.csv" opal-table="system-therapy" opal-entity-type="SystemTherapy">
|
||||
<attribute csv-column="system-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="MedicationStatement.id" anonym="Sys" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="MedicationStatement.reasonReference.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="MedicationStatement.subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="systemische_therapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
|
||||
<attribute csv-column="intention_chemotherapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value"/>
|
||||
<attribute csv-column="therapieart" opal-value-type="text" val-fhir-path="MedicationStatement.category.coding.code.value"/>
|
||||
<attribute csv-column="systemische_therapie_beginn" opal-value-type="date" val-fhir-path="MedicationStatement.effective.start.value"/>
|
||||
<attribute csv-column="systemische_therapie_ende" opal-value-type="date" val-fhir-path="MedicationStatement.effective.end.value"/>
|
||||
<attribute csv-column="systemische_therapie_protokoll" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SystemischeTherapieProtokoll').value.text.value"/>
|
||||
<attribute csv-column="systemische_therapie_substanzen" opal-value-type="text" val-fhir-path="MedicationStatement.medication.text.value"/>
|
||||
<attribute csv-column="chemotherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'CH').exists().value" />
|
||||
<attribute csv-column="hormontherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'HO').exists().value" />
|
||||
<attribute csv-column="immuntherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'IM').exists().value" />
|
||||
<attribute csv-column="knochenmarktransplantation" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'KM').exists().value" />
|
||||
<attribute csv-column="abwartende_strategie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'WS').exists().value" />
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="Surgery-${TIMESTAMP}.csv" opal-table="surgery" opal-entity-type="Surgery">
|
||||
<attribute csv-column="surgery-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').id" anonym="Sur" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').reasonReference.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="ops-code" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').code.coding.code.value"/>
|
||||
<attribute csv-column="datum_der_op" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'OP').performed.value"/>
|
||||
<attribute csv-column="intention_op" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-OPIntention').value.coding.code.value"/>
|
||||
<attribute csv-column="lokale_beurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/LokaleBeurteilungResidualstatusCS').code.value" />
|
||||
<attribute csv-column="gesamtbeurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/GesamtbeurteilungResidualstatusCS').code.value" />
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="Radiation-Therapy-${TIMESTAMP}.csv" opal-table="radiation-therapy" opal-entity-type="RadiationTherapy">
|
||||
<attribute csv-column="radiation-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').id" anonym="Rad" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').reasonReference.reference.value" anonym="Dia"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="strahlentherapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
|
||||
<attribute csv-column="intention_strahlentherapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value" />
|
||||
<attribute csv-column="strahlentherapie_beginn" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.start.value"/>
|
||||
<attribute csv-column="strahlentherapie_ende" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.end.value"/>
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="Molecular-Marker-${TIMESTAMP}.csv" opal-table="molecular-marker" opal-entity-type="MolecularMarker">
|
||||
<attribute csv-column="mol-marker-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').id" anonym="Mol" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').focus.reference.value" anonym="Dia" />
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="datum_der_datenerhebung" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '69548-6').effective.value"/>
|
||||
<attribute csv-column="marker" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').component.value.coding.code.value"/>
|
||||
<attribute csv-column="status_des_molekularen_markers" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.coding.code.value" />
|
||||
<attribute csv-column="zusaetzliche_alternative_dokumentation" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.text.value"/>
|
||||
</container>
|
||||
|
||||
|
||||
<container csv-filename="Sample-${TIMESTAMP}.csv" opal-table="sample" opal-entity-type="Sample">
|
||||
<attribute csv-column="sample-id" primary-key="true" opal-value-type="text" val-fhir-path="Specimen.id" anonym="Sam" op="EXTRACT_RELATIVE_ID"/>
|
||||
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Specimen.subject.reference.value" anonym="Pat" />
|
||||
<attribute csv-column="entnahmedatum" opal-value-type="date" val-fhir-path="Specimen.collection.collectedDateTime.value"/>
|
||||
<attribute csv-column="probenart" opal-value-type="text" val-fhir-path="Specimen.type.coding.code.value"/>
|
||||
<attribute csv-column="status" opal-value-type="text" val-fhir-path="Specimen.status.code.value"/>
|
||||
<attribute csv-column="projekt" opal-value-type="text" val-fhir-path="Specimen.identifier.system.value"/>
|
||||
<!-- @TODO: it is still necessary to clarify whether it would not be better to take the quantity of collection.quantity -->
|
||||
<attribute csv-column="menge" opal-value-type="integer" val-fhir-path="Specimen.container.specimenQuantity.value.value"/>
|
||||
<attribute csv-column="einheit" opal-value-type="text" val-fhir-path="Specimen.container.specimenQuantity.unit.value"/>
|
||||
<attribute csv-column="aliquot" opal-value-type="text" val-fhir-path="Specimen.parent.reference.exists().value" />
|
||||
</container>
|
||||
|
||||
|
||||
|
||||
|
||||
<fhir-rev-include>Observation:patient</fhir-rev-include>
|
||||
<fhir-rev-include>Condition:patient</fhir-rev-include>
|
||||
<fhir-rev-include>ClinicalImpression:patient</fhir-rev-include>
|
||||
<fhir-rev-include>MedicationStatement:patient</fhir-rev-include>
|
||||
<fhir-rev-include>Procedure:patient</fhir-rev-include>
|
||||
<fhir-rev-include>Specimen:patient</fhir-rev-include>
|
||||
|
||||
</template>
|
|
@ -0,0 +1,44 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
if [ "$ENABLE_DATASHIELD" == true ]; then
|
||||
# HACK: This only works because exporter-setup.sh and teiler-setup.sh are sourced after datashield-setup.sh
|
||||
if [ -z "${ENABLE_EXPORTER}" ] || [ "${ENABLE_EXPORTER}" != "true" ]; then
|
||||
log WARN "The ENABLE_EXPORTER variable is either not set or not set to 'true'."
|
||||
fi
|
||||
OAUTH2_CALLBACK=/oauth2/callback
|
||||
OAUTH2_PROXY_SECRET="$(echo \"This is a salt string to generate one consistent encryption key for the oauth2_proxy. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 32)"
|
||||
add_private_oidc_redirect_url "${OAUTH2_CALLBACK}"
|
||||
|
||||
log INFO "DataSHIELD setup detected -- will start DataSHIELD services."
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/datashield-compose.yml"
|
||||
EXPORTER_OPAL_PASSWORD="$(generate_password \"exporter in Opal\")"
|
||||
TOKEN_MANAGER_OPAL_PASSWORD="$(generate_password \"Token Manager in Opal\")"
|
||||
OPAL_DB_PASSWORD="$(echo \"Opal DB\" | generate_simple_password)"
|
||||
OPAL_ADMIN_PASSWORD="$(generate_password \"admin password for Opal\")"
|
||||
RSTUDIO_ADMIN_PASSWORD="$(generate_password \"admin password for R-Studio\")"
|
||||
DATASHIELD_CONNECT_SECRET="$(echo \"DataShield Connect\" | generate_simple_password)"
|
||||
TOKEN_MANAGER_SECRET="$(echo \"Token Manager\" | generate_simple_password)"
|
||||
if [ ! -e /tmp/bridgehead/opal-cert.pem ]; then
|
||||
mkdir -p /tmp/bridgehead/
|
||||
openssl req -x509 -newkey rsa:4096 -nodes -keyout /tmp/bridgehead/opal-key.pem -out /tmp/bridgehead/opal-cert.pem -days 3650 -subj "/CN=opal/C=DE"
|
||||
fi
|
||||
mkdir -p /tmp/bridgehead/opal-map
|
||||
sites="$(cat ./$PROJECT/modules/datashield-sites.json)"
|
||||
echo "$sites" | docker_jq -n --args '{"sites": input | map({
|
||||
"name": .,
|
||||
"id": .,
|
||||
"virtualhost": "\(.):443",
|
||||
"beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
|
||||
})}' $sites >/tmp/bridgehead/opal-map/central.json
|
||||
echo "$sites" | docker_jq -n --args '[{
|
||||
"external": "'"$SITE_ID"':443",
|
||||
"internal": "opal:8443",
|
||||
"allowed": input | map("datashield-connect.\(.).'"$BROKER_ID"'")
|
||||
}]' >/tmp/bridgehead/opal-map/local.json
|
||||
if [ "$USER" == "root" ]; then
|
||||
chown -R bridgehead:docker /tmp/bridgehead
|
||||
chmod g+wr /tmp/bridgehead/opal-map/*
|
||||
chmod g+r /tmp/bridgehead/opal-key.pem
|
||||
fi
|
||||
add_private_oidc_redirect_url "/opal/*"
|
||||
fi
|
|
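Once the setup has run, the generated Beam-Connect maps can be inspected on the host. The output sketched below is illustrative only; the real entries depend on datashield-sites.json and your BROKER_ID:

```shell
cat /tmp/bridgehead/opal-map/local.json
# [ { "external": "<SITE_ID>:443", "internal": "opal:8443",
#     "allowed": [ "datashield-connect.berlin.<BROKER_ID>", "datashield-connect.muenchen-lmu.<BROKER_ID>", ... ] } ]
```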
@ -0,0 +1,14 @@
|
|||
[
|
||||
"berlin",
|
||||
"muenchen-lmu",
|
||||
"dresden",
|
||||
"freiburg",
|
||||
"muenchen-tum",
|
||||
"tuebingen",
|
||||
"mainz",
|
||||
"frankfurt",
|
||||
"essen",
|
||||
"dktk-datashield-test",
|
||||
"dktk-test",
|
||||
"mannheim"
|
||||
]
|
|
@ -0,0 +1,28 @@
|
|||
# DataSHIELD
This module constitutes the infrastructure to run DataSHIELD within the bridgehead.
For more information about DataSHIELD, please visit https://www.datashield.org/

## R-Studio
To connect to the different bridgeheads of the CCP through DataSHIELD, you can use your own R-Studio environment.
However, the R-Studio bundled here already has the DataSHIELD libraries installed and is integrated within the bridgehead.
This can save you the extra configuration of your own R-Studio environment.

## Opal
This is the core of DataSHIELD. It is made up of Opal, a Postgres database and an R-server.
For more information about Opal, please visit https://opaldoc.obiba.org

### Opal
Opal is OBiBa’s core database application for biobanks.

### Opal-DB
Opal requires a database to import the data for DataSHIELD. We use a Postgres instance as this database.
The data is imported within the bridgehead through the exporter.

### Opal-R-Server
An R server that executes the R scripts in DataSHIELD.

## Beam
### Beam-Connect
Beam-Connect is used to route HTTP(S) traffic through Beam so that R-Studio can access data from other bridgeheads that have DataSHIELD enabled.

### Beam-Proxy
The usual Beam proxy used for communication.
|
|
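A minimal sketch of enabling the module at a site, assuming the usual /etc/bridgehead/ccp.local.conf override file (datashield-setup.sh warns if the exporter is not enabled alongside it):

```shell
ENABLE_DATASHIELD=true
ENABLE_EXPORTER=true
```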
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/bin/bash -e
|
||||
|
||||
if [ -n "${ENABLE_DNPM}" ]; then
|
||||
log INFO "DNPM setup detected (Beam.Connect) -- will start Beam.Connect for DNPM."
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
# Full Excel Export
|
||||
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
|
||||
--header 'x-api-key: ${EXPORT_API_KEY}'
|
||||
|
||||
# QB
|
||||
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'
|
|
@ -0,0 +1,67 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
exporter:
|
||||
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
|
||||
container_name: bridgehead-ccp-exporter
|
||||
environment:
|
||||
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
|
||||
LOG_LEVEL: "INFO"
|
||||
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
|
||||
CROSS_ORIGINS: "https://${HOST}"
|
||||
EXPORTER_DB_USER: "exporter"
|
||||
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
|
||||
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
|
||||
HTTP_RELATIVE_PATH: "/ccp-exporter"
|
||||
SITE: "${SITE_ID}"
|
||||
HTTP_SERVLET_REQUEST_SCHEME: "https"
|
||||
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
|
||||
- "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
|
||||
- "traefik.http.routers.exporter_ccp.tls=true"
|
||||
- "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
|
||||
- "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
|
||||
volumes:
|
||||
- "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"
|
||||
|
||||
exporter-db:
|
||||
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
|
||||
container_name: bridgehead-ccp-exporter-db
|
||||
environment:
|
||||
POSTGRES_USER: "exporter"
|
||||
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
|
||||
POSTGRES_DB: "exporter"
|
||||
volumes:
|
||||
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
|
||||
- "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"
|
||||
|
||||
reporter:
|
||||
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
|
||||
container_name: bridgehead-ccp-reporter
|
||||
environment:
|
||||
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
|
||||
LOG_LEVEL: "INFO"
|
||||
CROSS_ORIGINS: "https://${HOST}"
|
||||
HTTP_RELATIVE_PATH: "/ccp-reporter"
|
||||
SITE: "${SITE_ID}"
|
||||
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
|
||||
EXPORTER_URL: "http://exporter:8092"
|
||||
LOG_FHIR_VALIDATION: "false"
|
||||
HTTP_SERVLET_REQUEST_SCHEME: "https"
|
||||
|
||||
# In this initial development state of the bridgehead, we are trying to have as few volumes as possible.
|
||||
# However, in the first executions in the CCP sites, this volume seems to be very important. A report is
|
||||
# a process that can take several hours, because it depends on the exporter.
|
||||
# There is a risk that the bridgehead restarts, losing the already created export.
|
||||
|
||||
volumes:
|
||||
- "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
|
||||
- "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
|
||||
- "traefik.http.routers.reporter_ccp.tls=true"
|
||||
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
|
||||
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
|
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash -e
|
||||
|
||||
if [ "$ENABLE_EXPORTER" == true ]; then
|
||||
log INFO "Exporter setup detected -- will start Exporter service."
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
|
||||
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
|
||||
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
|
||||
fi
|
|
@ -0,0 +1,15 @@
|
|||
# Exporter and Reporter


## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead as a set of tables.
It can produce different output formats such as CSV, Excel, JSON or XML. It can also export data into Opal.

## Exporter-DB
This database stores queries so that the exporter can execute them.
The exporter also manages the different executions of the same query through this database.

## Reporter
This component is a plugin of the exporter that allows creating more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy, Thymeleaf, ...
It is well suited to generating documents such as our traditional CCP quality report.
|
|
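For instance, mirroring the sample request shipped in this change set (the key corresponds to the EXPORTER_API_KEY generated in exporter-setup.sh; host and template are illustrative):

```shell
# request a full Excel export of all patients via the exporter's REST API
curl --location --request POST "https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL" \
     --header "x-api-key: ${EXPORTER_API_KEY}"
```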
@ -1,4 +1,5 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
id-manager:
|
||||
image: docker.verbis.dkfz.de/bridgehead/magicpl
|
||||
|
@ -28,6 +29,7 @@ services:
|
|||
container_name: bridgehead-patientlist
|
||||
environment:
|
||||
- TOMCAT_REVERSEPROXY_FQDN=${HOST}
|
||||
- TOMCAT_REVERSEPROXY_SSL=true
|
||||
- ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
|
||||
- ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
|
||||
- ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
|
||||
|
@ -43,7 +45,7 @@ services:
|
|||
- patientlist-db
|
||||
|
||||
patientlist-db:
|
||||
image: docker.verbis.dkfz.de/cache/postgres:15.6-alpine
|
||||
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
|
||||
container_name: bridgehead-patientlist-db
|
||||
environment:
|
||||
POSTGRES_USER: "mainzelliste"
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/bin/bash -e
|
||||
|
||||
function idManagementSetup() {
|
||||
if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
|
||||
|
|
|
@ -2,7 +2,7 @@ version: "3.7"
|
|||
|
||||
services:
|
||||
mtba:
|
||||
image: docker.verbis.dkfz.de/cache/samply/mtba:1.0.0
|
||||
image: docker.verbis.dkfz.de/cache/samply/mtba:develop
|
||||
container_name: bridgehead-mtba
|
||||
environment:
|
||||
BLAZE_STORE_URL: http://blaze:8080
|
||||
|
@ -11,22 +11,30 @@ services:
|
|||
ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
|
||||
ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
|
||||
ID_MANAGER_URL: http://id-manager:8080/id-manager
|
||||
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER}
|
||||
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER}
|
||||
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER}
|
||||
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER}
|
||||
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER:-FIRST_NAME}
|
||||
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER:-LAST_NAME}
|
||||
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER:-GENDER}
|
||||
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER:-BIRTHDAY}
|
||||
CBIOPORTAL_URL: http://cbioportal:8080
|
||||
FILE_CHARSET: ${MTBA_FILE_CHARSET}
|
||||
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE}
|
||||
CSV_DELIMITER: ${MTBA_CSV_DELIMITER}
|
||||
FILE_CHARSET: ${MTBA_FILE_CHARSET:-UTF-8}
|
||||
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE:-LF}
|
||||
CSV_DELIMITER: ${MTBA_CSV_DELIMITER:-TAB}
|
||||
HTTP_RELATIVE_PATH: "/mtba"
|
||||
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
|
||||
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
|
||||
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
|
||||
OIDC_REALM: "${OIDC_REALM}"
|
||||
OIDC_URL: "${OIDC_URL}"
|
||||
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.mtba.rule=PathPrefix(`/`)"
|
||||
- "traefik.http.services.mtba.loadbalancer.server.port=80"
|
||||
- "traefik.http.routers.mtba.tls=true"
|
||||
- "traefik.http.routers.mtba_ccp.rule=PathPrefix(`/mtba`)"
|
||||
- "traefik.http.services.mtba_ccp.loadbalancer.server.port=8480"
|
||||
- "traefik.http.routers.mtba_ccp.tls=true"
|
||||
|
||||
volumes:
|
||||
- /tmp/bridgehead/mtba/input:/app/input
|
||||
- /tmp/bridgehead/mtba/persist:/app/persist
|
||||
- /var/cache/bridgehead/ccp/mtba/input:/app/input
|
||||
- /var/cache/bridgehead/ccp/mtba/persist:/app/persist
|
||||
|
||||
# TODO: Include CBioPortal in Deployment ...
|
||||
# NOTE: CBioPortal can't load data while the system is running. So after importing data, the bridgehead needs to be restarted!
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
#!/bin/bash
|
||||
#!/bin/bash -e
|
||||
|
||||
function mtbaSetup() {
|
||||
if [ -n "$ENABLE_MTBA" ];then
|
||||
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
|
||||
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
|
||||
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
|
||||
exit 1;
|
||||
fi
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
|
||||
add_private_oidc_redirect_url "/mtba/*"
|
||||
fi
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
# Molecular Tumor Board Alliance (MTBA)

In this module, the genetic data to import is stored in a directory (/tmp/bridgehead/mtba/input). A process checks
regularly whether there are files in the directory. The files are pseudonymized when the IDAT is provided. The files are
combined with clinical data from Blaze and imported into cBioPortal. In addition, these files are also imported into
Blaze.
|
|
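A hypothetical drop-off of input files (note that the compose file in this change set mounts /var/cache/bridgehead/ccp/mtba/input into the container, rather than the /tmp path mentioned above, so check your deployment):

```shell
# filenames are illustrative; the watcher picks up whatever lands in the input directory
cp mutations.maf patients.csv /var/cache/bridgehead/ccp/mtba/input/
```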
@ -1,4 +1,5 @@
|
|||
version: "3.7"
|
||||
|
||||
volumes:
|
||||
nngm-rest:
|
||||
|
||||
|
@ -21,9 +22,6 @@ services:
|
|||
- "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
|
||||
volumes:
|
||||
- nngm-rest:/var/log
|
||||
|
||||
traefik:
|
||||
labels:
|
||||
- "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"
|
||||
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/bin/bash -e
|
||||
|
||||
if [ -n "$NNGM_CTS_APIKEY" ]; then
|
||||
log INFO "nNGM setup detected -- will start nNGM Connector."
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
obds2fhir-rest:
|
||||
container_name: bridgehead-obds2fhir-rest
|
||||
image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
|
||||
environment:
|
||||
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
|
||||
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
|
||||
SALT: ${LOCAL_SALT}
|
||||
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
|
||||
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
|
||||
restart: always
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
|
||||
- "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
|
||||
- "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
|
||||
- "traefik.http.routers.obds2fhir-rest.tls=true"
|
||||
- "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/bash
|
||||
|
||||
function obds2fhirRestSetup() {
|
||||
if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
|
||||
log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
|
||||
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
|
||||
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
|
||||
PATIENTLIST_URL=" "
|
||||
fi
|
||||
OVERRIDE+=" -f ./$PROJECT/modules/obds2fhir-rest-compose.yml"
|
||||
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
|
||||
fi
|
||||
}
|
|
@@ -0,0 +1,81 @@
version: "3.7"

services:

  teiler-orchestrator:
    image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
    container_name: bridgehead-teiler-orchestrator
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
      - "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
      - "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
      - "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
      - "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
    environment:
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
      HTTP_RELATIVE_PATH: "/ccp-teiler"

  teiler-dashboard:
    image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
    container_name: bridgehead-teiler-dashboard
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
      - "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
      - "traefik.http.routers.teiler_dashboard_ccp.tls=true"
      - "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
      - "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
    environment:
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      OIDC_URL: "${OIDC_URL}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
      OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
      TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
      TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
      TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
      TEILER_PROJECT: "${PROJECT}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_USER: "${OIDC_USER_GROUP}"
      TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
      REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
      EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"

  teiler-backend:
    image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
    container_name: bridgehead-teiler-backend
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
      - "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
      - "traefik.http.routers.teiler_backend_ccp.tls=true"
      - "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
      - "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
    environment:
      LOG_LEVEL: "INFO"
      APPLICATION_PORT: "8085"
      APPLICATION_ADDRESS: "${HOST}"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
      TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
      CENTRAX_URL: "${CENTRAXX_URL}"
      HTTP_PROXY: "http://forward_proxy:3128"
      ENABLE_MTBA: "${ENABLE_MTBA}"
      ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
    secrets:
      - ccp.conf

secrets:
  ccp.conf:
    file: /etc/bridgehead/ccp.conf
@@ -0,0 +1,9 @@
#!/bin/bash -e

if [ "$ENABLE_TEILER" == true ]; then
  log INFO "Teiler setup detected -- will start Teiler services."
  OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
  TEILER_DEFAULT_LANGUAGE=DE
  TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
  add_public_oidc_redirect_url "/ccp-teiler/*"
fi
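For reference, a hedged sketch of enabling the module: the script above only checks that `ENABLE_TEILER` equals `true`; the compose override, default language and OIDC redirect URL are derived automatically.

```shell
# /etc/bridgehead/ccp.conf -- hypothetical excerpt
ENABLE_TEILER=true
```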
@@ -0,0 +1,19 @@
# Teiler
This module orchestrates the different microfrontends of the Bridgehead as a single-page application.

## Teiler Orchestrator
Single-SPA component that consists of the root HTML page of the single-page application and the JavaScript code that
fetches the microfrontend configuration from the Teiler backend and is responsible for registering the microfrontends. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.

The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
They can also run standalone, but need to be extended with Single-SPA (https://single-spa.js.org/docs/ecosystem).
Three templates (Angular, Vue, React) are available that can be extended and used directly in the Teiler.

## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
Username and password are configured in ccp.local.conf.

## Teiler Backend
In this component, the microfrontends are configured.
ccp/vars
@@ -2,12 +2,23 @@ BROKER_ID=broker.ccp-it.dktk.dkfz.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=32
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem

BROKER_URL_FOR_PREREQ=$BROKER_URL

OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"
OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
# Use "test-realm-01" for testing
OIDC_REALM="${OIDC_REALM:-master}"
OIDC_URL="https://login.verbis.dkfz.de"
OIDC_ISSUER_URL="${OIDC_URL}/realms/${OIDC_REALM}"
OIDC_GROUP_CLAIM="groups"

POSTGRES_TAG=15.6-alpine

for module in $PROJECT/modules/*.sh
do
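Because the new `${FOCUS_RETRY_COUNT:-64}` form only falls back to 64 when nothing is set, a site can still pin its own retry count in the configuration that is sourced before this file. A minimal sketch (the value is only an example):

```shell
# /etc/bridgehead/ccp.conf or ccp.local.conf -- hypothetical override; any value set here wins over the default of 64
FOCUS_RETRY_COUNT=128
```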
@@ -17,4 +28,5 @@ done

idManagementSetup
mtbaSetup
adt2fhirRestSetup
obds2fhirRestSetup
blazeSecondarySetup
lib/functions.sh
@@ -53,7 +53,7 @@ checkOwner(){
}

printUsage() {
  echo "Usage: bridgehead start|stop|logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
  echo "Usage: bridgehead start|stop|logs|docker-logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
  echo "PROJECTNAME should be one of ccp|bbmri"
}
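In this hunk the first `echo` line is the old usage string and the second is its replacement, which adds the new `docker-logs` subcommand. A typical invocation (the project name is just an example):

```shell
/srv/docker/bridgehead/bridgehead docker-logs bbmri -f
```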
@@ -155,6 +155,28 @@ setHostname() {
  fi
}

# This function optimizes Blaze's memory usage according to the official performance tuning guide:
# https://github.com/samply/blaze/blob/master/docs/tuning-guide.md
# Short summary of the adjustments made:
# - set the Blaze memory cap to a quarter of the system memory
# - set the DB block cache size to a quarter of the system memory
# - limit the resource count allowed in Blaze to 1.25M per 4 GB of available system memory
optimizeBlazeMemoryUsage() {
  if [ -z "$BLAZE_MEMORY_CAP" ]; then
    system_memory_in_mb=$(LC_ALL=C free -m | grep 'Mem:' | awk '{print $2}');
    export BLAZE_MEMORY_CAP=$(($system_memory_in_mb/4));
  fi
  if [ -z "$BLAZE_RESOURCE_CACHE_CAP" ]; then
    available_system_memory_chunks=$((BLAZE_MEMORY_CAP / 1000))
    if [ $available_system_memory_chunks -eq 0 ]; then
      log WARN "Only ${BLAZE_MEMORY_CAP} MB of system memory available for Blaze. If your Blaze stores more than 128000 FHIR resources it will run significantly slower."
      export BLAZE_RESOURCE_CACHE_CAP=128000;
    else
      export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500))
    fi
  fi
}
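To make the arithmetic above concrete, a quick worked example (the host size is an assumption, not a recommendation):

```shell
# Host with ~16000 MB of RAM reported by `free -m`:
#   BLAZE_MEMORY_CAP               = 16000 / 4    = 4000    (MB, used for the memory cap and DB block cache)
#   available_system_memory_chunks = 4000 / 1000  = 4
#   BLAZE_RESOURCE_CACHE_CAP       = 4 * 312500   = 1250000 resources (~1.25M per 4 GB)
```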

# Takes 1) the backup directory path and 2) the name of the service to be backed up.
# Creates 3 backups: 1) for the past seven days, 2) for the current month, and 3) for each calendar week.
createEncryptedPostgresBackup(){
@@ -239,3 +261,113 @@ add_basic_auth_user() {
  log DEBUG "Saving clear text credentials in $FILE. If wanted, delete them manually."
  sed -i "/^$NAME/ s|$|\n# User: $USER\n# Password: $PASSWORD|" $FILE
}

OIDC_PUBLIC_REDIRECT_URLS=${OIDC_PUBLIC_REDIRECT_URLS:-""}
OIDC_PRIVATE_REDIRECT_URLS=${OIDC_PRIVATE_REDIRECT_URLS:-""}

# Add a redirect url to the public oidc client of the bridgehead
function add_public_oidc_redirect_url() {
  if [[ $OIDC_PUBLIC_REDIRECT_URLS == "" ]]; then
    OIDC_PUBLIC_REDIRECT_URLS+="$(generate_redirect_urls $1)"
  else
    OIDC_PUBLIC_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
  fi
}

# Add a redirect url to the private oidc client of the bridgehead
function add_private_oidc_redirect_url() {
  if [[ $OIDC_PRIVATE_REDIRECT_URLS == "" ]]; then
    OIDC_PRIVATE_REDIRECT_URLS+="$(generate_redirect_urls $1)"
  else
    OIDC_PRIVATE_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
  fi
}

function sync_secrets() {
  local delimiter=$'\x1E'
  local secret_sync_args=""
  if [[ $OIDC_PRIVATE_REDIRECT_URLS != "" ]]; then
    secret_sync_args="OIDC:OIDC_CLIENT_SECRET:private;$OIDC_PRIVATE_REDIRECT_URLS"
  fi
  if [[ $OIDC_PUBLIC_REDIRECT_URLS != "" ]]; then
    if [[ $secret_sync_args == "" ]]; then
      secret_sync_args="OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
    else
      secret_sync_args+="${delimiter}OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
    fi
  fi
  if [[ $secret_sync_args == "" ]]; then
    return
  fi
  mkdir -p /var/cache/bridgehead/secrets/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/secrets/'. Please run sudo './bridgehead install $PROJECT' again."
  touch /var/cache/bridgehead/secrets/oidc
  docker run --rm \
    -v /var/cache/bridgehead/secrets/oidc:/usr/local/cache \
    -v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
    -v /srv/docker/bridgehead/$PROJECT/root.crt.pem:/run/secrets/root.crt.pem:ro \
    -v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
    -e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
    -e NO_PROXY=localhost,127.0.0.1 \
    -e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
    -e PROXY_ID=$PROXY_ID \
    -e BROKER_URL=$BROKER_URL \
    -e OIDC_PROVIDER=secret-sync-central.oidc-client-enrollment.$BROKER_ID \
    -e SECRET_DEFINITIONS=$secret_sync_args \
    docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest

  set -a # Export variables as environment variables
  source /var/cache/bridgehead/secrets/*
  set +a # Export variables in the regular way
}
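A sketch of what `SECRET_DEFINITIONS` ends up looking like when both clients have registered redirect URLs; the URLs are placeholders, and the two definitions are joined by the ASCII record separator (`\x1E`) chosen above:

```shell
# OIDC:OIDC_CLIENT_SECRET:private;https://bridgehead.example.org/some-app/*
# <0x1E>
# OIDC:OIDC_PUBLIC:public;https://bridgehead.example.org/ccp-teiler/*
```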

capitalize_first_letter() {
  input="$1"
  capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}"
  echo "$capitalized"
}

# Generate a ','-separated string of redirect urls relative to $HOST.
# $1 will be appended to the url.
# If the host looks like dev-jan.inet.dkfz-heidelberg.de, it will generate urls with both dev-jan and the original $HOST as url authorities.
function generate_redirect_urls(){
  local redirect_urls="https://${HOST}$1"
  local host_without_proxy="$(echo "$HOST" | cut -d '.' -f1)"
  # Only append the second url if it is different and the host is not an ip address
  if [[ "$HOST" != "$host_without_proxy" && ! "$HOST" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    redirect_urls+=",https://$host_without_proxy$1"
  fi
  echo "$redirect_urls"
}
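A quick worked example of the helper above, reusing the hostname from its comment:

```shell
# HOST=dev-jan.inet.dkfz-heidelberg.de
# generate_redirect_urls "/ccp-teiler/*"
#   -> https://dev-jan.inet.dkfz-heidelberg.de/ccp-teiler/*,https://dev-jan/ccp-teiler/*
# With HOST set to an IP address (e.g. 10.0.0.5) or a name without dots, only the first URL is emitted.
```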

# This password contains at least one special char, a random number and a random upper and lower case letter
generate_password(){
  local seed_text="$1"
  local seed_num=$(awk 'BEGIN{FS=""} NR==1{print $10}' /etc/bridgehead/pki/${SITE_ID}.priv.pem | od -An -tuC)
  local nums="1234567890"
  local n=$(echo "$seed_num" | awk '{print $1 % 10}')
  local random_digit=${nums:$n:1}
  local n=$(echo "$seed_num" | awk '{print $1 % 26}')
  local upper="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
  local lower="abcdefghijklmnopqrstuvwxyz"
  local random_upper=${upper:$n:1}
  local random_lower=${lower:$n:1}
  local n=$(echo "$seed_num" | awk '{print $1 % 8}')
  local special='@#$%^&+='
  local random_special=${special:$n:1}

  local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
  local main_password=$(echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/\//A/g')

  echo "${main_password}${random_digit}${random_upper}${random_lower}${random_special}"
}

# This password only contains alphanumeric characters
generate_simple_password(){
  local seed_text="$1"
  local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
  echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/[+\/]/A/g'
}

docker_jq() {
  docker run --rm -i docker.verbis.dkfz.de/cache/jqlang/jq:latest "$@"
}
@@ -89,6 +89,9 @@ elif [[ "$DEV_MODE" == "DEV" ]]; then
fi

chown -R bridgehead /etc/bridgehead /srv/docker/bridgehead
mkdir -p /tmp/bridgehead /var/cache/bridgehead
chown -R bridgehead:docker /tmp/bridgehead /var/cache/bridgehead
chmod -R g+wr /var/cache/bridgehead /tmp/bridgehead

log INFO "System preparation is completed and configuration is present."
@@ -67,29 +67,30 @@ log INFO "Checking network access ($BROKER_URL_FOR_PREREQ) ..."
source /etc/bridgehead/${PROJECT}.conf
source ${PROJECT}/vars

set +e
SERVERTIME="$(https_proxy=$HTTPS_PROXY_FULL_URL curl -m 5 -s -I $BROKER_URL_FOR_PREREQ 2>&1 | grep -i -e '^Date: ' | sed -e 's/^Date: //i')"
RET=$?
set -e
if [ $RET -ne 0 ]; then
  log WARN "Unable to connect to Samply.Beam broker at $BROKER_URL_FOR_PREREQ. Please check your proxy settings.\nThe currently configured proxy was \"$HTTPS_PROXY_URL\". This error is normal when using proxy authentication."
  log WARN "Unable to check clock skew due to previous error."
else
  log INFO "Checking clock skew ..."
if [ "${PROJECT}" != "minimal" ]; then
  set +e
  SERVERTIME="$(https_proxy=$HTTPS_PROXY_FULL_URL curl -m 5 -s -I $BROKER_URL_FOR_PREREQ 2>&1 | grep -i -e '^Date: ' | sed -e 's/^Date: //i')"
  RET=$?
  set -e
  if [ $RET -ne 0 ]; then
    log WARN "Unable to connect to Samply.Beam broker at $BROKER_URL_FOR_PREREQ. Please check your proxy settings.\nThe currently configured proxy was \"$HTTPS_PROXY_URL\". This error is normal when using proxy authentication."
    log WARN "Unable to check clock skew due to previous error."
  else
    log INFO "Checking clock skew ..."

SERVERTIME_AS_TIMESTAMP=$(date --date="$SERVERTIME" +%s)
MYTIME=$(date +%s)
SKEW=$(($SERVERTIME_AS_TIMESTAMP - $MYTIME))
SKEW=$(echo $SKEW | awk -F- '{print $NF}')
SYNCTEXT="For example, consider entering a correct NTP server (e.g. your institution's Active Directory Domain Controller) in /etc/systemd/timesyncd.conf (option NTP=) and restart systemd-timesyncd."
if [ $SKEW -ge 300 ]; then
  report_error 5 "Your clock is not synchronized (${SKEW}s off). This will cause Samply.Beam's certificate validation to fail. Please set up time synchronization. $SYNCTEXT"
  exit 1
elif [ $SKEW -ge 60 ]; then
  log WARN "Your clock is more than a minute off (${SKEW}s). Consider syncing to a time server. $SYNCTEXT"
fi
    SERVERTIME_AS_TIMESTAMP=$(date --date="$SERVERTIME" +%s)
    MYTIME=$(date +%s)
    SKEW=$(($SERVERTIME_AS_TIMESTAMP - $MYTIME))
    SKEW=$(echo $SKEW | awk -F- '{print $NF}')
    SYNCTEXT="For example, consider entering a correct NTP server (e.g. your institution's Active Directory Domain Controller) in /etc/systemd/timesyncd.conf (option NTP=) and restart systemd-timesyncd."
    if [ $SKEW -ge 300 ]; then
      report_error 5 "Your clock is not synchronized (${SKEW}s off). This will cause Samply.Beam's certificate validation to fail. Please set up time synchronization. $SYNCTEXT"
      exit 1
    elif [ $SKEW -ge 60 ]; then
      log WARN "Your clock is more than a minute off (${SKEW}s). Consider syncing to a time server. $SYNCTEXT"
    fi
  fi
fi
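A worked example of the thresholds used above (the timestamps are made up):

```shell
# Broker "Date:" header parses to 2024-01-01 12:05:00 UTC, local clock reads 12:00:30 UTC:
#   SKEW = |1704110700 - 1704110430| = 270 s
#   -> above the 60 s warning threshold, below the 300 s limit that aborts with report_error 5
```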

checkPrivKey() {
  if [ -e /etc/bridgehead/pki/${SITE_ID}.priv.pem ]; then
    log INFO "Success - private key found."

@@ -100,7 +101,7 @@ checkPrivKey() {
  return 0
}

if [[ "$@" =~ "noprivkey" ]]; then
if [[ "$@" =~ "noprivkey" || "${PROJECT}" != "minimal" ]]; then
  log INFO "Skipping check for private key for now."
else
  checkPrivKey || exit 1
@@ -42,6 +42,9 @@ services:
      - /var/spool/squid
    volumes:
      - /etc/bridgehead/trusted-ca-certs:/docker/custom-certs/:ro
    healthcheck:
      # Wait 1s before marking this service healthy. Required for the oauth2-proxy to talk to the OIDC provider on startup, which will fail if the forward proxy is not started yet.
      test: ["CMD", "sleep", "1"]

  landing:
    container_name: bridgehead-landingpage

@@ -55,5 +58,3 @@ services:
      HOST: ${HOST}
      PROJECT: ${PROJECT}
      SITE_NAME: ${SITE_NAME}