Compare commits

..

3 Commits

SHA1 | Message | Date
d43f6822bc | fix landingpage (author: Jan) | 2024-09-12 11:42:45 +00:00
02ee84df2c | Merge pull request #183 from samply/fix/minimal-checks (Dont test clock skew and priv key for minimal bridgeheads) | 2024-04-26 09:04:40 +02:00
36c5c1a080 | Changed dnpm-configuration to allow different broker | 2024-03-06 13:21:11 +00:00
77 changed files with 111 additions and 1964 deletions

View File

@ -1,39 +0,0 @@
import os
import requests
from datetime import datetime, timedelta
# Configuration
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
REPO = 'samply/bridgehead'
HEADERS = {'Authorization': f'token {GITHUB_TOKEN}', 'Accept': 'application/vnd.github.v3+json'}
API_URL = f'https://api.github.com/repos/{REPO}/branches'
INACTIVE_DAYS = 365
CUTOFF_DATE = datetime.now() - timedelta(days=INACTIVE_DAYS)
# Fetch all branches
def get_branches():
response = requests.get(API_URL, headers=HEADERS)
response.raise_for_status()
return response.json() if response.status_code == 200 else []
# Rename inactive branches
def rename_branch(old_name, new_name):
rename_url = f'https://api.github.com/repos/{REPO}/branches/{old_name}/rename'
response = requests.post(rename_url, json={'new_name': new_name}, headers=HEADERS)
response.raise_for_status()
print(f"Renamed branch {old_name} to {new_name}" if response.status_code == 201 else f"Failed to rename {old_name}: {response.status_code}")
# Check if the branch is inactive
def is_inactive(commit_url):
last_commit_date = requests.get(commit_url, headers=HEADERS).json()['commit']['committer']['date']
return datetime.strptime(last_commit_date, '%Y-%m-%dT%H:%M:%SZ') < CUTOFF_DATE
# Rename inactive branches
def main():
for branch in get_branches():
if is_inactive(branch['commit']['url']):
#rename_branch(branch['name'], f"archived/{branch['name']}")
print(f"[LOG] Branch '{branch['name']}' is inactive and would be renamed to 'archived/{branch['name']}'")
if __name__ == "__main__":
main()

View File

@ -1,27 +0,0 @@
name: Cleanup - Rename Inactive Branches
on:
schedule:
- cron: '0 0 * * 0' # Runs every Sunday at midnight
jobs:
archive-stale-branches:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install Libraries
run: pip install requests
- name: Run Script to Rename Inactive Branches
run: |
python .github/scripts/rename_inactive_branches.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
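For reference, the deleted cleanup script above can also be run manually with the same environment variable the workflow provides (the token value is a placeholder; the rename call itself is commented out in the script, so this is only a dry run that logs what would be renamed):
```shell
export GITHUB_TOKEN=<personal-access-token>   # placeholder: token with access to samply/bridgehead
pip install requests
python .github/scripts/rename_inactive_branches.py
```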

.gitignore (2 changed lines)
View File

@ -1,7 +1,7 @@
##Ignore site configuration
.gitmodules
site-config/*
.idea
## Ignore site configuration
*/docker-compose.override.yml

View File

@ -34,7 +34,7 @@ This repository is the starting point for any information and tools you will nee
## Requirements
The data protection officer at your site will probably want to know exactly what our software does with patient data, and you may need to get their approval before you are allowed to install a Bridgehead. To help you with this, we have provided some data protection concepts:
The data protection group at your site will probably want to know exactly what our software does with patient data, and you may need to get their approval before you are allowed to install a Bridgehead. To help you with this, we have provided some data protection concepts:
- [Germany](https://www.bbmri.de/biobanking/it/infrastruktur/datenschutzkonzept/)
@ -46,8 +46,6 @@ Hardware requirements strongly depend on the specific use-cases of your network
- 32 GB RAM
- 160GB Hard Drive, SSD recommended
We recommend using a dedicated VM for the Bridgehead, with no other applications running on it. While the Bridgehead can, in principle, run on a shared VM, you might run into surprising problems such as resource conflicts (e.g., two apps using tcp port 443).
### Software
You are strongly recommended to install the Bridgehead under a Linux operating system (but see the section [Non-Linux OS](#non-linux-os)). You will need root (administrator) privileges on this machine in order to perform the deployment. We recommend the newest Ubuntu LTS server release.
@ -202,7 +200,7 @@ sudo systemctl [enable|disable] bridgehead@<PROJECT>.service
After starting the Bridgehead, you can watch the initialization process with the following command:
```shell
/srv/docker/bridgehead/bridgehead logs <project> -f
journalctl -u bridgehead@bbmri -f
```
if this exits with something similar to the following:
@ -222,9 +220,8 @@ docker ps
There should be 6-10 Docker processes. If there are fewer, then you know that something has gone wrong. To see what is going on, run:
```shell
/srv/docker/bridgehead/bridgehead logs <Project> -f
journalctl -u bridgehead@bbmri -f
```
This translates to a journalctl command, so all the regular journalctl flags can be used.
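For example (not part of the original README, just standard journalctl usage), the output can be restricted to a recent time window:
```shell
journalctl -u bridgehead@bbmri --since "1 hour ago" --no-pager
```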
Once the Bridgehead has passed these checks, take a look at the landing page:
@ -238,7 +235,7 @@ You can either do this in a browser or with curl. If you visit the URL in the br
curl -k https://localhost
```
Should the landing page not show anything, you can inspect the logs of the containers to determine what is going wrong. To do this, you can use `./bridgehead docker-logs <Project> -f` to follow the logs of the containers. This translates to a docker compose logs command, meaning all the usual docker logs flags work.
If you get errors when you do this, you need to use ```docker logs``` to examine your landing page container in order to determine what is going wrong.
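As an illustration (based on the docker-logs handler of the bridgehead script shown further down in this compare, which appends any extra arguments to docker compose logs), additional flags can simply be passed through:
```shell
/srv/docker/bridgehead/bridgehead docker-logs bbmri --tail 100
```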
If you have chosen to take part in our monitoring program (by setting the ```MONITOR_APIKEY``` variable in the configuration), you will be informed by email when problems are detected in your Bridgehead.
@ -301,19 +298,19 @@ Once you have added your biobank to the Directory you got persistent identifier
The Bridgehead's **Directory Sync** is an optional feature that keeps the Directory up to date with your local data, e.g. number of samples. Conversely, it also updates the local FHIR store with the latest contact details etc. from the Directory. You must explicitly set your country specific directory URL, username and password to enable this feature.
You should talk with your local data protection group regarding the information that is published by Directory sync.
Full details can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
To enable it, you will need to add these variables to the ```bbmri.conf``` file of your GitLab repository. Here is an example config:
```
DS_DIRECTORY_URL=https://directory.bbmri-eric.eu
DS_DIRECTORY_USER_NAME=your_directory_username
DS_DIRECTORY_USER_PASS=your_directory_password
DS_DIRECTORY_USER_PASS=qwdnqwswdvqHBVGFR9887
DS_TIMER_CRON="0 22 * * *"
```
Please contact your National Node to obtain this information.
You must contact the Directory team for your national node to find the URL, and to register as a user.
Optionally, you **may** change when you want Directory sync to run by specifying a [cron](https://crontab.guru) expression, e.g. `DS_TIMER_CRON="0 22 * * *"` for 10 pm every evening.
Additionally, you should choose when you want Directory sync to run. In the example above, this is set to happen at 10 pm every evening. You can modify this to suit your requirements. The timer specification should follow the [cron](https://crontab.guru) convention.
Once you have edited the GitLab config, the Bridgehead will automatically update its configuration with these values and start syncing the data.
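One way to confirm that the sync is actually running (a sketch; the grep filter is only a hypothetical convenience, the unit name comes from the commands shown above):
```shell
journalctl -u bridgehead@bbmri -f | grep -i directory
```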

View File

@ -4,14 +4,12 @@ version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
image: docker.verbis.dkfz.de/cache/samply/blaze:latest
container_name: bridgehead-bbmri-blaze
environment:
BASE_URL: "http://bridgehead-bbmri-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
JAVA_TOOL_OPTIONS: "-Xmx4g"
LOG_LEVEL: "debug"
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"

View File

@ -1,16 +1,8 @@
version: "3.7"
services:
directory_sync_service:
image: "docker.verbis.dkfz.de/cache/samply/directory_sync_service"
environment:
DS_DIRECTORY_URL: ${DS_DIRECTORY_URL:-https://directory.bbmri-eric.eu}
DS_DIRECTORY_URL: ${DS_DIRECTORY_URL}
DS_DIRECTORY_USER_NAME: ${DS_DIRECTORY_USER_NAME}
DS_DIRECTORY_USER_PASS: ${DS_DIRECTORY_USER_PASS}
DS_TIMER_CRON: ${DS_TIMER_CRON:-0 22 * * *}
DS_DIRECTORY_ALLOW_STAR_MODEL: ${DS_DIRECTORY_ALLOW_STAR_MODEL:-true}
DS_DIRECTORY_MOCK: ${DS_DIRECTORY_MOCK}
DS_DIRECTORY_DEFAULT_COLLECTION_ID: ${DS_DIRECTORY_DEFAULT_COLLECTION_ID}
DS_DIRECTORY_COUNTRY: ${DS_DIRECTORY_COUNTRY}
depends_on:
- "blaze"
DS_DIRECTORY_PASS_CODE: ${DS_DIRECTORY_PASS_CODE}
DS_TIMER_CRON: ${DS_TIMER_CRON}

View File

@ -2,7 +2,7 @@ version: "3.7"
services:
focus-eric:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus-eric
environment:
API_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT}
@ -16,7 +16,7 @@ services:
- "blaze"
beam-proxy-eric:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy-eric
environment:
BROKER_URL: ${ERIC_BROKER_URL}

View File

@ -2,7 +2,7 @@ version: "3.7"
services:
focus-gbn:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus-gbn
environment:
API_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT}
@ -16,7 +16,7 @@ services:
- "blaze"
beam-proxy-gbn:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy-gbn
environment:
BROKER_URL: ${GBN_BROKER_URL}

View File

@ -4,7 +4,7 @@
# Only makes sense for German biobanks
: ${ENABLE_GBN:=false}
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
FOCUS_RETRY_COUNT=32
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
for module in $PROJECT/modules/*.sh

View File

@ -32,18 +32,6 @@ case "$PROJECT" in
bbmri)
#nothing extra to do
;;
cce)
#nothing extra to do
;;
itcc)
#nothing extra to do
;;
kr)
#nothing extra to do
;;
dhki)
#nothing extra to do
;;
minimal)
#nothing extra to do
;;
@ -62,8 +50,6 @@ loadVars() {
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
fi
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
setHostname
optimizeBlazeMemoryUsage
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
set +a
@ -78,25 +64,22 @@ loadVars() {
OVERRIDE+=" -f ./$PROJECT/docker-compose.override.yml"
fi
detectCompose
setHostname
setupProxy
# Set some project-independent default values
: ${ENVIRONMENT:=production}
export ENVIRONMENT
case "$ENVIRONMENT" in
"production")
export FOCUS_TAG=main
export BEAM_TAG=main
;;
"test")
export FOCUS_TAG=develop
export BEAM_TAG=develop
;;
*)
report_error 7 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
export FOCUS_TAG=main
export BEAM_TAG=main
;;
esac
}
@ -106,14 +89,11 @@ case "$ACTION" in
loadVars
hc_send log "Bridgehead $PROJECT startup: Checking requirements ..."
checkRequirements
sync_secrets
hc_send log "Bridgehead $PROJECT startup: Requirements checked out. Now starting bridgehead ..."
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE up --abort-on-container-exit
;;
stop)
loadVars
# Kill stale secret-sync instances if present
docker kill $(docker ps -q --filter ancestor=docker.verbis.dkfz.de/cache/samply/secret-sync-local) 2>/dev/null || true
# HACK: This is temporary, to properly shut down wrongly named bridgehead instances (bridgehead-ccp instead of ccp)
$COMPOSE -p bridgehead-$PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
@ -123,11 +103,6 @@ case "$ACTION" in
exit $?
;;
logs)
loadVars
shift 2
exec journalctl -u bridgehead@$PROJECT -u bridgehead-update@$PROJECT -a $@
;;
docker-logs)
loadVars
shift 2
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE logs -f $@

View File

@ -1,63 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-cce-blaze
environment:
BASE_URL: "http://bridgehead-cce-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_cce.rule=PathPrefix(`/cce-localdatamanagement`)"
- "traefik.http.middlewares.cce_b_strip.stripprefix.prefixes=/cce-localdatamanagement"
- "traefik.http.services.blaze_cce.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_cce.middlewares=cce_b_strip,auth"
- "traefik.http.routers.blaze_cce.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-cce-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/cce/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,33 +0,0 @@
version: "3.7"
services:
landing:
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "cce_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=manoj.waikar@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done

View File

@ -2,14 +2,11 @@ version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
image: docker.verbis.dkfz.de/cache/samply/blaze:latest
container_name: bridgehead-ccp-blaze
environment:
BASE_URL: "http://bridgehead-ccp-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
JAVA_TOOL_OPTIONS: "-Xmx4g"
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
@ -22,7 +19,7 @@ services:
- "traefik.http.routers.blaze_ccp.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-dktk
image: docker.verbis.dkfz.de/cache/samply/focus:0.4.0
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
@ -32,16 +29,12 @@ services:
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/ccp/queries_to_cache.conf:/queries_to_cache.conf
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
@ -59,6 +52,7 @@ services:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:

View File

@ -0,0 +1,18 @@
version: "3.7"
services:
adt2fhir-rest:
container_name: bridgehead-adt2fhir-rest
image: docker.verbis.dkfz.de/ccp/adt2fhir-rest:main
environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT}
restart: always
labels:
- "traefik.enable=true"
- "traefik.http.routers.adt2fhir-rest.rule=PathPrefix(`/adt2fhir-rest`)"
- "traefik.http.middlewares.adt2fhir-rest_strip.stripprefix.prefixes=/adt2fhir-rest"
- "traefik.http.services.adt2fhir-rest.loadbalancer.server.port=8080"
- "traefik.http.routers.adt2fhir-rest.tls=true"
- "traefik.http.routers.adt2fhir-rest.middlewares=adt2fhir-rest_strip,auth"

View File

@ -0,0 +1,13 @@
#!/bin/bash
function adt2fhirRestSetup() {
if [ -n "$ENABLE_ADT2FHIR_REST" ]; then
log INFO "ADT2FHIR-REST setup detected -- will start adt2fhir-rest API."
if [ ! -n "$IDMANAGER_LOCAL_PATIENTLIST_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
exit 1;
fi
OVERRIDE+=" -f ./$PROJECT/modules/adt2fhir-rest-compose.yml"
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
fi
}

View File

@ -1,32 +0,0 @@
version: "3.7"
services:
blaze-secondary:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-ccp-blaze-secondary
environment:
BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-secondary-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze-secondary_ccp.rule=PathPrefix(`/ccp-localdatamanagement-secondary`)"
- "traefik.http.middlewares.ccp_b-secondary_strip.stripprefix.prefixes=/ccp-localdatamanagement-secondary"
- "traefik.http.services.blaze-secondary_ccp.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze-secondary_ccp.middlewares=ccp_b-secondary_strip,auth"
- "traefik.http.routers.blaze-secondary_ccp.tls=true"
obds2fhir-rest:
environment:
STORE_PATH: ${STORE_PATH:-http://blaze:8080/fhir}
exporter:
environment:
BLAZE_HOST: "blaze-secondary"
volumes:
blaze-secondary-data:

View File

@ -1,11 +0,0 @@
#!/bin/bash
function blazeSecondarySetup() {
if [ -n "$ENABLE_SECONDARY_BLAZE" ]; then
log INFO "Secondary Blaze setup detected -- will start second blaze."
OVERRIDE+=" -f ./$PROJECT/modules/blaze-secondary-compose.yml"
#make oBDS2FHIR ignore ID-Management and replace target Blaze
PATIENTLIST_URL=" "
STORE_PATH="http://blaze-secondary:8080/fhir"
fi
}

View File

@ -1,171 +0,0 @@
version: "3.7"
services:
rstudio:
container_name: bridgehead-rstudio
image: docker.verbis.dkfz.de/ccp/dktk-rstudio:latest
environment:
#DEFAULT_USER: "rstudio" # This line is kept for informational purposes
PASSWORD: "${RSTUDIO_ADMIN_PASSWORD}" # It is required, even if the authentication is disabled
DISABLE_AUTH: "true" # https://rocker-project.org/images/versioned/rstudio.html#how-to-use
HTTP_RELATIVE_PATH: "/rstudio"
ALL_PROXY: "http://forward_proxy:3128" # https://rocker-project.org/use/networking.html
labels:
- "traefik.enable=true"
- "traefik.http.routers.rstudio_ccp.rule=PathPrefix(`/rstudio`)"
- "traefik.http.services.rstudio_ccp.loadbalancer.server.port=8787"
- "traefik.http.middlewares.rstudio_ccp_strip.stripprefix.prefixes=/rstudio"
- "traefik.http.routers.rstudio_ccp.tls=true"
- "traefik.http.routers.rstudio_ccp.middlewares=oidcAuth,rstudio_ccp_strip"
networks:
- rstudio
opal:
container_name: bridgehead-opal
image: docker.verbis.dkfz.de/ccp/dktk-opal:test
labels:
- "traefik.enable=true"
- "traefik.http.routers.opal_ccp.rule=PathPrefix(`/opal`)"
- "traefik.http.services.opal_ccp.loadbalancer.server.port=8080"
- "traefik.http.routers.opal_ccp.tls=true"
links:
- opal-rserver
- opal-db
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC -Dhttps.proxyHost=forward_proxy -Dhttps.proxyPort=3128"
# OPAL_ADMINISTRATOR_USER: "administrator" # This line is kept for informational purposes
OPAL_ADMINISTRATOR_PASSWORD: "${OPAL_ADMIN_PASSWORD}"
POSTGRESDATA_HOST: "opal-db"
POSTGRESDATA_DATABASE: "opal"
POSTGRESDATA_USER: "opal"
POSTGRESDATA_PASSWORD: "${OPAL_DB_PASSWORD}"
ROCK_HOSTS: "opal-rserver:8085"
APP_URL: "https://${HOST}/opal"
APP_CONTEXT_PATH: "/opal"
OPAL_PRIVATE_KEY: "/run/secrets/opal-key.pem"
OPAL_CERTIFICATE: "/run/secrets/opal-cert.pem"
OIDC_URL: "${OIDC_URL}"
OIDC_REALM: "${OIDC_REALM}"
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
TOKEN_MANAGER_PASSWORD: "${TOKEN_MANAGER_OPAL_PASSWORD}"
EXPORTER_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
BEAM_APP_ID: token-manager.${PROXY_ID}
BEAM_SECRET: ${TOKEN_MANAGER_SECRET}
BEAM_DATASHIELD_PROXY: request-manager
volumes:
- "/var/cache/bridgehead/ccp/opal-metadata-db:/srv" # Opal metadata
secrets:
- opal-cert.pem
- opal-key.pem
opal-db:
container_name: bridgehead-opal-db
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
environment:
POSTGRES_PASSWORD: "${OPAL_DB_PASSWORD}" # Set in datashield-setup.sh
POSTGRES_USER: "opal"
POSTGRES_DB: "opal"
volumes:
- "/var/cache/bridgehead/ccp/opal-db:/var/lib/postgresql/data" # Opal project data (imported from exporter)
opal-rserver:
container_name: bridgehead-opal-rserver
image: docker.verbis.dkfz.de/ccp/dktk-rserver # datashield/rock-base + dsCCPhos
tmpfs:
- /srv
beam-connect:
image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
container_name: bridgehead-datashield-connect
environment:
PROXY_URL: "http://beam-proxy:8081"
TLS_CA_CERTIFICATES_DIR: /run/secrets
APP_ID: datashield-connect.${SITE_ID}.${BROKER_ID}
PROXY_APIKEY: ${DATASHIELD_CONNECT_SECRET}
DISCOVERY_URL: "./map/central.json"
LOCAL_TARGETS_FILE: "./map/local.json"
NO_AUTH: "true"
secrets:
- opal-cert.pem
depends_on:
- beam-proxy
volumes:
- /tmp/bridgehead/opal-map/:/map/:ro
networks:
- default
- rstudio
traefik:
labels:
- "traefik.http.middlewares.oidcAuth.forwardAuth.address=http://oauth2-proxy:4180/"
- "traefik.http.middlewares.oidcAuth.forwardAuth.trustForwardHeader=true"
- "traefik.http.middlewares.oidcAuth.forwardAuth.authResponseHeaders=X-Auth-Request-Access-Token,Authorization"
networks:
- default
- rstudio
forward_proxy:
networks:
- default
- rstudio
beam-proxy:
environment:
APP_datashield-connect_KEY: ${DATASHIELD_CONNECT_SECRET}
APP_token-manager_KEY: ${TOKEN_MANAGER_SECRET}
# TODO: Allow users of group /DataSHIELD and OIDC_USER_GROUP at the same time:
# Maybe a solution would be (https://oauth2-proxy.github.io/oauth2-proxy/configuration/oauth_provider):
# --allowed-groups=/DataSHIELD,OIDC_USER_GROUP
oauth2-proxy:
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
container_name: bridgehead-oauth2proxy
command: >-
--allowed-group=DataSHIELD
--oidc-groups-claim=${OIDC_GROUP_CLAIM}
--auth-logging=true
--whitelist-domain=${HOST}
--http-address="0.0.0.0:4180"
--reverse-proxy=true
--upstream="static://202"
--email-domain="*"
--cookie-name="_BRIDGEHEAD_oauth2"
--cookie-secret="${OAUTH2_PROXY_SECRET}"
--cookie-expire="12h"
--cookie-secure="true"
--cookie-httponly="true"
#OIDC settings
--provider="keycloak-oidc"
--provider-display-name="VerbIS Login"
--client-id="${OIDC_PRIVATE_CLIENT_ID}"
--client-secret="${OIDC_CLIENT_SECRET}"
--redirect-url="https://${HOST}${OAUTH2_CALLBACK}"
--oidc-issuer-url="${OIDC_ISSUER_URL}"
--scope="openid email profile"
--code-challenge-method="S256"
--skip-provider-button=true
#X-Forwarded-Header settings - true/false depending on your needs
--pass-basic-auth=true
--pass-user-headers=false
--pass-access-token=false
labels:
- "traefik.enable=true"
- "traefik.http.routers.oauth2_proxy.rule=PathPrefix(`/oauth2`)"
- "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
- "traefik.http.routers.oauth2_proxy.tls=true"
environment:
http_proxy: "http://forward_proxy:3128"
https_proxy: "http://forward_proxy:3128"
depends_on:
forward_proxy:
condition: service_healthy
secrets:
opal-cert.pem:
file: /tmp/bridgehead/opal-cert.pem
opal-key.pem:
file: /tmp/bridgehead/opal-key.pem
networks:
rstudio:

View File

@ -1,157 +0,0 @@
<template id="opal-ccp" source-id="blaze-store" opal-project="ccp-demo" target-id="opal" >
<container csv-filename="Patient-${TIMESTAMP}.csv" opal-table="patient" opal-entity-type="Patient">
<attribute csv-column="patient-id" opal-value-type="text" primary-key="true" val-fhir-path="Patient.id.value" anonym="Pat" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="dktk-id-global" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Global').value.value"/>
<attribute csv-column="dktk-id-lokal" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Lokal').value.value" />
<attribute csv-column="geburtsdatum" opal-value-type="date" val-fhir-path="Patient.birthDate.value"/>
<attribute csv-column="geschlecht" opal-value-type="text" val-fhir-path="Patient.gender.value" />
<attribute csv-column="datum_des_letztbekannten_vitalstatus" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '75186-7').effective.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
<attribute csv-column="vitalstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '75186-7').value.coding.code.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
<!--fehlt in ADT2FHIR--><attribute csv-column="tod_tumorbedingt" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://fhir.de/CodeSystem/bfarm/icd-10-gm').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
<!--fehlt in ADT2FHIR--><attribute csv-column="todesursachen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/JNUCS').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
</container>
<container csv-filename="Diagnosis-${TIMESTAMP}.csv" opal-table="diagnosis" opal-entity-type="Diagnosis">
<attribute csv-column="diagnosis-id" primary-key="true" opal-value-type="text" val-fhir-path="Condition.id.value" anonym="Dia" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Condition.subject.reference.value" anonym="Pat"/>
<attribute csv-column="primaerdiagnose" opal-value-type="text" val-fhir-path="Condition.code.coding.code.value"/>
<attribute csv-column="tumor_diagnosedatum" opal-value-type="date" val-fhir-path="Condition.onset.value"/>
<attribute csv-column="primaertumor_diagnosetext" opal-value-type="text" val-fhir-path="Condition.code.text.value"/>
<attribute csv-column="version_des_icd-10_katalogs" opal-value-type="integer" val-fhir-path="Condition.code.coding.version.value"/>
<attribute csv-column="lokalisation" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').code.value"/>
<attribute csv-column="icd-o_katalog_topographie_version" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').version.value"/>
<attribute csv-column="seitenlokalisation_nach_adt-gekid" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/SeitenlokalisationCS').code.value"/>
</container>
<container csv-filename="Progress-${TIMESTAMP}.csv" opal-table="progress" opal-entity-type="Progress">
<!--it would be better to generate an ID, instead of extracting the ClinicalImpression id-->
<attribute csv-column="progress-id" primary-key="true" opal-value-type="text" val-fhir-path="ClinicalImpression.id.value" anonym="Pro" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="ClinicalImpression.problem.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="ClinicalImpression.subject.reference.value" anonym="Pat" />
<attribute csv-column="untersuchungs-_befunddatum_im_verlauf" opal-value-type="date" val-fhir-path="ClinicalImpression.effective.value" />
<!-- just for evaluation: redundant to Untersuchungs-, Befunddatum im Verlauf-->
<attribute csv-column="datum_lokales_oder_regionaeres_rezidiv" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').effective.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
<attribute csv-column="gesamtbeurteilung_tumorstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21976-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
<attribute csv-column="lokales_oder_regionaeres_rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
<attribute csv-column="lymphknoten-rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4370-8').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
<attribute csv-column="fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4226-2').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
</container>
<container csv-filename="Histology-${TIMESTAMP}.csv" opal-table="histology" opal-entity-type="Histology" >
<attribute csv-column="histology-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').id" anonym="His" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').subject.reference.value" anonym="Pat" />
<attribute csv-column="histologie_datum" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '59847-4').effective.value"/>
<attribute csv-column="icd-o_katalog_morphologie_version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.version.value" />
<attribute csv-column="morphologie" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.code.value"/>
<attribute csv-column="morphologie-freitext" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.text.value"/>
<attribute csv-column="grading" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59542-1').value.coding.code.value" join-fhir-path="Observation.where(code.coding.code = '59847-4').hasMember.reference.value"/>
</container>
<container csv-filename="Metastasis-${TIMESTAMP}.csv" opal-table="metastasis" opal-entity-type="Metastasis" >
<attribute csv-column="metastasis-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').id" anonym="Met" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_fernmetastasen" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21907-1').effective.value"/>
<attribute csv-column="fernmetastasen_vorhanden" opal-value-type="boolean" val-fhir-path="Observation.where(code.coding.code = '21907-1').value.coding.code.value"/>
<attribute csv-column="lokalisation_fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').bodySite.coding.code.value"/>
</container>
<container csv-filename="TNM-${TIMESTAMP}.csv" opal-table="tnm" opal-entity-type="TNM">
<attribute csv-column="tnm-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').id" anonym="TNM" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_der_tnm_dokumentation_datum_befund" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').effective.value"/>
<attribute csv-column="uicc_stadium" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.code.value"/>
<attribute csv-column="tnm-t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').value.coding.code.value"/>
<attribute csv-column="tnm-n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').value.coding.code.value"/>
<attribute csv-column="tnm-m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="tnm-y-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '59479-6' or code.coding.code = '59479-6').value.coding.code.value"/>
<attribute csv-column="tnm-r-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21983-2' or code.coding.code = '21983-2').value.coding.code.value"/>
<attribute csv-column="tnm-m-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '42030-7' or code.coding.code = '42030-7').value.coding.code.value"/>
<!--nur bei UICC, nicht in ADT2FHIR--><attribute csv-column="tnm-version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.version.value"/>
</container>
<container csv-filename="System-Therapy-${TIMESTAMP}.csv" opal-table="system-therapy" opal-entity-type="SystemTherapy">
<attribute csv-column="system-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="MedicationStatement.id" anonym="Sys" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="MedicationStatement.reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="MedicationStatement.subject.reference.value" anonym="Pat" />
<attribute csv-column="systemische_therapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
<attribute csv-column="intention_chemotherapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value"/>
<attribute csv-column="therapieart" opal-value-type="text" val-fhir-path="MedicationStatement.category.coding.code.value"/>
<attribute csv-column="systemische_therapie_beginn" opal-value-type="date" val-fhir-path="MedicationStatement.effective.start.value"/>
<attribute csv-column="systemische_therapie_ende" opal-value-type="date" val-fhir-path="MedicationStatement.effective.end.value"/>
<attribute csv-column="systemische_therapie_protokoll" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SystemischeTherapieProtokoll').value.text.value"/>
<attribute csv-column="systemische_therapie_substanzen" opal-value-type="text" val-fhir-path="MedicationStatement.medication.text.value"/>
<attribute csv-column="chemotherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'CH').exists().value" />
<attribute csv-column="hormontherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'HO').exists().value" />
<attribute csv-column="immuntherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'IM').exists().value" />
<attribute csv-column="knochenmarktransplantation" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'KM').exists().value" />
<attribute csv-column="abwartende_strategie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'WS').exists().value" />
</container>
<container csv-filename="Surgery-${TIMESTAMP}.csv" opal-table="surgery" opal-entity-type="Surgery">
<attribute csv-column="surgery-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').id" anonym="Sur" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').subject.reference.value" anonym="Pat" />
<attribute csv-column="ops-code" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').code.coding.code.value"/>
<attribute csv-column="datum_der_op" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'OP').performed.value"/>
<attribute csv-column="intention_op" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-OPIntention').value.coding.code.value"/>
<attribute csv-column="lokale_beurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/LokaleBeurteilungResidualstatusCS').code.value" />
<attribute csv-column="gesamtbeurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/GesamtbeurteilungResidualstatusCS').code.value" />
</container>
<container csv-filename="Radiation-Therapy-${TIMESTAMP}.csv" opal-table="radiation-therapy" opal-entity-type="RadiationTherapy">
<attribute csv-column="radiation-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').id" anonym="Rad" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').subject.reference.value" anonym="Pat" />
<attribute csv-column="strahlentherapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
<attribute csv-column="intention_strahlentherapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value" />
<attribute csv-column="strahlentherapie_beginn" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.start.value"/>
<attribute csv-column="strahlentherapie_ende" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.end.value"/>
</container>
<container csv-filename="Molecular-Marker-${TIMESTAMP}.csv" opal-table="molecular-marker" opal-entity-type="MolecularMarker">
<attribute csv-column="mol-marker-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').id" anonym="Mol" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').focus.reference.value" anonym="Dia" />
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_der_datenerhebung" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '69548-6').effective.value"/>
<attribute csv-column="marker" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').component.value.coding.code.value"/>
<attribute csv-column="status_des_molekularen_markers" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.coding.code.value" />
<attribute csv-column="zusaetzliche_alternative_dokumentation" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.text.value"/>
</container>
<container csv-filename="Sample-${TIMESTAMP}.csv" opal-table="sample" opal-entity-type="Sample">
<attribute csv-column="sample-id" primary-key="true" opal-value-type="text" val-fhir-path="Specimen.id" anonym="Sam" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Specimen.subject.reference.value" anonym="Pat" />
<attribute csv-column="entnahmedatum" opal-value-type="date" val-fhir-path="Specimen.collection.collectedDateTime.value"/>
<attribute csv-column="probenart" opal-value-type="text" val-fhir-path="Specimen.type.coding.code.value"/>
<attribute csv-column="status" opal-value-type="text" val-fhir-path="Specimen.status.code.value"/>
<attribute csv-column="projekt" opal-value-type="text" val-fhir-path="Specimen.identifier.system.value"/>
<!-- @TODO: it is still necessary to clarify whether it would not be better to take the quantity of collection.quantity -->
<attribute csv-column="menge" opal-value-type="integer" val-fhir-path="Specimen.container.specimenQuantity.value.value"/>
<attribute csv-column="einheit" opal-value-type="text" val-fhir-path="Specimen.container.specimenQuantity.unit.value"/>
<attribute csv-column="aliquot" opal-value-type="text" val-fhir-path="Specimen.parent.reference.exists().value" />
</container>
<fhir-rev-include>Observation:patient</fhir-rev-include>
<fhir-rev-include>Condition:patient</fhir-rev-include>
<fhir-rev-include>ClinicalImpression:patient</fhir-rev-include>
<fhir-rev-include>MedicationStatement:patient</fhir-rev-include>
<fhir-rev-include>Procedure:patient</fhir-rev-include>
<fhir-rev-include>Specimen:patient</fhir-rev-include>
</template>

View File

@ -1,44 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_DATASHIELD" == true ]; then
# HACK: This only works because exporter-setup.sh and teiler-setup.sh are sourced after datashield-setup.sh
if [ -z "${ENABLE_EXPORTER}" ] || [ "${ENABLE_EXPORTER}" != "true" ]; then
log WARN "The ENABLE_EXPORTER variable is either not set or not set to 'true'."
fi
OAUTH2_CALLBACK=/oauth2/callback
OAUTH2_PROXY_SECRET="$(echo \"This is a salt string to generate one consistent encryption key for the oauth2_proxy. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 32)"
add_private_oidc_redirect_url "${OAUTH2_CALLBACK}"
log INFO "DataSHIELD setup detected -- will start DataSHIELD services."
OVERRIDE+=" -f ./$PROJECT/modules/datashield-compose.yml"
EXPORTER_OPAL_PASSWORD="$(generate_password \"exporter in Opal\")"
TOKEN_MANAGER_OPAL_PASSWORD="$(generate_password \"Token Manager in Opal\")"
OPAL_DB_PASSWORD="$(echo \"Opal DB\" | generate_simple_password)"
OPAL_ADMIN_PASSWORD="$(generate_password \"admin password for Opal\")"
RSTUDIO_ADMIN_PASSWORD="$(generate_password \"admin password for R-Studio\")"
DATASHIELD_CONNECT_SECRET="$(echo \"DataShield Connect\" | generate_simple_password)"
TOKEN_MANAGER_SECRET="$(echo \"Token Manager\" | generate_simple_password)"
if [ ! -e /tmp/bridgehead/opal-cert.pem ]; then
mkdir -p /tmp/bridgehead/
openssl req -x509 -newkey rsa:4096 -nodes -keyout /tmp/bridgehead/opal-key.pem -out /tmp/bridgehead/opal-cert.pem -days 3650 -subj "/CN=opal/C=DE"
fi
mkdir -p /tmp/bridgehead/opal-map
sites="$(cat ./$PROJECT/modules/datashield-sites.json)"
echo "$sites" | docker_jq -n --args '{"sites": input | map({
"name": .,
"id": .,
"virtualhost": "\(.):443",
"beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
})}' $sites >/tmp/bridgehead/opal-map/central.json
echo "$sites" | docker_jq -n --args '[{
"external": "'"$SITE_ID"':443",
"internal": "opal:8443",
"allowed": input | map("\(.).'"$BROKER_ID"'")
}]' >/tmp/bridgehead/opal-map/local.json
if [ "$USER" == "root" ]; then
chown -R bridgehead:docker /tmp/bridgehead
chmod g+wr /tmp/bridgehead/opal-map/*
chmod g+r /tmp/bridgehead/opal-key.pem
fi
add_private_oidc_redirect_url "/opal/*"
fi

View File

@ -1,15 +0,0 @@
[
"berlin",
"muenchen-lmu",
"dresden",
"freiburg",
"muenchen-tum",
"tuebingen",
"mainz",
"frankfurt",
"essen",
"dktk-datashield-test",
"dktk-test",
"mannheim",
"central-ds-orchestrator"
]

View File

@ -1,28 +0,0 @@
# DataSHIELD
This module constitutes the infrastructure to run DataSHIELD within the bridgehead.
For more information about DataSHIELD, please visit https://www.datashield.org/
## R-Studio
To connect to the different bridgeheads of the CCP through DataSHIELD, you can use your own R-Studio environment.
However, the R-Studio integrated within the bridgehead already has the DataSHIELD libraries installed.
This can save you some time on the extra configuration of your own R-Studio environment.
## Opal
This is the core of DataSHIELD. It is made up of Opal, a Postgres database and an R-server.
For more information about Opal, please visit https://opaldoc.obiba.org
### Opal
Opal is OBiBa's core database application for biobanks.
### Opal-DB
Opal requires a database to import the data for DataSHIELD. We use a Postgres instance as the database.
The data is imported within the bridgehead through the exporter.
### Opal-R-Server
R-Server to execute R scripts in DataSHIELD.
## Beam
### Beam-Connect
Beam-Connect is used to route HTTP(S) traffic through Beam, enabling R-Studio to access data from other bridgeheads that have DataSHIELD enabled.
### Beam-Proxy
The usual beam proxy used for communication.
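A minimal sketch of enabling the module, assuming the toggle pattern used by datashield-setup.sh above and the site configuration location used elsewhere in this compare (the exact file path and the combination of values are assumptions):
```shell
# /etc/bridgehead/ccp.conf (hypothetical excerpt)
ENABLE_DATASHIELD=true
# datashield-setup.sh warns if the exporter is not also enabled
ENABLE_EXPORTER=true
```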

View File

@ -1,4 +1,4 @@
#!/bin/bash -e
#!/bin/bash
if [ -n "${ENABLE_DNPM}" ]; then
log INFO "DNPM setup detected (Beam.Connect) -- will start Beam.Connect for DNPM."

View File

@ -1,6 +0,0 @@
# Full Excel Export
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
--header 'x-api-key: ${EXPORT_API_KEY}'
# QB
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'

View File

@ -1,72 +0,0 @@
version: "3.7"
services:
exporter:
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
container_name: bridgehead-ccp-exporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
CROSS_ORIGINS: "https://${HOST}"
EXPORTER_DB_USER: "exporter"
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
HTTP_RELATIVE_PATH: "/ccp-exporter"
SITE: "${SITE_ID}"
HTTP_SERVLET_REQUEST_SCHEME: "https"
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
labels:
- "traefik.enable=true"
- "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
- "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
- "traefik.http.routers.exporter_ccp.tls=true"
- "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
- "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
volumes:
- "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"
exporter-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-exporter-db
environment:
POSTGRES_USER: "exporter"
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
POSTGRES_DB: "exporter"
volumes:
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
- "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"
reporter:
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
container_name: bridgehead-ccp-reporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
CROSS_ORIGINS: "https://${HOST}"
HTTP_RELATIVE_PATH: "/ccp-reporter"
SITE: "${SITE_ID}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
EXPORTER_URL: "http://exporter:8092"
LOG_FHIR_VALIDATION: "false"
HTTP_SERVLET_REQUEST_SCHEME: "https"
# In this initial development state of the bridgehead, we are trying to have as few volumes as possible.
# However, in the first executions in the CCP sites, this volume seems to be very important. A report is
# a process that can take several hours, because it depends on the exporter.
# There is a risk that the bridgehead restarts, losing the already created export.
volumes:
- "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
labels:
- "traefik.enable=true"
- "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
- "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
- "traefik.http.routers.reporter_ccp.tls=true"
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
focus:
environment:
EXPORTER_URL: "http://exporter:8092"
AUTH_HEADER: "${EXPORTER_API_KEY}"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_EXPORTER" == true ]; then
log INFO "Exporter setup detected -- will start Exporter service."
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

View File

@ -1,15 +0,0 @@
# Exporter and Reporter
## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead in a set of tables.
It can accept different output formats such as CSV, Excel, JSON or XML. It can also export data into Opal.
## Exporter-DB
It is a database that stores queries for execution by the exporter.
The exporter also manages the different executions of the same query through this database.
## Reporter
This component is a plugin of the exporter that allows you to create more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy, Thymeleaf, ...
It is well suited to generating documents such as our traditional CCP quality report.
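As a hedged illustration, the curl call shown earlier in this compare can be varied to request another output format (whether `JSON` is the exact accepted parameter value is an assumption):
```shell
# Same request as the Excel export shown above, but asking for JSON output (assumed parameter value)
curl --location --request POST "https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=JSON" \
  --header "x-api-key: ${EXPORT_API_KEY}"
```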

View File

@ -1,29 +0,0 @@
version: "3.7"
services:
fhir2sql:
depends_on:
- "dashboard-db"
- "blaze"
image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
container_name: bridgehead-ccp-dashboard-fhir2sql
environment:
BLAZE_BASE_URL: "http://bridgehead-ccp-blaze:8080"
PG_HOST: "dashboard-db"
PG_USERNAME: "dashboard"
PG_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
PG_DBNAME: "dashboard"
dashboard-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-dashboard-db
environment:
POSTGRES_USER: "dashboard"
POSTGRES_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
POSTGRES_DB: "dashboard"
volumes:
- "/var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data"
focus:
environment:
POSTGRES_CONNECTION_STRING: "postgresql://dashboard:${DASHBOARD_DB_PASSWORD}@dashboard-db/dashboard"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_FHIR2SQL" == true ]; then
log INFO "Dashboard setup detected -- will start Dashboard backend and FHIR2SQL service."
OVERRIDE+=" -f ./$PROJECT/modules/fhir2sql-compose.yml"
DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')"
FOCUS_ENDPOINT_TYPE="blaze-and-sql"
fi

View File

@ -1,36 +0,0 @@
# fhir2sql
fhir2sql connects to Blaze, retrieves data, and syncs it with a PostgreSQL database. The application is designed to run continuously, syncing data at regular intervals.
The Dashboard module is an optional component of the Bridgehead CCP setup. When enabled, it starts two Docker services: **fhir2sql** and **dashboard-db**. Data held in PostgreSQL is only stored temporarily; Blaze is considered the 'leading system' or 'source of truth'.
## Services
### fhir2sql
* Image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
* Container name: bridgehead-ccp-dashboard-fhir2sql
* Depends on: dashboard-db
* Environment variables:
  - BLAZE_BASE_URL: The base URL of the Blaze FHIR server (set to http://bridgehead-ccp-blaze:8080)
- PG_HOST: The hostname of the PostgreSQL database (set to dashboard-db)
- PG_USERNAME: The username for the PostgreSQL database (set to dashboard)
- PG_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
- PG_DBNAME: The name of the PostgreSQL database (set to dashboard)
### dashboard-db
* Image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
* Container name: bridgehead-ccp-dashboard-db
* Environment variables:
- POSTGRES_USER: The username for the PostgreSQL database (set to dashboard)
- POSTGRES_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
- POSTGRES_DB: The name of the PostgreSQL database (set to dashboard)
* Volumes:
- /var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data
The volume used by dashboard-db can be removed safely; the database will be restored to a working state by re-importing the data from Blaze.
### Environment Variables
* DASHBOARD_DB_PASSWORD: A password for the PostgreSQL database, generated deterministically from a salt string and the site's private key (see generate_simple_password).
* POSTGRES_TAG: The tag of the PostgreSQL image to use (not set in this module, but required by the dashboard-db service).
### Setup
To enable the Dashboard module, set the ENABLE_FHIR2SQL environment variable to true. The dashboard-setup.sh script will then start the fhir2sql and dashboard-db services, using the environment variables and volumes defined above.
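As a minimal sketch (assuming the standard CCP configuration layout; the actual file at a site contains further settings), the module is switched on in the central configuration file:
# /etc/bridgehead/ccp.conf (excerpt)
ENABLE_FHIR2SQL=true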

View File

@ -1,5 +1,4 @@
version: "3.7"
services:
id-manager:
image: docker.verbis.dkfz.de/bridgehead/magicpl
@ -14,30 +13,21 @@ services:
MAGICPL_CONNECTOR_APIKEY: ${IDMANAGER_READ_APIKEY}
MAGICPL_CENTRAL_PATIENTLIST_APIKEY: ${IDMANAGER_CENTRAL_PATIENTLIST_APIKEY}
MAGICPL_CONTROLNUMBERGENERATOR_APIKEY: ${IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY}
MAGICPL_OIDC_CLIENT_ID: ${IDMANAGER_AUTH_CLIENT_ID}
MAGICPL_OIDC_CLIENT_SECRET: ${IDMANAGER_AUTH_CLIENT_SECRET}
depends_on:
- patientlist
- traefik-forward-auth
labels:
- "traefik.enable=true"
# Router with Authentication
- "traefik.http.routers.id-manager.rule=PathPrefix(`/id-manager`)"
- "traefik.http.services.id-manager.loadbalancer.server.port=8080"
- "traefik.http.routers.id-manager.tls=true"
- "traefik.http.routers.id-manager.middlewares=traefik-forward-auth-idm"
- "traefik.http.routers.id-manager.service=id-manager-service"
# Router without Authentication
- "traefik.http.routers.id-manager-compatibility.rule=PathPrefix(`/id-manager/paths/translator/getIds`)"
- "traefik.http.routers.id-manager-compatibility.tls=true"
- "traefik.http.routers.id-manager-compatibility.service=id-manager-service"
# Definition of Service
- "traefik.http.services.id-manager-service.loadbalancer.server.port=8080"
- "traefik.http.services.id-manager-service.loadbalancer.server.scheme=http"
patientlist:
image: docker.verbis.dkfz.de/bridgehead/mainzelliste
container_name: bridgehead-patientlist
environment:
- TOMCAT_REVERSEPROXY_FQDN=${HOST}
- TOMCAT_REVERSEPROXY_SSL=true
- ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
- ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
- ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
@ -53,7 +43,7 @@ services:
- patientlist-db
patientlist-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
image: docker.verbis.dkfz.de/cache/postgres:15.6-alpine
container_name: bridgehead-patientlist-db
environment:
POSTGRES_USER: "mainzelliste"
@ -64,49 +54,5 @@ services:
# NOTE: Add backups here. This is only imported if /var/lib/bridgehead/data/patientlist/ is empty!!!
- "/tmp/bridgehead/patientlist/:/docker-entrypoint-initdb.d/"
traefik-forward-auth:
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
environment:
- http_proxy=http://forward_proxy:3128
- https_proxy=http://forward_proxy:3128
- OAUTH2_PROXY_PROVIDER=oidc
- OAUTH2_PROXY_SKIP_PROVIDER_BUTTON=true
- OAUTH2_PROXY_OIDC_ISSUER_URL=https://login.verbis.dkfz.de/realms/master
- OAUTH2_PROXY_CLIENT_ID=bridgehead-${SITE_ID}
- OAUTH2_PROXY_CLIENT_SECRET=${IDMANAGER_AUTH_CLIENT_SECRET}
- OAUTH2_PROXY_COOKIE_SECRET=${IDMANAGER_AUTH_COOKIE_SECRET}
- OAUTH2_PROXY_COOKIE_NAME=_BRIDGEHEAD_oauth2_idm
- OAUTH2_PROXY_COOKIE_DOMAINS=.${HOST}
- OAUTH2_PROXY_HTTP_ADDRESS=:4180
- OAUTH2_PROXY_REVERSE_PROXY=true
- OAUTH2_PROXY_WHITELIST_DOMAINS=.${HOST}
- OAUTH2_PROXY_UPSTREAMS=static://202
- OAUTH2_PROXY_EMAIL_DOMAINS=*
- OAUTH2_PROXY_SCOPE=openid profile email
# Pass Authorization Header and some user information to backend services
- OAUTH2_PROXY_SET_AUTHORIZATION_HEADER=true
- OAUTH2_PROXY_SET_XAUTHREQUEST=true
# Keycloak has an expiration time of 60s therefore oauth2-proxy needs to refresh after that
- OAUTH2_PROXY_COOKIE_REFRESH=60s
- OAUTH2_PROXY_ALLOWED_GROUPS=DKTK-CCP-PPSN
- OAUTH2_PROXY_PROXY_PREFIX=/oauth2-idm
labels:
- "traefik.enable=true"
- "traefik.http.services.traefik-forward-auth.loadbalancer.server.port=4180"
- "traefik.http.routers.traefik-forward-auth.rule=Host(`${HOST}`) && PathPrefix(`/oauth2-idm`)"
- "traefik.http.routers.traefik-forward-auth.tls=true"
- "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.address=http://traefik-forward-auth:4180"
- "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.authResponseHeaders=Authorization"
depends_on:
forward_proxy:
condition: service_healthy
ccp-patient-project-identificator:
image: samply/ccp-patient-project-identificator
container_name: bridgehead-ccp-patient-project-identificator
environment:
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SITE_NAME: ${SITE_NAME}
volumes:
patientlist-db-data:

View File

@ -1,9 +1,9 @@
#!/bin/bash -e
#!/bin/bash
function idManagementSetup() {
if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log INFO "id-management setup detected -- will start id-management (mainzelliste & magicpl)."
OVERRIDE+=" -f ./ccp/modules/id-management-compose.yml"
OVERRIDE+=" -f ./$PROJECT/modules/id-management-compose.yml"
# Auto Generate local Passwords
PATIENTLIST_POSTGRES_PASSWORD="$(echo \"id-management-module-db-password-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"

View File

@ -2,7 +2,7 @@ version: "3.7"
services:
mtba:
image: docker.verbis.dkfz.de/cache/samply/mtba:develop
image: docker.verbis.dkfz.de/cache/samply/mtba:1.0.0
container_name: bridgehead-mtba
environment:
BLAZE_STORE_URL: http://blaze:8080
@ -11,30 +11,22 @@ services:
ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
ID_MANAGER_URL: http://id-manager:8080/id-manager
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER:-FIRST_NAME}
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER:-LAST_NAME}
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER:-GENDER}
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER:-BIRTHDAY}
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER}
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER}
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER}
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER}
CBIOPORTAL_URL: http://cbioportal:8080
FILE_CHARSET: ${MTBA_FILE_CHARSET:-UTF-8}
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE:-LF}
CSV_DELIMITER: ${MTBA_CSV_DELIMITER:-TAB}
HTTP_RELATIVE_PATH: "/mtba"
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
OIDC_REALM: "${OIDC_REALM}"
OIDC_URL: "${OIDC_URL}"
FILE_CHARSET: ${MTBA_FILE_CHARSET}
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE}
CSV_DELIMITER: ${MTBA_CSV_DELIMITER}
labels:
- "traefik.enable=true"
- "traefik.http.routers.mtba_ccp.rule=PathPrefix(`/mtba`)"
- "traefik.http.services.mtba_ccp.loadbalancer.server.port=8480"
- "traefik.http.routers.mtba_ccp.tls=true"
- "traefik.http.routers.mtba.rule=PathPrefix(`/`)"
- "traefik.http.services.mtba.loadbalancer.server.port=80"
- "traefik.http.routers.mtba.tls=true"
volumes:
- /var/cache/bridgehead/ccp/mtba/input:/app/input
- /var/cache/bridgehead/ccp/mtba/persist:/app/persist
- /tmp/bridgehead/mtba/input:/app/input
- /tmp/bridgehead/mtba/persist:/app/persist
# TODO: Include CBioPortal in Deployment ...
# NOTE: CBioPortal can't load data while the system is running, so after importing data the bridgehead needs to be restarted!

View File

@ -1,12 +1,12 @@
#!/bin/bash -e
#!/bin/bash
function mtbaSetup() {
if [ -n "$ENABLE_MTBA" ];then
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
exit 1;
fi
OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
add_private_oidc_redirect_url "/mtba/*"
fi
}
}

View File

@ -1,6 +0,0 @@
# Molecular Tumor Board Alliance (MTBA)
In this module, the genetic data to be imported is placed in a directory (/tmp/bridgehead/mtba/input). A process
regularly checks whether there are files in this directory. The files are pseudonymized when IDAT is provided, combined
with clinical data from Blaze, and imported into cBioPortal. In addition, these files are also imported into Blaze.
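As a minimal sketch (the file names below are purely hypothetical), providing data to the module only means placing the files into the watched directory:
# Copy a delivery of genetic data together with the patient CSV into the watched input directory
cp /path/to/delivery/mutations.maf /path/to/delivery/patients.csv /tmp/bridgehead/mtba/input/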

View File

@ -1,5 +1,4 @@
version: "3.7"
volumes:
nngm-rest:
@ -22,6 +21,9 @@ services:
- "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
volumes:
- nngm-rest:/var/log
traefik:
labels:
- "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"

View File

@ -1,4 +1,4 @@
#!/bin/bash -e
#!/bin/bash
if [ -n "$NNGM_CTS_APIKEY" ]; then
log INFO "nNGM setup detected -- will start nNGM Connector."

View File

@ -1,20 +0,0 @@
version: "3.7"
services:
obds2fhir-rest:
container_name: bridgehead-obds2fhir-rest
image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
restart: always
labels:
- "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
- "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
- "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
- "traefik.http.routers.obds2fhir-rest.tls=true"
- "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"

View File

@ -1,13 +0,0 @@
#!/bin/bash
function obds2fhirRestSetup() {
if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
PATIENTLIST_URL=" "
fi
OVERRIDE+=" -f ./ccp/modules/obds2fhir-rest-compose.yml"
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
fi
}

View File

@ -1,81 +0,0 @@
version: "3.7"
services:
teiler-orchestrator:
image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
container_name: bridgehead-teiler-orchestrator
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
- "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
- "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
- "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
- "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
environment:
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
HTTP_RELATIVE_PATH: "/ccp-teiler"
teiler-dashboard:
image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
container_name: bridgehead-teiler-dashboard
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
- "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
- "traefik.http.routers.teiler_dashboard_ccp.tls=true"
- "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
- "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
OIDC_URL: "${OIDC_URL}"
OIDC_REALM: "${OIDC_REALM}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"
teiler-backend:
image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
container_name: bridgehead-teiler-backend
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
- "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
- "traefik.http.routers.teiler_backend_ccp.tls=true"
- "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
- "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
environment:
LOG_LEVEL: "INFO"
APPLICATION_PORT: "8085"
APPLICATION_ADDRESS: "${HOST}"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
CENTRAX_URL: "${CENTRAXX_URL}"
HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
secrets:
- ccp.conf
secrets:
ccp.conf:
file: /etc/bridgehead/ccp.conf

View File

@ -1,9 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_TEILER" == true ];then
log INFO "Teiler setup detected -- will start Teiler services."
OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
TEILER_DEFAULT_LANGUAGE=DE
TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
add_public_oidc_redirect_url "/ccp-teiler/*"
fi

View File

@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single-page application.
## Teiler Orchestrator
A single-spa component that consists of the root HTML page of the single-page application and the JavaScript code that
fetches the information about the microfrontends from the teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run standalone, but they need to be extended with single-spa (https://single-spa.js.org/docs/ecosystem).
Three templates (Angular, Vue, React) are also available that can be extended and used directly in the teiler.
## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The username and password are configured in ccp.local.conf.
## Teiler Backend
In this component, the microfrontends are configured.
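As a closing sketch for the whole module (assuming the standard configuration layout and the routes defined above), Teiler is enabled in the central configuration file and then served under the /ccp-teiler path:
# /etc/bridgehead/ccp.conf (excerpt)
ENABLE_TEILER=true
# After a restart, the single page application is available at https://${HOST}/ccp-teiler/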

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCgpjb250ZXh0IFBhdGllbnQKCgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX1BSSU1BUllfRElBR05PU0lTX05PX1NPUlRfU1RSQVRJRklFUgpES1RLX1NUUkFUX0FHRV9DTEFTU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCiAgREtUS19TVFJBVF9ISVNUT0xPR1lfU1RSQVRJRklFUgpES1RLX1NUUkFUX0RFRl9JTl9JTklUSUFMX1BPUFVMQVRJT04KdHJ1ZQ==
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCmNvZGVzeXN0ZW0gaWNkMTA6ICdodHRwOi8vZmhpci5kZS9Db2RlU3lzdGVtL2JmYXJtL2ljZC0xMC1nbScKY29kZXN5c3RlbSBtb3JwaDogJ3VybjpvaWQ6Mi4xNi44NDAuMS4xMTM4ODMuNi40My4xJwoKY29udGV4dCBQYXRpZW50CgoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUklNQVJZX0RJQUdOT1NJU19OT19TT1JUX1NUUkFUSUZJRVIKREtUS19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCgogIERLVEtfU1RSQVRfSElTVE9MT0dZX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDNjEnIGZyb20gaWNkMTBdKSBhbmQgCigoZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDAvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDcvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0ODAvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg1MDAvMycpKQ==

View File

@ -2,23 +2,12 @@ BROKER_ID=broker.ccp-it.dktk.dkfz.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
FOCUS_RETRY_COUNT=32
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"
OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
# Use "test-realm-01" for testing
OIDC_REALM="${OIDC_REALM:-master}"
OIDC_URL="https://login.verbis.dkfz.de"
OIDC_ISSUER_URL="${OIDC_URL}/realms/${OIDC_REALM}"
OIDC_GROUP_CLAIM="groups"
POSTGRES_TAG=15.6-alpine
for module in $PROJECT/modules/*.sh
do
@ -28,5 +17,4 @@ done
idManagementSetup
mtbaSetup
obds2fhirRestSetup
blazeSecondarySetup
adt2fhirRestSetup

View File

@ -1,66 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-dhki-blaze
environment:
BASE_URL: "http://bridgehead-dhki-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_dhki.rule=PathPrefix(`/dhki-localdatamanagement`)"
- "traefik.http.middlewares.dhki_b_strip.stripprefix.prefixes=/dhki-localdatamanagement"
- "traefik.http.services.blaze_dhki.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_dhki.middlewares=dhki_b_strip,auth"
- "traefik.http.routers.blaze_dhki.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-dhki-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
volumes:
- /srv/docker/bridgehead/dhki/queries_to_cache.conf:/queries_to_cache.conf
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/dhki/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0FHRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREhLSV9TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfRU5DT1VOVEVSX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU=
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIGljZDEwOiAnaHR0cDovL2ZoaXIuZGUvQ29kZVN5c3RlbS9iZmFybS9pY2QtMTAtZ20nCmNvZGVzeXN0ZW0gbW9ycGg6ICd1cm46b2lkOjIuMTYuODQwLjEuMTEzODgzLjYuNDMuMScKCmNvbnRleHQgUGF0aWVudAoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9BR0VfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpESEtJX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCgpESEtJX1NUUkFUX0VOQ09VTlRFUl9TVFJBVElGSUVSCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgooKChleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzM0LjknIGZyb20gaWNkMTBdKSBvcgooZXhpc3RzIFtDb25kaXRpb246IENvZGUgJ0MzNC44JyBmcm9tIGljZDEwXSkgb3IKKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDMzQuMCcgZnJvbSBpY2QxMF0pIG9yCihleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzM0LjInIGZyb20gaWNkMTBdKSBvcgooZXhpc3RzIFtDb25kaXRpb246IENvZGUgJ0MzNC4xJyBmcm9tIGljZDEwXSkgb3IKKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDMzQuMycgZnJvbSBpY2QxMF0pKSBhbmQKKChleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQxLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDMvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0Ny8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MjUwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgyNTEvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODI1Mi8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MjUzLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgyNTUvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODI2MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MzEwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgzMzMvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODQ3MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NDgwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0OTAvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODU1MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5
jb2RlIGNvbnRhaW5zICc4MDUyLzMnKSkp

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUSWUPebUMNfJvPKMjdgX+WiH+OXgwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTA1MDg1NTM4WhcNMzQw
MTAyMDg1NjA4WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL/nvo9Bn1/6Z/K4BKoLM6/mVziM4cmXTVx4npVz
pnptwPPFU4rz47akRZ6ZMD5MO0bsyvaxG1nwVrW3aAGC42JIGTdZHKwMKrd35sxw
k3YlGJagGUs+bKHUCL55OcSmyDWlh/UhA8+eeJWjOt9u0nYXv+vi+N4JSHA0oC9D
bTF1v+7blrTQagf7PTPSF3pe22iXOjJYdOkZMWoMoNAjn6F958fkLNLY3csOZwvP
/3eyNNawyAEPWeIm33Zk630NS8YHggz6WCqwXvuaKb6910mRP8jgauaYsqgsOyDt
pbWuvk//aZWdGeN9RNsAA8eGppygiwm/m9eRC6I0shDwv6ECAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFn/dbW1J3ry
7TBzbKo3H4vJr2MiMB8GA1UdIwQYMBaAFFn/dbW1J3ry7TBzbKo3H4vJr2MiMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCa2V8B8aad
XNDS1EUIi9oMdvGvkolcdFwx9fI++qu9xSIaZs5GETHck3oYKZF0CFP5ESnKDn5w
enWgm5M0y+hVZppzB163WmET1efBXwrdyn8j4336NjX352h63JGWCaI2CfZ1qG1p
kf5W9CVXllSFaJe5r994ovgyHvK2ucWwe8l8iMJbQhH79oKi/9uJMCD6aUXnpg1K
nPHW1lsVx6foqYWijdBdtFU2i7LSH2OYo0nb1PgRnY/SABV63JHfJnqW9dZy4f7G
rpsvvrmFrKmEnCZH0n6qveY3Z5bMD94Yx0ebkCTYEqAw3pV65gwxrzBTpEg6dgF0
eG0eKFUS0REJ
-----END CERTIFICATE-----

View File

@ -1,20 +0,0 @@
BROKER_ID=broker.hector.dkfz.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
POSTGRES_TAG=15.6-alpine
for module in ccp/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
idManagementSetup
obds2fhirRestSetup

View File

@ -1,63 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-itcc-blaze
environment:
BASE_URL: "http://bridgehead-itcc-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_itcc.rule=PathPrefix(`/itcc-localdatamanagement`)"
- "traefik.http.middlewares.itcc_b_strip.stripprefix.prefixes=/itcc-localdatamanagement"
- "traefik.http.services.blaze_itcc.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_itcc.middlewares=itcc_b_strip,auth"
- "traefik.http.routers.blaze_itcc.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-itcc-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/itcc/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,33 +0,0 @@
version: "3.7"
services:
landing:
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "dktk_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=arturo.macias@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done

View File

@ -1,67 +0,0 @@
version: "3.7"
services:
landing:
deploy:
replicas: 0 #deactivate landing page
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-kr-blaze
environment:
BASE_URL: "http://bridgehead-kr-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_kr.rule=PathPrefix(`/kr-localdatamanagement`)"
- "traefik.http.middlewares.kr_b_strip.stripprefix.prefixes=/kr-localdatamanagement"
- "traefik.http.services.blaze_kr.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_kr.middlewares=kr_b_strip,auth"
- "traefik.http.routers.blaze_kr.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-kr-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/kr/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,6 +0,0 @@
# Full Excel Export
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
--header 'x-api-key: ${EXPORTER_API_KEY}'
# QB
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'

View File

@ -1,67 +0,0 @@
version: "3.7"
services:
exporter:
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
container_name: bridgehead-ccp-exporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
CROSS_ORIGINS: "https://${HOST}"
EXPORTER_DB_USER: "exporter"
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
HTTP_RELATIVE_PATH: "/ccp-exporter"
SITE: "${SITE_ID}"
HTTP_SERVLET_REQUEST_SCHEME: "https"
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
labels:
- "traefik.enable=true"
- "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
- "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
- "traefik.http.routers.exporter_ccp.tls=true"
- "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
- "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
volumes:
- "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"
exporter-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-exporter-db
environment:
POSTGRES_USER: "exporter"
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
POSTGRES_DB: "exporter"
volumes:
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
- "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"
reporter:
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
container_name: bridgehead-ccp-reporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
CROSS_ORIGINS: "https://${HOST}"
HTTP_RELATIVE_PATH: "/ccp-reporter"
SITE: "${SITE_ID}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
EXPORTER_URL: "http://exporter:8092"
LOG_FHIR_VALIDATION: "false"
HTTP_SERVLET_REQUEST_SCHEME: "https"
      # In this initial development state of the bridgehead, we are trying to use as few volumes as possible.
      # However, in the first runs at the CCP sites this volume has proven to be very important: generating a report
      # is a process that can take several hours, because it depends on the exporter.
      # There is a risk that the bridgehead restarts, losing the report that has already been created.
volumes:
- "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
labels:
- "traefik.enable=true"
- "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
- "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
- "traefik.http.routers.reporter_ccp.tls=true"
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_EXPORTER" == true ]; then
log INFO "Exporter setup detected -- will start Exporter service."
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

View File

@ -1,15 +0,0 @@
# Exporter and Reporter
## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead as a set of tables.
It accepts different output formats such as CSV, Excel, JSON or XML, and it can also export data into Opal.
## Exporter-DB
The Exporter-DB is a database that stores queries for execution by the exporter.
The exporter also manages the different executions of the same query through this database.
## Reporter
This component is a plugin of the exporter that allows creating more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy, Thymeleaf, etc.
It is well suited for generating documents such as our traditional CCP quality report.

View File

@ -1,35 +0,0 @@
version: "3.7"
services:
landing:
deploy:
replicas: 1 #reactivate if lens is in use
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "kr_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,20 +0,0 @@
version: "3.7"
services:
obds2fhir-rest:
container_name: bridgehead-obds2fhir-rest
image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
restart: always
labels:
- "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
- "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
- "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
- "traefik.http.routers.obds2fhir-rest.tls=true"
- "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"

View File

@ -1,13 +0,0 @@
#!/bin/bash
function obds2fhirRestSetup() {
if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
PATIENTLIST_URL=" "
fi
OVERRIDE+=" -f ./$PROJECT/modules/obds2fhir-rest-compose.yml"
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
fi
}

View File

@ -1,81 +0,0 @@
version: "3.7"
services:
teiler-orchestrator:
image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
container_name: bridgehead-teiler-orchestrator
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
- "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
- "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
- "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
- "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
environment:
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
HTTP_RELATIVE_PATH: "/ccp-teiler"
teiler-dashboard:
image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
container_name: bridgehead-teiler-dashboard
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
- "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
- "traefik.http.routers.teiler_dashboard_ccp.tls=true"
- "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
- "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
OIDC_URL: "${OIDC_URL}"
OIDC_REALM: "${OIDC_REALM}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"
teiler-backend:
image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
container_name: bridgehead-teiler-backend
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
- "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
- "traefik.http.routers.teiler_backend_ccp.tls=true"
- "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
- "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
environment:
LOG_LEVEL: "INFO"
APPLICATION_PORT: "8085"
APPLICATION_ADDRESS: "${HOST}"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
CENTRAX_URL: "${CENTRAXX_URL}"
HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
secrets:
- ccp.conf
secrets:
ccp.conf:
file: /etc/bridgehead/ccp.conf

View File

@ -1,9 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_TEILER" == true ];then
log INFO "Teiler setup detected -- will start Teiler services."
OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
TEILER_DEFAULT_LANGUAGE=DE
TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
add_public_oidc_redirect_url "/ccp-teiler/*"
fi

View File

@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single-page application.
## Teiler Orchestrator
A single-spa component that consists of the root HTML page of the single-page application and the JavaScript code that
fetches the information about the microfrontends from the teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run standalone, but they need to be extended with single-spa (https://single-spa.js.org/docs/ecosystem).
Three templates (Angular, Vue, React) are also available that can be extended and used directly in the teiler.
## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The username and password are configured in ccp.local.conf.
## Teiler Backend
In this component, the microfrontends are configured.

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,16 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=arturo.macias@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
obds2fhirRestSetup

View File

@ -53,8 +53,8 @@ checkOwner(){
}
printUsage() {
echo "Usage: bridgehead start|stop|logs|docker-logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
echo "PROJECTNAME should be one of ccp|bbmri|cce|itcc|kr|dhki"
echo "Usage: bridgehead start|stop|logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
echo "PROJECTNAME should be one of ccp|bbmri"
}
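# Illustrative invocations (a sketch, not an exhaustive list):
#   sudo ./bridgehead install ccp    # one-time system preparation for the ccp project
#   sudo ./bridgehead start ccp      # start all services of the ccp bridgehead
#   ./bridgehead logs ccp            # follow the service logs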
checkRequirements() {
@ -155,30 +155,6 @@ setHostname() {
fi
}
# This function optimizes Blaze's memory usage according to the official performance tuning guide:
# https://github.com/samply/blaze/blob/master/docs/tuning-guide.md
# Short summary of the adjustments made:
# - set the Blaze memory cap to a quarter of the system memory
# - set the db block cache size to a quarter of the system memory
# - limit the resource count allowed in Blaze to 1.25M per 4 GB of available system memory
optimizeBlazeMemoryUsage() {
if [ -z "$BLAZE_MEMORY_CAP" ]; then
system_memory_in_mb=$(LC_ALL=C free -m | grep 'Mem:' | awk '{print $2}');
export BLAZE_MEMORY_CAP=$(($system_memory_in_mb/4));
fi
if [ -z "$BLAZE_RESOURCE_CACHE_CAP" ]; then
available_system_memory_chunks=$((BLAZE_MEMORY_CAP / 1000))
if [ $available_system_memory_chunks -eq 0 ]; then
log WARN "Only ${BLAZE_MEMORY_CAP} system memory available for Blaze. If your Blaze stores more than 128000 fhir ressources it will run significally slower."
export BLAZE_RESOURCE_CACHE_CAP=128000;
export BLAZE_CQL_CACHE_CAP=32;
else
export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500))
export BLAZE_CQL_CACHE_CAP=$((($system_memory_in_mb/4)/16));
fi
fi
}
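# Worked example: on a host with 16384 MB of RAM and no overrides set, this yields
# BLAZE_MEMORY_CAP=4096, BLAZE_RESOURCE_CACHE_CAP=4*312500=1250000 and BLAZE_CQL_CACHE_CAP=(16384/4)/16=256.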
# Takes 1) the backup directory path and 2) the name of the service to be backed up.
# Creates 3 backups: 1) for the past seven days, 2) for the current month and 3) for each calendar week.
createEncryptedPostgresBackup(){
@ -263,113 +239,3 @@ add_basic_auth_user() {
log DEBUG "Saving clear text credentials in $FILE. If wanted, delete them manually."
sed -i "/^$NAME/ s|$|\n# User: $USER\n# Password: $PASSWORD|" $FILE
}
OIDC_PUBLIC_REDIRECT_URLS=${OIDC_PUBLIC_REDIRECT_URLS:-""}
OIDC_PRIVATE_REDIRECT_URLS=${OIDC_PRIVATE_REDIRECT_URLS:-""}
# Add a redirect url to the public oidc client of the bridgehead
function add_public_oidc_redirect_url() {
if [[ $OIDC_PUBLIC_REDIRECT_URLS == "" ]]; then
OIDC_PUBLIC_REDIRECT_URLS+="$(generate_redirect_urls $1)"
else
OIDC_PUBLIC_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
fi
}
# Add a redirect url to the private oidc client of the bridgehead
function add_private_oidc_redirect_url() {
if [[ $OIDC_PRIVATE_REDIRECT_URLS == "" ]]; then
OIDC_PRIVATE_REDIRECT_URLS+="$(generate_redirect_urls $1)"
else
OIDC_PRIVATE_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
fi
}
function sync_secrets() {
local delimiter=$'\x1E'
local secret_sync_args=""
if [[ $OIDC_PRIVATE_REDIRECT_URLS != "" ]]; then
secret_sync_args="OIDC:OIDC_CLIENT_SECRET:private;$OIDC_PRIVATE_REDIRECT_URLS"
fi
if [[ $OIDC_PUBLIC_REDIRECT_URLS != "" ]]; then
if [[ $secret_sync_args == "" ]]; then
secret_sync_args="OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
else
secret_sync_args+="${delimiter}OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
fi
fi
if [[ $secret_sync_args == "" ]]; then
return
fi
mkdir -p /var/cache/bridgehead/secrets/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/secrets/'. Please run sudo './bridgehead install $PROJECT' again."
touch /var/cache/bridgehead/secrets/oidc
docker run --rm \
-v /var/cache/bridgehead/secrets/oidc:/usr/local/cache \
-v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
-v /srv/docker/bridgehead/$PROJECT/root.crt.pem:/run/secrets/root.crt.pem:ro \
-v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
-e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
-e NO_PROXY=localhost,127.0.0.1 \
-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
-e PROXY_ID=$PROXY_ID \
-e BROKER_URL=$BROKER_URL \
-e OIDC_PROVIDER=secret-sync-central.oidc-client-enrollment.$BROKER_ID \
-e SECRET_DEFINITIONS=$secret_sync_args \
docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
set -a # Export variables as environment variables
source /var/cache/bridgehead/secrets/*
set +a # Export variables in the regular way
}
capitalize_first_letter() {
input="$1"
capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}"
echo "$capitalized"
}
# Generate a ','-separated string of redirect urls relative to $HOST.
# $1 will be appended to each url.
# If the host looks like dev-jan.inet.dkfz-heidelberg.de, it will generate urls with both dev-jan and the original $HOST as url authorities.
function generate_redirect_urls(){
local redirect_urls="https://${HOST}$1"
local host_without_proxy="$(echo "$HOST" | cut -d '.' -f1)"
# Only append the second url if it's different and the host is not an ip address
if [[ "$HOST" != "$host_without_proxy" && ! "$HOST" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
redirect_urls+=",https://$host_without_proxy$1"
fi
echo "$redirect_urls"
}
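# Example: with HOST=dev-jan.inet.dkfz-heidelberg.de, generate_redirect_urls "/ccp-teiler/*" yields
# https://dev-jan.inet.dkfz-heidelberg.de/ccp-teiler/*,https://dev-jan/ccp-teiler/*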
# The generated password contains at least one special character, one digit and one random upper- and lower-case letter
generate_password(){
local seed_text="$1"
local seed_num=$(awk 'BEGIN{FS=""} NR==1{print $10}' /etc/bridgehead/pki/${SITE_ID}.priv.pem | od -An -tuC)
local nums="1234567890"
local n=$(echo "$seed_num" | awk '{print $1 % 10}')
local random_digit=${nums:$n:1}
local n=$(echo "$seed_num" | awk '{print $1 % 26}')
local upper="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
local lower="abcdefghijklmnopqrstuvwxyz"
local random_upper=${upper:$n:1}
local random_lower=${lower:$n:1}
local n=$(echo "$seed_num" | awk '{print $1 % 8}')
local special='@#$%^&+='
local random_special=${special:$n:1}
local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
local main_password=$(echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/\//A/g')
echo "${main_password}${random_digit}${random_upper}${random_lower}${random_special}"
}
# This password only contains alphanumeric characters
generate_simple_password(){
local seed_text="$1"
local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/[+\/]/A/g'
}
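# Example usage (as in the dashboard module setup): DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')"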
docker_jq() {
docker run --rm -i docker.verbis.dkfz.de/cache/jqlang/jq:latest "$@"
}

View File

@ -52,21 +52,6 @@ case "$PROJECT" in
bbmri)
site_configuration_repository_middle="git.verbis.dkfz.de/bbmri-bridgehead-configs/"
;;
cce)
site_configuration_repository_middle="git.verbis.dkfz.de/cce-sites/"
;;
itcc)
site_configuration_repository_middle="git.verbis.dkfz.de/itcc-sites/"
;;
dhki)
site_configuration_repository_middle="git.verbis.dkfz.de/dhki/"
;;
kr)
site_configuration_repository_middle="git.verbis.dkfz.de/krebsregister-sites/"
;;
dhki)
site_configuration_repository_middle="git.verbis.dkfz.de/dhki/"
;;
minimal)
site_configuration_repository_middle="git.verbis.dkfz.de/minimal-bridgehead-configs/"
;;
@ -104,9 +89,6 @@ elif [[ "$DEV_MODE" == "DEV" ]]; then
fi
chown -R bridgehead /etc/bridgehead /srv/docker/bridgehead
mkdir -p /tmp/bridgehead /var/cache/bridgehead
chown -R bridgehead:docker /tmp/bridgehead /var/cache/bridgehead
chmod -R g+wr /var/cache/bridgehead /tmp/bridgehead
log INFO "System preparation is completed and configuration is present."

View File

@ -3,16 +3,14 @@
source lib/functions.sh
detectCompose
CONFIG_DIR="/etc/bridgehead/"
COMPONENT_DIR="/srv/docker/bridgehead/"
if ! id "bridgehead" &>/dev/null; then
log ERROR "User bridgehead does not exist. Please run bridgehead install $PROJECT"
exit 1
fi
checkOwner "${CONFIG_DIR}" bridgehead || exit 1
checkOwner "${COMPONENT_DIR}" bridgehead || exit 1
checkOwner /srv/docker/bridgehead bridgehead || exit 1
checkOwner /etc/bridgehead bridgehead || exit 1
## Check if the user is a superuser
log INFO "Checking if all prerequisites are met ..."
@ -34,31 +32,31 @@ fi
log INFO "Checking configuration ..."
## Download submodule
if [ ! -d "${CONFIG_DIR}" ]; then
fail_and_report 1 "Please set up the config folder at ${CONFIG_DIR}. Instruction are in the readme."
if [ ! -d "/etc/bridgehead/" ]; then
fail_and_report 1 "Please set up the config folder at /etc/bridgehead. Instruction are in the readme."
fi
# TODO: Check all required variables here in a generic loop
# Check if the project env is present
if [ -d "${CONFIG_DIR}${PROJECT}.conf" ]; then
fail_and_report 1 "Project config not found. Please copy the template from ${PROJECT} and put it under ${CONFIG_DIR}${PROJECT}.conf."
if [ -d "/etc/bridgehead/${PROJECT}.conf" ]; then
fail_and_report 1 "Project config not found. Please copy the template from ${PROJECT} and put it under /etc/bridgehead-config/${PROJECT}.conf."
fi
# TODO: Make sure you're in the right directory or, even better, be independent of the working directory.
log INFO "Checking ssl cert for accessing bridgehead via https"
if [ ! -d "${CONFIG_DIR}traefik-tls" ]; then
if [ ! -d "/etc/bridgehead/traefik-tls" ]; then
log WARN "TLS certs for accessing bridgehead via https missing, we'll now create a self-signed one. Please consider getting an officially signed one (e.g. via Let's Encrypt ...) and put into /etc/bridgehead/traefik-tls"
mkdir -p /etc/bridgehead/traefik-tls
fi
if [ ! -e "${CONFIG_DIR}traefik-tls/fullchain.pem" ]; then
if [ ! -e "/etc/bridgehead/traefik-tls/fullchain.pem" ]; then
openssl req -x509 -newkey rsa:4096 -nodes -keyout /etc/bridgehead/traefik-tls/privkey.pem -out /etc/bridgehead/traefik-tls/fullchain.pem -days 3650 -subj "/CN=$HOST"
fi
if [ -e "${CONFIG_DIR}"vault.conf ]; then
if [ -e /etc/bridgehead/vault.conf ]; then
if [ "$(stat -c "%a %U" /etc/bridgehead/vault.conf)" != "600 bridgehead" ]; then
fail_and_report 1 "/etc/bridgehead/vault.conf has wrong owner/permissions. To correct this issue, run chmod 600 /etc/bridgehead/vault.conf && chown bridgehead /etc/bridgehead/vault.conf."
fi
@ -66,7 +64,7 @@ fi
log INFO "Checking network access ($BROKER_URL_FOR_PREREQ) ..."
source "${CONFIG_DIR}${PROJECT}".conf
source /etc/bridgehead/${PROJECT}.conf
source ${PROJECT}/vars
if [ "${PROJECT}" != "minimal" ]; then
@ -94,10 +92,10 @@ if [ "${PROJECT}" != "minimal" ]; then
fi
fi
checkPrivKey() {
if [ -e "${CONFIG_DIR}pki/${SITE_ID}.priv.pem" ]; then
if [ -e /etc/bridgehead/pki/${SITE_ID}.priv.pem ]; then
log INFO "Success - private key found."
else
log ERROR "Unable to find private key at ${CONFIG_DIR}pki/${SITE_ID}.priv.pem. To fix, please run\n bridgehead enroll ${PROJECT}\nand follow the instructions."
log ERROR "Unable to find private key at /etc/bridgehead/pki/${SITE_ID}.priv.pem. To fix, please run\n bridgehead enroll ${PROJECT}\nand follow the instructions."
return 1
fi
return 0
@ -109,11 +107,6 @@ else
checkPrivKey || exit 1
fi
for dir in "${CONFIG_DIR}" "${COMPONENT_DIR}"; do
log INFO "Checking branch: $(cd $dir && echo "$dir $(git branch --show-current)")"
hc_send log "Checking branch: $(cd $dir && echo "$dir $(git branch --show-current)")"
done
log INFO "Success - all prerequisites are met!"
hc_send log "Success - all prerequisites are met!"
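For reference, the vault.conf permission check shown in this file can be reproduced (and, if needed, corrected) by hand; the expected values are taken from the error message above:

stat -c "%a %U" /etc/bridgehead/vault.conf        # expected: 600 bridgehead
chmod 600 /etc/bridgehead/vault.conf && chown bridgehead /etc/bridgehead/vault.conf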

View File

@ -1,9 +1,8 @@
[Unit]
Description=Daily Updates at 6am of Bridgehead (%i)
Description=Hourly Updates of Bridgehead (%i)
[Timer]
OnCalendar=*-*-* 06:00:00
Persistent=true
OnCalendar=*-*-* *:00:00
[Install]
WantedBy=basic.target
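Both OnCalendar expressions in this diff can be sanity-checked with systemd-analyze (assuming a systemd version that ships the calendar verb); it prints the next elapse time for each schedule:

systemd-analyze calendar "*-*-* 06:00:00"
systemd-analyze calendar "*-*-* *:00:00"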

View File

@ -10,13 +10,13 @@ services:
- --providers.docker=true
- --providers.docker.exposedbydefault=false
- --providers.file.directory=/configuration/
- --api.dashboard=false
- --api.dashboard=true
- --accesslog=true
- --entrypoints.web.http.redirections.entrypoint.to=websecure
- --entrypoints.web.http.redirections.entrypoint.scheme=https
labels:
- "traefik.enable=true"
- "traefik.http.routers.dashboard.rule=PathPrefix(`/api`) || PathPrefix(`/dashboard/`)"
- "traefik.http.routers.dashboard.rule=PathPrefix(`/api`) || PathPrefix(`/dashboard`)"
- "traefik.http.routers.dashboard.entrypoints=websecure"
- "traefik.http.routers.dashboard.service=api@internal"
- "traefik.http.routers.dashboard.tls=true"
@ -42,9 +42,6 @@ services:
- /var/spool/squid
volumes:
- /etc/bridgehead/trusted-ca-certs:/docker/custom-certs/:ro
healthcheck:
# Wait 1s before marking this service healthy. Required so the oauth2-proxy can talk to the OIDC provider on startup, which fails if the forward proxy has not started yet.
test: ["CMD", "sleep", "1"]
landing:
container_name: bridgehead-landingpage
@ -58,4 +55,6 @@ services:
HOST: ${HOST}
PROJECT: ${PROJECT}
SITE_NAME: ${SITE_NAME}
ENVIRONMENT: ${ENVIRONMENT}
ENVIRONMENT: "production"
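The dashboard route only responds when --api.dashboard is enabled; note also that PathPrefix(`/dashboard/`) does not match a request for the bare /dashboard path, while PathPrefix(`/dashboard`) matches both. A minimal reachability check from the host, with HOST set to the site's hostname (illustrative; -k skips certificate verification, and the returned status code depends on the final router setup):

curl -k -s -o /dev/null -w '%{http_code}\n' "https://${HOST}/dashboard/"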

View File

@ -2,7 +2,7 @@ version: "3.7"
services:
dnpm-beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-dnpm-beam-proxy
environment:
BROKER_URL: ${DNPM_BROKER_URL}
@ -18,7 +18,7 @@ services:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro
- /etc/bridgehead/dnpm/aachen.crt.pem:/conf/root.crt.pem:ro
dnpm-beam-connect:
depends_on: [ dnpm-beam-proxy ]
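Whichever root certificate ends up mounted at /conf/root.crt.pem, it can be inspected on the host before the proxy is started; both paths below are taken from the two sides of this hunk:

openssl x509 -in /srv/docker/bridgehead/ccp/root.crt.pem -noout -subject -enddate
openssl x509 -in /etc/bridgehead/dnpm/aachen.crt.pem -noout -subject -enddate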

View File

@ -6,7 +6,7 @@ if [ -n "${ENABLE_DNPM}" ]; then
# Set variables required for Beam-Connect
DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
DNPM_BROKER_ID="broker.ccp-it.dktk.dkfz.de"
DNPM_BROKER_ID="dnpm-aachen-broker.samply.de"
DNPM_BROKER_URL="https://${DNPM_BROKER_ID}"
if [ -z ${BROKER_URL_FOR_PREREQ+x} ]; then
BROKER_URL_FOR_PREREQ=$DNPM_BROKER_URL