Compare commits

..

1 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
|  | 187b3f05b6 | fix: switch sites that are still on this branch to main | 2023-06-30 10:51:33 +02:00 |
121 changed files with 444 additions and 3833 deletions

1
.github/CODEOWNERS vendored
View File

@ -1 +0,0 @@
* @samply/bridgehead-developers

View File

@ -1,39 +0,0 @@
import os
import requests
from datetime import datetime, timedelta
# Configuration
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
REPO = 'samply/bridgehead'
HEADERS = {'Authorization': f'token {GITHUB_TOKEN}', 'Accept': 'application/vnd.github.v3+json'}
API_URL = f'https://api.github.com/repos/{REPO}/branches'
INACTIVE_DAYS = 365
CUTOFF_DATE = datetime.now() - timedelta(days=INACTIVE_DAYS)
# Fetch all branches
def get_branches():
response = requests.get(API_URL, headers=HEADERS)
response.raise_for_status()
return response.json() if response.status_code == 200 else []
# Rename inactive branches
def rename_branch(old_name, new_name):
rename_url = f'https://api.github.com/repos/{REPO}/branches/{old_name}/rename'
response = requests.post(rename_url, json={'new_name': new_name}, headers=HEADERS)
response.raise_for_status()
print(f"Renamed branch {old_name} to {new_name}" if response.status_code == 201 else f"Failed to rename {old_name}: {response.status_code}")
# Check if the branch is inactive
def is_inactive(commit_url):
last_commit_date = requests.get(commit_url, headers=HEADERS).json()['commit']['committer']['date']
return datetime.strptime(last_commit_date, '%Y-%m-%dT%H:%M:%SZ') < CUTOFF_DATE
# Log inactive branches (renaming itself is currently commented out)
def main():
for branch in get_branches():
if is_inactive(branch['commit']['url']):
#rename_branch(branch['name'], f"archived/{branch['name']}")
print(f"[LOG] Branch '{branch['name']}' is inactive and would be renamed to 'archived/{branch['name']}'")
if __name__ == "__main__":
main()

View File

@ -1,27 +0,0 @@
name: Cleanup - Rename Inactive Branches
on:
schedule:
- cron: '0 0 * * 0' # Runs every Sunday at midnight
jobs:
archive-stale-branches:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install Libraries
run: pip install requests
- name: Run Script to Rename Inactive Branches
run: |
python .github/scripts/rename_inactive_branches.py
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

5
.gitignore vendored
View File

@ -1,9 +1,6 @@
##Ignore site configuration
.gitmodules
site-config/*
.idea
## Ignore site configuration
*/docker-compose.override.yml
## MAC OS
.DS_Store

262
README.md
View File

@ -21,17 +21,10 @@ This repository is the starting point for any information and tools you will nee
- [HTTPS Access](#https-access)
- [TLS terminating proxies](#tls-terminating-proxies)
- [File structure](#file-structure)
- [BBMRI-ERIC Directory entry needed](#bbmri-eric-directory-entry-needed)
- [Directory sync tool](#directory-sync-tool)
- [Loading data](#loading-data)
- [Teiler (Frontend)](#teiler-frontend)
- [Data Exporter Service](#data-exporter-service)
- [Data Quality Report](#data-quality-report)
4. [Things you should know](#things-you-should-know)
- [Auto-Updates](#auto-updates)
- [Auto-Backups](#auto-backups)
- [Non-Linux OS](#non-linux-os)
- [FAQ](#faq)
5. [Troubleshooting](#troubleshooting)
- [Docker Daemon Proxy Configuration](#docker-daemon-proxy-configuration)
- [Monitoring](#monitoring)
@ -39,10 +32,6 @@ This repository is the starting point for any information and tools you will nee
## Requirements
The data protection officer at your site will probably want to know exactly what our software does with patient data, and you may need to get their approval before you are allowed to install a Bridgehead. To help you with this, we have provided some data protection concepts:
- [Germany](https://www.bbmri.de/biobanking/it/infrastruktur/datenschutzkonzept/)
### Hardware
Hardware requirements strongly depend on the specific use-cases of your network as well as on the data it is going to serve. Most use-cases are well-served with the following configuration:
@ -51,8 +40,6 @@ Hardware requirements strongly depend on the specific use-cases of your network
- 32 GB RAM
- 160GB Hard Drive, SSD recommended
We recommend using a dedicated VM for the Bridgehead, with no other applications running on it. While the Bridgehead can, in principle, run on a shared VM, you might run into surprising problems such as resource conflicts (e.g., two apps using tcp port 443).
### Software
We strongly recommend installing the Bridgehead under a Linux operating system (but see the section [Non-Linux OS](#non-linux-os)). You will need root (administrator) privileges on this machine in order to perform the deployment. We recommend the newest Ubuntu LTS server release.
@ -63,41 +50,16 @@ Ensure the following software (or newer) is installed:
- docker >= 20.10.1
- docker-compose >= 2.xx (`docker-compose` and `docker compose` are both supported).
- systemd
- curl
We recommend installing Docker (and Docker Compose) from the official sources as described on the [Docker website](https://docs.docker.com).
> 📝 Note for Ubuntu: Snap versions of Docker are not supported.
Note for Ubuntu: Please note that snap versions of Docker are not supported.
### Network
A Bridgehead communicates with all central components via outgoing HTTPS connections.
Since it needs to carry sensitive patient data, Bridgeheads are intended to be deployed within your institution's secure network and to behave well even in networks with strict security settings, e.g. restrictive firewall rules. The only connectivity required is outgoing HTTPS, optionally via a proxy. TLS termination is supported, too (see [below](#tls-terminating-proxies)).
Your site might require an outgoing proxy (i.e. HTTPS forward proxy) to connect to external servers; you should discuss this with your local systems administration. In that case, you will need to note down the URL of the proxy. If the proxy requires authentication, you will also need to make a note of its username and password. This information will be used later on during the installation process. TLS terminating proxies are also supported, see [here](#tls-terminating-proxies). Apart from the Bridgehead itself, you may also need to configure the proxy server in [git](https://gist.github.com/evantoli/f8c23a37eb3558ab8765) and [docker](https://docs.docker.com/network/proxy/).
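If your site does use such a proxy, the following sketch shows one way git and the Docker client could be pointed at it; the proxy URL is a placeholder, and the linked git and docker guides above remain the authoritative instructions.
```shell
# Placeholder proxy URL -- replace with the one provided by your systems administration
PROXY_URL=http://proxy.example.com:3128

# Let git pull through the proxy
git config --global http.proxy "$PROXY_URL"
git config --global https.proxy "$PROXY_URL"

# Let the Docker client pass the proxy on to the containers it starts
mkdir -p ~/.docker
cat > ~/.docker/config.json <<EOF
{
  "proxies": { "default": { "httpProxy": "$PROXY_URL", "httpsProxy": "$PROXY_URL" } }
}
EOF
```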
The following URLs need to be accessible (prefix with `https://`):
* To fetch code and configuration from git repositories
* github.com
* git.verbis.dkfz.de
* To fetch docker images
* docker.verbis.dkfz.de
* Official Docker, Inc. URLs (subject to change, see [official list](https://docs.docker.com/desktop/setup/allow-list/))
* hub.docker.com
* registry-1.docker.io
* production.cloudflare.docker.com
* To report the Bridgehead's operational status
* healthchecks.verbis.dkfz.de
* only for DKTK/CCP
* broker.ccp-it.dktk.dkfz.de
* only for BBMRI-ERIC
* broker.bbmri.samply.de
* gitlab.bbmri-eric.eu
* only for German Biobank Node
* broker.bbmri.de
> 📝 This URL list is subject to change. Instead of the individual names, we highly recommend whitelisting wildcard domains: *.dkfz.de, github.com, *.docker.com, *.docker.io, *.samply.de, *.bbmri.de.
> 📝 Ubuntu's pre-installed uncomplicated firewall (ufw) is known to conflict with Docker, more info [here](https://github.com/chaifeng/ufw-docker).
Note for Ubuntu: Please note that the uncomplicated firewall (ufw) is known to conflict with Docker [here](https://github.com/chaifeng/ufw-docker).
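To verify that the most important endpoints are reachable from your network, a quick check along these lines can help; the proxy URL is a placeholder and can be dropped entirely if no proxy is needed.
```shell
https_proxy=http://proxy.example.com:3128 curl -sI https://broker.bbmri.samply.de/v1/health
https_proxy=http://proxy.example.com:3128 curl -sI https://git.verbis.dkfz.de
```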
## Deployment
@ -130,7 +92,7 @@ Mention:
We will set the repository up for you. We will then send you:
- A Repository Short Name (RSN). Beware: this is distinct from your site name.
- Repository URL containing the access token, e.g. https://BH_Dummy:dummy_token@git.verbis.dkfz.de/<project>-bridgehead-configs/dummy.git
- Repository URL containing the access token, e.g. https://BH_Dummy:dummy_token@git.verbis.dkfz.de/bbmri-bridgehead-configs/dummy.git
During the installation, your Bridgehead will download your site's configuration from GitLab and you can review the details provided to us by email.
@ -159,7 +121,7 @@ Pay special attention to:
Clone the bridgehead repository:
```shell
sudo mkdir -p /srv/docker/
sudo git clone -b main https://github.com/samply/bridgehead.git /srv/docker/bridgehead
sudo git clone https://github.com/samply/bridgehead.git /srv/docker/bridgehead
```
Then, run the installation script:
@ -178,7 +140,7 @@ cd /srv/docker/bridgehead
sudo ./bridgehead enroll <PROJECT>
```
... and follow the instructions on the screen. Please send your default Collection ID and the display name of your site together with the certificate request when you enroll. You should then be prompted to do the next step:
... and follow the instructions on the screen. You should then be prompted to do the next step:
### Starting and stopping your Bridgehead
@ -207,7 +169,7 @@ sudo systemctl [enable|disable] bridgehead@<PROJECT>.service
After starting the Bridgehead, you can watch the initialization process with the following command:
```shell
/srv/docker/bridgehead/bridgehead logs <project> -f
journalctl -u bridgehead@bbmri -f
```
if this exits with something similar to the following:
@ -227,9 +189,8 @@ docker ps
There should be 6 - 10 Docker processes. If there are fewer, then you know that something has gone wrong. To see what is going on, run:
```shell
/srv/docker/bridgehead/bridgehead logs <Project> -f
journalctl -u bridgehead@bbmri -f
```
This translates to a journalctl command, so all the regular journalctl flags can be used.
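For example, assuming additional flags are passed straight through to journalctl as described above:
```shell
# Show only the last hour of Bridgehead logs
/srv/docker/bridgehead/bridgehead logs <project> --since "1 hour ago"
```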
Once the Bridgehead has passed these checks, take a look at the landing page:
@ -243,7 +204,7 @@ You can either do this in a browser or with curl. If you visit the URL in the br
curl -k https://localhost
```
Should the landing page not show anything, you can inspect the logs of the containers to determine what is going wrong. To do this, you can use `./bridgehead docker-logs <Project> -f` to follow the logs of the containers. This translates to a docker compose logs command, meaning all the usual docker logs flags work.
If you get errors when you do this, you need to use ```docker logs``` to examine your landing page container in order to determine what is going wrong.
If you have chosen to take part in our monitoring program (by setting the ```MONITOR_APIKEY``` variable in the configuration), you will be informed by email when problems are detected in your Bridgehead.
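As a minimal sketch, opting in might look like the following, assuming the key is set in your site configuration file; the value shown is a placeholder, the real key is provided when you join the monitoring program.
```shell
# Placeholder entry in /etc/bridgehead/<PROJECT>.conf
MONITOR_APIKEY=xxxxxxxxxxxxxxxx
```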
@ -259,8 +220,6 @@ sh bridgehead uninstall
## Site-specific configuration
[How to Change Config Access Token](docs/update-access-token.md)
### HTTPS Access
Even within your internal network, the Bridgehead enforces HTTPS for all services. During the installation, a self-signed, long-lived certificate was created for you. To increase security, you can simply replace the files under `/etc/bridgehead/traefik-tls` with ones from established certification authorities such as [Let's Encrypt](https://letsencrypt.org) or [DFN-AAI](https://www.aai.dfn.de).
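A sketch of what that replacement might look like, assuming your CA-issued certificate and key keep the same file names as the generated ones; check `/etc/bridgehead/traefik-tls` for the exact names before copying.
```shell
# Back up the generated self-signed files first
sudo cp -r /etc/bridgehead/traefik-tls /etc/bridgehead/traefik-tls.bak
# Drop in the CA-issued certificate and key, reusing the file names already present
sudo cp your-fullchain.pem /etc/bridgehead/traefik-tls/<existing-cert-file-name>
sudo cp your-key.pem /etc/bridgehead/traefik-tls/<existing-key-file-name>
```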
@ -269,21 +228,6 @@ Even within your internal network, the Bridgehead enforces HTTPS for all service
All of the Bridgehead's outgoing connections are secured by transport encryption (TLS) and a Bridgehead will refuse to connect if certificate verification fails. If your local forward proxy server performs TLS termination, please place its CA certificate in `/etc/bridgehead/trusted-ca-certs` as a `.pem` file, e.g. `/etc/bridgehead/trusted-ca-certs/mylocalca.pem`. Then, all Bridgehead components will pick up this certificate and trust it for outgoing connections.
To find the certificate file, first run the following:
```
curl -v https://broker.bbmri.samply.de/v1/health
```
In the output, look out for the line:
```
successfully set certificate verify locations:
```
A file will be mentioned here, perhaps in the directory /etc/ssl/certs. The exact location will depend on your operating system. This is the file that you need to copy.
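Once you have located your proxy's CA certificate, copying it into place could look like this; the source path is a placeholder, and the target file name follows the example given above.
```shell
sudo cp /path/to/your-proxy-ca.pem /etc/bridgehead/trusted-ca-certs/mylocalca.pem
```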
### File structure
- `/srv/docker/bridgehead` contains this git repository with the shell scripts and *project-specific configuration*. In here, all files are identical for all sites. You should not make any changes here.
@ -296,130 +240,36 @@ Here a file will be mentioned, perhaps in the directory /etc/ssl/certs. The exac
Your Bridgehead's actual data is not stored in the above directories, but in named docker volumes, see `docker volume ls` and `docker volume inspect <volume_name>`.
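For example, to locate the FHIR store's data volume; the volume name below is an assumption based on the `<PROJECT>_blaze-data` pattern used later in this guide, shown here for the bbmri project.
```shell
docker volume ls | grep blaze-data
docker volume inspect bbmri_blaze-data
```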
### BBMRI-ERIC Directory entry needed
### Directory sync
If you run a biobank, you should be listed together with your collections in the [Directory](https://directory.bbmri-eric.eu), a BBMRI-ERIC project that catalogs biobanks.
This is an optional feature for bbmri projects. It keeps the [BBMRI Directory](https://directory.bbmri-eric.eu/) up to date with your local data, e.g. the number of samples. It also updates the local FHIR store with the latest contact details etc. from the Directory. You must explicitly set your country-specific Directory URL, username and password to enable this feature.
To do this, contact the BBMRI-ERIC national node for the country where your biobank is based, see [the list of nodes](http://www.bbmri-eric.eu/national-nodes/).
Full details can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
Once you have added your biobank to the Directory, you will have received a persistent identifier (PID) for your biobank and unique identifiers (IDs) for your collections. The collection IDs are necessary for assigning biospecimens to the collections and are used later in the data flows between BBMRI-ERIC tools. In case you cannot distribute all your biospecimens within collections by assigning the collection IDs, **you should choose one of your sample collections as a default collection for your biobank**. This collection will be automatically used to label any samples that have not been assigned a collection ID in your ETL process. Make a note of this default collection ID; you will need it later on in the installation process.
### Directory sync tool
The Bridgehead's **Directory Sync** is an optional feature that keeps the BBMRI-ERIC Directory up to date with your local data, e.g. number of samples. Conversely, it can also update the local FHIR store with the latest contact details etc. from the BBMRI-ERIC Directory.
You should talk with your local data protection group regarding the information that is published by Directory sync.
To enable it, you will need to explicitly set the username and password variables for BBMRI-ERIC Directory login in the configuration file of your GitLab repository (e.g. ```bbmri.conf```). Here is an example minimal config:
To enable it, you will need to set these variables in the ```bbmri.conf``` file of your GitLab repository. Here is an example config:
```
### Directory sync service
DS_DIRECTORY_URL=https://directory.bbmri-eric.eu
DS_DIRECTORY_USER_NAME=your_directory_username
DS_DIRECTORY_USER_PASS=your_directory_password
DS_DIRECTORY_USER_PASS=qwdnqwswdvqHBVGFR9887
DS_TIMER_CRON="0 22 * * *"
```
Please contact your National Node or Directory support (directory-dev@helpdesk.bbmri-eric.eu) to obtain these credentials.
You must contact the Directory for your national node to find the URL, and to register as a user.
The following environment variables can be used from within your config file to control the behavior of Directory sync:
Additionally, you should choose when you want Directory sync to run. In the example above, this is set to happen at 10 pm every evening. You can modify this to suit your requirements. The timer specification should follow the [cron](https://crontab.guru) convention.
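For instance, a purely illustrative variation on the expression above would run the sync at 6 am on weekdays only:
```shell
DS_TIMER_CRON="0 6 * * 1-5"
```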
| Variable | Purpose | Default if not specified |
|:-----------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------|
| DS_DIRECTORY_URL | Base URL of the Directory | https://directory-backend.molgenis.net |
| DS_DIRECTORY_USER_NAME | User name for logging in to Directory **Mandatory** | |
| DS_DIRECTORY_USER_PASS | Password for logging in to Directory **Mandatory** | |
| DS_DIRECTORY_DEFAULT_COLLECTION_ID | ID of collection to be used if not in samples | |
| DS_DIRECTORY_ALLOW_STAR_MODEL | Set to 'True' to send star model info to Directory | True |
| DS_FHIR_STORE_URL | URL for FHIR store | http://bridgehead-bbmri-blaze:8080 |
| DS_TIMER_CRON | Execution interval for Directory sync, [cron](https://crontab.guru) format | 0 22 * * * |
| DS_IMPORT_BIOBANKS | Set to 'True' to import biobank metadata from Directory | True |
| DS_IMPORT_COLLECTIONS | Set to 'True' to import collection metadata from Directory | True |
Once you have finished editing the config, the Bridgehead will automatically pick up the new values and will sync data at regular intervals, at the time specified in DS_TIMER_CRON.
Once you edited the gitlab config. The bridgehead will autoupdate the config with the values and will sync the data.
There will be a delay before the effects of Directory sync become visible. First, you will need to wait until the time you have specified in ```DS_TIMER_CRON```. Second, the information will then be synchronized from your national node with the central European Directory. This can take up to 24 hours.
More details of Directory sync can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
### Loading data
The data accessed by the federated search is held in the Bridgehead in a FHIR store (we use Blaze).
You can load data into this store by using its FHIR API:
```
https://<Name of your server>/bbmri-localdatamanagement/fhir
```
The name of your server will generally be the full name of the VM that the Bridgehead runs on. You can alternatively supply an IP address.
The FHIR API uses basic auth. You can find the credentials in `/etc/bridgehead/<project>.local.conf`.
Note that if you don't have a DNS certificate for the Bridgehead, you will need to allow an insecure connection. E.g. with curl, use the `-k` flag.
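As a sketch, loading a FHIR transaction bundle with curl could look like this; the file name and credentials are placeholders, the real credentials being the ones from `/etc/bridgehead/<project>.local.conf`.
```shell
curl -k -u "myuser:mypassword" \
  -H "Content-Type: application/fhir+json" \
  --data @transaction-bundle.json \
  https://<Name of your server>/bbmri-localdatamanagement/fhir
```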
The storage space on your hard drive will depend on the number of FHIR resources that you intend to generate. This will be the sum of the number of patients/subjects, the number of samples, the number of conditions/diseases and the number of observations. As a general rule of thumb, you can assume that each resource will consume about 2 kilobytes of disk space.
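As a rough sizing sketch under that rule of thumb (all counts below are purely illustrative):
```shell
# patients + samples + conditions + observations, at ~2 KB per resource
RESOURCES=$((20000 + 100000 + 40000 + 500000))
echo "Estimated storage: $((RESOURCES * 2 / 1024)) MB"   # roughly 1.3 GB in this example
```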
For more information on Blaze performance, please refer to [import performance](https://github.com/samply/blaze/blob/master/docs/performance/import.md).
### Clearing data
The Bridgehead's FHIR store, Blaze, saves its data in a Docker volume. This means that the data will persist even if you stop the Bridgehead. You can clear existing data from the FHIR store by deleting the relevant Docker volume.
First, stop the Bridgehead:
```shell
sudo systemctl stop bridgehead@<PROJECT>.service
```
Now remove the volume:
```shell
docker volume rm <PROJECT>_blaze-data
```
Finally, restart the Bridgehead:
```shell
sudo systemctl start bridgehead@<PROJECT>.service
```
You will need to do this, for example, if you have been using a VM as a test environment and subsequently want to use the same VM for production.
#### ETL for BBMRI and GBA
Normally, you will need to build your own ETL to feed the Bridgehead. However, there is one case where a shortcut might be available:
- If you are using CentraXX as a BIMS and you have a FHIR-Export License, then you can employ standard mapping scripts that access the CentraXX-internal data structures and map the data onto the BBMRI FHIR profile. It may be necessary to adjust a few parameters, but this is nonetheless significantly easier than writing your own ETL.
You can find the profiles for generating FHIR in [Simplifier](https://simplifier.net/bbmri.de/~resources?category=Profile).
### Teiler (Frontend)
Teiler is the web-based frontend of the Bridgehead, providing access to its various internal and external services and components.
To learn how to integrate your custom module into Teiler, please refer to https://github.com/samply/teiler-dashboard.
- To activate Teiler, set the following environment variable in your `<PROJECT>.conf` file:
```bash
ENABLE_TEILER=true
```
### Data Exporter Service
The Exporter is a dedicated service for extracting and exporting Bridgehead data in (tabular) formats such as Excel, CSV, Opal, JSON, XML, ...
- To enable the Exporter service, set the following environment variable in your `<PROJECT>.conf` file:
```bash
ENABLE_EXPORTER=true
```
#### Data Quality Report
To assess the quality and plausibility of your imported data, the Reporter component is pre-configured to generate Excel reports with data quality metrics and statistical analyses. Reporter is part of the Exporter and can be enabled by setting the same environment variable in your `<PROJECT>.conf` file:
```bash
ENABLE_EXPORTER=true
```
For convenience, it's recommended to enable the Teiler web frontend alongside the Exporter to access export and quality control features via a web interface: set the following environment variables in your `<PROJECT>.conf` file:
```bash
ENABLE_TEILER=true
ENABLE_EXPORTER=true
```
## Things you should know
### Auto-Updates
Your Bridgehead will automatically and regularly check for updates. Whenever something has been updated (e.g., one of the git repositories or one of the docker images), your Bridgehead is automatically restarted. This should happen automatically and does not need any configuration.
If you would like to understand what happens exactly and when, please check the systemd units deployed during the [installation](#base-installation) via `systemctl cat bridgehead-update@<PROJECT>.service` and `systemctl cat bridgehead-update@<PROJECT>.timer`.
If you would like to understand what happens exactly and when, please check the systemd units deployed during the [installation](#base-installation) via `systemctl cat bridgehead-update@<PROJECT>.service` and `systemctl cat bridgehead-update@<PROJECT.timer`.
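To see when the next automatic update run is scheduled, the systemd timer can also be queried directly; the unit names follow the pattern above.
```shell
systemctl list-timers 'bridgehead-update@*'
```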
### Auto-Backups
@ -452,80 +302,12 @@ We have tested the installation procedure with an Ubuntu 22.04 guest system runn
Installation under WSL ought to work, but we have not tested this.
### FAQ
**Q: How is the security of GitHub pulls, volumes/containers, and image signing ensured?**
A: Changes to Git branches that could be delivered to sites (main and develop) must be accepted via a pull request with at least two positive reviews.
Containers/images are not built manually, but rather automatically through a CI/CD pipeline, so that an image can be rolled back to a defined code version at any time without changes.
**Note:** If firewall access for (outgoing) connections to GitHub and/or Docker Hub is problematic at the site, mirrors for both services are available, operated by the DKFZ.
**Q: How is authentication between users and components regulated?**
A: When setting up a Bridgehead, a private key and a so-called Certificate Signing Request (CSR) are generated locally. This CSR is manually signed by the broker operator, which allows the Bridgehead access to the network infrastructure.
All communication runs via Samply.Beam and is therefore end-to-end encrypted, but also signed. This allows the integrity and authenticity of the sender to be technically verified (which happens automatically both in the broker and at the recipients).
The connection to the broker is additionally secured using traditional TLS (transport encryption over https).
**Q: Are there any statistics on incoming traffic from the Bridgehead (what goes in and what goes out)?**
A: Incoming and outgoing traffic can only enter/leave the Bridgehead via a reverse or forward proxy, respectively. These components log all connections.
Statistical analysis is not currently being conducted, but is on the roadmap for some projects. We are also working on a dashboard for all tasks/responses delivered via Samply.Beam.
**Q: How is container access controlled, and what permission level is used?**
A: Currently, it is not possible to run the Bridgehead "out-of-the-box" as a rootless Docker Compose stack. The main reason is the operation of the reverse proxy (Traefik), which binds to the privileged ports 80 (HTTP) and 443 (HTTPS).
Otherwise, there are no known technical obstacles, although we don't have concrete experience implementing this.
At the file system level, a "bridgehead" user is created during installation, which manages the configuration and Bridgehead folders.
**Q: Is a cloud installation (not a company-owned one, but an external service provider) possible?**
A: Technically, yes. This is primarily a data protection issue between the participant and their cloud provider.
The Bridgehead contains a data storage system that, during use, contains sensitive patient and sample data.
There are cloud providers with whom appropriately worded contracts can be concluded to make this possible.
Of course, the details must be discussed with the responsible data protection officer.
**Q: What needs to be considered regarding the Docker distribution/registry, and how is it used here?**
A: The Bridgehead images are located both in Docker Hub and mirrored in a registry operated by the DKFZ.
The latter is used by default, avoiding potential issues with Docker Hub URL activation or rate limits.
When using automatic updates (highly recommended), a daily check is performed for:
- site configuration updates
- Bridgehead software updates
- container image updates
If updates are found, they are downloaded and applied.
See the first question for the control mechanism.
**Q: Is data only transferred one-way (Bridgehead/FHIR Store → Central/Locator), or is two-way access necessary?**
A: By using Samply.Beam, only one outgoing connection to the broker is required at the network level (i.e., Bridgehead → Broker).
## Troubleshooting
### Docker Daemon Proxy Configuration
Docker has a background daemon, responsible for downloading images and starting them. Sometimes, proxy configuration from your system won't carry over and it will fail to download images. In that case, you'll need to configure the proxy inside the system unit of docker by creating the file `/etc/systemd/system/docker.service.d/proxy.conf` with the following content:
Docker has a background daemon, responsible for downloading images and starting them. Sometimes, proxy configuration from your system won't carry over and it will fail to download images. In that case, configure the proxy for this daemon as described in the [official documentation](https://docs.docker.com).
``` ini
[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"
Environment="HTTPS_PROXY=https://proxy.example.com:3128"
Environment="NO_PROXY=localhost,127.0.0.1,some-local-docker-registry.example.com,.corp"
```
After saving the configuration file, you'll need to reload the system daemon for the changes to take effect:
``` shell
sudo systemctl daemon-reload
```
and restart the docker daemon:
``` shell
sudo systemctl restart docker
```
For more information, please consult the [official documentation](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
### Monitoring

View File

@ -0,0 +1,8 @@
services:
directory_sync_service:
image: "docker.verbis.dkfz.de/cache/samply/directory_sync_service"
environment:
DS_DIRECTORY_URL: ${DS_DIRECTORY_URL}
DS_DIRECTORY_USER_NAME: ${DS_DIRECTORY_USER_NAME}
DS_DIRECTORY_PASS_CODE: ${DS_DIRECTORY_PASS_CODE}
DS_TIMER_CRON: ${DS_TIMER_CRON}

8
bbmri/directory-sync.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
function dirSetup() {
if [ -n "$DS_DIRECTORY_USER_NAME" ]; then
log INFO "Directory sync setup detected -- will start directory sync service."
OVERRIDE+=" -f ./$PROJECT/directory-sync-compose.yml"
fi
}

View File

@ -1,17 +1,65 @@
version: "3.7"
# This includes only the shared persistence for BBMRI-ERIC and GBN. Federation components are included as modules, see vars.
services:
traefik:
container_name: bridgehead-traefik
image: docker.verbis.dkfz.de/cache/traefik:latest
command:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --providers.docker=true
- --providers.docker.exposedbydefault=false
- --providers.file.directory=/configuration/
- --api.dashboard=true
- --accesslog=true
- --entrypoints.web.http.redirections.entrypoint.to=websecure
- --entrypoints.web.http.redirections.entrypoint.scheme=https
labels:
- "traefik.enable=true"
- "traefik.http.routers.dashboard.rule=PathPrefix(`/api`) || PathPrefix(`/dashboard`)"
- "traefik.http.routers.dashboard.entrypoints=websecure"
- "traefik.http.routers.dashboard.service=api@internal"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.dashboard.middlewares=auth"
- "traefik.http.middlewares.auth.basicauth.users=${LDM_LOGIN}"
ports:
- 80:80
- 443:443
volumes:
- /etc/bridgehead/traefik-tls:/certs:ro
- ../lib/traefik-configuration/:/configuration:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
forward_proxy:
container_name: bridgehead-forward-proxy
image: docker.verbis.dkfz.de/cache/samply/bridgehead-forward-proxy:latest
environment:
HTTPS_PROXY: ${HTTPS_PROXY_URL}
USERNAME: ${HTTPS_PROXY_USERNAME}
PASSWORD: ${HTTPS_PROXY_PASSWORD}
volumes:
- /etc/bridgehead/trusted-ca-certs:/docker/custom-certs/:ro
landing:
container_name: bridgehead-landingpage
image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:master
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
environment:
HOST: ${HOST}
PROJECT: ${PROJECT}
SITE_NAME: ${SITE_NAME}
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-bbmri-blaze
environment:
BASE_URL: "http://bridgehead-bbmri-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
JAVA_TOOL_OPTIONS: "-Xmx4g"
LOG_LEVEL: "debug"
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
@ -23,10 +71,43 @@ services:
- "traefik.http.routers.blaze_ccp.middlewares=ccp_b_strip,auth"
- "traefik.http.routers.blaze_ccp.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:develop
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-bbmri-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_0_ID: focus
APP_0_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- ./root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
# used in modules *-locator.yml
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,18 +0,0 @@
version: "3.7"
services:
directory_sync_service:
image: "docker.verbis.dkfz.de/cache/samply/directory_sync_service"
environment:
DS_DIRECTORY_URL: ${DS_DIRECTORY_URL:-https://directory.bbmri-eric.eu}
DS_DIRECTORY_USER_NAME: ${DS_DIRECTORY_USER_NAME}
DS_DIRECTORY_USER_PASS: ${DS_DIRECTORY_USER_PASS}
DS_TIMER_CRON: ${DS_TIMER_CRON:-0 22 * * *}
DS_DIRECTORY_ALLOW_STAR_MODEL: ${DS_DIRECTORY_ALLOW_STAR_MODEL:-true}
DS_DIRECTORY_MOCK: ${DS_DIRECTORY_MOCK}
DS_DIRECTORY_DEFAULT_COLLECTION_ID: ${DS_DIRECTORY_DEFAULT_COLLECTION_ID}
DS_DIRECTORY_COUNTRY: ${DS_DIRECTORY_COUNTRY}
DS_IMPORT_BIOBANKS: ${DS_IMPORT_BIOBANKS:-true}
DS_IMPORT_COLLECTIONS: ${DS_IMPORT_COLLECTIONS:-true}
depends_on:
- "blaze"

View File

@ -1,6 +0,0 @@
#!/bin/bash
if [ -n "${DS_DIRECTORY_USER_NAME}" ]; then
log INFO "Directory sync setup detected -- will start directory sync service."
OVERRIDE+=" -f ./$PROJECT/modules/directory-sync-compose.yml"
fi

View File

@ -1,36 +0,0 @@
version: "3.7"
services:
focus-eric:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri
container_name: bridgehead-focus-eric
environment:
API_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${ERIC_PROXY_ID}
PROXY_ID: ${ERIC_PROXY_ID}
BLAZE_URL: "http://blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy-eric:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
depends_on:
- "beam-proxy-eric"
- "blaze"
beam-proxy-eric:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy-eric
environment:
BROKER_URL: ${ERIC_BROKER_URL}
PROXY_ID: ${ERIC_PROXY_ID}
APP_focus_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/bbmri/modules/${ERIC_ROOT_CERT}.root.crt.pem:/conf/root.crt.pem:ro

View File

@ -1,32 +0,0 @@
#!/bin/bash
if [ "${ENABLE_ERIC}" == "true" ]; then
log INFO "BBMRI-ERIC setup detected -- will start services for BBMRI-ERIC."
OVERRIDE+=" -f ./$PROJECT/modules/eric-compose.yml"
# The environment needs to be defined in /etc/bridgehead
case "$ENVIRONMENT" in
"production")
export ERIC_BROKER_ID=broker.bbmri.samply.de
export ERIC_ROOT_CERT=eric
;;
"acceptance")
export ERIC_BROKER_ID=broker-acc.bbmri-acc.samply.de
export ERIC_ROOT_CERT=eric.acc
;;
"test")
export ERIC_BROKER_ID=broker-test.bbmri-test.samply.de
export ERIC_ROOT_CERT=eric.test
;;
*)
report_error 6 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
export ERIC_BROKER_ID=broker.bbmri.samply.de
export ERIC_ROOT_CERT=eric
;;
esac
ERIC_BROKER_URL=https://${ERIC_BROKER_ID}
ERIC_PROXY_ID=${SITE_ID}.${ERIC_BROKER_ID}
ERIC_FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
ERIC_SUPPORT_EMAIL=bridgehead@helpdesk.bbmri-eric.eu
fi

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUFzdpDi1OLdXyogtCsktHFhCILtMwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjUwNjEwMTQzNjE1WhcNMzUw
NjA4MTQzNjQ1WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALpJCWE9Qe19R9DqotdkPV6jfiuJSKI3UYkCWdWG
nRfkKB6OaY5t3JCHDqaEME9FwSd2nFXhTp5F6snG/K7g8MCLIEzGzuSnrdjGqINq
zXLfgqnxvQpPR4ARLNNgnKxZaq7m4Q3T/l+QAshK6CnCUWFQ6q5x3g/pZHFP2USd
/G2FtDHX6YK4bHbbnigIPG6PdY2RYy60i30XGdIPBNf82XGkAtPUBz731gHOV5Vg
d+jfAqTwZAhYC2CcNmswFw1H9GrvTI/9KZWKcZNUIqemc0A/FyEyONUM18/vjQ7D
lUwOcQsgAg44QTOUPgqXv3sJPQM5EnGuv3yYV9u6Y2i78M8CAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFPrDeNWgtEyZ
VM0yeoRZdK2QGjyvMB8GA1UdIwQYMBaAFPrDeNWgtEyZVM0yeoRZdK2QGjyvMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQAD2S0kqL18
laewh+qnyZ0WMq12mLV/Rwll6ZuShCx2uAu3UZuIGWk3l7gG5zlws+i+zbaNcn4o
HsS3WG9kiNLOMKp8LXGkjErl6RaQr+kb8qgYFTPjOr6v0OdVn6ve9RDNYB5Hd+zE
9jAWmS8PfS2AldE4VAd0C4pWTAinhnKGrKdn1YAX5x+LMq1y0lc1Pd4CDgsjD6SS
3td7JtenXqCX0mN0XSeck7vvFGa6QpcQoVcN9tRENctHZTwyeGA21IkXylpFPUkE
LT60k48fNC8TZkBlfvtVGRebpm5krXIKEaVy5LniEpSuOR4hTqsgoQDntBjW4zHA
GeWQ1wQNTEBX
-----END CERTIFICATE-----

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUJ0g7k2vrdAwNTU38S1/mU8NO26MwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwNzEwMTIyMzQxWhcNMzMw
NzA3MTIyNDExWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALMvc/fApbsAl+/NXDszNgffNR5llAb9CfxzdnRn
ryoBqZdPevBYZZfKBARRKjFbXRDdPWbE7erDeo1LiCM6PObXCuT9wmGWJtvfkmqW
3Z/a75e4r360kceMEGVn4kWpi9dz8s7+oXVZURjW2r13h6pq6xQNZDNlXmpR8wHG
58TSrQC4n1vzdSwMWdptgOA8Sw8adR7ZJI1yNZpmynB2QolKKNESI7FcSKC/+b+H
LoPkseAwQG9yJo23qEw1GZS67B47iKIqX2wp9VLQobHw7ncrhKXQLSWq973k/Swp
7lBdfOsTouf72flLiF1HbdOLcFDmWgIbf5scj2HaQe8b/UcCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHYxBJiJZieW
e6G1vwn6Q36/crgNMB8GA1UdIwQYMBaAFHYxBJiJZieWe6G1vwn6Q36/crgNMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCN6WVNYpWJ
6Z1Ee+otLZYMXhjyR6NUQ5s0aHiug97gB8mTiNlgXiiTgipCbofEmENgh1inYrPC
WfdXxqOaekSXCQW6nSO1KtBzEYtkN5LrN1cjKqt51P2DbkllinK37wwCS2Kfup1+
yjhTRxrehSIfsMVK6bTUeSoc8etkgwErZpORhlpqZKWhmOwcMpgsYJJOLhUetqc1
UNe/254bc0vqHEPT6VI/86c7qAmk1xR0RUfrnKAEqZtUeuoj2fe1L/6yOB16fxt5
3V3oim7EO6eZCTjDo9fU5DaFiqSMe7WVdr03Na0cWet60XKRH/xaiC6gMWdHWcbh
vZdXnV1qjlM2
-----END CERTIFICATE-----

View File

@ -1,86 +0,0 @@
version: "3.7"
services:
exporter:
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
container_name: bridgehead-bbmri-exporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
CROSS_ORIGINS: "https://${HOST}"
EXPORTER_DB_USER: "exporter"
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
HTTP_RELATIVE_PATH: "/bbmri-exporter"
SITE: "${SITE_ID}"
HTTP_SERVLET_REQUEST_SCHEME: "https"
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
labels:
- "traefik.enable=true"
- "traefik.http.routers.exporter_bbmri.rule=PathPrefix(`/bbmri-exporter`)"
- "traefik.http.services.exporter_bbmri.loadbalancer.server.port=8092"
- "traefik.http.routers.exporter_bbmri.tls=true"
- "traefik.http.middlewares.exporter_bbmri_strip.stripprefix.prefixes=/bbmri-exporter"
- "traefik.http.routers.exporter_bbmri.middlewares=exporter_bbmri_strip"
# Main router
- "traefik.http.routers.exporter_bbmri.priority=20"
# API router
- "traefik.http.routers.exporter_bbmri_api.middlewares=exporter_bbmri_strip,exporter_auth"
- "traefik.http.routers.exporter_bbmri_api.rule=PathRegexp(`/bbmri-exporter/.+`)"
- "traefik.http.routers.exporter_bbmri_api.tls=true"
- "traefik.http.routers.exporter_bbmri_api.priority=25"
# Shared middlewares
- "traefik.http.middlewares.exporter_auth.basicauth.users=${EXPORTER_USER}"
volumes:
- "/var/cache/bridgehead/bbmri/exporter-files:/app/exporter-files/output"
exporter-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-bbmri-exporter-db
environment:
POSTGRES_USER: "exporter"
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
POSTGRES_DB: "exporter"
volumes:
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
- "/var/cache/bridgehead/bbmri/exporter-db:/var/lib/postgresql/data"
reporter:
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
container_name: bridgehead-bbmri-reporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
CROSS_ORIGINS: "https://${HOST}"
HTTP_RELATIVE_PATH: "/bbmri-reporter"
SITE: "${SITE_ID}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
EXPORTER_URL: "http://exporter:8092"
LOG_FHIR_VALIDATION: "false"
HTTP_SERVLET_REQUEST_SCHEME: "https"
# In this initial development state of the bridgehead, we are trying to have as few volumes as possible.
# However, in the first executions in the bbmri sites, this volume seems to be very important. A report is
# a process that can take several hours, because it depends on the exporter.
# There is a risk that the bridgehead restarts, losing the already created export.
volumes:
- "/var/cache/bridgehead/bbmri/reporter-files:/app/reports"
labels:
- "traefik.enable=true"
- "traefik.http.routers.reporter_bbmri.rule=PathPrefix(`/bbmri-reporter`)"
- "traefik.http.services.reporter_bbmri.loadbalancer.server.port=8095"
- "traefik.http.routers.reporter_bbmri.tls=true"
- "traefik.http.middlewares.reporter_bbmri_strip.stripprefix.prefixes=/bbmri-reporter"
- "traefik.http.routers.reporter_bbmri.middlewares=reporter_bbmri_strip"
- "traefik.http.routers.reporter_bbmri.priority=20"
- "traefik.http.routers.reporter_bbmri_api.middlewares=reporter_bbmri_strip,exporter_auth"
- "traefik.http.routers.reporter_bbmri_api.rule=PathRegexp(`/bbmri-reporter/.+`)"
- "traefik.http.routers.reporter_bbmri_api.tls=true"
- "traefik.http.routers.reporter_bbmri_api.priority=25"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_EXPORTER" == true ]; then
log INFO "Exporter setup detected -- will start Exporter service."
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

View File

@ -1,15 +0,0 @@
# Exporter and Reporter
## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead in a set of tables.
It supports different output formats such as CSV, Excel, JSON or XML. It can also export data into Opal.
## Exporter-DB
It is a database that stores queries for execution by the exporter.
The exporter also manages the different executions of the same query through this database.
## Reporter
This component is a plugin of the exporter that allows more complex Excel reports, described in templates, to be created.
It is compatible with different template engines such as Groovy, Thymeleaf, ...
It is well suited to generating documents such as our traditional CCP quality report.

View File

@ -1,36 +0,0 @@
version: "3.7"
services:
focus-gbn:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri
container_name: bridgehead-focus-gbn
environment:
API_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${GBN_PROXY_ID}
PROXY_ID: ${GBN_PROXY_ID}
BLAZE_URL: "http://blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy-gbn:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
depends_on:
- "beam-proxy-gbn"
- "blaze"
beam-proxy-gbn:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy-gbn
environment:
BROKER_URL: ${GBN_BROKER_URL}
PROXY_ID: ${GBN_PROXY_ID}
APP_focus_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/bbmri/modules/${GBN_ROOT_CERT}.root.crt.pem:/conf/root.crt.pem:ro

View File

@ -1,28 +0,0 @@
#!/bin/bash
if [ "${ENABLE_GBN}" == "true" ]; then
log INFO "GBN setup detected -- will start services for German Biobank Node."
OVERRIDE+=" -f ./$PROJECT/modules/gbn-compose.yml"
# The environment needs to be defined in /etc/bridgehead
case "$ENVIRONMENT" in
"production")
export GBN_BROKER_ID=broker.bbmri.de
export GBN_ROOT_CERT=gbn
;;
"test")
export GBN_BROKER_ID=broker.test.bbmri.de
export GBN_ROOT_CERT=gbn.test
;;
*)
report_error 6 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
export GBN_BROKER_ID=broker.bbmri.de
export GBN_ROOT_CERT=gbn
;;
esac
GBN_BROKER_URL=https://${GBN_BROKER_ID}
GBN_PROXY_ID=${SITE_ID}.${GBN_BROKER_ID}
GBN_FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
GBN_SUPPORT_EMAIL=feedback@germanbiobanknode.de
fi

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUckVOQQWZBTC0pWhn1X3lPxAWricwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwOTA0MDkwMTQ0WhcNMzMw
OTAxMDkwMjEzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAOOD+CVvteBmu1hKV1QlfbHmiLCnuf6F+9k+1u/b
6as6k7BURn8KZAxVLWSIwC6x2C7n9CHN9Jieb4DWpS0XmXQVUEpT1/yiLGBdxp2x
nrbzm7caOunsWsPlGOcXPJKJpzAhcg58RDzXZ+2+shulSmsgPNlWBaLhNL5wj0sQ
MzbwGVlGIJg18Ye/9WgQkO2ZcnTGb5cRsChKs4H43ZC34ZSSk7wqWg6P3e2xFam1
YKXBOZzhwHoI4AxUQ+gd6upz5dqcwbaNZm10VP8fMac2dMLw9cOCS0ueDCS4viLd
A69yds19AndBPMZhoEY1UHafjJ1uITRJQpaaB4vNliX+1rECAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFC74YIorSwWD
/s5ozz3xvqUMDJ3qMB8GA1UdIwQYMBaAFC74YIorSwWD/s5ozz3xvqUMDJ3qMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCzcIccBzYr
sHCGTGsSyLGBYsuI5yl+hvFOitYTha/mC+XBxq2R6By2WzbfSZtyZkUtC/+FqdCY
VtMSjbDVXtBgsabfqODBobHmPyOEmNUX4IGcyn06rdM+rHQRah98lF+PhiPPO42F
9Wj8dkq4/Gf+Yarq31ZbY0sed2sEPZ/bV26Og8Ft9qip5gKwklyakAiCnDIq+QBd
ltvng3g08AQM0o5KIphP2/WU0UoSk1YPVMjRxuLiFg8xvr2EdCQQ9oA7xbhrmAXe
242HVW/7KokjmowyWTQlIUGnuGdCOtTl8h74eHTID0YWO68hHkA0J5Ox2j4dZxvw
HRFTxAR1gGKX
-----END CERTIFICATE-----

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUQJjusHYR89Xas+kRbg41aHZxfmcwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwODIxMDk1MDI1WhcNMzMw
ODE4MDk1MDU1WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMP0jt2tSk23Bu+QeogqlFwjbMnqwRcWGKAOF4ch
aOK2B5u/BnpqIZDZbhfSIJTv8DPe3+nA2VqRfSiW3HbV0auqxx1ii2ZmHYbvO2P/
Jj6hyIiYYGqCMRVXk7iB+DfMysQEaSJO/7lJSprlVQCl0u7MAQ4q/szVNwcCm2Xi
iE00Wlota2xTYjnJHYjeaLZL4kQsjqW2aCWHG4q77Z4NXT+lXN9XXedgoXLhuwWl
UyHhXPjyCVu1iFzsXwSTodPAETGoInRYMqMA7PrbHZu1b2Jz0BwCQ+bark1td+Mf
l3uP0QduhZnH6zGO0KyUFRzeiesgabv5bgUeSSsIOVjnLJUCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFME99nPh1Vuo
7eRaymL2Ps7qGxIdMB8GA1UdIwQYMBaAFME99nPh1Vuo7eRaymL2Ps7qGxIdMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQB0WG0xT00R
5CA0tVHaNo8bQuAXytu566TspKc5vVd3r6mglj/MiSSQG2MVz+GUU6LnnApgln1P
pvZuyaldB0QdTTLeJVMr/eFtZonlxqcxkj+VW2Y7mRHT7Xx9GQvzKYvSK5m/+xzH
pAQl8AirgkoZ5b+ltlzM0pDAH204xj3/skmGqM/o0FKzRtpetHYkZPiquHCmO2Cp
nTMkv7c2qu5t2Dm5q0Tmb7ZRoA1yIYhDn/UfhTAVWQnoMfXK8oB9nkRRb7pAfOXo
W1K4A+oWqKrJwfIH/Ycnw7hu8hPuGOyIN/PLnLpJp9M2I67vywp5lIvFib4UukyJ
wJw6/iTienIA
-----END CERTIFICATE-----

View File

@ -1,70 +0,0 @@
version: "3.7"
services:
teiler-orchestrator:
image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
container_name: bridgehead-teiler-orchestrator
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_orchestrator_bbmri.rule=PathPrefix(`/bbmri-teiler`)"
- "traefik.http.services.teiler_orchestrator_bbmri.loadbalancer.server.port=9000"
- "traefik.http.routers.teiler_orchestrator_bbmri.tls=true"
- "traefik.http.middlewares.teiler_orchestrator_bbmri_strip.stripprefix.prefixes=/bbmri-teiler"
- "traefik.http.routers.teiler_orchestrator_bbmri.middlewares=teiler_orchestrator_bbmri_strip"
environment:
TEILER_BACKEND_URL: "https://${HOST}/bbmri-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/bbmri-teiler-dashboard"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
HTTP_RELATIVE_PATH: "/bbmri-teiler"
teiler-dashboard:
image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
container_name: bridgehead-teiler-dashboard
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_dashboard_bbmri.rule=PathPrefix(`/bbmri-teiler-dashboard`)"
- "traefik.http.services.teiler_dashboard_bbmri.loadbalancer.server.port=80"
- "traefik.http.routers.teiler_dashboard_bbmri.tls=true"
- "traefik.http.middlewares.teiler_dashboard_bbmri_strip.stripprefix.prefixes=/bbmri-teiler-dashboard"
- "traefik.http.routers.teiler_dashboard_bbmri.middlewares=teiler_dashboard_bbmri_strip"
environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/bbmri-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/bbmri-teiler-dashboard"
OIDC_URL: "${OIDC_URL}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/bbmri-teiler"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/bbmri-teiler"
TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
REPORTER_DEFAULT_TEMPLATE_ID: "bbmri-qb"
EXPORTER_DEFAULT_TEMPLATE_ID: "bbmri"
teiler-backend:
image: docker.verbis.dkfz.de/ccp/bbmri-teiler-backend:latest
container_name: bridgehead-teiler-backend
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_backend_bbmri.rule=PathPrefix(`/bbmri-teiler-backend`)"
- "traefik.http.services.teiler_backend_bbmri.loadbalancer.server.port=8085"
- "traefik.http.routers.teiler_backend_bbmri.tls=true"
- "traefik.http.middlewares.teiler_backend_bbmri_strip.stripprefix.prefixes=/bbmri-teiler-backend"
- "traefik.http.routers.teiler_backend_bbmri.middlewares=teiler_backend_bbmri_strip"
environment:
LOG_LEVEL: "INFO"
APPLICATION_PORT: "8085"
APPLICATION_ADDRESS: "${HOST}"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/bbmri-teiler"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/bbmri-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/bbmri-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/bbmri-teiler-dashboard/en"
HTTP_PROXY: "http://forward_proxy:3128"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_TEILER" == true ];then
log INFO "Teiler setup detected -- will start Teiler services."
OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
TEILER_DEFAULT_LANGUAGE=EN
TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
fi

View File

@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single page application.
## Teiler Orchestrator
A single-spa component that consists of the root HTML site of the single-page application and JavaScript code that
fetches the information about the microfrontends from the Teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run standalone, but need to be extended with single-spa (https://single-spa.js.org/docs/ecosystem).
Three templates (Angular, Vue, React) are also available that can be extended for direct use in Teiler.
## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The user and password can be found in ccp.local.conf.
## Teiler Backend
In this component, the microfrontends are configured.

View File

@ -1,42 +1,11 @@
BROKER_ID=broker-test.bbmri-test.samply.de
BROKER_ID=broker.bbmri.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=32
SUPPORT_EMAIL=bridgehead@helpdesk.bbmri-eric.eu
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
# Makes sense for all European Biobanks
: ${ENABLE_ERIC:=true}
# Makes only sense for German Biobanks
: ${ENABLE_GBN:=false}
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
SUPPORT_EMAIL=$ERIC_SUPPORT_EMAIL
BROKER_URL_FOR_PREREQ="${ERIC_BROKER_URL:-$GBN_BROKER_URL}"
if [ -n "$GBN_SUPPORT_EMAIL" ]; then
SUPPORT_EMAIL=$GBN_SUPPORT_EMAIL
fi
function do_enroll {
COUNT=0
if [ "$ENABLE_ERIC" == "true" ]; then
do_enroll_inner $ERIC_PROXY_ID $ERIC_SUPPORT_EMAIL
COUNT=$((COUNT+1))
fi
if [ "$ENABLE_GBN" == "true" ]; then
do_enroll_inner $GBN_PROXY_ID $GBN_SUPPORT_EMAIL
COUNT=$((COUNT+1))
fi
if [ $COUNT -ge 2 ]; then
echo
echo "You just received $COUNT certificate signing requests (CSR). Please send $COUNT e-mails, with 1 CSR each, to the respective e-mail address."
fi
}
# This will load directory-sync setup.
source $PROJECT/directory-sync.sh
dirSetup

View File

@ -32,83 +32,31 @@ case "$PROJECT" in
bbmri)
#nothing extra to do
;;
cce)
#nothing extra to do
;;
itcc)
#nothing extra to do
;;
kr)
#nothing extra to do
;;
dhki)
#nothing extra to do
;;
minimal)
#nothing extra to do
;;
*)
printUsage
exit 1
;;
esac
# Loads config variables and runs the projects setup script
loadVars() {
# Load variables from /etc/bridgehead and /srv/docker/bridgehead
set -a
# Source the project specific config file
source /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "/etc/bridgehead/$PROJECT.conf not found"
# Source the project specific local config file if present
# This file is ignored by git, as opposed to the regular config file, as it contains private site information like etl auth data
if [ -e /etc/bridgehead/$PROJECT.local.conf ]; then
log INFO "Applying /etc/bridgehead/$PROJECT.local.conf"
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
fi
# Set execution environment on main default to prod else test
if [[ -z "${ENVIRONMENT+x}" ]]; then
if [ "$(git rev-parse --abbrev-ref HEAD)" == "main" ]; then
ENVIRONMENT="production"
else
ENVIRONMENT="test" # we have acceptance environment in BBMRI ERIC and it would be more appropriate to default to that one in case the data they have in BH is real, but I'm gonna leave it as is for backward compatibility
fi
fi
# Source the versions of the images components
case "$ENVIRONMENT" in
"production")
source ./versions/prod
;;
"test")
source ./versions/test
;;
"acceptance")
source ./versions/acceptance
;;
*)
report_error 7 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
source ./versions/prod
;;
esac
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
setHostname
optimizeBlazeMemoryUsage
# Run project specific setup if it exists
# This will usually modify the `OVERRIDE` to include all the compose files that the project depends on
# This is also where projects specify which modules to load
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
set +a
OVERRIDE=${OVERRIDE:=""}
# minimal contains shared components, so potential overrides must be applied in every project
if [ -f "minimal/docker-compose.override.yml" ]; then
log INFO "Applying Bridgehead common components override (minimal/docker-compose.override.yml)"
OVERRIDE+=" -f ./minimal/docker-compose.override.yml"
fi
if [ -f "$PROJECT/docker-compose.override.yml" ]; then
log INFO "Applying $PROJECT/docker-compose.override.yml"
OVERRIDE+=" -f ./$PROJECT/docker-compose.override.yml"
fi
detectCompose
setupProxy
setHostname
}
case "$ACTION" in
@ -116,32 +64,20 @@ case "$ACTION" in
loadVars
hc_send log "Bridgehead $PROJECT startup: Checking requirements ..."
checkRequirements
sync_secrets
hc_send log "Bridgehead $PROJECT startup: Requirements checked out. Now starting bridgehead ..."
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE up --abort-on-container-exit
export LDM_LOGIN=$(getLdmPassword)
exec $COMPOSE -f ./$PROJECT/docker-compose.yml $OVERRIDE up --abort-on-container-exit
;;
stop)
loadVars
# Kill stale secret-sync instances if present
docker kill $(docker ps -q --filter ancestor=docker.verbis.dkfz.de/cache/samply/secret-sync-local) 2>/dev/null || true
# HACK: This is temporarily to properly shut down false bridgehead instances (bridgehead-ccp instead ccp)
$COMPOSE -p bridgehead-$PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
# HACK: This is temporary, to properly shut down false bridgehead instances (bridgehead-ccp instead of ccp)
$COMPOSE -p bridgehead-$PROJECT -f ./$PROJECT/docker-compose.yml $OVERRIDE down
exec $COMPOSE -f ./$PROJECT/docker-compose.yml $OVERRIDE down
;;
is-running)
bk_is_running
exit $?
;;
logs)
loadVars
shift 2
exec journalctl -u bridgehead@$PROJECT -u bridgehead-update@$PROJECT -a $@
;;
docker-logs)
loadVars
shift 2
exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE logs -f $@
;;
update)
loadVars
exec ./lib/update-bridgehead.sh $PROJECT
@ -159,17 +95,14 @@ case "$ACTION" in
uninstall)
exec ./lib/uninstall-bridgehead.sh $PROJECT
;;
adduser)
loadVars
log "INFO" "Adding encrypted credentials in /etc/bridgehead/$PROJECT.local.conf"
read -p "Please choose the component (LDM_AUTH|NNGM_AUTH) you want to add a user to : " COMPONENT
read -p "Please enter a username: " USER
read -s -p "Please enter a password (will not be echoed): "$'\n' PASSWORD
add_basic_auth_user $USER $PASSWORD $COMPONENT $PROJECT
;;
enroll)
loadVars
do_enroll $PROXY_ID
if [ -e $PRIVATEKEYFILENAME ]; then
log ERROR "Private key already exists at $PRIVATEKEYFILENAME. Please delete first to proceed."
exit 1
fi
docker run --rm -ti -v /etc/bridgehead/pki:/etc/bridgehead/pki samply/beam-enroll:latest --output-file $PRIVATEKEYFILENAME --proxy-id $PROXY_ID --admin-email $SUPPORT_EMAIL
chmod 600 $PRIVATEKEYFILENAME
;;
preRun | preUpdate)
fixPermissions

View File

@ -1,68 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-cce-blaze
environment:
BASE_URL: "http://bridgehead-cce-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_cce.rule=PathPrefix(`/cce-localdatamanagement`)"
- "traefik.http.middlewares.cce_b_strip.stripprefix.prefixes=/cce-localdatamanagement"
- "traefik.http.services.blaze_cce.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_cce.middlewares=cce_b_strip,auth"
- "traefik.http.routers.blaze_cce.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-cce-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/cce/queries_to_cache.conf:/queries_to_cache.conf:ro
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/cce/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,33 +0,0 @@
version: "3.7"
services:
landing:
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "cce_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0FHRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU=
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIGljZDEwOiAnaHR0cDovL2ZoaXIuZGUvQ29kZVN5c3RlbS9iZmFybS9pY2QtMTAtZ20nCmNvZGVzeXN0ZW0gbW9ycGg6ICd1cm46b2lkOjIuMTYuODQwLjEuMTEzODgzLjYuNDMuMScKCmNvbnRleHQgUGF0aWVudAoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9BR0VfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgooKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDNjEnIGZyb20gaWNkMTBdKSBhbmQKKChleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQ3LzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0ODAvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODUwMC8zJykpKQ==

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=manoj.waikar@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done

View File

@ -1,15 +1,65 @@
version: "3.7"
services:
traefik:
container_name: bridgehead-traefik
image: docker.verbis.dkfz.de/cache/traefik:latest
command:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --providers.docker=true
- --providers.docker.exposedbydefault=false
- --providers.file.directory=/configuration/
- --api.dashboard=true
- --accesslog=true
- --entrypoints.web.http.redirections.entrypoint.to=websecure
- --entrypoints.web.http.redirections.entrypoint.scheme=https
labels:
- "traefik.enable=true"
- "traefik.http.routers.dashboard.rule=PathPrefix(`/api`) || PathPrefix(`/dashboard`)"
- "traefik.http.routers.dashboard.entrypoints=websecure"
- "traefik.http.routers.dashboard.service=api@internal"
- "traefik.http.routers.dashboard.tls=true"
- "traefik.http.routers.dashboard.middlewares=auth"
- "traefik.http.middlewares.auth.basicauth.users=${LDM_LOGIN}"
ports:
- 80:80
- 443:443
volumes:
- /etc/bridgehead/traefik-tls:/certs:ro
- ../lib/traefik-configuration/:/configuration:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
forward_proxy:
container_name: bridgehead-forward-proxy
image: docker.verbis.dkfz.de/cache/samply/bridgehead-forward-proxy:latest
environment:
HTTPS_PROXY: ${HTTPS_PROXY_URL}
USERNAME: ${HTTPS_PROXY_USERNAME}
PASSWORD: ${HTTPS_PROXY_PASSWORD}
volumes:
- /etc/bridgehead/trusted-ca-certs:/docker/custom-certs/:ro
landing:
container_name: bridgehead-landingpage
image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:master
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
environment:
HOST: ${HOST}
PROJECT: ${PROJECT}
SITE_NAME: ${SITE_NAME}
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-ccp-blaze
environment:
BASE_URL: "http://bridgehead-ccp-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
JAVA_TOOL_OPTIONS: "-Xmx4g"
LOG_LEVEL: "debug"
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
@ -21,32 +71,29 @@ services:
- "traefik.http.routers.blaze_ccp.middlewares=ccp_b_strip,auth"
- "traefik.http.routers.blaze_ccp.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-dktk
container_name: bridgehead-focus
spot:
image: docker.verbis.dkfz.de/cache/samply/spot:latest
container_name: bridgehead-spot
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
SECRET: ${SPOT_BEAM_SECRET_LONG}
APPID: spot
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-ccp-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/ccp/queries_to_cache.conf:/queries_to_cache.conf:ro
LDM_URL: http://bridgehead-ccp-blaze:8080/fhir
BEAM_PROXY: http://beam-proxy:8081
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
APP_0_ID: spot
APP_0_KEY: ${SPOT_BEAM_SECRET_SHORT}
APP_1_ID: report-hub
APP_1_KEY: ${REPORTHUB_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
@ -57,7 +104,8 @@ services:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro
- ./root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:

34
ccp/exliquid-compose.yml Normal file
View File

@ -0,0 +1,34 @@
version: "3.7"
services:
exliquid-task-store:
image: docker.verbis.dkfz.de/cache/samply/blaze:0.19
container_name: bridgehead-exliquid-task-store
environment:
BASE_URL: "http://bridgehead-exliquid-task-store:8080"
JAVA_TOOL_OPTIONS: "-Xmx1g"
volumes:
- "exliquid-task-store-data:/app/data"
labels:
- "traefik.enable=false"
exliquid-report-hub:
image: docker.verbis.dkfz.de/cache/samply/report-hub:latest
container_name: bridgehead-exliquid-report-hub
environment:
SPRING_WEBFLUX_BASE_PATH: "/exliquid"
JAVA_TOOL_OPTIONS: "-Xmx1g"
APP_BEAM_APPID: "report-hub.${PROXY_ID}"
APP_BEAM_SECRET: ${REPORTHUB_BEAM_SECRET_SHORT}
APP_BEAM_PROXY_BASEURL: http://beam-proxy:8081
APP_TASKSTORE_BASEURL: "http://bridgehead-exliquid-task-store:8080/fhir"
APP_DATASTORE_BASEURL: http://bridgehead-ccp-blaze:8080/fhir
restart: always
labels:
- "traefik.enable=true"
- "traefik.http.routers.report-ccp.rule=PathPrefix(`/exliquid`)"
- "traefik.http.services.report-ccp.loadbalancer.server.port=8080"
- "traefik.http.routers.report-ccp.tls=true"
volumes:
exliquid-task-store-data:

19
ccp/exliquid-setup.sh Normal file
View File

@ -0,0 +1,19 @@
#!/bin/bash
function exliquidSetup() {
case ${SITE_ID} in
berlin|dresden|essen|frankfurt|freiburg|luebeck|mainz|muenchen-lmu|muenchen-tum|mannheim|tuebingen)
EXLIQUID=1
;;
dktk-test)
EXLIQUID=1
;;
*)
EXLIQUID=0
;;
esac
if [[ $EXLIQUID -eq 1 ]]; then
log INFO "EXLIQUID setup detected -- will start Report-Hub."
OVERRIDE+=" -f ./$PROJECT/exliquid-compose.yml"
fi
}

View File

@ -1,32 +0,0 @@
version: "3.7"
services:
blaze-secondary:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-ccp-blaze-secondary
environment:
BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-secondary-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze-secondary_ccp.rule=PathPrefix(`/ccp-localdatamanagement-secondary`)"
- "traefik.http.middlewares.ccp_b-secondary_strip.stripprefix.prefixes=/ccp-localdatamanagement-secondary"
- "traefik.http.services.blaze-secondary_ccp.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze-secondary_ccp.middlewares=ccp_b-secondary_strip,auth"
- "traefik.http.routers.blaze-secondary_ccp.tls=true"
obds2fhir-rest:
environment:
STORE_PATH: ${STORE_PATH:-http://blaze:8080/fhir}
exporter:
environment:
BLAZE_HOST: "blaze-secondary"
volumes:
blaze-secondary-data:

View File

@ -1,11 +0,0 @@
#!/bin/bash
function blazeSecondarySetup() {
if [ -n "$ENABLE_SECONDARY_BLAZE" ]; then
log INFO "Secondary Blaze setup detected -- will start second blaze."
OVERRIDE+=" -f ./$PROJECT/modules/blaze-secondary-compose.yml"
#make oBDS2FHIR ignore ID-Management and replace target Blaze
PATIENTLIST_URL=" "
STORE_PATH="http://blaze-secondary:8080/fhir"
fi
}

View File

@ -1,170 +0,0 @@
version: "3.7"
services:
rstudio:
container_name: bridgehead-rstudio
image: docker.verbis.dkfz.de/ccp/dktk-rstudio:latest
environment:
#DEFAULT_USER: "rstudio" # This line is kept for informational purposes
PASSWORD: "${RSTUDIO_ADMIN_PASSWORD}" # It is required, even if the authentication is disabled
DISABLE_AUTH: "true" # https://rocker-project.org/images/versioned/rstudio.html#how-to-use
HTTP_RELATIVE_PATH: "/rstudio"
ALL_PROXY: "http://forward_proxy:3128" # https://rocker-project.org/use/networking.html
labels:
- "traefik.enable=true"
- "traefik.http.routers.rstudio_ccp.rule=PathPrefix(`/rstudio`)"
- "traefik.http.services.rstudio_ccp.loadbalancer.server.port=8787"
- "traefik.http.middlewares.rstudio_ccp_strip.stripprefix.prefixes=/rstudio"
- "traefik.http.routers.rstudio_ccp.tls=true"
- "traefik.http.routers.rstudio_ccp.middlewares=oidcAuth,rstudio_ccp_strip"
networks:
- rstudio
opal:
container_name: bridgehead-opal
image: docker.verbis.dkfz.de/ccp/dktk-opal:latest
labels:
- "traefik.enable=true"
- "traefik.http.routers.opal_ccp.rule=PathPrefix(`/opal`)"
- "traefik.http.services.opal_ccp.loadbalancer.server.port=8080"
- "traefik.http.routers.opal_ccp.tls=true"
links:
- opal-rserver
- opal-db
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC -Dhttps.proxyHost=forward_proxy -Dhttps.proxyPort=3128"
# OPAL_ADMINISTRATOR_USER: "administrator" # This line is kept for informational purposes
OPAL_ADMINISTRATOR_PASSWORD: "${OPAL_ADMIN_PASSWORD}"
POSTGRESDATA_HOST: "opal-db"
POSTGRESDATA_DATABASE: "opal"
POSTGRESDATA_USER: "opal"
POSTGRESDATA_PASSWORD: "${OPAL_DB_PASSWORD}"
ROCK_HOSTS: "opal-rserver:8085"
APP_URL: "https://${HOST}/opal"
APP_CONTEXT_PATH: "/opal"
OPAL_PRIVATE_KEY: "/run/secrets/opal-key.pem"
OPAL_CERTIFICATE: "/run/secrets/opal-cert.pem"
OIDC_URL: "${OIDC_URL}"
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
TOKEN_MANAGER_PASSWORD: "${TOKEN_MANAGER_OPAL_PASSWORD}"
EXPORTER_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
BEAM_APP_ID: token-manager.${PROXY_ID}
BEAM_SECRET: ${TOKEN_MANAGER_SECRET}
BEAM_DATASHIELD_PROXY: request-manager
volumes:
- "/var/cache/bridgehead/ccp/opal-metadata-db:/srv" # Opal metadata
secrets:
- opal-cert.pem
- opal-key.pem
opal-db:
container_name: bridgehead-opal-db
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
environment:
POSTGRES_PASSWORD: "${OPAL_DB_PASSWORD}" # Set in datashield-setup.sh
POSTGRES_USER: "opal"
POSTGRES_DB: "opal"
volumes:
- "/var/cache/bridgehead/ccp/opal-db:/var/lib/postgresql/data" # Opal project data (imported from exporter)
opal-rserver:
container_name: bridgehead-opal-rserver
image: docker.verbis.dkfz.de/ccp/dktk-rserver # datashield/rock-base + dsCCPhos
tmpfs:
- /srv
beam-connect:
image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
container_name: bridgehead-datashield-connect
environment:
PROXY_URL: "http://beam-proxy:8081"
TLS_CA_CERTIFICATES_DIR: /run/secrets
APP_ID: datashield-connect.${SITE_ID}.${BROKER_ID}
PROXY_APIKEY: ${DATASHIELD_CONNECT_SECRET}
DISCOVERY_URL: "./map/central.json"
LOCAL_TARGETS_FILE: "./map/local.json"
NO_AUTH: "true"
secrets:
- opal-cert.pem
depends_on:
- beam-proxy
volumes:
- /tmp/bridgehead/opal-map/:/map/:ro
networks:
- default
- rstudio
traefik:
labels:
- "traefik.http.middlewares.oidcAuth.forwardAuth.address=http://oauth2-proxy:4180/"
- "traefik.http.middlewares.oidcAuth.forwardAuth.trustForwardHeader=true"
- "traefik.http.middlewares.oidcAuth.forwardAuth.authResponseHeaders=X-Auth-Request-Access-Token,Authorization"
networks:
- default
- rstudio
forward_proxy:
networks:
- default
- rstudio
beam-proxy:
environment:
APP_datashield-connect_KEY: ${DATASHIELD_CONNECT_SECRET}
APP_token-manager_KEY: ${TOKEN_MANAGER_SECRET}
# TODO: Allow users of group /DataSHIELD and OIDC_USER_GROUP at the same time:
# Maybe a solution would be (https://oauth2-proxy.github.io/oauth2-proxy/configuration/oauth_provider):
# --allowed-groups=/DataSHIELD,OIDC_USER_GROUP
oauth2-proxy:
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
container_name: bridgehead-oauth2proxy
command: >-
--allowed-group=DataSHIELD
--oidc-groups-claim=${OIDC_GROUP_CLAIM}
--auth-logging=true
--whitelist-domain=${HOST}
--http-address="0.0.0.0:4180"
--reverse-proxy=true
--upstream="static://202"
--email-domain="*"
--cookie-name="_BRIDGEHEAD_oauth2"
--cookie-secret="${OAUTH2_PROXY_SECRET}"
--cookie-expire="12h"
--cookie-secure="true"
--cookie-httponly="true"
#OIDC settings
--provider="keycloak-oidc"
--provider-display-name="VerbIS Login"
--client-id="${OIDC_PRIVATE_CLIENT_ID}"
--client-secret="${OIDC_CLIENT_SECRET}"
--redirect-url="https://${HOST}${OAUTH2_CALLBACK}"
--oidc-issuer-url="${OIDC_URL}"
--scope="openid email profile"
--code-challenge-method="S256"
--skip-provider-button=true
#X-Forwarded-Header settings - true/false depending on your needs
--pass-basic-auth=true
--pass-user-headers=false
--pass-access-token=false
labels:
- "traefik.enable=true"
- "traefik.http.routers.oauth2_proxy.rule=PathPrefix(`/oauth2`)"
- "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
- "traefik.http.routers.oauth2_proxy.tls=true"
environment:
http_proxy: "http://forward_proxy:3128"
https_proxy: "http://forward_proxy:3128"
depends_on:
forward_proxy:
condition: service_healthy
secrets:
opal-cert.pem:
file: /tmp/bridgehead/opal-cert.pem
opal-key.pem:
file: /tmp/bridgehead/opal-key.pem
networks:
rstudio:

View File

@ -1,157 +0,0 @@
<template id="opal-ccp" source-id="blaze-store" opal-project="ccp-demo" target-id="opal" >
<container csv-filename="Patient-${TIMESTAMP}.csv" opal-table="patient" opal-entity-type="Patient">
<attribute csv-column="patient-id" opal-value-type="text" primary-key="true" val-fhir-path="Patient.id.value" anonym="Pat" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="dktk-id-global" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Global').value.value"/>
<attribute csv-column="dktk-id-lokal" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Lokal').value.value" />
<attribute csv-column="geburtsdatum" opal-value-type="date" val-fhir-path="Patient.birthDate.value"/>
<attribute csv-column="geschlecht" opal-value-type="text" val-fhir-path="Patient.gender.value" />
<attribute csv-column="datum_des_letztbekannten_vitalstatus" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '75186-7').effective.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
<attribute csv-column="vitalstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '75186-7').value.coding.code.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
<!--fehlt in ADT2FHIR--><attribute csv-column="tod_tumorbedingt" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://fhir.de/CodeSystem/bfarm/icd-10-gm').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
<!--fehlt in ADT2FHIR--><attribute csv-column="todesursachen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/JNUCS').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
</container>
<container csv-filename="Diagnosis-${TIMESTAMP}.csv" opal-table="diagnosis" opal-entity-type="Diagnosis">
<attribute csv-column="diagnosis-id" primary-key="true" opal-value-type="text" val-fhir-path="Condition.id.value" anonym="Dia" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Condition.subject.reference.value" anonym="Pat"/>
<attribute csv-column="primaerdiagnose" opal-value-type="text" val-fhir-path="Condition.code.coding.code.value"/>
<attribute csv-column="tumor_diagnosedatum" opal-value-type="date" val-fhir-path="Condition.onset.value"/>
<attribute csv-column="primaertumor_diagnosetext" opal-value-type="text" val-fhir-path="Condition.code.text.value"/>
<attribute csv-column="version_des_icd-10_katalogs" opal-value-type="integer" val-fhir-path="Condition.code.coding.version.value"/>
<attribute csv-column="lokalisation" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').code.value"/>
<attribute csv-column="icd-o_katalog_topographie_version" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').version.value"/>
<attribute csv-column="seitenlokalisation_nach_adt-gekid" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/SeitenlokalisationCS').code.value"/>
</container>
<container csv-filename="Progress-${TIMESTAMP}.csv" opal-table="progress" opal-entity-type="Progress">
<!--it would be better to generate a an ID, instead of extracting the ClinicalImpression id-->
<attribute csv-column="progress-id" primary-key="true" opal-value-type="text" val-fhir-path="ClinicalImpression.id.value" anonym="Pro" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="ClinicalImpression.problem.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="ClinicalImpression.subject.reference.value" anonym="Pat" />
<attribute csv-column="untersuchungs-_befunddatum_im_verlauf" opal-value-type="date" val-fhir-path="ClinicalImpression.effective.value" />
<!-- just for evaluation: redundant to Untersuchungs-, Befunddatum im Verlauf-->
<attribute csv-column="datum_lokales_oder_regionaeres_rezidiv" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').effective.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
<attribute csv-column="gesamtbeurteilung_tumorstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21976-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
<attribute csv-column="lokales_oder_regionaeres_rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
<attribute csv-column="lymphknoten-rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4370-8').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
<attribute csv-column="fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4226-2').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
</container>
<container csv-filename="Histology-${TIMESTAMP}.csv" opal-table="histology" opal-entity-type="Histology" >
<attribute csv-column="histology-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').id" anonym="His" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').subject.reference.value" anonym="Pat" />
<attribute csv-column="histologie_datum" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '59847-4').effective.value"/>
<attribute csv-column="icd-o_katalog_morphologie_version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.version.value" />
<attribute csv-column="morphologie" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.code.value"/>
<attribute csv-column="morphologie-freitext" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.text.value"/>
<attribute csv-column="grading" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59542-1').value.coding.code.value" join-fhir-path="Observation.where(code.coding.code = '59847-4').hasMember.reference.value"/>
</container>
<container csv-filename="Metastasis-${TIMESTAMP}.csv" opal-table="metastasis" opal-entity-type="Metastasis" >
<attribute csv-column="metastasis-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').id" anonym="Met" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_fernmetastasen" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21907-1').effective.value"/>
<attribute csv-column="fernmetastasen_vorhanden" opal-value-type="boolean" val-fhir-path="Observation.where(code.coding.code = '21907-1').value.coding.code.value"/>
<attribute csv-column="lokalisation_fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').bodySite.coding.code.value"/>
</container>
<container csv-filename="TNM-${TIMESTAMP}.csv" opal-table="tnm" opal-entity-type="TNM">
<attribute csv-column="tnm-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').id" anonym="TNM" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').focus.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_der_tnm_dokumentation_datum_befund" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').effective.value"/>
<attribute csv-column="uicc_stadium" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.code.value"/>
<attribute csv-column="tnm-t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').value.coding.code.value"/>
<attribute csv-column="tnm-n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').value.coding.code.value"/>
<attribute csv-column="tnm-m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="c_p_u_preefix_m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
<attribute csv-column="tnm-y-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '59479-6' or code.coding.code = '59479-6').value.coding.code.value"/>
<attribute csv-column="tnm-r-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21983-2' or code.coding.code = '21983-2').value.coding.code.value"/>
<attribute csv-column="tnm-m-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '42030-7' or code.coding.code = '42030-7').value.coding.code.value"/>
<!--nur bei UICC, nicht in ADT2FHIR--><attribute csv-column="tnm-version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.version.value"/>
</container>
<container csv-filename="System-Therapy-${TIMESTAMP}.csv" opal-table="system-therapy" opal-entity-type="SystemTherapy">
<attribute csv-column="system-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="MedicationStatement.id" anonym="Sys" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="MedicationStatement.reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="MedicationStatement.subject.reference.value" anonym="Pat" />
<attribute csv-column="systemische_therapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
<attribute csv-column="intention_chemotherapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value"/>
<attribute csv-column="therapieart" opal-value-type="text" val-fhir-path="MedicationStatement.category.coding.code.value"/>
<attribute csv-column="systemische_therapie_beginn" opal-value-type="date" val-fhir-path="MedicationStatement.effective.start.value"/>
<attribute csv-column="systemische_therapie_ende" opal-value-type="date" val-fhir-path="MedicationStatement.effective.end.value"/>
<attribute csv-column="systemische_therapie_protokoll" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SystemischeTherapieProtokoll').value.text.value"/>
<attribute csv-column="systemische_therapie_substanzen" opal-value-type="text" val-fhir-path="MedicationStatement.medication.text.value"/>
<attribute csv-column="chemotherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'CH').exists().value" />
<attribute csv-column="hormontherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'HO').exists().value" />
<attribute csv-column="immuntherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'IM').exists().value" />
<attribute csv-column="knochenmarktransplantation" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'KM').exists().value" />
<attribute csv-column="abwartende_strategie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'WS').exists().value" />
</container>
<container csv-filename="Surgery-${TIMESTAMP}.csv" opal-table="surgery" opal-entity-type="Surgery">
<attribute csv-column="surgery-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').id" anonym="Sur" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').subject.reference.value" anonym="Pat" />
<attribute csv-column="ops-code" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').code.coding.code.value"/>
<attribute csv-column="datum_der_op" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'OP').performed.value"/>
<attribute csv-column="intention_op" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-OPIntention').value.coding.code.value"/>
<attribute csv-column="lokale_beurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/LokaleBeurteilungResidualstatusCS').code.value" />
<attribute csv-column="gesamtbeurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/GesamtbeurteilungResidualstatusCS').code.value" />
</container>
<container csv-filename="Radiation-Therapy-${TIMESTAMP}.csv" opal-table="radiation-therapy" opal-entity-type="RadiationTherapy">
<attribute csv-column="radiation-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').id" anonym="Rad" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').reasonReference.reference.value" anonym="Dia"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').subject.reference.value" anonym="Pat" />
<attribute csv-column="strahlentherapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
<attribute csv-column="intention_strahlentherapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value" />
<attribute csv-column="strahlentherapie_beginn" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.start.value"/>
<attribute csv-column="strahlentherapie_ende" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.end.value"/>
</container>
<container csv-filename="Molecular-Marker-${TIMESTAMP}.csv" opal-table="molecular-marker" opal-entity-type="MolecularMarker">
<attribute csv-column="mol-marker-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').id" anonym="Mol" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').focus.reference.value" anonym="Dia" />
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').subject.reference.value" anonym="Pat" />
<attribute csv-column="datum_der_datenerhebung" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '69548-6').effective.value"/>
<attribute csv-column="marker" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').component.value.coding.code.value"/>
<attribute csv-column="status_des_molekularen_markers" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.coding.code.value" />
<attribute csv-column="zusaetzliche_alternative_dokumentation" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.text.value"/>
</container>
<container csv-filename="Sample-${TIMESTAMP}.csv" opal-table="sample" opal-entity-type="Sample">
<attribute csv-column="sample-id" primary-key="true" opal-value-type="text" val-fhir-path="Specimen.id" anonym="Sam" op="EXTRACT_RELATIVE_ID"/>
<attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Specimen.subject.reference.value" anonym="Pat" />
<attribute csv-column="entnahmedatum" opal-value-type="date" val-fhir-path="Specimen.collection.collectedDateTime.value"/>
<attribute csv-column="probenart" opal-value-type="text" val-fhir-path="Specimen.type.coding.code.value"/>
<attribute csv-column="status" opal-value-type="text" val-fhir-path="Specimen.status.code.value"/>
<attribute csv-column="projekt" opal-value-type="text" val-fhir-path="Specimen.identifier.system.value"/>
<!-- @TODO: it is still necessary to clarify whether it would not be better to take the quantity of collection.quantity -->
<attribute csv-column="menge" opal-value-type="integer" val-fhir-path="Specimen.container.specimenQuantity.value.value"/>
<attribute csv-column="einheit" opal-value-type="text" val-fhir-path="Specimen.container.specimenQuantity.unit.value"/>
<attribute csv-column="aliquot" opal-value-type="text" val-fhir-path="Specimen.parent.reference.exists().value" />
</container>
<fhir-rev-include>Observation:patient</fhir-rev-include>
<fhir-rev-include>Condition:patient</fhir-rev-include>
<fhir-rev-include>ClinicalImpression:patient</fhir-rev-include>
<fhir-rev-include>MedicationStatement:patient</fhir-rev-include>
<fhir-rev-include>Procedure:patient</fhir-rev-include>
<fhir-rev-include>Specimen:patient</fhir-rev-include>
</template>

View File

@ -1,44 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_DATASHIELD" == true ]; then
# HACK: This only works because exporter-setup.sh and teiler-setup.sh are sourced after datashield-setup.sh
if [ -z "${ENABLE_EXPORTER}" ] || [ "${ENABLE_EXPORTER}" != "true" ]; then
log WARN "The ENABLE_EXPORTER variable is either not set or not set to 'true'."
fi
OAUTH2_CALLBACK=/oauth2/callback
OAUTH2_PROXY_SECRET="$(echo \"This is a salt string to generate one consistent encryption key for the oauth2_proxy. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 32)"
add_private_oidc_redirect_url "${OAUTH2_CALLBACK}"
log INFO "DataSHIELD setup detected -- will start DataSHIELD services."
OVERRIDE+=" -f ./$PROJECT/modules/datashield-compose.yml"
EXPORTER_OPAL_PASSWORD="$(generate_password \"exporter in Opal\")"
TOKEN_MANAGER_OPAL_PASSWORD="$(generate_password \"Token Manager in Opal\")"
OPAL_DB_PASSWORD="$(echo \"Opal DB\" | generate_simple_password)"
OPAL_ADMIN_PASSWORD="$(generate_password \"admin password for Opal\")"
RSTUDIO_ADMIN_PASSWORD="$(generate_password \"admin password for R-Studio\")"
DATASHIELD_CONNECT_SECRET="$(echo \"DataShield Connect\" | generate_simple_password)"
TOKEN_MANAGER_SECRET="$(echo \"Token Manager\" | generate_simple_password)"
if [ ! -e /tmp/bridgehead/opal-cert.pem ]; then
mkdir -p /tmp/bridgehead/
openssl req -x509 -newkey rsa:4096 -nodes -keyout /tmp/bridgehead/opal-key.pem -out /tmp/bridgehead/opal-cert.pem -days 3650 -subj "/CN=opal/C=DE"
fi
mkdir -p /tmp/bridgehead/opal-map
sites="$(cat ./$PROJECT/modules/datashield-sites.json)"
echo "$sites" | docker_jq -n --args '{"sites": input | map({
"name": .,
"id": .,
"virtualhost": "\(.):443",
"beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
})}' $sites >/tmp/bridgehead/opal-map/central.json
echo "$sites" | docker_jq -n --args '[{
"external": "'"$SITE_ID"':443",
"internal": "opal:8443",
"allowed": input | map("\(.).'"$BROKER_ID"'")
}]' >/tmp/bridgehead/opal-map/local.json
if [ "$USER" == "root" ]; then
chown -R bridgehead:docker /tmp/bridgehead
chmod g+wr /tmp/bridgehead/opal-map/*
chmod g+r /tmp/bridgehead/opal-key.pem
fi
add_private_oidc_redirect_url "/opal/*"
fi

View File

@ -1,15 +0,0 @@
[
"berlin",
"muenchen-lmu",
"dresden",
"freiburg",
"muenchen-tum",
"tuebingen",
"mainz",
"frankfurt",
"essen",
"dktk-datashield-test",
"dktk-test",
"mannheim",
"central-ds-orchestrator"
]

View File

@ -1,28 +0,0 @@
# DataSHIELD
This module constitutes the infrastructure to run DataSHIELD within the bridgehead.
For more information about DataSHIELD, please visit https://www.datashield.org/
## R-Studio
To connect to the different bridgeheads of the CCP through DataSHIELD, you can use your own R-Studio environment.
However, the R-Studio bundled with this module already has the DataSHIELD libraries installed and is integrated within the bridgehead.
This can save you the time needed to configure your own R-Studio environment.
## Opal
This is the core of DataSHIELD. It is made up of Opal, a Postgres database and an R-server.
For more information about Opal, please visit https://opaldoc.obiba.org
### Opal
Opal is OBiBa's core database application for biobanks.
### Opal-DB
Opal requires a database to import the data for DataSHIELD. We use a Postgres instance as this database.
The data is imported within the bridgehead through the exporter.
### Opal-R-Server
R-Server to execute R scripts in DataSHIELD.
## Beam
### Beam-Connect
Beam-Connect is used to route HTTP(S) traffic through Beam so that R-Studio can access data from other bridgeheads that have DataSHIELD enabled.
### Beam-Proxy
The usual beam proxy used for communication.
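For orientation, a minimal sketch of how the module is typically switched on, assuming the usual site configuration file /etc/bridgehead/ccp.conf (the exact restart command may differ on your system):

# /etc/bridgehead/ccp.conf -- datashield-setup.sh only starts these services when this flag is set
ENABLE_DATASHIELD=true
# the DataSHIELD workflow expects the exporter as well (the setup script warns if it is not enabled)
ENABLE_EXPORTER=true
# apply the change, e.g. via the systemd unit
sudo systemctl restart bridgehead@ccp.service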

View File

@ -1,39 +0,0 @@
version: "3.7"
services:
beam-proxy:
environment:
APP_dnpm-connect_KEY: ${DNPM_BEAM_SECRET_SHORT}
dnpm-beam-connect:
depends_on: [ beam-proxy ]
image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
container_name: bridgehead-dnpm-beam-connect
environment:
PROXY_URL: http://beam-proxy:8081
PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT}
APP_ID: dnpm-connect.${PROXY_ID}
DISCOVERY_URL: "./conf/central_targets.json"
LOCAL_TARGETS_FILE: "/conf/connect_targets.json"
HTTP_PROXY: "http://forward_proxy:3128"
HTTPS_PROXY: "http://forward_proxy:3128"
NO_PROXY: beam-proxy,dnpm-backend,host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
RUST_LOG: ${RUST_LOG:-info}
NO_AUTH: "true"
TLS_CA_CERTIFICATES_DIR: ./conf/trusted-ca-certs
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
- /srv/docker/bridgehead/minimal/modules/dnpm-central-targets.json:/conf/central_targets.json:ro
labels:
- "traefik.enable=true"
- "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)"
- "traefik.http.middlewares.dnpm-connect-strip.stripprefix.prefixes=/dnpm-connect"
- "traefik.http.routers.dnpm-connect.middlewares=dnpm-connect-strip"
- "traefik.http.services.dnpm-connect.loadbalancer.server.port=8062"
- "traefik.http.routers.dnpm-connect.tls=true"
dnpm-echo:
image: docker.verbis.dkfz.de/cache/samply/bridgehead-echo:latest
container_name: bridgehead-dnpm-echo

View File

@ -1,99 +0,0 @@
version: "3.7"
services:
dnpm-mysql:
image: mysql:9
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
interval: 3s
timeout: 5s
retries: 5
environment:
MYSQL_ROOT_HOST: "%"
MYSQL_ROOT_PASSWORD: ${DNPM_MYSQL_ROOT_PASSWORD}
volumes:
- /var/cache/bridgehead/dnpm/mysql:/var/lib/mysql
dnpm-authup:
image: authup/authup:latest
container_name: bridgehead-dnpm-authup
volumes:
- /var/cache/bridgehead/dnpm/authup:/usr/src/app/writable
depends_on:
dnpm-mysql:
condition: service_healthy
command: server/core start
environment:
- PUBLIC_URL=https://${HOST}/auth/
- AUTHORIZE_REDIRECT_URL=https://${HOST}
- ROBOT_ADMIN_ENABLED=true
- ROBOT_ADMIN_SECRET=${DNPM_AUTHUP_SECRET}
- ROBOT_ADMIN_SECRET_RESET=true
- DB_TYPE=mysql
- DB_HOST=dnpm-mysql
- DB_USERNAME=root
- DB_PASSWORD=${DNPM_MYSQL_ROOT_PASSWORD}
- DB_DATABASE=auth
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.authup-strip.stripprefix.prefixes=/auth"
- "traefik.http.routers.dnpm-auth.middlewares=authup-strip"
- "traefik.http.routers.dnpm-auth.rule=PathPrefix(`/auth`)"
- "traefik.http.services.dnpm-auth.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-auth.tls=true"
dnpm-portal:
image: ghcr.io/dnpm-dip/portal:latest
container_name: bridgehead-dnpm-portal
environment:
- NUXT_API_URL=http://dnpm-backend:9000/
- NUXT_PUBLIC_API_URL=https://${HOST}/api/
- NUXT_AUTHUP_URL=http://dnpm-authup:3000/
- NUXT_PUBLIC_AUTHUP_URL=https://${HOST}/auth/
labels:
- "traefik.enable=true"
- "traefik.http.routers.dnpm-frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.dnpm-frontend.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-frontend.tls=true"
dnpm-backend:
container_name: bridgehead-dnpm-backend
image: ghcr.io/dnpm-dip/backend:latest
environment:
- LOCAL_SITE=${ZPM_SITE}:${SITE_NAME} # Format: {Site-ID}:{Site-name}, e.g. UKT:Tübingen
- RD_RANDOM_DATA=${DNPM_SYNTH_NUM:--1}
- MTB_RANDOM_DATA=${DNPM_SYNTH_NUM:--1}
- HATEOAS_HOST=https://${HOST}
- CONNECTOR_TYPE=broker
- AUTHUP_URL=robot://system:${DNPM_AUTHUP_SECRET}@http://dnpm-authup:3000
volumes:
- /etc/bridgehead/dnpm/config:/dnpm_config
- /var/cache/bridgehead/dnpm/backend-data:/dnpm_data
depends_on:
dnpm-authup:
condition: service_healthy
labels:
- "traefik.enable=true"
- "traefik.http.services.dnpm-backend.loadbalancer.server.port=9000"
# expose everything
- "traefik.http.routers.dnpm-backend.rule=PathPrefix(`/api`)"
- "traefik.http.routers.dnpm-backend.tls=true"
- "traefik.http.routers.dnpm-backend.service=dnpm-backend"
# except ETL
- "traefik.http.routers.dnpm-backend-etl.rule=PathRegexp(`^/api(/.*)?etl(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-etl.tls=true"
- "traefik.http.routers.dnpm-backend-etl.service=dnpm-backend"
# this needs an ETL processor with support for basic auth
- "traefik.http.routers.dnpm-backend-etl.middlewares=auth"
# except peer-to-peer
- "traefik.http.routers.dnpm-backend-peer.rule=PathRegexp(`^/api(/.*)?/peer2peer(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-peer.tls=true"
- "traefik.http.routers.dnpm-backend-peer.service=dnpm-backend"
- "traefik.http.routers.dnpm-backend-peer.middlewares=dnpm-backend-peer"
# this effectively denies all requests
# this is okay, because requests from peers don't go through Traefik
- "traefik.http.middlewares.dnpm-backend-peer.ipWhiteList.sourceRange=0.0.0.0/32"
landing:
labels:
- "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"

View File

@ -1,16 +0,0 @@
#!/bin/bash
if [ -n "${ENABLE_DNPM_NODE}" ]; then
log INFO "DNPM setup detected -- will start DNPM:DIP node."
OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"
# Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
if [ -z "${ZPM_SITE+x}" ]; then
log ERROR "Mandatory variable ZPM_SITE not defined!"
exit 1
fi
mkdir -p /var/cache/bridgehead/dnpm/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/dnpm/'. Please run sudo './bridgehead install $PROJECT' again to fix the permissions."
DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:--1}
DNPM_MYSQL_ROOT_PASSWORD="$(generate_simple_password 'dnpm mysql')"
DNPM_AUTHUP_SECRET="$(generate_simple_password 'dnpm authup')"
fi

View File

@ -1,15 +0,0 @@
#!/bin/bash -e
if [ -n "${ENABLE_DNPM}" ]; then
log INFO "DNPM setup detected (Beam.Connect) -- will start Beam.Connect for DNPM."
OVERRIDE+=" -f ./$PROJECT/modules/dnpm-compose.yml"
# Set variables required for Beam-Connect
DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
# If the DNPM_NO_PROXY variable is set, prefix it with a comma (as it gets added to a comma separated list)
if [ -n "${DNPM_NO_PROXY}" ]; then
DNPM_ADDITIONAL_NO_PROXY=",${DNPM_NO_PROXY}"
else
DNPM_ADDITIONAL_NO_PROXY=""
fi
fi

View File

@ -1,6 +0,0 @@
# Full Excel Export
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
--header 'x-api-key: ${EXPORTER_API_KEY}'
# QB
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'

View File

@ -1,72 +0,0 @@
version: "3.7"
services:
exporter:
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
container_name: bridgehead-ccp-exporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
CROSS_ORIGINS: "https://${HOST}"
EXPORTER_DB_USER: "exporter"
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
HTTP_RELATIVE_PATH: "/ccp-exporter"
SITE: "${SITE_ID}"
HTTP_SERVLET_REQUEST_SCHEME: "https"
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
labels:
- "traefik.enable=true"
- "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
- "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
- "traefik.http.routers.exporter_ccp.tls=true"
- "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
- "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
volumes:
- "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"
exporter-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-exporter-db
environment:
POSTGRES_USER: "exporter"
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
POSTGRES_DB: "exporter"
volumes:
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
- "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"
reporter:
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
container_name: bridgehead-ccp-reporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
CROSS_ORIGINS: "https://${HOST}"
HTTP_RELATIVE_PATH: "/ccp-reporter"
SITE: "${SITE_ID}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
EXPORTER_URL: "http://exporter:8092"
LOG_FHIR_VALIDATION: "false"
HTTP_SERVLET_REQUEST_SCHEME: "https"
# In this initial development state of the bridgehead, we are trying to use as many volumes as possible.
# However, in the first executions at the CCP sites, this volume has proven to be very important: a report is
# a process that can take several hours, because it depends on the exporter.
# There is a risk that the bridgehead restarts, losing the already created export.
volumes:
- "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
labels:
- "traefik.enable=true"
- "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
- "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
- "traefik.http.routers.reporter_ccp.tls=true"
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
focus:
environment:
EXPORTER_URL: "http://exporter:8092"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_EXPORTER" == true ]; then
log INFO "Exporter setup detected -- will start Exporter service."
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

View File

@ -1,15 +0,0 @@
# Exporter and Reporter
## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead in a set of tables.
It supports different output formats such as CSV, Excel, JSON or XML. It can also export data into Opal.
## Exporter-DB
It is a database that stores queries for execution by the exporter.
The exporter also manages the different executions of the same query through this database.
## Reporter
This component is a plugin of the exporter that allows creating more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy and Thymeleaf.
It is well suited to generating documents such as our traditional CCP quality report.
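A minimal sketch of enabling and calling the exporter, assuming the usual site configuration file /etc/bridgehead/ccp.conf (EXPORTER_API_KEY and EXPORTER_DB_PASSWORD are derived automatically in exporter-setup.sh, so only the flag is needed):

# /etc/bridgehead/ccp.conf
ENABLE_EXPORTER=true
# after a restart, the service is reachable through Traefik under /ccp-exporter, for example:
curl --request POST "https://$HOST/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL" \
     --header "x-api-key: $EXPORTER_API_KEY"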

View File

@ -1,29 +0,0 @@
version: "3.7"
services:
fhir2sql:
depends_on:
- "dashboard-db"
- "blaze"
image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
container_name: bridgehead-ccp-dashboard-fhir2sql
environment:
BLAZE_BASE_URL: "http://bridgehead-ccp-blaze:8080"
PG_HOST: "dashboard-db"
PG_USERNAME: "dashboard"
PG_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
PG_DBNAME: "dashboard"
dashboard-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-dashboard-db
environment:
POSTGRES_USER: "dashboard"
POSTGRES_PASSWORD: "${DASHBOARD_DB_PASSWORD}" # Set in dashboard-setup.sh
POSTGRES_DB: "dashboard"
volumes:
- "/var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data"
focus:
environment:
POSTGRES_CONNECTION_STRING: "postgresql://dashboard:${DASHBOARD_DB_PASSWORD}@dashboard-db/dashboard"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_FHIR2SQL" == true ]; then
log INFO "Dashboard setup detected -- will start Dashboard backend and FHIR2SQL service."
OVERRIDE+=" -f ./$PROJECT/modules/fhir2sql-compose.yml"
DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')"
FOCUS_ENDPOINT_TYPE="blaze-and-sql"
fi

View File

@ -1,36 +0,0 @@
# fhir2sql
fhir2sql connects to Blaze, retrieves data, and syncs it with a PostgreSQL database. The application is designed to run continuously, syncing data at regular intervals.
The Dashboard module is an optional component of the Bridgehead CCP setup. When enabled, it starts two Docker services: **fhir2sql** and **dashboard-db**. Data held in PostgreSQL is only stored temporarily, and Blaze is considered the 'leading system' or 'source of truth'.
## Services
### fhir2sql
* Image: docker.verbis.dkfz.de/cache/samply/fhir2sql:latest
* Container name: bridgehead-ccp-dashboard-fhir2sql
* Depends on: dashboard-db
* Environment variables:
- BLAZE_BASE_URL: The base URL of the Blaze FHIR server (set to http://blaze:8080/fhir/)
- PG_HOST: The hostname of the PostgreSQL database (set to dashboard-db)
- PG_USERNAME: The username for the PostgreSQL database (set to dashboard)
- PG_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
- PG_DBNAME: The name of the PostgreSQL database (set to dashboard)
### dashboard-db
* Image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
* Container name: bridgehead-ccp-dashboard-db
* Environment variables:
- POSTGRES_USER: The username for the PostgreSQL database (set to dashboard)
- POSTGRES_PASSWORD: The password for the PostgreSQL database (set to the value of DASHBOARD_DB_PASSWORD)
- POSTGRES_DB: The name of the PostgreSQL database (set to dashboard)
* Volumes:
- /var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data
The volume used by dashboard-db can be removed safely; it can be restored to working order by re-importing the data from Blaze.
### Environment Variables
* DASHBOARD_DB_PASSWORD: A generated password for the PostgreSQL database, created using a salt string and the SHA1 hash function.
* POSTGRES_TAG: The tag of the PostgreSQL image to use (not set in this module, but required by the dashboard-db service).
### Setup
To enable the Dashboard module, set the ENABLE_FHIR2SQL environment variable to true. The dashboard-setup.sh script will then start the fhir2sql and dashboard-db services, using the environment variables and volumes defined above.
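A minimal sketch of enabling the module, assuming the site configuration lives in `/etc/bridgehead/ccp.conf` and the Bridgehead runs as the systemd unit `bridgehead@ccp`:

```bash
# Enable the Dashboard module (fhir2sql + dashboard-db); dashboard-setup.sh only starts
# the services when ENABLE_FHIR2SQL is "true". Config path and unit name are assumptions.
echo 'ENABLE_FHIR2SQL=true' | sudo tee -a /etc/bridgehead/ccp.conf
sudo systemctl restart bridgehead@ccp
```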

View File

@ -1,12 +1,10 @@
version: "3.7"
services:
id-manager:
image: docker.verbis.dkfz.de/bridgehead/magicpl
container_name: bridgehead-id-manager
environment:
TOMCAT_REVERSEPROXY_FQDN: ${HOST}
TOMCAT_REVERSEPROXY_SSL: "true"
MAGICPL_SITE: ${IDMANAGEMENT_FRIENDLY_ID}
MAGICPL_ALLOWED_ORIGINS: https://${HOST}
MAGICPL_LOCAL_PATIENTLIST_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
@ -14,30 +12,21 @@ services:
MAGICPL_CONNECTOR_APIKEY: ${IDMANAGER_READ_APIKEY}
MAGICPL_CENTRAL_PATIENTLIST_APIKEY: ${IDMANAGER_CENTRAL_PATIENTLIST_APIKEY}
MAGICPL_CONTROLNUMBERGENERATOR_APIKEY: ${IDMANAGER_CONTROLNUMBERGENERATOR_APIKEY}
MAGICPL_OIDC_CLIENT_ID: ${IDMANAGER_AUTH_CLIENT_ID}
MAGICPL_OIDC_CLIENT_SECRET: ${IDMANAGER_AUTH_CLIENT_SECRET}
depends_on:
- patientlist
- traefik-forward-auth
labels:
- "traefik.enable=true"
# Router with Authentication
- "traefik.http.routers.id-manager.rule=PathPrefix(`/id-manager`)"
- "traefik.http.services.id-manager.loadbalancer.server.port=8080"
- "traefik.http.routers.id-manager.tls=true"
- "traefik.http.routers.id-manager.middlewares=traefik-forward-auth-idm"
- "traefik.http.routers.id-manager.service=id-manager-service"
# Router without Authentication
- "traefik.http.routers.id-manager-compatibility.rule=PathPrefix(`/id-manager/paths/translator/getIds`)"
- "traefik.http.routers.id-manager-compatibility.tls=true"
- "traefik.http.routers.id-manager-compatibility.service=id-manager-service"
# Definition of Service
- "traefik.http.services.id-manager-service.loadbalancer.server.port=8080"
- "traefik.http.services.id-manager-service.loadbalancer.server.scheme=http"
patientlist:
image: docker.verbis.dkfz.de/bridgehead/mainzelliste
container_name: bridgehead-patientlist
environment:
- TOMCAT_REVERSEPROXY_FQDN=${HOST}
- TOMCAT_REVERSEPROXY_SSL=true
- ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
- ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
- ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
@ -53,7 +42,7 @@ services:
- patientlist-db
patientlist-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
image: docker.verbis.dkfz.de/cache/postgres:15.1-alpine
container_name: bridgehead-patientlist-db
environment:
POSTGRES_USER: "mainzelliste"
@ -64,49 +53,5 @@ services:
# NOTE: Add backups here. This is only imported if /var/lib/bridgehead/data/patientlist/ is empty!!!
- "/tmp/bridgehead/patientlist/:/docker-entrypoint-initdb.d/"
traefik-forward-auth:
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
environment:
- http_proxy=http://forward_proxy:3128
- https_proxy=http://forward_proxy:3128
- OAUTH2_PROXY_PROVIDER=oidc
- OAUTH2_PROXY_SKIP_PROVIDER_BUTTON=true
- OAUTH2_PROXY_OIDC_ISSUER_URL=https://login.verbis.dkfz.de/realms/master
- OAUTH2_PROXY_CLIENT_ID=bridgehead-${SITE_ID}
- OAUTH2_PROXY_CLIENT_SECRET=${IDMANAGER_AUTH_CLIENT_SECRET}
- OAUTH2_PROXY_COOKIE_SECRET=${IDMANAGER_AUTH_COOKIE_SECRET}
- OAUTH2_PROXY_COOKIE_NAME=_BRIDGEHEAD_oauth2_idm
- OAUTH2_PROXY_COOKIE_DOMAINS=.${HOST}
- OAUTH2_PROXY_HTTP_ADDRESS=:4180
- OAUTH2_PROXY_REVERSE_PROXY=true
- OAUTH2_PROXY_WHITELIST_DOMAINS=.${HOST}
- OAUTH2_PROXY_UPSTREAMS=static://202
- OAUTH2_PROXY_EMAIL_DOMAINS=*
- OAUTH2_PROXY_SCOPE=openid profile email
# Pass Authorization Header and some user information to backend services
- OAUTH2_PROXY_SET_AUTHORIZATION_HEADER=true
- OAUTH2_PROXY_SET_XAUTHREQUEST=true
# Keycloak has an expiration time of 60s therefore oauth2-proxy needs to refresh after that
- OAUTH2_PROXY_COOKIE_REFRESH=60s
- OAUTH2_PROXY_ALLOWED_GROUPS=DKTK-CCP-PPSN
- OAUTH2_PROXY_PROXY_PREFIX=/oauth2-idm
labels:
- "traefik.enable=true"
- "traefik.http.services.traefik-forward-auth.loadbalancer.server.port=4180"
- "traefik.http.routers.traefik-forward-auth.rule=Host(`${HOST}`) && PathPrefix(`/oauth2-idm`)"
- "traefik.http.routers.traefik-forward-auth.tls=true"
- "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.address=http://traefik-forward-auth:4180"
- "traefik.http.middlewares.traefik-forward-auth-idm.forwardauth.authResponseHeaders=Authorization"
depends_on:
forward_proxy:
condition: service_healthy
ccp-patient-project-identificator:
image: docker.verbis.dkfz.de/cache/samply/ccp-patient-project-identificator
container_name: bridgehead-ccp-patient-project-identificator
environment:
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SITE_NAME: ${IDMANAGEMENT_FRIENDLY_ID}
volumes:
patientlist-db-data:

View File

@ -1,12 +1,12 @@
#!/bin/bash -e
#!/bin/bash
function idManagementSetup() {
if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log INFO "id-management setup detected -- will start id-management (mainzelliste & magicpl)."
OVERRIDE+=" -f ./ccp/modules/id-management-compose.yml"
OVERRIDE+=" -f ./$PROJECT/modules/id-management-compose.yml"
# Auto Generate local Passwords
PATIENTLIST_POSTGRES_PASSWORD="$(echo \"id-management-module-db-password-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
PATIENTLIST_POSTGRES_PASSWORD="$(echo \"id-management-module-db-password-salt\" | openssl rsautl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
IDMANAGER_LOCAL_PATIENTLIST_APIKEY="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
# Transform Seeds Configuration to pass it to the Mainzelliste Container
@ -39,7 +39,6 @@ function applySpecialCases() {
result="$1";
result="${result/Lmu/LMU}";
result="${result/Tum/TUM}";
result="${result/Dktk Test/Teststandort}";
echo "$result";
}

View File

@ -1,12 +0,0 @@
#!/bin/bash -e
function mtbaSetup() {
if [ -n "$ENABLE_MTBA" ];then
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
fi
OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
add_private_oidc_redirect_url "/mtba/*"
fi
}

View File

@ -1,6 +0,0 @@
# Molecular Tumor Board Alliance (MTBA)
In this module, the genetic data to be imported is placed in a directory (/tmp/bridgehead/mtba/input). A process
regularly checks whether there are files in this directory. The files are pseudonymized once the IDAT is provided,
combined with the clinical data from Blaze and imported into cBioPortal. The files are also imported into Blaze
itself.
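A minimal sketch of supplying data to the module (file names are purely illustrative):

```bash
# Place a genetic data file and the accompanying IDAT CSV into the watched input directory;
# the MTBA service picks them up on its next check of the directory.
sudo mkdir -p /tmp/bridgehead/mtba/input
sudo cp mutations.tsv patients_idat.csv /tmp/bridgehead/mtba/input/
```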

View File

@ -1,6 +0,0 @@
#!/bin/bash -e
if [ -n "$NNGM_CTS_APIKEY" ]; then
log INFO "nNGM setup detected -- will start nNGM Connector."
OVERRIDE+=" -f ./$PROJECT/modules/nngm-compose.yml"
fi

View File

@ -1,19 +0,0 @@
version: "3.7"
services:
obds2fhir-rest:
container_name: bridgehead-obds2fhir-rest
image: docker.verbis.dkfz.de/samply/obds2fhir-rest:main
environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
labels:
- "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
- "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
- "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
- "traefik.http.routers.obds2fhir-rest.tls=true"
- "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"

View File

@ -1,13 +0,0 @@
#!/bin/bash
function obds2fhirRestSetup() {
if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
PATIENTLIST_URL=" "
fi
OVERRIDE+=" -f ./ccp/modules/obds2fhir-rest-compose.yml"
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
fi
}

View File

@ -1,73 +0,0 @@
version: "3.7"
services:
teiler-orchestrator:
image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
container_name: bridgehead-teiler-orchestrator
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
- "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
- "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
- "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
- "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
environment:
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
HTTP_RELATIVE_PATH: "/ccp-teiler"
teiler-dashboard:
image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
container_name: bridgehead-teiler-dashboard
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
- "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
- "traefik.http.routers.teiler_dashboard_ccp.tls=true"
- "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
- "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
OIDC_URL: "${OIDC_URL}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"
teiler-backend:
image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
container_name: bridgehead-teiler-backend
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
- "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
- "traefik.http.routers.teiler_backend_ccp.tls=true"
- "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
- "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
environment:
LOG_LEVEL: "INFO"
APPLICATION_PORT: "8085"
APPLICATION_ADDRESS: "${HOST}"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
IDMANAGER_UPLOAD_APIKEY: "${IDMANAGER_UPLOAD_APIKEY}" # Only used to check if the ID Manager is active

View File

@ -1,9 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_TEILER" == true ];then
log INFO "Teiler setup detected -- will start Teiler services."
OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
TEILER_DEFAULT_LANGUAGE=DE
TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
add_public_oidc_redirect_url "/ccp-teiler/*"
fi

View File

@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single page application.
## Teiler Orchestrator
Single-SPA component consisting of the root HTML page of the single-page application and JavaScript code that
fetches the information about the microfrontends from the Teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run standalone, but they need a Single-SPA extension (https://single-spa.js.org/docs/ecosystem) to be integrated.
Three templates (Angular, Vue, React) are also available that can be extended and used directly in the Teiler.
## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The user and password are configured in ccp.local.conf.
## Teiler Backend
In this component, the microfrontends are configured.
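A minimal sketch of enabling the module, assuming the site configuration lives in `/etc/bridgehead/ccp.conf` and the Bridgehead runs as the systemd unit `bridgehead@ccp`:

```bash
# Enable the Teiler services; teiler-setup.sh only starts them when ENABLE_TEILER is "true".
echo 'ENABLE_TEILER=true' | sudo tee -a /etc/bridgehead/ccp.conf
sudo systemctl restart bridgehead@ccp
# The single page application should then answer under the /ccp-teiler prefix:
curl -k -I "https://$(hostname -f)/ccp-teiler/"   # host name resolution is illustrative
```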

View File

@ -11,29 +11,22 @@ services:
ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
ID_MANAGER_URL: http://id-manager:8080/id-manager
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER:-FIRST_NAME}
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER:-LAST_NAME}
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER:-GENDER}
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER:-BIRTHDAY}
PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER}
PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER}
PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER}
PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER}
CBIOPORTAL_URL: http://cbioportal:8080
FILE_CHARSET: ${MTBA_FILE_CHARSET:-UTF-8}
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE:-LF}
CSV_DELIMITER: ${MTBA_CSV_DELIMITER:-TAB}
HTTP_RELATIVE_PATH: "/mtba"
OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
OIDC_URL: "${OIDC_URL}"
FILE_CHARSET: ${MTBA_FILE_CHARSET}
FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE}
CSV_DELIMITER: ${MTBA_CSV_DELIMITER}
labels:
- "traefik.enable=true"
- "traefik.http.routers.mtba_ccp.rule=PathPrefix(`/mtba`)"
- "traefik.http.services.mtba_ccp.loadbalancer.server.port=8480"
- "traefik.http.routers.mtba_ccp.tls=true"
- "traefik.http.routers.mtba.rule=PathPrefix(`/`)"
- "traefik.http.services.mtba.loadbalancer.server.port=80"
- "traefik.http.routers.mtba.tls=true"
volumes:
- /var/cache/bridgehead/ccp/mtba/input:/app/input
- /var/cache/bridgehead/ccp/mtba/persist:/app/persist
- /tmp/bridgehead/mtba/input:/app/input
- /tmp/bridgehead/mtba/persist:/app/persist
# TODO: Include CBioPortal in Deployment ...
# NOTE: CBioPortal can't load data while the system is running, so the Bridgehead needs to be restarted after importing data!

View File

@ -1,5 +1,4 @@
version: "3.7"
volumes:
nngm-rest:
@ -12,15 +11,14 @@ services:
CTS_API_KEY: ${NNGM_CTS_APIKEY}
CRYPT_KEY: ${NNGM_CRYPTKEY}
#CTS_MAGICPL_SITE: ${SITE_ID}TODO
restart: always
labels:
- "traefik.enable=true"
- "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)"
- "traefik.http.middlewares.connector_strip.stripprefix.prefixes=/nngm-connector"
- "traefik.http.services.connector.loadbalancer.server.port=8080"
- "traefik.http.routers.connector.tls=true"
- "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
- "traefik.http.routers.connector.middlewares=connector_strip,auth"
volumes:
- nngm-rest:/var/log
traefik:
labels:
- "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"

24
ccp/nngm-setup.sh Normal file
View File

@ -0,0 +1,24 @@
#!/bin/bash
##nNGM vars:
#NNGM_MAGICPL_APIKEY
#NNGM_CTS_APIKEY
#NNGM_CRYPTKEY
function nngmSetup() {
if [ -n "$NNGM_CTS_APIKEY" ]; then
log INFO "nNGM setup detected -- will start nNGM Connector."
OVERRIDE+=" -f ./$PROJECT/nngm-compose.yml"
fi
}
function mtbaSetup() {
# TODO: Check if ID-Management Module is activated!
if [ -n "$ENABLE_MTBA" ];then
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Detected MTBA Module configuration but ID-Management Module seems not to be configured!"
exit 1;
fi
OVERRIDE+=" -f ./$PROJECT/mtba-compose.yml"
fi
}

View File

@ -1,3 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCgpjb250ZXh0IFBhdGllbnQKCgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX1BSSU1BUllfRElBR05PU0lTX05PX1NPUlRfU1RSQVRJRklFUgpES1RLX1NUUkFUX0FHRV9DTEFTU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfUkVQTEFDRV9TUEVDSU1FTl9TVFJBVElGSUVSaWYgSW5Jbml0aWFsUG9wdWxhdGlvbiB0aGVuIFtTcGVjaW1lbl0gZWxzZSB7fSBhcyBMaXN0PFNwZWNpbWVuPgpES1RLX1NUUkFUX1BST0NFRFVSRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX01FRElDQVRJT05fU1RSQVRJRklFUgoKICBES1RLX1JFUExBQ0VfSElTVE9MT0dZX1NUUkFUSUZJRVIKIGlmIGhpc3RvLmNvZGUuY29kaW5nLndoZXJlKGNvZGUgPSAnNTk4NDctNCcpLmNvZGUuZmlyc3QoKSBpcyBudWxsIHRoZW4gMCBlbHNlIDEKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU=
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCmNvZGVzeXN0ZW0gaWNkMTA6ICdodHRwOi8vZmhpci5kZS9Db2RlU3lzdGVtL2JmYXJtL2ljZC0xMC1nbScKY29kZXN5c3RlbSBtb3JwaDogJ3VybjpvaWQ6Mi4xNi44NDAuMS4xMTM4ODMuNi40My4xJwoKY29udGV4dCBQYXRpZW50CgoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUklNQVJZX0RJQUdOT1NJU19OT19TT1JUX1NUUkFUSUZJRVIKREtUS19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1JFUExBQ0VfU1BFQ0lNRU5fU1RSQVRJRklFUmlmIEluSW5pdGlhbFBvcHVsYXRpb24gdGhlbiBbU3BlY2ltZW5dIGVsc2Uge30gYXMgTGlzdDxTcGVjaW1lbj4KREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCiAgREtUS19SRVBMQUNFX0hJU1RPTE9HWV9TVFJBVElGSUVSCiBpZiBoaXN0by5jb2RlLmNvZGluZy53aGVyZShjb2RlID0gJzU5ODQ3LTQnKS5jb2RlLmZpcnN0KCkgaXMgbnVsbCB0aGVuIDAgZWxzZSAxCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTihleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzYxJyBmcm9tIGljZDEwXSkgYW5kIAooKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQwLzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQ3LzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NDgwLzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NTAwLzMnKSk=
ORGANOID_DASHBOARD_PUBLIC

View File

@ -1,20 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUN7yzueIZzwpe8PaPEIMY8zoH+eMwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwNTIzMTAxNzIzWhcNMzMw
NTIwMTAxNzUzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAN5JAj+HydSGaxvA0AOcrXVTZ9FfsH0cMVBlQb72
bGZgrRvkqtB011TNXZfsHl7rPxCY61DcsDJfFq3+8VHT+S9HE0qV1bEwP+oA3xc4
Opq77av77cNNOqDC7h+jyPhHcUaE33iddmrH9Zn2ofWTSkKHHu3PAe5udCrc2QnD
4PLRF6gqiEY1mcGknJrXj1ff/X0nRY/m6cnHNXz0Cvh8oPOtbdfGgfZjID2/fJNP
fNoNKqN+5oJAZ+ZZ9id9rBvKj1ivW3F2EoGjZF268SgZzc5QrM/D1OpSBQf5SF/V
qUPcQTgt9ry3YR+SZYazLkfKMEOWEa0WsqJVgXdQ6FyergcCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEa70kcseqU5
bHx2zSt4bG21HokhMB8GA1UdIwQYMBaAFEa70kcseqU5bHx2zSt4bG21HokhMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCGmE7NXW4T
6J4mV3b132cGEMD7grx5JeiXK5EHMlswUS+Odz0NcBNzhUHdG4WVMbrilHbI5Ua+
6jdKx5WwnqzjQvElP0MCw6sH/35gbokWgk1provOP99WOFRsQs+9Sm8M2XtMf9HZ
m3wABwU/O+dhZZ1OT1PjSZD0OKWKqH/KvlsoF5R6P888KpeYFiIWiUNS5z21Jm8A
ZcllJjiRJ60EmDwSUOQVJJSMOvtr6xTZDZLtAKSN8zN08lsNGzyrFwqjDwU0WTqp
scMXEGBsWQjlvxqDnXyljepR0oqRIjOvgrWaIgbxcnu98tK/OdBGwlAPKNUW7Crr
vO+eHxl9iqd4
MIIDNTCCAh2gAwIBAgIUMeGRSrNPhRdQ1tU7uK5+lUa4f38wDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjIwOTI5MTQxMjU1WhcNMzIw
OTI2MTQxMzI1WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMYyroOUeb27mYzClOrjCmgIceLalsFA0aVCh5mZ
KtP8+1U3oq/7exP30gXiJojxW7xoerfyQY9s0Sz5YYbxYbuskFOYEtyAILB/pxgd
+k+J3tlZKolpfmo7WT5tZiHxH/zjrtAYGnuB2xPHRMCWh/tHYrELgXQuilNol24y
GBa1plTlARy0aKEDUHp87WLhD2qH7B8sFlLgo0+gunE1UtR2HMSPF45w3VXszyG6
fJNrAj0yPnKy3Dm1BMO3jDO2e0A9lCQ71a4j4TeKePfCk1xCArSu6PpiwiacKplF
c6CRR6KrWVm2g+8Y2hFcOBG/Py2xusm3PWbpylGq6vtFRkkCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEFxD6BQwQO5
xsJ+3cvZypsnh6dDMB8GA1UdIwQYMBaAFEFxD6BQwQO5xsJ+3cvZypsnh6dDMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQB5zTeIhV/3
3Am6O144EFtnIeaZ2w0D6aEHqHAZp50vJv3+uQfOliCOzgw7VDxI4Zz2JALjlR/i
uOYHsu3YIRMIOmPOjqrdDJa6auB0ufL4oUPfCRln7Fh0f3JVlz3BUoHsSDt949p4
g0nnsciL2JHuzlqjn7Jyt3L7dAHrlFKulCcuidG5D3cqXrRCbF83f+k3TC/HRiNd
25oMi7I4MP/SOCdfQGUGIsHIf/0hSm3pNjDOrC/XuI/8gh2f5io+Y8V+hMwMBcm4
JbH8bdyBB+EIhsNbTwf2MWntD5bmg47sf7hh23aNvKXI67Li1pTI2t1CqiGnFR0U
fCEpeaEAHs0k
-----END CERTIFICATE-----

View File

@ -1,35 +1,19 @@
BROKER_ID=broker.ccp-it.dktk.dkfz.de
BROKER_ID=broker.dev.ccp-it.dktk.dkfz.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SPOT_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
SPOT_BEAM_SECRET_LONG="ApiKey spot.${PROXY_ID} ${SPOT_BEAM_SECRET_SHORT}"
REPORTHUB_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
REPORTHUB_BEAM_SECRET_LONG="ApiKey report-hub.${PROXY_ID} ${REPORTHUB_BEAM_SECRET_SHORT}"
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"
OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
OIDC_URL="https://login.verbis.dkfz.de/realms/test-realm-01"
OIDC_GROUP_CLAIM="groups"
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
# This will load id-management setup. Effective only if id-management configuration is defined.
source $PROJECT/modules/id-management-setup.sh
idManagementSetup
# This will load nngm setup. Effective only if nngm configuration is defined.
source $PROJECT/nngm-setup.sh
nngmSetup
source $PROJECT/exliquid-setup.sh
exliquidSetup
mtbaSetup
obds2fhirRestSetup
blazeSecondarySetup
for module in modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
transfairSetup

View File

@ -1,66 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-dhki-blaze
environment:
BASE_URL: "http://bridgehead-dhki-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_dhki.rule=PathPrefix(`/dhki-localdatamanagement`)"
- "traefik.http.middlewares.dhki_b_strip.stripprefix.prefixes=/dhki-localdatamanagement"
- "traefik.http.services.blaze_dhki.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_dhki.middlewares=dhki_b_strip,auth"
- "traefik.http.routers.blaze_dhki.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-dhki-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
volumes:
- /srv/docker/bridgehead/dhki/queries_to_cache.conf:/queries_to_cache.conf:ro
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/dhki/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0FHRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREhLSV9TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfRU5DT1VOVEVSX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU=
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIGljZDEwOiAnaHR0cDovL2ZoaXIuZGUvQ29kZVN5c3RlbS9iZmFybS9pY2QtMTAtZ20nCmNvZGVzeXN0ZW0gbW9ycGg6ICd1cm46b2lkOjIuMTYuODQwLjEuMTEzODgzLjYuNDMuMScKCmNvbnRleHQgUGF0aWVudAoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9BR0VfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpESEtJX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRIS0lfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCgpESEtJX1NUUkFUX0VOQ09VTlRFUl9TVFJBVElGSUVSCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgooKChleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzM0LjknIGZyb20gaWNkMTBdKSBvcgooZXhpc3RzIFtDb25kaXRpb246IENvZGUgJ0MzNC44JyBmcm9tIGljZDEwXSkgb3IKKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDMzQuMCcgZnJvbSBpY2QxMF0pIG9yCihleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzM0LjInIGZyb20gaWNkMTBdKSBvcgooZXhpc3RzIFtDb25kaXRpb246IENvZGUgJ0MzNC4xJyBmcm9tIGljZDEwXSkgb3IKKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDMzQuMycgZnJvbSBpY2QxMF0pKSBhbmQKKChleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQxLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDMvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0Ny8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MjUwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgyNTEvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODI1Mi8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MjUzLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgyNTUvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODI2MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MzEwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgzMzMvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODQ3MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NDgwLzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0OTAvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODU1MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5
jb2RlIGNvbnRhaW5zICc4MDUyLzMnKSkp

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUSWUPebUMNfJvPKMjdgX+WiH+OXgwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTA1MDg1NTM4WhcNMzQw
MTAyMDg1NjA4WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL/nvo9Bn1/6Z/K4BKoLM6/mVziM4cmXTVx4npVz
pnptwPPFU4rz47akRZ6ZMD5MO0bsyvaxG1nwVrW3aAGC42JIGTdZHKwMKrd35sxw
k3YlGJagGUs+bKHUCL55OcSmyDWlh/UhA8+eeJWjOt9u0nYXv+vi+N4JSHA0oC9D
bTF1v+7blrTQagf7PTPSF3pe22iXOjJYdOkZMWoMoNAjn6F958fkLNLY3csOZwvP
/3eyNNawyAEPWeIm33Zk630NS8YHggz6WCqwXvuaKb6910mRP8jgauaYsqgsOyDt
pbWuvk//aZWdGeN9RNsAA8eGppygiwm/m9eRC6I0shDwv6ECAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFn/dbW1J3ry
7TBzbKo3H4vJr2MiMB8GA1UdIwQYMBaAFFn/dbW1J3ry7TBzbKo3H4vJr2MiMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCa2V8B8aad
XNDS1EUIi9oMdvGvkolcdFwx9fI++qu9xSIaZs5GETHck3oYKZF0CFP5ESnKDn5w
enWgm5M0y+hVZppzB163WmET1efBXwrdyn8j4336NjX352h63JGWCaI2CfZ1qG1p
kf5W9CVXllSFaJe5r994ovgyHvK2ucWwe8l8iMJbQhH79oKi/9uJMCD6aUXnpg1K
nPHW1lsVx6foqYWijdBdtFU2i7LSH2OYo0nb1PgRnY/SABV63JHfJnqW9dZy4f7G
rpsvvrmFrKmEnCZH0n6qveY3Z5bMD94Yx0ebkCTYEqAw3pV65gwxrzBTpEg6dgF0
eG0eKFUS0REJ
-----END CERTIFICATE-----

View File

@ -1,26 +0,0 @@
BROKER_ID=broker.hector.dkfz.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=support-ccp@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in ccp/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
idManagementSetup
obds2fhirRestSetup
for module in modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
transfairSetup

View File

@ -1,42 +0,0 @@
## How to Change Config Access Token
### 1. Generate a New Access Token
1. Go to your Git configuration repository provider; this is either [git.verbis.dkfz.de](https://git.verbis.dkfz.de) or [gitlab.bbmri-eric.eu](https://gitlab.bbmri-eric.eu).
2. Navigate to the configuration repository for your site.
3. Go to **Settings → Access Tokens** to check if your Access Token is valid or expired.
- **If expired**, create a new Access Token.
4. Configure the new Access Token with the following settings:
- **Expiration date**: One year from today, minus one day.
- **Role**: Developer.
- **Scope**: Only `read_repository`.
5. Save the newly generated Access Token in a secure location.
---
### 2. Replace the Old Access Token
1. Navigate to `/etc/bridgehead` in your system.
2. Run the following command to retrieve the current Git remote URL:
```bash
git remote get-url origin
```
Example output:
```
https://name40dkfz-heidelberg.de:<old_access_token>@git.verbis.dkfz.de/bbmri-bridgehead-configs/test.git
```
3. Replace `<old_access_token>` with your new Access Token in the URL.
4. Set the updated URL using the following command:
```bash
git remote set-url origin https://name40dkfz-heidelberg.de:<new_access_token>@git.verbis.dkfz.de/bbmri-bridgehead-configs/test.git
```
5. Start the Bridgehead update service by running:
```bash
systemctl start bridgehead-update@<project>
```
6. View the output to ensure the update process is successful:
```bash
journalctl -u bridgehead-update@<project> -f
```

View File

@ -1,68 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-itcc-blaze
environment:
BASE_URL: "http://bridgehead-itcc-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_itcc.rule=PathPrefix(`/itcc-localdatamanagement`)"
- "traefik.http.middlewares.itcc_b_strip.stripprefix.prefixes=/itcc-localdatamanagement"
- "traefik.http.services.blaze_itcc.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_itcc.middlewares=itcc_b_strip,auth"
- "traefik.http.routers.blaze_itcc.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-itcc-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/itcc/queries_to_cache.conf:/queries_to_cache.conf:ro
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/itcc/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,33 +0,0 @@
version: "3.7"
services:
landing:
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "dktk_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CkRLVEtfU1RSQVRfR0VOREVSX1NUUkFUSUZJRVIKICBES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCiAgSVRDQ19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgogIERLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgp0cnVl
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIG1vbGVjdWxhck1hcmtlcjogJ2h0dHA6Ly93d3cuZ2VuZW5hbWVzLm9yZycKCmNvbnRleHQgUGF0aWVudApES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCiAgREtUS19TVFJBVF9ESUFHTk9TSVNfU1RSQVRJRklFUgogIElUQ0NfU1RSQVRfQUdFX0NMQVNTX1NUUkFUSUZJRVIKICBES1RLX1NUUkFUX0RFRl9JTl9JTklUSUFMX1BPUFVMQVRJT04KKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNjk1NDgtNicgZnJvbSBsb2luY10gTwp3aGVyZSBPLmNvbXBvbmVudC53aGVyZShjb2RlLmNvZGluZyBjb250YWlucyBDb2RlICc0ODAxOC02JyBmcm9tIGxvaW5jKS52YWx1ZS5jb2RpbmcgY29udGFpbnMgQ29kZSAnQlJBRicgZnJvbSBtb2xlY3VsYXJNYXJrZXIp

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=arturo.macias@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done

View File

@ -1,67 +0,0 @@
version: "3.7"
services:
landing:
deploy:
replicas: 0 #deactivate landing page
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-kr-blaze
environment:
BASE_URL: "http://bridgehead-kr-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_kr.rule=PathPrefix(`/kr-localdatamanagement`)"
- "traefik.http.middlewares.kr_b_strip.stripprefix.prefixes=/kr-localdatamanagement"
- "traefik.http.services.blaze_kr.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_kr.middlewares=kr_b_strip,auth"
- "traefik.http.routers.blaze_kr.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-kr-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/kr/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,6 +0,0 @@
# Full Excel Export
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
--header 'x-api-key: ${EXPORT_API_KEY}'
# QB
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'

View File

@ -1,67 +0,0 @@
version: "3.7"
services:
exporter:
image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
container_name: bridgehead-ccp-exporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
CROSS_ORIGINS: "https://${HOST}"
EXPORTER_DB_USER: "exporter"
EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
HTTP_RELATIVE_PATH: "/ccp-exporter"
SITE: "${SITE_ID}"
HTTP_SERVLET_REQUEST_SCHEME: "https"
OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
labels:
- "traefik.enable=true"
- "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
- "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
- "traefik.http.routers.exporter_ccp.tls=true"
- "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
- "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
volumes:
- "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"
exporter-db:
image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
container_name: bridgehead-ccp-exporter-db
environment:
POSTGRES_USER: "exporter"
POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
POSTGRES_DB: "exporter"
volumes:
# Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
- "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"
reporter:
image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
container_name: bridgehead-ccp-reporter
environment:
JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
LOG_LEVEL: "INFO"
CROSS_ORIGINS: "https://${HOST}"
HTTP_RELATIVE_PATH: "/ccp-reporter"
SITE: "${SITE_ID}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
EXPORTER_URL: "http://exporter:8092"
LOG_FHIR_VALIDATION: "false"
HTTP_SERVLET_REQUEST_SCHEME: "https"
# In this initial development state of the bridgehead, we are trying to use as few volumes as possible.
# However, in the first executions at the CCP sites, this volume has proven to be very important: a report
# is a process that can take several hours, because it depends on the exporter, and there is a risk that
# the bridgehead restarts, losing the already created export.
volumes:
- "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
labels:
- "traefik.enable=true"
- "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
- "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
- "traefik.http.routers.reporter_ccp.tls=true"
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"

View File

@ -1,8 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_EXPORTER" == true ]; then
log INFO "Exporter setup detected -- will start Exporter service."
OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi

View File

@ -1,15 +0,0 @@
# Exporter and Reporter
## Exporter
The exporter is a REST API that exports the data from the Bridgehead's different databases as a set of tables.
It accepts different output formats such as CSV, Excel, JSON or XML, and can also export data into Opal.
## Exporter-DB
It is a database that stores queries for execution by the exporter.
The exporter also manages the different executions of the same query through this database.
## Reporter
This component is a plugin of the exporter that creates more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy, Thymeleaf, ...
It is well suited to generating documents such as our traditional CCP quality report.
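For illustration, a quality report could be generated via the reporter like this (the host name is a placeholder; the template id mirrors the curl example shipped elsewhere in this repository):

```bash
# Trigger generation of a quality report from the "ccp" template.
# Report creation can take several hours because it depends on the exporter.
curl --location --request POST \
  'https://bridgehead.example.org/ccp-reporter/generate?template-id=ccp'
```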

View File

@ -1,35 +0,0 @@
version: "3.7"
services:
landing:
deploy:
replicas: 1 #reactivate if lens is in use
container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=80"
- "traefik.http.routers.landing.tls=true"
spot:
image: docker.verbis.dkfz.de/ccp-private/central-spot
environment:
BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
BEAM_URL: http://beam-proxy:8081
BEAM_PROXY_ID: ${SITE_ID}
BEAM_BROKER_ID: ${BROKER_ID}
BEAM_APP_ID: "focus"
PROJECT_METADATA: "kr_supervisors"
depends_on:
- "beam-proxy"
labels:
- "traefik.enable=true"
- "traefik.http.services.spot.loadbalancer.server.port=8080"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
- "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
- "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
- "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
- "traefik.http.routers.spot.tls=true"
- "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,19 +0,0 @@
version: "3.7"
services:
obds2fhir-rest:
container_name: bridgehead-obds2fhir-rest
image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
labels:
- "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"
- "traefik.http.middlewares.obds2fhir-rest_strip.stripprefix.prefixes=/obds2fhir-rest,/adt2fhir-rest"
- "traefik.http.services.obds2fhir-rest.loadbalancer.server.port=8080"
- "traefik.http.routers.obds2fhir-rest.tls=true"
- "traefik.http.routers.obds2fhir-rest.middlewares=obds2fhir-rest_strip,auth"

View File

@ -1,13 +0,0 @@
#!/bin/bash
function obds2fhirRestSetup() {
if [ -n "$ENABLE_OBDS2FHIR_REST" ]; then
log INFO "oBDS2FHIR-REST setup detected -- will start obds2fhir-rest module."
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
PATIENTLIST_URL=" "
fi
OVERRIDE+=" -f ./$PROJECT/modules/obds2fhir-rest-compose.yml"
LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
fi
}

View File

@ -1,79 +0,0 @@
version: "3.7"
services:
teiler-orchestrator:
image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
container_name: bridgehead-teiler-orchestrator
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
- "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
- "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
- "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
- "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
environment:
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
HTTP_RELATIVE_PATH: "/ccp-teiler"
teiler-dashboard:
image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
container_name: bridgehead-teiler-dashboard
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
- "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
- "traefik.http.routers.teiler_dashboard_ccp.tls=true"
- "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
- "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
OIDC_URL: "${OIDC_URL}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"
teiler-backend:
image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
container_name: bridgehead-teiler-backend
labels:
- "traefik.enable=true"
- "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
- "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
- "traefik.http.routers.teiler_backend_ccp.tls=true"
- "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
- "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
environment:
LOG_LEVEL: "INFO"
APPLICATION_PORT: "8085"
APPLICATION_ADDRESS: "${HOST}"
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
secrets:
- ccp.conf
secrets:
ccp.conf:
file: /etc/bridgehead/ccp.conf

View File

@ -1,9 +0,0 @@
#!/bin/bash -e
if [ "$ENABLE_TEILER" == true ];then
log INFO "Teiler setup detected -- will start Teiler services."
OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
TEILER_DEFAULT_LANGUAGE=DE
TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
add_public_oidc_redirect_url "/ccp-teiler/*"
fi

View File

@ -1,19 +0,0 @@
# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single page application.
## Teiler Orchestrator
Single-SPA component consisting of the root HTML page of the single-page application and JavaScript code that
fetches the information about the microfrontends from the Teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
These microfrontends can also run standalone, but they need a Single-SPA extension (https://single-spa.js.org/docs/ecosystem) to be integrated.
Three templates (Angular, Vue, React) are also available that can be extended and used directly in the Teiler.
## Teiler Dashboard
It consists of the main dashboard and a set of embedded services.
### Login
The user and password are configured in ccp.local.conf.
## Teiler Backend
In this component, the microfrontends are configured.

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

16
kr/vars
View File

@ -1,16 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=arturo.macias@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
obds2fhirRestSetup

View File

@ -9,31 +9,12 @@ detectCompose() {
fi
}
setupProxy() {
### Note: As the current data protection concepts do not allow communication via HTTP,
### we are not setting a proxy for HTTP requests.
local http="no"
local https="no"
if [ $HTTPS_PROXY_URL ]; then
local proto="$(echo $HTTPS_PROXY_URL | grep :// | sed -e 's,^\(.*://\).*,\1,g')"
local fqdn="$(echo ${HTTPS_PROXY_URL/$proto/})"
local hostport=$(echo $HTTPS_PROXY_URL | sed -e "s,$proto,,g" | cut -d/ -f1)
HTTPS_PROXY_HOST="$(echo $hostport | sed -e 's,:.*,,g')"
HTTPS_PROXY_PORT="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')"
if [[ ! -z "$HTTPS_PROXY_USERNAME" && ! -z "$HTTPS_PROXY_PASSWORD" ]]; then
local proto="$(echo $HTTPS_PROXY_URL | grep :// | sed -e 's,^\(.*://\).*,\1,g')"
local fqdn="$(echo ${HTTPS_PROXY_URL/$proto/})"
HTTPS_PROXY_FULL_URL="$(echo $proto$HTTPS_PROXY_USERNAME:$HTTPS_PROXY_PASSWORD@$fqdn)"
https="authenticated"
else
HTTPS_PROXY_FULL_URL=$HTTPS_PROXY_URL
https="unauthenticated"
fi
getLdmPassword() {
if [ -n "$LDM_PASSWORD" ]; then
docker run --rm docker.verbis.dkfz.de/cache/httpd:alpine htpasswd -nb $PROJECT $LDM_PASSWORD | tr -d '\n' | tr -d '\r'
else
echo -n ""
fi
log INFO "Configuring proxy servers: $http http proxy (we're not supporting unencrypted comms), $https https proxy"
export HTTPS_PROXY_HOST HTTPS_PROXY_PORT HTTPS_PROXY_FULL_URL
}
exitIfNotRoot() {
@ -53,8 +34,8 @@ checkOwner(){
}
printUsage() {
echo "Usage: bridgehead start|stop|logs|docker-logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
echo "PROJECTNAME should be one of ccp|bbmri|cce|itcc|kr|dhki"
echo "Usage: bridgehead start|stop|is-running|update|install|uninstall|enroll PROJECTNAME"
echo "PROJECTNAME should be one of ccp|bbmri"
}
checkRequirements() {
@ -76,7 +57,7 @@ fetchVarsFromVault() {
set +e
PASS=$(BW_MASTERPASS="$BW_MASTERPASS" BW_CLIENTID="$BW_CLIENTID" BW_CLIENTSECRET="$BW_CLIENTSECRET" docker run --rm -e BW_MASTERPASS -e BW_CLIENTID -e BW_CLIENTSECRET -e http_proxy docker.verbis.dkfz.de/cache/samply/bridgehead-vaultfetcher:latest $@)
PASS=$(BW_MASTERPASS="$BW_MASTERPASS" BW_CLIENTID="$BW_CLIENTID" BW_CLIENTSECRET="$BW_CLIENTSECRET" docker run --rm -e BW_MASTERPASS -e BW_CLIENTID -e BW_CLIENTSECRET -e http_proxy samply/bridgehead-vaultfetcher $@)
RET=$?
if [ $RET -ne 0 ]; then
@ -116,7 +97,7 @@ assertVarsNotEmpty() {
MISSING_VARS=""
for VAR in $@; do
if [ -z "${!VAR}" ]; then
if [ -z "${!VAR}" ]; then
MISSING_VARS+="$VAR "
fi
done
@ -155,30 +136,6 @@ setHostname() {
fi
}
# This function optimizes the usage of memory through blaze, according to the official performance tuning guide:
# https://github.com/samply/blaze/blob/master/docs/tuning-guide.md
# Short summary of the adjustments made:
# - set blaze memory cap to a quarter of the system memory
# - set db block cache size to a quarter of the system memory
# - limit the resource count allowed in Blaze to 1.25M per 4 GB of available system memory
optimizeBlazeMemoryUsage() {
if [ -z "$BLAZE_MEMORY_CAP" ]; then
system_memory_in_mb=$(LC_ALL=C free -m | grep 'Mem:' | awk '{print $2}');
export BLAZE_MEMORY_CAP=$(($system_memory_in_mb/4));
fi
if [ -z "$BLAZE_RESOURCE_CACHE_CAP" ]; then
available_system_memory_chunks=$((BLAZE_MEMORY_CAP / 1000))
if [ $available_system_memory_chunks -eq 0 ]; then
log WARN "Only ${BLAZE_MEMORY_CAP} system memory available for Blaze. If your Blaze stores more than 128000 fhir ressources it will run significally slower."
export BLAZE_RESOURCE_CACHE_CAP=128000;
export BLAZE_CQL_CACHE_CAP=32;
else
export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500))
export BLAZE_CQL_CACHE_CAP=$((($system_memory_in_mb/4)/16));
fi
fi
}
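For illustration, on a hypothetical host where free -m reports 16384 MB, the sizing above works out as follows:

# BLAZE_MEMORY_CAP               = 16384 / 4        = 4096 (MB)
# available_system_memory_chunks = 4096 / 1000      = 4
# BLAZE_RESOURCE_CACHE_CAP       = 4 * 312500       = 1250000 (FHIR resources)
# BLAZE_CQL_CACHE_CAP            = (16384 / 4) / 16 = 256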
# Takes 1) the backup directory path and 2) the name of the service to be backed up
# Creates 3 backups: 1) for the past seven days, 2) for the current month and 3) for each calendar week
createEncryptedPostgresBackup(){
@ -214,7 +171,7 @@ function retry {
function bk_is_running {
detectCompose
RUNNING="$($COMPOSE -p $PROJECT -f minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE ps -q)"
RUNNING="$($COMPOSE -p $PROJECT -f ./$PROJECT/docker-compose.yml $OVERRIDE ps -q)"
NUMBEROFRUNNING=$(echo "$RUNNING" | wc -l)
if [ $NUMBEROFRUNNING -ge 2 ]; then
return 0
@ -223,220 +180,6 @@ function bk_is_running {
fi
}
function do_enroll_inner {
PARAMS=""
MANUAL_PROXY_ID="${1:-$PROXY_ID}"
if [ -z "$MANUAL_PROXY_ID" ]; then
log ERROR "No Proxy ID set"
exit 1
else
log INFO "Enrolling Beam Proxy Id $MANUAL_PROXY_ID"
fi
SUPPORT_EMAIL="${2:-$SUPPORT_EMAIL}"
if [ -n "$SUPPORT_EMAIL" ]; then
PARAMS+="--admin-email $SUPPORT_EMAIL"
fi
docker run --rm -v /etc/bridgehead/pki:/etc/bridgehead/pki docker.verbis.dkfz.de/cache/samply/beam-enroll:latest --output-file $PRIVATEKEYFILENAME --proxy-id $MANUAL_PROXY_ID $PARAMS
chmod 600 $PRIVATEKEYFILENAME
}
function do_enroll {
do_enroll_inner $@
}
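A usage sketch for enrollment (the proxy ID and support address below are placeholders, not real values):

do_enroll "my-site.test-no-real-data.broker.samply.de" "it-support@my-site.example"
# writes the generated private key to $PRIVATEKEYFILENAME and restricts it to mode 600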
add_basic_auth_user() {
USER="${1}"
PASSWORD="${2}"
NAME="${3}"
PROJECT="${4}"
FILE="/etc/bridgehead/${PROJECT}.local.conf"
ENCRY_CREDENTIALS="$(docker run --rm docker.verbis.dkfz.de/cache/httpd:alpine htpasswd -nb $USER $PASSWORD | tr -d '\n' | tr -d '\r')"
if [ -f $FILE ] && grep -R -q "$NAME=" $FILE # if a specific basic auth user already exists:
then
sed -i "/$NAME/ s|='|='$ENCRY_CREDENTIALS,|" $FILE
else
echo -e "\n## Basic Authentication Credentials for:\n$NAME='$ENCRY_CREDENTIALS'" >> $FILE;
fi
log DEBUG "Saving clear text credentials in $FILE. If wanted, delete them manually."
sed -i "/^$NAME/ s|$|\n# User: $USER\n# Password: $PASSWORD|" $FILE
}
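For example, a hypothetical call like this appends an htpasswd-style entry (plus the clear-text credentials as comments) to /etc/bridgehead/ccp.local.conf:

add_basic_auth_user "ccp" "s3cretExample" "LDM_AUTH" "ccp"
# resulting line (hash shortened): LDM_AUTH='ccp:$apr1$...'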
OIDC_PUBLIC_REDIRECT_URLS=${OIDC_PUBLIC_REDIRECT_URLS:-""}
OIDC_PRIVATE_REDIRECT_URLS=${OIDC_PRIVATE_REDIRECT_URLS:-""}
# Add a redirect url to the public oidc client of the bridgehead
function add_public_oidc_redirect_url() {
if [[ $OIDC_PUBLIC_REDIRECT_URLS == "" ]]; then
OIDC_PUBLIC_REDIRECT_URLS+="$(generate_redirect_urls $1)"
else
OIDC_PUBLIC_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
fi
}
# Add a redirect url to the private oidc client of the bridgehead
function add_private_oidc_redirect_url() {
if [[ $OIDC_PRIVATE_REDIRECT_URLS == "" ]]; then
OIDC_PRIVATE_REDIRECT_URLS+="$(generate_redirect_urls $1)"
else
OIDC_PRIVATE_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
fi
}
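As a sketch, a module exposing a login callback under a hypothetical path could register it for both OIDC clients:

add_public_oidc_redirect_url "/my-module/login-callback"
add_private_oidc_redirect_url "/my-module/login-callback"
# both variables now hold comma-separated redirect URL lists that sync_secrets passes on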
function sync_secrets() {
local delimiter=$'\x1E'
local secret_sync_args=""
if [[ $OIDC_PRIVATE_REDIRECT_URLS != "" ]]; then
secret_sync_args="OIDC:OIDC_CLIENT_SECRET:private;$OIDC_PRIVATE_REDIRECT_URLS"
fi
if [[ $OIDC_PUBLIC_REDIRECT_URLS != "" ]]; then
if [[ $secret_sync_args == "" ]]; then
secret_sync_args="OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
else
secret_sync_args+="${delimiter}OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
fi
fi
if [[ $secret_sync_args == "" ]]; then
return
fi
mkdir -p /var/cache/bridgehead/secrets/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/secrets/'. Please run sudo './bridgehead install $PROJECT' again."
touch /var/cache/bridgehead/secrets/oidc
docker run --rm \
-v /var/cache/bridgehead/secrets/oidc:/usr/local/cache \
-v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
-v /srv/docker/bridgehead/$PROJECT/root.crt.pem:/run/secrets/root.crt.pem:ro \
-v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
-e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
-e NO_PROXY=localhost,127.0.0.1 \
-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
-e PROXY_ID=$PROXY_ID \
-e BROKER_URL=$BROKER_URL \
-e OIDC_PROVIDER=secret-sync-central.central-secret-sync.$BROKER_ID \
-e SECRET_DEFINITIONS=$secret_sync_args \
docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
set -a # Export variables as environment variables
source /var/cache/bridgehead/secrets/oidc
set +a # Export variables in the regular way
}
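To illustrate what sync_secrets hands to Secret Sync: with one private and one public redirect URL registered for a hypothetical host, SECRET_DEFINITIONS contains two entries joined by the 0x1E record separator, roughly:

# OIDC:OIDC_CLIENT_SECRET:private;https://bridgehead.my-site.example/my-module/login-callback
# OIDC:OIDC_PUBLIC:public;https://bridgehead.my-site.example/my-module/login-callback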
function secret_sync_gitlab_token() {
# Map the origin of the git repository /etc/bridgehead to the prefix recognized by Secret Sync
local gitlab
case "$(git -C /etc/bridgehead remote get-url origin)" in
*git.verbis.dkfz.de*) gitlab=verbis;;
*gitlab.bbmri-eric.eu*) gitlab=bbmri;;
*)
log "WARN" "Not running Secret Sync because the git repository /etc/bridgehead has unknown origin"
return
;;
esac
if [ "$PROJECT" == "bbmri" ]; then
# If the project is BBMRI, use the BBMRI-ERIC broker and not the GBN broker
proxy_id=$ERIC_PROXY_ID
broker_url=$ERIC_BROKER_URL
broker_id=$ERIC_BROKER_ID
root_crt_file="/srv/docker/bridgehead/bbmri/modules/${ERIC_ROOT_CERT}.root.crt.pem"
else
proxy_id=$PROXY_ID
broker_url=$BROKER_URL
broker_id=$BROKER_ID
root_crt_file="/srv/docker/bridgehead/$PROJECT/root.crt.pem"
fi
# Create a temporary directory for Secret Sync that is valid per boot
secret_sync_tempdir="/tmp/bridgehead/secret-sync.boot-$(cat /proc/sys/kernel/random/boot_id)"
mkdir -p $secret_sync_tempdir
# Use Secret Sync to validate the GitLab token in $secret_sync_tempdir/cache.
# If it is missing or expired, Secret Sync will create a new token and write it to the file.
# The git credential helper reads the token from the file during git pull.
log "INFO" "Running Secret Sync for the GitLab token (gitlab=$gitlab)"
docker pull docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest # make sure we have the latest image
docker run --rm \
-v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
-v $root_crt_file:/run/secrets/root.crt.pem:ro \
-v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
-v $secret_sync_tempdir:/secret-sync/ \
-e CACHE_PATH=/secret-sync/gitlab-token \
-e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
-e NO_PROXY=localhost,127.0.0.1 \
-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
-e PROXY_ID=$proxy_id \
-e BROKER_URL=$broker_url \
-e GITLAB_PROJECT_ACCESS_TOKEN_PROVIDER=secret-sync-central.central-secret-sync.$broker_id \
-e SECRET_DEFINITIONS=GitLabProjectAccessToken:BRIDGEHEAD_CONFIG_REPO_TOKEN:$gitlab \
docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
if [ $? -eq 0 ]; then
log "INFO" "Secret Sync was successful"
# In the past we used to hardcode tokens into the repository URL. We have to remove those now for the git credential helper to become effective.
CLEAN_REPO="$(git -C /etc/bridgehead remote get-url origin | sed -E 's|https://[^@]+@|https://|')"
git -C /etc/bridgehead remote set-url origin "$CLEAN_REPO"
# Set the git credential helper
git -C /etc/bridgehead config credential.helper /srv/docker/bridgehead/lib/gitlab-token-helper.sh
else
log "WARN" "Secret Sync failed"
# Remove the git credential helper
git -C /etc/bridgehead config --unset credential.helper
fi
# In the past the git credential helper was also set for /srv/docker/bridgehead but never used.
# Let's remove it to avoid confusion. This line can be removed at some point in the future when we
# believe that it was removed on all/most production servers.
git -C /srv/docker/bridgehead config --unset credential.helper
}
capitalize_first_letter() {
input="$1"
capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}"
echo "$capitalized"
}
# Generate a ','-separated string of redirect URLs relative to $HOST.
# $1 will be appended to each URL.
# If the host looks like dev-jan.inet.dkfz-heidelberg.de, it will generate URLs with both dev-jan and the original $HOST as URL authorities.
function generate_redirect_urls(){
local redirect_urls="https://${HOST}$1"
local host_without_proxy="$(echo "$HOST" | cut -d '.' -f1)"
# Only append the second URL if it differs and the host is not an IP address
if [[ "$HOST" != "$host_without_proxy" && ! "$HOST" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
redirect_urls+=",https://$host_without_proxy$1"
fi
echo "$redirect_urls"
}
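A worked example: with HOST=dev-jan.inet.dkfz-heidelberg.de (the host name from the comment above) and a hypothetical path, the function prints both authorities; for a bare IP address it would print only the first URL.

generate_redirect_urls "/oauth2/callback"
# prints: https://dev-jan.inet.dkfz-heidelberg.de/oauth2/callback,https://dev-jan/oauth2/callback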
# This password contains at least one special char, a random number and a random upper and lower case letter
generate_password(){
local seed_text="$1"
local seed_num=$(awk 'BEGIN{FS=""} NR==1{print $10}' /etc/bridgehead/pki/${SITE_ID}.priv.pem | od -An -tuC)
local nums="1234567890"
local n=$(echo "$seed_num" | awk '{print $1 % 10}')
local random_digit=${nums:$n:1}
local n=$(echo "$seed_num" | awk '{print $1 % 26}')
local upper="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
local lower="abcdefghijklmnopqrstuvwxyz"
local random_upper=${upper:$n:1}
local random_lower=${lower:$n:1}
local n=$(echo "$seed_num" | awk '{print $1 % 8}')
local special='@#$%^&+='
local random_special=${special:$n:1}
local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
local main_password=$(echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/\//A/g')
echo "${main_password}${random_digit}${random_upper}${random_lower}${random_special}"
}
# This password only contains alphanumeric characters
generate_simple_password(){
local seed_text="$1"
local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/[+\/]/A/g'
}
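Both helpers derive a deterministic password from the site's private key, so repeated calls with the same seed text yield the same value. A usage sketch (the variable names and seed texts are only illustrative):

DB_PASSWORD="$(generate_simple_password 'postgres db')"   # alphanumeric only
ADMIN_PASSWORD="$(generate_password 'admin login')"       # guaranteed digit, upper, lower and special char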
docker_jq() {
docker run --rm -i docker.verbis.dkfz.de/cache/jqlang/jq:latest "$@"
}
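docker_jq simply forwards its arguments and stdin to a containerized jq, so it can be used like the plain binary:

echo '{"status":"ok"}' | docker_jq -r '.status'   # prints: ok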
##Setting Network properties
# currently not needed
#export HOSTIP=$(MSYS_NO_PATHCONV=1 docker run --rm --add-host=host.docker.internal:host-gateway ubuntu cat /etc/hosts | grep 'host.docker.internal' | awk '{print $1}');
View File
@ -1,11 +0,0 @@
#!/bin/bash
[ "$1" = "get" ] || exit
source "/tmp/bridgehead/secret-sync.boot-$(cat /proc/sys/kernel/random/boot_id)/gitlab-token"
# Any non-empty username works, only the token matters
cat << EOF
username=bk
password=$BRIDGEHEAD_CONFIG_REPO_TOKEN
EOF
41
lib/gitpassword.sh Executable file
View File
@ -0,0 +1,41 @@
#!/bin/bash
if [ "$1" != "get" ]; then
echo "Usage: $0 get"
exit 1
fi
baseDir() {
# see https://stackoverflow.com/questions/59895
SOURCE=${BASH_SOURCE[0]}
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR=$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )
SOURCE=$(readlink "$SOURCE")
[[ $SOURCE != /* ]] && SOURCE=$DIR/$SOURCE # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR=$( cd -P "$( dirname "$SOURCE" )/.." >/dev/null 2>&1 && pwd )
echo $DIR
}
BASE=$(baseDir)
cd $BASE
source lib/functions.sh
assertVarsNotEmpty SITE_ID || fail_and_report 1 "gitpassword.sh failed: SITE_ID is empty."
PARAMS="$(cat)"
GITHOST=$(echo "$PARAMS" | grep "^host=" | sed 's/host=\(.*\)/\1/g')
fetchVarsFromVault GIT_PASSWORD
if [ -z "${GIT_PASSWORD}" ]; then
fail_and_report 1 "gitpassword.sh failed: Git password not found."
fi
cat <<EOF
protocol=https
host=$GITHOST
username=bk-${SITE_ID}
password=${GIT_PASSWORD}
EOF
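A sketch of how git consumes this helper (paths are examples): once registered as a credential helper, git pipes the request to the script on stdin and reads back the credentials it prints.

git -C /etc/bridgehead config credential.helper /srv/docker/bridgehead/lib/gitpassword.sh
# on the next git pull, git sends "protocol=https" and "host=..." on stdin and receives
# username=bk-${SITE_ID} plus the GIT_PASSWORD fetched from Vault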
View File
@ -29,30 +29,12 @@ bridgehead ALL= NOPASSWD: BRIDGEHEAD${PROJECT^^}
EOF
# TODO: Determine whether this should be located in setup-bridgehead (triggered through bridgehead install) or in update bridgehead (triggered every hour)
if [ -z "$LDM_AUTH" ]; then
log "INFO" "Now generating basic auth for the local data management (see adduser in bridgehead for more information). "
if [ -z "$LDM_PASSWORD" ]; then
log "INFO" "Now generating a password for the local data management. Please save the password for your ETL process!"
generated_passwd="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 32)"
add_basic_auth_user $PROJECT $generated_passwd "LDM_AUTH" $PROJECT
fi
if [ ! -z "$NNGM_CTS_APIKEY" ] && [ -z "$NNGM_AUTH" ]; then
log "INFO" "Now generating basic auth for nNGM upload API (see adduser in bridgehead for more information). "
generated_passwd="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 32)"
add_basic_auth_user "nngm" $generated_passwd "NNGM_AUTH" $PROJECT
fi
if [ -z "$TRANSFAIR_AUTH" ]; then
if [[ -n "$TTP_URL" || -n "$EXCHANGE_ID_SYSTEM" ]]; then
log "INFO" "Now generating basic auth user for transfair API (see adduser in bridgehead for more information). "
generated_passwd="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 32)"
add_basic_auth_user "transfair" $generated_passwd "TRANSFAIR_AUTH" $PROJECT
fi
fi
if [ "$ENABLE_EXPORTER" == "true" ] && [ -z "$EXPORTER_USER" ]; then
log "INFO" "Now generating basic auth for the exporter and reporter (see adduser in bridgehead for more information)."
generated_passwd="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 32)"
add_basic_auth_user $PROJECT $generated_passwd "EXPORTER_USER" $PROJECT
log "INFO" "Your generated credentials are:\n user: $PROJECT\n password: $generated_passwd"
echo -e "## Local Data Management Basic Authentication\n# User: $PROJECT\nLDM_PASSWORD=$generated_passwd" >> /etc/bridgehead/${PROJECT}.local.conf;
fi
log "INFO" "Registering system units for bridgehead and bridgehead-update"
View File
@ -47,8 +47,8 @@ function hc_send(){
if [ -n "$2" ]; then
MSG="$2\n\nDocker stats:\n$UPTIME"
echo -e "$MSG" | https_proxy=$HTTPS_PROXY_FULL_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null -X POST --data-binary @- "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
echo -e "$MSG" | https_proxy=$HTTPS_PROXY_URL curl -A "$USER_AGENT" -s -o /dev/null -X POST --data-binary @- "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
else
https_proxy=$HTTPS_PROXY_FULL_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
https_proxy=$HTTPS_PROXY_URL curl -A "$USER_AGENT" -s -o /dev/null "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
fi
}
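A usage sketch (the path suffix and message are placeholders):

hc_send "log" "Bridgehead started"
# appends the Docker stats to the message and POSTs it to $HCURL/log through the configured proxy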
Some files were not shown because too many files have changed in this diff.