Compare commits

..

1 Commit

Author SHA1 Message Date
b741d931ae refactor: use environment variables for oauth2-proxy config 2024-10-08 13:52:29 +02:00
65 changed files with 221 additions and 1184 deletions

.github/CODEOWNERS vendored
View File

@ -1 +0,0 @@
* @samply/bridgehead-developers

View File

@ -1,39 +0,0 @@
import os
import requests
from datetime import datetime, timedelta

# Configuration
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
REPO = 'samply/bridgehead'
HEADERS = {'Authorization': f'token {GITHUB_TOKEN}', 'Accept': 'application/vnd.github.v3+json'}
API_URL = f'https://api.github.com/repos/{REPO}/branches'
INACTIVE_DAYS = 365
CUTOFF_DATE = datetime.now() - timedelta(days=INACTIVE_DAYS)

# Fetch all branches
def get_branches():
    response = requests.get(API_URL, headers=HEADERS)
    response.raise_for_status()
    return response.json() if response.status_code == 200 else []

# Rename inactive branches
def rename_branch(old_name, new_name):
    rename_url = f'https://api.github.com/repos/{REPO}/branches/{old_name}/rename'
    response = requests.post(rename_url, json={'new_name': new_name}, headers=HEADERS)
    response.raise_for_status()
    print(f"Renamed branch {old_name} to {new_name}" if response.status_code == 201 else f"Failed to rename {old_name}: {response.status_code}")

# Check if the branch is inactive
def is_inactive(commit_url):
    last_commit_date = requests.get(commit_url, headers=HEADERS).json()['commit']['committer']['date']
    return datetime.strptime(last_commit_date, '%Y-%m-%dT%H:%M:%SZ') < CUTOFF_DATE

# Rename inactive branches
def main():
    for branch in get_branches():
        if is_inactive(branch['commit']['url']):
            #rename_branch(branch['name'], f"archived/{branch['name']}")
            print(f"[LOG] Branch '{branch['name']}' is inactive and would be renamed to 'archived/{branch['name']}'")

if __name__ == "__main__":
    main()
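
For local testing, a minimal dry run of the script above might look like this (sketch only; it assumes Python 3 with the `requests` library and a GitHub personal access token with read access to `samply/bridgehead`):

```shell
# Hypothetical dry run of the branch-archiving script shown above.
# It only logs inactive branches; the actual rename_branch() call is commented out in main().
export GITHUB_TOKEN=<your_personal_access_token>
pip install requests
python .github/scripts/rename_inactive_branches.py
```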

View File

@ -1,27 +0,0 @@
name: Cleanup - Rename Inactive Branches

on:
  schedule:
    - cron: '0 0 * * 0' # Runs every Sunday at midnight

jobs:
  archive-stale-branches:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Install Libraries
        run: pip install requests

      - name: Run Script to Rename Inactive Branches
        run: |
          python .github/scripts/rename_inactive_branches.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

README.md
View File

@ -22,13 +22,11 @@ This repository is the starting point for any information and tools you will nee
- [TLS terminating proxies](#tls-terminating-proxies) - [TLS terminating proxies](#tls-terminating-proxies)
- [File structure](#file-structure) - [File structure](#file-structure)
- [BBMRI-ERIC Directory entry needed](#bbmri-eric-directory-entry-needed) - [BBMRI-ERIC Directory entry needed](#bbmri-eric-directory-entry-needed)
- [Directory sync tool](#directory-sync-tool)
- [Loading data](#loading-data) - [Loading data](#loading-data)
4. [Things you should know](#things-you-should-know) 4. [Things you should know](#things-you-should-know)
- [Auto-Updates](#auto-updates) - [Auto-Updates](#auto-updates)
- [Auto-Backups](#auto-backups) - [Auto-Backups](#auto-backups)
- [Non-Linux OS](#non-linux-os) - [Non-Linux OS](#non-linux-os)
- [FAQ](#faq)
5. [Troubleshooting](#troubleshooting) 5. [Troubleshooting](#troubleshooting)
- [Docker Daemon Proxy Configuration](#docker-daemon-proxy-configuration) - [Docker Daemon Proxy Configuration](#docker-daemon-proxy-configuration)
- [Monitoring](#monitoring) - [Monitoring](#monitoring)
@ -78,7 +76,7 @@ The following URLs need to be accessible (prefix with `https://`):
* git.verbis.dkfz.de * git.verbis.dkfz.de
* To fetch docker images * To fetch docker images
* docker.verbis.dkfz.de * docker.verbis.dkfz.de
* Official Docker, Inc. URLs (subject to change, see [official list](https://docs.docker.com/desktop/setup/allow-list/)) * Official Docker, Inc. URLs (subject to change, see [official list](https://docs.docker.com/desktop/all))
* hub.docker.com * hub.docker.com
* registry-1.docker.io * registry-1.docker.io
* production.cloudflare.docker.com * production.cloudflare.docker.com
@ -156,7 +154,7 @@ Pay special attention to:
Clone the bridgehead repository: Clone the bridgehead repository:
```shell ```shell
sudo mkdir -p /srv/docker/ sudo mkdir -p /srv/docker/
sudo git clone -b main https://github.com/samply/bridgehead.git /srv/docker/bridgehead sudo git clone https://github.com/samply/bridgehead.git /srv/docker/bridgehead
``` ```
Then, run the installation script: Then, run the installation script:
@ -256,8 +254,6 @@ sh bridgehead uninstall
## Site-specific configuration ## Site-specific configuration
[How to Change Config Access Token](docs/update-access-token.md)
### HTTPS Access ### HTTPS Access
Even within your internal network, the Bridgehead enforces HTTPS for all services. During the installation, a self-signed, long-lived certificate was created for you. To increase security, you can simply replace the files under `/etc/bridgehead/traefik-tls` with ones from established certification authorities such as [Let's Encrypt](https://letsencrypt.org) or [DFN-AAI](https://www.aai.dfn.de). Even within your internal network, the Bridgehead enforces HTTPS for all services. During the installation, a self-signed, long-lived certificate was created for you. To increase security, you can simply replace the files under `/etc/bridgehead/traefik-tls` with ones from established certification authorities such as [Let's Encrypt](https://letsencrypt.org) or [DFN-AAI](https://www.aai.dfn.de).
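For example, renewing the certificate could be as simple as copying the new key pair into place and restarting the Bridgehead; the file names below are assumptions, so check which names Traefik expects in your installation:

```shell
# Illustrative only: file names may differ in your setup.
sudo cp your-certificate.crt your-certificate.key /etc/bridgehead/traefik-tls/
sudo systemctl restart bridgehead@<PROJECT>.service
```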
@ -303,38 +299,26 @@ Once you have added your biobank to the Directory you got persistent identifier
### Directory sync tool ### Directory sync tool
The Bridgehead's **Directory Sync** is an optional feature that keeps the BBMRI-ERIC Directory up to date with your local data, e.g. number of samples. Conversely, it can also update the local FHIR store with the latest contact details etc. from the BBMRI-ERIC Directory. The Bridgehead's **Directory Sync** is an optional feature that keeps the Directory up to date with your local data, e.g. number of samples. Conversely, it also updates the local FHIR store with the latest contact details etc. from the Directory. You must explicitly set your country specific directory URL, username and password to enable this feature.
You should talk with your local data protection group regarding the information that is published by Directory sync. You should talk with your local data protection group regarding the information that is published by Directory sync.
To enable it, you will need to explicitly set the username and password variables for BBMRI-ERIC Directory login in the configuration file of your GitLab repository (e.g. ```bbmri.conf```). Here is an example minimal config: Full details can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
To enable it, you will need to set these variables in the ```bbmri.conf``` file of your GitLab repository. Here is an example config:
``` ```
DS_DIRECTORY_USER_NAME=your_directory_username DS_DIRECTORY_USER_NAME=your_directory_username
DS_DIRECTORY_USER_PASS=your_directory_password DS_DIRECTORY_USER_PASS=your_directory_password
``` ```
Please contact your National Node or Directory support (directory-dev@helpdesk.bbmri-eric.eu) to obtain these credentials. Please contact your National Node to obtain this information.
The following environment variables can be used from within your config file to control the behavior of Directory sync: Optionally, you **may** change when you want Directory sync to run by specifying a [cron](https://crontab.guru) expression, e.g. `DS_TIMER_CRON="0 22 * * *"` for 10 pm every evening.
| Variable | Purpose | Default if not specified | Once you have edited the GitLab config, the Bridgehead will autoupdate the config with the values and will sync the data.
|:-----------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------|
| DS_DIRECTORY_URL | Base URL of the Directory | https://directory-backend.molgenis.net |
| DS_DIRECTORY_USER_NAME | User name for logging in to Directory **Mandatory** | |
| DS_DIRECTORY_USER_PASS | Password for logging in to Directory **Mandatory** | |
| DS_DIRECTORY_DEFAULT_COLLECTION_ID | ID of collection to be used if not in samples | |
| DS_DIRECTORY_ALLOW_STAR_MODEL | Set to 'True' to send star model info to Directory | True |
| DS_FHIR_STORE_URL | URL for FHIR store | http://bridgehead-bbmri-blaze:8080 |
| DS_TIMER_CRON | Execution interval for Directory sync, [cron](https://crontab.guru) format | 0 22 * * * |
| DS_IMPORT_BIOBANKS | Set to 'True' to import biobank metadata from Directory | True |
| DS_IMPORT_COLLECTIONS | Set to 'True' to import collection metadata from Directory | True |
Once you have finished editing the config, the Bridgehead will autoupdate the config with the values and will sync data at regular intervals, using the time specified in DS_TIMER_CRON.
There will be a delay before the effects of Directory sync become visible. First, you will need to wait until the time you have specified in ```TIMER_CRON```. Second, the information will then be synchronized from your national node with the central European Directory. This can take up to 24 hours. There will be a delay before the effects of Directory sync become visible. First, you will need to wait until the time you have specified in ```TIMER_CRON```. Second, the information will then be synchronized from your national node with the central European Directory. This can take up to 24 hours.
More details of Directory sync can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
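
Put together, a minimal `bbmri.conf` fragment that enables Directory sync with an optional custom schedule might look like this (placeholder values; only the user name and password are mandatory):

```shell
# Illustrative Directory sync settings for the site configuration file (e.g. bbmri.conf).
DS_DIRECTORY_USER_NAME=your_directory_username
DS_DIRECTORY_USER_PASS=your_directory_password
# Optional: run Directory sync at 10 pm every evening (cron format).
DS_TIMER_CRON="0 22 * * *"
```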
### Loading data ### Loading data
The data accessed by the federated search is held in the Bridgehead in a FHIR store (we use Blaze). The data accessed by the federated search is held in the Bridgehead in a FHIR store (we use Blaze).
@ -354,24 +338,6 @@ The storage space on your hard drive will depend on the number of FHIR resources
For more information on Blaze performance, please refer to [import performance](https://github.com/samply/blaze/blob/master/docs/performance/import.md). For more information on Blaze performance, please refer to [import performance](https://github.com/samply/blaze/blob/master/docs/performance/import.md).
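
As a rough sketch of what loading looks like, FHIR resources can be POSTed to Blaze's REST API as a transaction bundle; the exact URL depends on your project's reverse-proxy route (other files in this changeset use a `<project>-localdatamanagement` path prefix), so treat the endpoint below as an assumption:

```shell
# Hypothetical example: push a FHIR transaction bundle into the Bridgehead's Blaze store.
# Adjust the host, path prefix and authentication to match your project.
curl -X POST -H "Content-Type: application/fhir+json" \
     --data @bundle.json \
     https://<your-bridgehead-host>/bbmri-localdatamanagement/fhir
```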
### Clearing data
The Bridgehead's FHIR store, Blaze, saves its data in a Docker volume. This means that the data will persist even if you stop the Bridgehead. You can clear existing data from the FHIR store by deleting the relevant Docker volume.
First, stop the Bridgehead:
```shell
sudo systemctl stop bridgehead@<PROJECT>.service
```
Now remove the volume:
```shell
docker volume rm <PROJECT>_blaze-data
```
Finally, restart the Bridgehead:
```shell
sudo systemctl start bridgehead@<PROJECT>.service
```
You will need to do this, for example, if you are using a VM as a test environment and you subsequently want to use the same VM for production.
#### ETL for BBMRI and GBA #### ETL for BBMRI and GBA
Normally, you will need to build your own ETL to feed the Bridgehead. However, there is one case where a short cut might be available: Normally, you will need to build your own ETL to feed the Bridgehead. However, there is one case where a short cut might be available:
@ -418,54 +384,6 @@ We have tested the installation procedure with an Ubuntu 22.04 guest system runn
Installation under WSL ought to work, but we have not tested this. Installation under WSL ought to work, but we have not tested this.
### FAQ
**Q: How is the security of GitHub pulls, volumes/containers, and image signing ensured?**
A: Changes to Git branches that could be delivered to sites (main and develop) must be accepted via a pull request with at least two positive reviews.
Containers/images are not built manually, but rather automatically through a CI/CD pipeline, so that an image can be rolled back to a defined code version at any time without changes.
**Note:** If firewall access for (outgoing) connections to GitHub and/or Docker Hub is problematic at the site, mirrors for both services are available, operated by the DKFZ.
**Q: How is authentication between users and components regulated?**
A: When setting up a Bridgehead, a private key and a so-called Certificate Signing Request (CSR) are generated locally. This CSR is manually signed by the broker operator, which allows the Bridgehead access to the network infrastructure.
All communication runs via Samply.Beam and is therefore end-to-end encrypted, but also signed. This allows the integrity and authenticity of the sender to be technically verified (which happens automatically both in the broker and at the recipients).
The connection to the broker is additionally secured using traditional TLS (transport encryption over https).
**Q: Are there any statistics on incoming traffic from the Bridgehead (what goes in and what goes out)?**
A: Incoming and outgoing traffic can only enter/leave the Bridgehead via a forward or reverse proxy, respectively. These components log all connections.
Statistical analysis is not currently being conducted, but is on the roadmap for some projects. We are also working on a dashboard for all tasks/responses delivered via Samply.Beam.
**Q: How is container access controlled, and what permission level is used?**
A: Currently, it is not possible to run the Bridgehead "out-of-the-box" as a rootless Docker Compose stack. The main reason is the operation of the reverse proxy (Traefik), which binds to the privileged ports 80 (HTTP) and 443 (HTTPS).
Otherwise, there are no known technical obstacles, although we don't have concrete experience implementing this.
At the file system level, a "bridgehead" user is created during installation, which manages the configuration and Bridgehead folders.
**Q: Is a cloud installation (not a company-owned one, but an external service provider) possible?**
A: Technically, yes. This is primarily a data protection issue between the participant and their cloud provider.
The Bridgehead contains a data storage system that, during use, contains sensitive patient and sample data.
There are cloud providers with whom appropriately worded contracts can be concluded to make this possible.
Of course, the details must be discussed with the responsible data protection officer.
**Q: What needs to be considered regarding the Docker distribution/registry, and how is it used here?**
A: The Bridgehead images are located both in Docker Hub and mirrored in a registry operated by the DKFZ.
The latter is used by default, avoiding potential issues with Docker Hub URL activation or rate limits.
When using automatic updates (highly recommended), a daily check is performed for:
- site configuration updates
- Bridgehead software updates
- container image updates
If updates are found, they are downloaded and applied.
See the first question for the control mechanism.
**Q: Is data only transferred one-way (Bridgehead/FHIR Store → Central/Locator), or is two-way access necessary?**
A: By using Samply.Beam, only one outgoing connection to the broker is required at the network level (i.e., Bridgehead → Broker).
## Troubleshooting ## Troubleshooting
### Docker Daemon Proxy Configuration ### Docker Daemon Proxy Configuration

View File

@ -4,14 +4,13 @@ version: "3.7"
services: services:
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-bbmri-blaze container_name: bridgehead-bbmri-blaze
environment: environment:
BASE_URL: "http://bridgehead-bbmri-blaze:8080" BASE_URL: "http://bridgehead-bbmri-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m" JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000} DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP} DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false" ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes: volumes:
- "blaze-data:/app/data" - "blaze-data:/app/data"

View File

@ -12,7 +12,5 @@ services:
DS_DIRECTORY_MOCK: ${DS_DIRECTORY_MOCK} DS_DIRECTORY_MOCK: ${DS_DIRECTORY_MOCK}
DS_DIRECTORY_DEFAULT_COLLECTION_ID: ${DS_DIRECTORY_DEFAULT_COLLECTION_ID} DS_DIRECTORY_DEFAULT_COLLECTION_ID: ${DS_DIRECTORY_DEFAULT_COLLECTION_ID}
DS_DIRECTORY_COUNTRY: ${DS_DIRECTORY_COUNTRY} DS_DIRECTORY_COUNTRY: ${DS_DIRECTORY_COUNTRY}
DS_IMPORT_BIOBANKS: ${DS_IMPORT_BIOBANKS:-true}
DS_IMPORT_COLLECTIONS: ${DS_IMPORT_COLLECTIONS:-true}
depends_on: depends_on:
- "blaze" - "blaze"

View File

@ -2,7 +2,7 @@ version: "3.7"
services: services:
focus-eric: focus-eric:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus-eric container_name: bridgehead-focus-eric
environment: environment:
API_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT} API_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT}

View File

@ -10,10 +10,6 @@ if [ "${ENABLE_ERIC}" == "true" ]; then
export ERIC_BROKER_ID=broker.bbmri.samply.de export ERIC_BROKER_ID=broker.bbmri.samply.de
export ERIC_ROOT_CERT=eric export ERIC_ROOT_CERT=eric
;; ;;
"acceptance")
export ERIC_BROKER_ID=broker-acc.bbmri-acc.samply.de
export ERIC_ROOT_CERT=eric.acc
;;
"test") "test")
export ERIC_BROKER_ID=broker-test.bbmri-test.samply.de export ERIC_BROKER_ID=broker-test.bbmri-test.samply.de
export ERIC_ROOT_CERT=eric.test export ERIC_ROOT_CERT=eric.test

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUE/wu6FmI+KSMOalI65b+lI3HI4cwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwOTE2MTUyMzU0WhcNMzQw
OTE0MTUyNDI0WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAOt1I1FQt2bI4Nnjtg8JBYid29cBIkDT4MMb45Jr
ays24y4R3WO7VJK9UjNduSq/A1jlA0W0A/szDf8Ojq6bBtg+uL92PTDjYH1QXwX0
c7eMo2tvvyyrs/cb2/ovDBQ1lpibcxVmVAv042ASmil3SdqKKXpv3ATnF9I7V4cv
fwB56FChaGIov5EK+9JOMjTx6oMlBEgUFR6qq/lSqM9my0HYwUFbX2W+nT9EKEIP
9UP1eyfRZR3E/+oticnm/cS20BGCbjoYrNgLthXKyaASuhGoElKs8EZ3h9MiI+u0
DpR0KpePhAkMLugBrgYWqkMwwD1684LfC4YVQrsLwzo5OW8CAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFPbXs3g3lMjH
1JMe0a5aVbN7lB92MB8GA1UdIwQYMBaAFPbXs3g3lMjH1JMe0a5aVbN7lB92MBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQBM5RsXb2HN
FpC1mYfocXAn20Zu4d603qmc/IqkiOWbp36pWo+jk1AxejyRS9hEpQalgSnvcRPQ
1hPEhGU+wvI0WWVi/01iNjVbXmJNPQEouXQWAT17dyp9vqQkPw8LNzpSV/qdPgbT
Z9o3sZrjUsSLsK7A7Q5ky4ePkiJBaMsHeAD+wqGwpiJ4D2Xhp8e1v36TWM0qt2EA
gySx9isx/jeGGPBmDqYB9BCal5lrihPN56jd+5pCkyXeZqKWiiXFJKXwcwxctYZc
ADHIiTLLPXE8LHTUJAO51it1NAZ1S24aMzax4eWDXcWO7/ybbx5pkYkMd6EqlKHd
8riQJIhY4huX
-----END CERTIFICATE-----

View File

@ -2,7 +2,7 @@ version: "3.7"
services: services:
focus-gbn: focus-gbn:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-bbmri image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus-gbn container_name: bridgehead-focus-gbn
environment: environment:
API_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT} API_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT}

View File

@ -35,9 +35,6 @@ case "$PROJECT" in
cce) cce)
#nothing extra to do #nothing extra to do
;; ;;
pscc)
#nothing extra to do
;;
itcc) itcc)
#nothing extra to do #nothing extra to do
;; ;;
@ -56,47 +53,17 @@ case "$PROJECT" in
;; ;;
esac esac
# Loads config variables and runs the projects setup script
loadVars() { loadVars() {
# Load variables from /etc/bridgehead and /srv/docker/bridgehead
set -a set -a
# Source the project specific config file
source /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "/etc/bridgehead/$PROJECT.conf not found" source /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "/etc/bridgehead/$PROJECT.conf not found"
# Source the project specific local config file if present
# This file is ignored by git as opposed to the regular config file as it contains private site information like ETL auth data
if [ -e /etc/bridgehead/$PROJECT.local.conf ]; then if [ -e /etc/bridgehead/$PROJECT.local.conf ]; then
log INFO "Applying /etc/bridgehead/$PROJECT.local.conf" log INFO "Applying /etc/bridgehead/$PROJECT.local.conf"
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import" source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
fi fi
# Set execution environment on main default to prod else test
if [[ -z "${ENVIRONMENT+x}" ]]; then
if [ "$(git rev-parse --abbrev-ref HEAD)" == "main" ]; then
ENVIRONMENT="production"
else
ENVIRONMENT="test" # we have acceptance environment in BBMRI ERIC and it would be more appropriate to default to that one in case the data they have in BH is real, but I'm gonna leave it as is for backward compatibility
fi
fi
# Source the versions of the images components
case "$ENVIRONMENT" in
"production")
source ./versions/prod
;;
"test")
source ./versions/test
;;
"acceptance")
source ./versions/acceptance
;;
*)
report_error 7 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
source ./versions/prod
;;
esac
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile" fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
setHostname setHostname
optimizeBlazeMemoryUsage optimizeBlazeMemoryUsage
# Run project specific setup if it exists
# This will usually modify the `OVERRIDE` to include all the compose files that the project depends on
# This is also where projects specify which modules to load
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars [ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
set +a set +a
@ -112,6 +79,26 @@ loadVars() {
fi fi
detectCompose detectCompose
setupProxy setupProxy
# Set some project-independent default values
: ${ENVIRONMENT:=production}
export ENVIRONMENT
case "$ENVIRONMENT" in
"production")
export FOCUS_TAG=main
export BEAM_TAG=main
;;
"test")
export FOCUS_TAG=develop
export BEAM_TAG=develop
;;
*)
report_error 7 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
export FOCUS_TAG=main
export BEAM_TAG=main
;;
esac
} }
case "$ACTION" in case "$ACTION" in

View File

@ -2,14 +2,13 @@ version: "3.7"
services: services:
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-cce-blaze container_name: bridgehead-cce-blaze
environment: environment:
BASE_URL: "http://bridgehead-cce-blaze:8080" BASE_URL: "http://bridgehead-cce-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m" JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000} DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP} DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false" ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes: volumes:
- "blaze-data:/app/data" - "blaze-data:/app/data"
@ -32,10 +31,6 @@ services:
BEAM_PROXY_URL: http://beam-proxy:8081 BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT} RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28 EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/cce/queries_to_cache.conf:/queries_to_cache.conf:ro
depends_on: depends_on:
- "beam-proxy" - "beam-proxy"
- "blaze" - "blaze"

View File

@ -1,65 +0,0 @@
version: "3.7"
services:
blaze-pscc:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-pscc-blaze
environment:
BASE_URL: "http://bridgehead-pscc-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data-pscc:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_pscc.rule=PathPrefix(`/pscc-localdatamanagement`)"
- "traefik.http.middlewares.pscc_b_strip.stripprefix.prefixes=/pscc-localdatamanagement"
- "traefik.http.services.blaze_pscc.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_pscc.middlewares=pscc_b_strip"
- "traefik.http.routers.blaze_pscc.tls=true"
focus-pscc:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-pscc-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID_PSCC}
PROXY_ID: ${PROXY_ID_PSCC}
BLAZE_URL: "http://bridgehead-pscc-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy-pscc:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy-pscc:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-pscc-beam-proxy
environment:
BROKER_URL: ${BROKER_URL_PSCC}
PROXY_ID: ${PROXY_ID_PSCC}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/pscc/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data-pscc:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_PSCC" ];then
OVERRIDE+=" -f ./$PROJECT/modules/pscc-compose.yml"
fi

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0FHRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU=
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIGljZDEwOiAnaHR0cDovL2ZoaXIuZGUvQ29kZVN5c3RlbS9iZmFybS9pY2QtMTAtZ20nCmNvZGVzeXN0ZW0gbW9ycGg6ICd1cm46b2lkOjIuMTYuODQwLjEuMTEzODgzLjYuNDMuMScKCmNvbnRleHQgUGF0aWVudAoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9BR0VfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgooKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDNjEnIGZyb20gaWNkMTBdKSBhbmQKKChleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODE0MC8zJykgb3IKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQ3LzMnKSBvcgooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0ODAvMycpIG9yCihleGlzdHMgZnJvbSBbT2JzZXJ2YXRpb246IENvZGUgJzU5ODQ3LTQnIGZyb20gbG9pbmNdIE8Kd2hlcmUgTy52YWx1ZS5jb2RpbmcuY29kZSBjb250YWlucyAnODUwMC8zJykpKQ==

View File

@ -1,9 +1,6 @@
BROKER_ID=test-no-real-data.broker.samply.de BROKER_ID=test-no-real-data.broker.samply.de
BROKER_ID_PSCC=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID} BROKER_URL=https://${BROKER_ID}
BROKER_URL_PSCC=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID} PROXY_ID=${SITE_ID}.${BROKER_ID}
PROXY_ID_PSCC=${SITE_ID}.${BROKER_ID_PSCC}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)" FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64} FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=manoj.waikar@dkfz-heidelberg.de SUPPORT_EMAIL=manoj.waikar@dkfz-heidelberg.de

View File

@ -2,14 +2,13 @@ version: "3.7"
services: services:
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-ccp-blaze container_name: bridgehead-ccp-blaze
environment: environment:
BASE_URL: "http://bridgehead-ccp-blaze:8080" BASE_URL: "http://bridgehead-ccp-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m" JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000} DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP} DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false" ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes: volumes:
- "blaze-data:/app/data" - "blaze-data:/app/data"
@ -22,7 +21,7 @@ services:
- "traefik.http.routers.blaze_ccp.tls=true" - "traefik.http.routers.blaze_ccp.tls=true"
focus: focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}-dktk image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus container_name: bridgehead-focus
environment: environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT} API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
@ -33,9 +32,8 @@ services:
RETRY_COUNT: ${FOCUS_RETRY_COUNT} RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28 EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf' QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes: volumes:
- /srv/docker/bridgehead/ccp/queries_to_cache.conf:/queries_to_cache.conf:ro - /srv/docker/bridgehead/ccp/queries_to_cache.conf:/queries_to_cache.conf
depends_on: depends_on:
- "beam-proxy" - "beam-proxy"
- "blaze" - "blaze"
@ -59,6 +57,7 @@ services:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro - /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro
volumes: volumes:
blaze-data: blaze-data:

View File

@ -2,7 +2,7 @@ version: "3.7"
services: services:
blaze-secondary: blaze-secondary:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-ccp-blaze-secondary container_name: bridgehead-ccp-blaze-secondary
environment: environment:
BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080" BASE_URL: "http://bridgehead-ccp-blaze-secondary:8080"

View File

@ -121,42 +121,38 @@ services:
oauth2-proxy: oauth2-proxy:
image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
container_name: bridgehead-oauth2proxy container_name: bridgehead-oauth2proxy
command: >- environment:
--allowed-group=DataSHIELD - http_proxy=http://forward_proxy:3128
--oidc-groups-claim=${OIDC_GROUP_CLAIM} - https_proxy=http://forward_proxy:3128
--auth-logging=true - OAUTH2_PROXY_ALLOWED_GROUPS=DataSHIELD
--whitelist-domain=${HOST} - OAUTH2_PROXY_OIDC_GROUPS_CLAIM=${OIDC_GROUP_CLAIM}
--http-address="0.0.0.0:4180" - OAUTH2_PROXY_WHITELIST_DOMAIN=${HOST}
--reverse-proxy=true - OAUTH2_PROXY_HTTP_ADDRESS=:4180
--upstream="static://202" - OAUTH2_PROXY_REVERSE_PROXY=true
--email-domain="*" - OAUTH2_PROXY_UPSTREAMS=static://202
--cookie-name="_BRIDGEHEAD_oauth2" - OAUTH2_PROXY_EMAIL_DOMAINS=*
--cookie-secret="${OAUTH2_PROXY_SECRET}" - OAUTH2_PROXY_COOKIE_NAME=_BRIDGEHEAD_oauth2
--cookie-expire="12h" - OAUTH2_PROXY_COOKIE_SECRET=${OAUTH2_PROXY_SECRET}
--cookie-secure="true" - OAUTH2_PROXY_COOKIE_EXPIRE=12h
--cookie-httponly="true"
#OIDC settings #OIDC settings
--provider="keycloak-oidc" - OAUTH2_PROXY_PROVIDER=keycloak-oidc
--provider-display-name="VerbIS Login" - OAUTH2_PROXY_PROVIDER_DISPLAY_NAME="VerbIS Login"
--client-id="${OIDC_PRIVATE_CLIENT_ID}" - OAUTH2_PROXY_CLIENT_ID=${OIDC_PRIVATE_CLIENT_ID}
--client-secret="${OIDC_CLIENT_SECRET}" - OAUTH2_PROXY_CLIENT_SECRET=${OIDC_CLIENT_SECRET}
--redirect-url="https://${HOST}${OAUTH2_CALLBACK}" - OAUTH2_PROXY_REDIRECT_URL="https://${HOST}${OAUTH2_CALLBACK}"
--oidc-issuer-url="${OIDC_ISSUER_URL}" - OAUTH2_PROXY_OIDC_ISSUER_URL=${OIDC_ISSUER_URL}
--scope="openid email profile" - OAUTH2_PROXY_SCOPE=openid profile email
--code-challenge-method="S256" - OAUTH2_PROXY_CODE_CHALLENGE_METHOD=true
--skip-provider-button=true - OAUTH2_PROXY_SKIP_PROVIDER_BUTTON=true
#X-Forwarded-Header settings - true/false depending on your needs #X-Forwarded-Header settings - true/false depending on your needs
--pass-basic-auth=true - OAUTH2_PROXY_PASS_BASIC_AUTH=true
--pass-user-headers=false - OAUTH2_PROXY_PASS_USER_HEADERS=false
--pass-access-token=false - OAUTH2_PROXY_ACCESS_TOKEN=false
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.oauth2_proxy.rule=PathPrefix(`/oauth2`)" - "traefik.http.routers.oauth2_proxy.rule=PathPrefix(`/oauth2`)"
- "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180" - "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
- "traefik.http.routers.oauth2_proxy.tls=true" - "traefik.http.routers.oauth2_proxy.tls=true"
environment:
http_proxy: "http://forward_proxy:3128"
https_proxy: "http://forward_proxy:3128"
depends_on: depends_on:
forward_proxy: forward_proxy:
condition: service_healthy condition: service_healthy
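This is the oauth2-proxy change referenced in the commit message: command-line flags are replaced with equivalent environment variables. oauth2-proxy generally derives these names by prefixing `OAUTH2_PROXY_`, upper-casing the flag and replacing dashes with underscores, for example:

```shell
# Flag-to-environment-variable mapping used by oauth2-proxy (illustrative):
#   --allowed-group=DataSHIELD    ->  OAUTH2_PROXY_ALLOWED_GROUPS=DataSHIELD
#   --cookie-expire="12h"         ->  OAUTH2_PROXY_COOKIE_EXPIRE=12h
#   --skip-provider-button=true   ->  OAUTH2_PROXY_SKIP_PROVIDER_BUTTON=true
```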

View File

@ -13,7 +13,7 @@ services:
PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT} PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT}
APP_ID: dnpm-connect.${PROXY_ID} APP_ID: dnpm-connect.${PROXY_ID}
DISCOVERY_URL: "./conf/central_targets.json" DISCOVERY_URL: "./conf/central_targets.json"
LOCAL_TARGETS_FILE: "/conf/connect_targets.json" LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
HTTP_PROXY: "http://forward_proxy:3128" HTTP_PROXY: "http://forward_proxy:3128"
HTTPS_PROXY: "http://forward_proxy:3128" HTTPS_PROXY: "http://forward_proxy:3128"
NO_PROXY: beam-proxy,dnpm-backend,host.docker.internal${DNPM_ADDITIONAL_NO_PROXY} NO_PROXY: beam-proxy,dnpm-backend,host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
@ -25,7 +25,7 @@ services:
volumes: volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
- /srv/docker/bridgehead/minimal/modules/dnpm-central-targets.json:/conf/central_targets.json:ro - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)" - "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)"

View File

@ -1,99 +1,34 @@
version: "3.7" version: "3.7"
services: services:
dnpm-mysql:
image: mysql:9
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
interval: 3s
timeout: 5s
retries: 5
environment:
MYSQL_ROOT_HOST: "%"
MYSQL_ROOT_PASSWORD: ${DNPM_MYSQL_ROOT_PASSWORD}
volumes:
- /var/cache/bridgehead/dnpm/mysql:/var/lib/mysql
dnpm-authup:
image: authup/authup:latest
container_name: bridgehead-dnpm-authup
volumes:
- /var/cache/bridgehead/dnpm/authup:/usr/src/app/writable
depends_on:
dnpm-mysql:
condition: service_healthy
command: server/core start
environment:
- PUBLIC_URL=https://${HOST}/auth/
- AUTHORIZE_REDIRECT_URL=https://${HOST}
- ROBOT_ADMIN_ENABLED=true
- ROBOT_ADMIN_SECRET=${DNPM_AUTHUP_SECRET}
- ROBOT_ADMIN_SECRET_RESET=true
- DB_TYPE=mysql
- DB_HOST=dnpm-mysql
- DB_USERNAME=root
- DB_PASSWORD=${DNPM_MYSQL_ROOT_PASSWORD}
- DB_DATABASE=auth
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.authup-strip.stripprefix.prefixes=/auth"
- "traefik.http.routers.dnpm-auth.middlewares=authup-strip"
- "traefik.http.routers.dnpm-auth.rule=PathPrefix(`/auth`)"
- "traefik.http.services.dnpm-auth.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-auth.tls=true"
dnpm-portal:
image: ghcr.io/dnpm-dip/portal:latest
container_name: bridgehead-dnpm-portal
environment:
- NUXT_API_URL=http://dnpm-backend:9000/
- NUXT_PUBLIC_API_URL=https://${HOST}/api/
- NUXT_AUTHUP_URL=http://dnpm-authup:3000/
- NUXT_PUBLIC_AUTHUP_URL=https://${HOST}/auth/
labels:
- "traefik.enable=true"
- "traefik.http.routers.dnpm-frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.dnpm-frontend.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-frontend.tls=true"
dnpm-backend: dnpm-backend:
image: ghcr.io/kohlbacherlab/bwhc-backend:1.0-snapshot-broker-connector
container_name: bridgehead-dnpm-backend container_name: bridgehead-dnpm-backend
image: ghcr.io/dnpm-dip/backend:latest
environment: environment:
- LOCAL_SITE=${ZPM_SITE}:${SITE_NAME} # Format: {Site-ID}:{Site-name}, e.g. UKT:Tübingen - ZPM_SITE=${ZPM_SITE}
- RD_RANDOM_DATA=${DNPM_SYNTH_NUM:--1} - N_RANDOM_FILES=${DNPM_SYNTH_NUM}
- MTB_RANDOM_DATA=${DNPM_SYNTH_NUM:--1}
- HATEOAS_HOST=https://${HOST}
- CONNECTOR_TYPE=broker
- AUTHUP_URL=robot://system:${DNPM_AUTHUP_SECRET}@http://dnpm-authup:3000
volumes: volumes:
- /etc/bridgehead/dnpm/config:/dnpm_config - /etc/bridgehead/dnpm:/bwhc_config:ro
- /var/cache/bridgehead/dnpm/backend-data:/dnpm_data - ${DNPM_DATA_DIR}:/bwhc_data
depends_on:
dnpm-authup:
condition: service_healthy
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.services.dnpm-backend.loadbalancer.server.port=9000" - "traefik.http.routers.bwhc-backend.rule=PathPrefix(`/bwhc`)"
# expose everything - "traefik.http.services.bwhc-backend.loadbalancer.server.port=9000"
- "traefik.http.routers.dnpm-backend.rule=PathPrefix(`/api`)" - "traefik.http.routers.bwhc-backend.tls=true"
- "traefik.http.routers.dnpm-backend.tls=true"
- "traefik.http.routers.dnpm-backend.service=dnpm-backend"
# except ETL
- "traefik.http.routers.dnpm-backend-etl.rule=PathRegexp(`^/api(/.*)?etl(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-etl.tls=true"
- "traefik.http.routers.dnpm-backend-etl.service=dnpm-backend"
# this needs an ETL processor with support for basic auth
- "traefik.http.routers.dnpm-backend-etl.middlewares=auth"
# except peer-to-peer
- "traefik.http.routers.dnpm-backend-peer.rule=PathRegexp(`^/api(/.*)?/peer2peer(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-peer.tls=true"
- "traefik.http.routers.dnpm-backend-peer.service=dnpm-backend"
- "traefik.http.routers.dnpm-backend-peer.middlewares=dnpm-backend-peer"
# this effectively denies all requests
# this is okay, because requests from peers don't go through Traefik
- "traefik.http.middlewares.dnpm-backend-peer.ipWhiteList.sourceRange=0.0.0.0/32"
landing: dnpm-frontend:
image: ghcr.io/kohlbacherlab/bwhc-frontend:2209
container_name: bridgehead-dnpm-frontend
links:
- dnpm-backend
environment:
- NUXT_HOST=0.0.0.0
- NUXT_PORT=8080
- BACKEND_PROTOCOL=https
- BACKEND_HOSTNAME=$HOST
- BACKEND_PORT=443
labels: labels:
- "traefik.http.routers.landing.rule=PathPrefix(`/landing`)" - "traefik.enable=true"
- "traefik.http.routers.bwhc-frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.bwhc-frontend.loadbalancer.server.port=8080"
- "traefik.http.routers.bwhc-frontend.tls=true"

View File

@ -1,16 +1,28 @@
#!/bin/bash #!/bin/bash
if [ -n "${ENABLE_DNPM_NODE}" ]; then if [ -n "${ENABLE_DNPM_NODE}" ]; then
log INFO "DNPM setup detected -- will start DNPM:DIP node." log INFO "DNPM setup detected (BwHC Node) -- will start BwHC node."
OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml" OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"
# Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf # Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
if [ -z "${ZPM_SITE+x}" ]; then if [ -z "${ZPM_SITE+x}" ]; then
log ERROR "Mandatory variable ZPM_SITE not defined!" log ERROR "Mandatory variable ZPM_SITE not defined!"
exit 1 exit 1
fi fi
mkdir -p /var/cache/bridgehead/dnpm/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/dnpm/'. Please run sudo './bridgehead install $PROJECT' again to fix the permissions." if [ -z "${DNPM_DATA_DIR+x}" ]; then
DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:--1} log ERROR "Mandatory variable DNPM_DATA_DIR not defined!"
DNPM_MYSQL_ROOT_PASSWORD="$(generate_simple_password 'dnpm mysql')" exit 1
DNPM_AUTHUP_SECRET="$(generate_simple_password 'dnpm authup')" fi
DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:-0}
if grep -q 'traefik.http.routers.landing.rule=PathPrefix(`/landing`)' /srv/docker/bridgehead/minimal/docker-compose.override.yml 2>/dev/null; then
echo "Override of landing page url already in place"
else
echo "Adding override of landing page url"
if [ -f /srv/docker/bridgehead/minimal/docker-compose.override.yml ]; then
echo -e ' landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
else
echo -e 'version: "3.7"\nservices:\n landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
fi
fi
fi fi

View File

@ -65,8 +65,3 @@ services:
- "traefik.http.routers.reporter_ccp.tls=true" - "traefik.http.routers.reporter_ccp.tls=true"
- "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter" - "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
- "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip" - "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
focus:
environment:
EXPORTER_URL: "http://exporter:8092"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}"

View File

@ -23,7 +23,3 @@ services:
POSTGRES_DB: "dashboard" POSTGRES_DB: "dashboard"
volumes: volumes:
- "/var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data" - "/var/cache/bridgehead/ccp/dashboard-db:/var/lib/postgresql/data"
focus:
environment:
POSTGRES_CONNECTION_STRING: "postgresql://dashboard:${DASHBOARD_DB_PASSWORD}@dashboard-db/dashboard"

View File

@ -4,5 +4,4 @@ if [ "$ENABLE_FHIR2SQL" == true ]; then
log INFO "Dashboard setup detected -- will start Dashboard backend and FHIR2SQL service." log INFO "Dashboard setup detected -- will start Dashboard backend and FHIR2SQL service."
OVERRIDE+=" -f ./$PROJECT/modules/fhir2sql-compose.yml" OVERRIDE+=" -f ./$PROJECT/modules/fhir2sql-compose.yml"
DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')" DASHBOARD_DB_PASSWORD="$(generate_simple_password 'fhir2sql')"
FOCUS_ENDPOINT_TYPE="blaze-and-sql"
fi fi

View File

@ -101,12 +101,5 @@ services:
forward_proxy: forward_proxy:
condition: service_healthy condition: service_healthy
ccp-patient-project-identificator:
image: docker.verbis.dkfz.de/cache/samply/ccp-patient-project-identificator
container_name: bridgehead-ccp-patient-project-identificator
environment:
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SITE_NAME: ${IDMANAGEMENT_FRIENDLY_ID}
volumes: volumes:
patientlist-db-data: patientlist-db-data:

View File

@ -12,6 +12,7 @@ services:
CTS_API_KEY: ${NNGM_CTS_APIKEY} CTS_API_KEY: ${NNGM_CTS_APIKEY}
CRYPT_KEY: ${NNGM_CRYPTKEY} CRYPT_KEY: ${NNGM_CRYPTKEY}
#CTS_MAGICPL_SITE: ${SITE_ID}TODO #CTS_MAGICPL_SITE: ${SITE_ID}TODO
restart: always
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)" - "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)"

View File

@ -3,13 +3,14 @@ version: "3.7"
services: services:
obds2fhir-rest: obds2fhir-rest:
container_name: bridgehead-obds2fhir-rest container_name: bridgehead-obds2fhir-rest
image: docker.verbis.dkfz.de/samply/obds2fhir-rest:main image: docker.verbis.dkfz.de/ccp/obds2fhir-rest:main
environment: environment:
IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY} MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
SALT: ${LOCAL_SALT} SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false} KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist} MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
restart: always
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)" - "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"

View File

@ -31,7 +31,6 @@ services:
environment: environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}" DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend" TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
OIDC_URL: "${OIDC_URL}" OIDC_URL: "${OIDC_URL}"
OIDC_REALM: "${OIDC_REALM}" OIDC_REALM: "${OIDC_REALM}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}" OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
@ -42,6 +41,7 @@ services:
TEILER_PROJECT: "${PROJECT}" TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler" TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler" TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}" TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}" TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
@ -69,10 +69,10 @@ services:
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler" TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de" TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en" TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
CENTRAX_URL: "${CENTRAXX_URL}"
HTTP_PROXY: "http://forward_proxy:3128" HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}" ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}" ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
IDMANAGER_UPLOAD_APIKEY: "${IDMANAGER_UPLOAD_APIKEY}" # Only used to check if the ID Manager is active
secrets: secrets:
- ccp.conf - ccp.conf

View File

@ -1,3 +1,2 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCgpjb250ZXh0IFBhdGllbnQKCgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX1BSSU1BUllfRElBR05PU0lTX05PX1NPUlRfU1RSQVRJRklFUgpES1RLX1NUUkFUX0FHRV9DTEFTU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfUkVQTEFDRV9TUEVDSU1FTl9TVFJBVElGSUVSaWYgSW5Jbml0aWFsUG9wdWxhdGlvbiB0aGVuIFtTcGVjaW1lbl0gZWxzZSB7fSBhcyBMaXN0PFNwZWNpbWVuPgpES1RLX1NUUkFUX1BST0NFRFVSRV9TVFJBVElGSUVSCgpES1RLX1NUUkFUX01FRElDQVRJT05fU1RSQVRJRklFUgoKICBES1RLX1JFUExBQ0VfSElTVE9MT0dZX1NUUkFUSUZJRVIKIGlmIGhpc3RvLmNvZGUuY29kaW5nLndoZXJlKGNvZGUgPSAnNTk4NDctNCcpLmNvZGUuZmlyc3QoKSBpcyBudWxsIHRoZW4gMCBlbHNlIDEKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OCnRydWU= bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCgpjb250ZXh0IFBhdGllbnQKCgpES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCgpES1RLX1NUUkFUX1BSSU1BUllfRElBR05PU0lTX05PX1NPUlRfU1RSQVRJRklFUgpES1RLX1NUUkFUX0FHRV9DTEFTU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RFQ0VBU0VEX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfRElBR05PU0lTX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfU1BFQ0lNRU5fU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCiAgREtUS19TVFJBVF9ISVNUT0xPR1lfU1RSQVRJRklFUgpES1RLX1NUUkFUX0RFRl9JTl9JTklUSUFMX1BPUFVMQVRJT04KdHJ1ZQ==
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCmNvZGVzeXN0ZW0gaWNkMTA6ICdodHRwOi8vZmhpci5kZS9Db2RlU3lzdGVtL2JmYXJtL2ljZC0xMC1nbScKY29kZXN5c3RlbSBtb3JwaDogJ3VybjpvaWQ6Mi4xNi44NDAuMS4xMTM4ODMuNi40My4xJwoKY29udGV4dCBQYXRpZW50CgoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUklNQVJZX0RJQUdOT1NJU19OT19TT1JUX1NUUkFUSUZJRVIKREtUS19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1JFUExBQ0VfU1BFQ0lNRU5fU1RSQVRJRklFUmlmIEluSW5pdGlhbFBvcHVsYXRpb24gdGhlbiBbU3BlY2ltZW5dIGVsc2Uge30gYXMgTGlzdDxTcGVjaW1lbj4KREtUS19TVFJBVF9QUk9DRURVUkVfU1RSQVRJRklFUgoKREtUS19TVFJBVF9NRURJQ0FUSU9OX1NUUkFUSUZJRVIKCiAgREtUS19SRVBMQUNFX0hJU1RPTE9HWV9TVFJBVElGSUVSCiBpZiBoaXN0by5jb2RlLmNvZGluZy53aGVyZShjb2RlID0gJzU5ODQ3LTQnKS5jb2RlLmZpcnN0KCkgaXMgbnVsbCB0aGVuIDAgZWxzZSAxCkRLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTihleGlzdHMgW0NvbmRpdGlvbjogQ29kZSAnQzYxJyBmcm9tIGljZDEwXSkgYW5kIAooKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQwLzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4MTQ3LzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NDgwLzMnKSBvciAKKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNTk4NDctNCcgZnJvbSBsb2luY10gTwp3aGVyZSBPLnZhbHVlLmNvZGluZy5jb2RlIGNvbnRhaW5zICc4NTAwLzMnKSk= bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwoKY29kZXN5c3RlbSBsb2luYzogJ2h0dHA6Ly9sb2luYy5vcmcnCmNvZGVzeXN0ZW0gaWNkMTA6ICdodHRwOi8vZmhpci5kZS9Db2RlU3lzdGVtL2JmYXJtL2ljZC0xMC1nbScKY29kZXN5c3RlbSBtb3JwaDogJ3VybjpvaWQ6Mi4xNi44NDAuMS4xMTM4ODMuNi40My4xJwoKY29udGV4dCBQYXRpZW50CgoKREtUS19TVFJBVF9HRU5ERVJfU1RSQVRJRklFUgoKREtUS19TVFJBVF9QUklNQVJZX0RJQUdOT1NJU19OT19TT1JUX1NUUkFUSUZJRVIKREtUS19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgoKREtUS19TVFJBVF9ERUNFQVNFRF9TVFJBVElGSUVSCgpES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCgpES1RLX1NUUkFUX1NQRUNJTUVOX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfUFJPQ0VEVVJFX1NUUkFUSUZJRVIKCkRLVEtfU1RSQVRfTUVESUNBVElPTl9TVFJBVElGSUVSCgogIERLVEtfU1RSQVRfSElTVE9MT0dZX1NUUkFUSUZJRVIKREtUS19TVFJBVF9ERUZfSU5fSU5JVElBTF9QT1BVTEFUSU9OKGV4aXN0cyBbQ29uZGl0aW9uOiBDb2RlICdDNjEnIGZyb20gaWNkMTBdKSBhbmQgCigoZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDAvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzgxNDcvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg0ODAvMycpIG9yIAooZXhpc3RzIGZyb20gW09ic2VydmF0aW9uOiBDb2RlICc1OTg0Ny00JyBmcm9tIGxvaW5jXSBPCndoZXJlIE8udmFsdWUuY29kaW5nLmNvZGUgY29udGFpbnMgJzg1MDAvMycpKQ==
ORGANOID_DASHBOARD_PUBLIC

View File

@ -29,12 +29,4 @@ done
idManagementSetup idManagementSetup
mtbaSetup mtbaSetup
obds2fhirRestSetup obds2fhirRestSetup
blazeSecondarySetup blazeSecondarySetup
for module in modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
transfairSetup

View File

@ -2,7 +2,7 @@ version: "3.7"
services: services:
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-dhki-blaze container_name: bridgehead-dhki-blaze
environment: environment:
BASE_URL: "http://bridgehead-dhki-blaze:8080" BASE_URL: "http://bridgehead-dhki-blaze:8080"
@ -33,7 +33,7 @@ services:
EPSILON: 0.28 EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf' QUERIES_TO_CACHE: '/queries_to_cache.conf'
volumes: volumes:
- /srv/docker/bridgehead/dhki/queries_to_cache.conf:/queries_to_cache.conf:ro - /srv/docker/bridgehead/dhki/queries_to_cache.conf:/queries_to_cache.conf
depends_on: depends_on:
- "beam-proxy" - "beam-proxy"
- "blaze" - "blaze"

View File

@ -17,12 +17,4 @@ do
done done
idManagementSetup idManagementSetup
obds2fhirRestSetup obds2fhirRestSetup
for module in modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done
transfairSetup

View File

@ -1,42 +0,0 @@
## How to Change Config Access Token
### 1. Generate a New Access Token
1. Go to your Git configuration repository provider, which might be either [git.verbis.dkfz.de](https://git.verbis.dkfz.de) or [gitlab.bbmri-eric.eu](https://gitlab.bbmri-eric.eu).
2. Navigate to the configuration repository for your site.
3. Go to **Settings → Access Tokens** to check if your Access Token is valid or expired.
- **If expired**, create a new Access Token.
4. Configure the new Access Token with the following settings:
- **Expiration date**: One year from today, minus one day.
- **Role**: Developer.
- **Scope**: Only `read_repository`.
5. Save the newly generated Access Token in a secure location.
---
### 2. Replace the Old Access Token
1. Navigate to `/etc/bridgehead` in your system.
2. Run the following command to retrieve the current Git remote URL:
```bash
git remote get-url origin
```
Example output:
```
https://name40dkfz-heidelberg.de:<old_access_token>@git.verbis.dkfz.de/bbmri-bridgehead-configs/test.git
```
3. Replace `<old_access_token>` with your new Access Token in the URL.
4. Set the updated URL using the following command:
```bash
git remote set-url origin https://name40dkfz-heidelberg.de:<new_access_token>@git.verbis.dkfz.de/bbmri-bridgehead-configs/test.git
```
5. Start the Bridgehead update service by running:
```bash
systemctl start bridgehead-update@<project>
```
6. View the output to ensure the update process is successful:
```bash
journalctl -u bridgehead-update@<project> -f
```

View File

@ -2,14 +2,13 @@ version: "3.7"
services: services:
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-itcc-blaze container_name: bridgehead-itcc-blaze
environment: environment:
BASE_URL: "http://bridgehead-itcc-blaze:8080" BASE_URL: "http://bridgehead-itcc-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m" JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000} DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP} DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false" ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes: volumes:
- "blaze-data:/app/data" - "blaze-data:/app/data"
@ -32,10 +31,6 @@ services:
BEAM_PROXY_URL: http://beam-proxy:8081 BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT} RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28 EPSILON: 0.28
QUERIES_TO_CACHE: '/queries_to_cache.conf'
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
volumes:
- /srv/docker/bridgehead/itcc/queries_to_cache.conf:/queries_to_cache.conf:ro
depends_on: depends_on:
- "beam-proxy" - "beam-proxy"
- "blaze" - "blaze"

View File

@ -1,2 +0,0 @@
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwoKY29udGV4dCBQYXRpZW50CkRLVEtfU1RSQVRfR0VOREVSX1NUUkFUSUZJRVIKICBES1RLX1NUUkFUX0RJQUdOT1NJU19TVFJBVElGSUVSCiAgSVRDQ19TVFJBVF9BR0VfQ0xBU1NfU1RSQVRJRklFUgogIERLVEtfU1RSQVRfREVGX0lOX0lOSVRJQUxfUE9QVUxBVElPTgp0cnVl
bGlicmFyeSBSZXRyaWV2ZQp1c2luZyBGSElSIHZlcnNpb24gJzQuMC4wJwppbmNsdWRlIEZISVJIZWxwZXJzIHZlcnNpb24gJzQuMC4wJwpjb2Rlc3lzdGVtIFNhbXBsZU1hdGVyaWFsVHlwZTogJ2h0dHBzOi8vZmhpci5iYm1yaS5kZS9Db2RlU3lzdGVtL1NhbXBsZU1hdGVyaWFsVHlwZScKCmNvZGVzeXN0ZW0gbG9pbmM6ICdodHRwOi8vbG9pbmMub3JnJwpjb2Rlc3lzdGVtIG1vbGVjdWxhck1hcmtlcjogJ2h0dHA6Ly93d3cuZ2VuZW5hbWVzLm9yZycKCmNvbnRleHQgUGF0aWVudApES1RLX1NUUkFUX0dFTkRFUl9TVFJBVElGSUVSCiAgREtUS19TVFJBVF9ESUFHTk9TSVNfU1RSQVRJRklFUgogIElUQ0NfU1RSQVRfQUdFX0NMQVNTX1NUUkFUSUZJRVIKICBES1RLX1NUUkFUX0RFRl9JTl9JTklUSUFMX1BPUFVMQVRJT04KKGV4aXN0cyBmcm9tIFtPYnNlcnZhdGlvbjogQ29kZSAnNjk1NDgtNicgZnJvbSBsb2luY10gTwp3aGVyZSBPLmNvbXBvbmVudC53aGVyZShjb2RlLmNvZGluZyBjb250YWlucyBDb2RlICc0ODAxOC02JyBmcm9tIGxvaW5jKS52YWx1ZS5jb2RpbmcgY29udGFpbnMgQ29kZSAnQlJBRicgZnJvbSBtb2xlY3VsYXJNYXJrZXIp

View File

@ -1,12 +1,8 @@
version: "3.7" version: "3.7"
services: services:
landing:
deploy:
replicas: 0 #deactivate landing page
blaze: blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG} image: docker.verbis.dkfz.de/cache/samply/blaze:0.28
container_name: bridgehead-kr-blaze container_name: bridgehead-kr-blaze
environment: environment:
BASE_URL: "http://bridgehead-kr-blaze:8080" BASE_URL: "http://bridgehead-kr-blaze:8080"

View File

@ -1,8 +1,6 @@
version: "3.7" version: "3.7"
services: services:
landing: landing:
deploy:
replicas: 1 #reactivate if lens is in use
container_name: lens_federated-search container_name: lens_federated-search
image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID} image: docker.verbis.dkfz.de/ccp/lens:${SITE_ID}
labels: labels:

View File

@ -10,6 +10,7 @@ services:
SALT: ${LOCAL_SALT} SALT: ${LOCAL_SALT}
KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false} KEEP_INTERNAL_ID: ${KEEP_INTERNAL_ID:-false}
MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist} MAINZELLISTE_URL: ${PATIENTLIST_URL:-http://patientlist:8080/patientlist}
restart: always
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)" - "traefik.http.routers.obds2fhir-rest.rule=PathPrefix(`/obds2fhir-rest`) || PathPrefix(`/adt2fhir-rest`)"

View File

@ -31,7 +31,6 @@ services:
environment: environment:
DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}" DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend" TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
OIDC_URL: "${OIDC_URL}" OIDC_URL: "${OIDC_URL}"
OIDC_REALM: "${OIDC_REALM}" OIDC_REALM: "${OIDC_REALM}"
OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}" OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
@ -42,6 +41,7 @@ services:
TEILER_PROJECT: "${PROJECT}" TEILER_PROJECT: "${PROJECT}"
EXPORTER_API_KEY: "${EXPORTER_API_KEY}" EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler" TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler" TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
TEILER_USER: "${OIDC_USER_GROUP}" TEILER_USER: "${OIDC_USER_GROUP}"
TEILER_ADMIN: "${OIDC_ADMIN_GROUP}" TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
@ -69,6 +69,7 @@ services:
TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler" TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de" TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en" TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
CENTRAX_URL: "${CENTRAXX_URL}"
HTTP_PROXY: "http://forward_proxy:3128" HTTP_PROXY: "http://forward_proxy:3128"
ENABLE_MTBA: "${ENABLE_MTBA}" ENABLE_MTBA: "${ENABLE_MTBA}"
ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}" ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"

View File

@ -116,7 +116,7 @@ assertVarsNotEmpty() {
MISSING_VARS="" MISSING_VARS=""
for VAR in $@; do for VAR in $@; do
if [ -z "${!VAR}" ]; then if [ -z "${!VAR}" ]; then
MISSING_VARS+="$VAR " MISSING_VARS+="$VAR "
fi fi
done done
@ -171,10 +171,8 @@ optimizeBlazeMemoryUsage() {
if [ $available_system_memory_chunks -eq 0 ]; then if [ $available_system_memory_chunks -eq 0 ]; then
log WARN "Only ${BLAZE_MEMORY_CAP} system memory available for Blaze. If your Blaze stores more than 128000 fhir ressources it will run significally slower." log WARN "Only ${BLAZE_MEMORY_CAP} system memory available for Blaze. If your Blaze stores more than 128000 fhir ressources it will run significally slower."
export BLAZE_RESOURCE_CACHE_CAP=128000; export BLAZE_RESOURCE_CACHE_CAP=128000;
export BLAZE_CQL_CACHE_CAP=32;
else else
export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500)) export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500))
export BLAZE_CQL_CACHE_CAP=$((($system_memory_in_mb/4)/16));
fi fi
fi fi
} }
@ -313,82 +311,15 @@ function sync_secrets() {
-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \ -e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
-e PROXY_ID=$PROXY_ID \ -e PROXY_ID=$PROXY_ID \
-e BROKER_URL=$BROKER_URL \ -e BROKER_URL=$BROKER_URL \
-e OIDC_PROVIDER=secret-sync-central.central-secret-sync.$BROKER_ID \ -e OIDC_PROVIDER=secret-sync-central.oidc-client-enrollment.$BROKER_ID \
-e SECRET_DEFINITIONS=$secret_sync_args \ -e SECRET_DEFINITIONS=$secret_sync_args \
docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
set -a # Export variables as environment variables set -a # Export variables as environment variables
source /var/cache/bridgehead/secrets/oidc source /var/cache/bridgehead/secrets/*
set +a # Export variables in the regular way set +a # Export variables in the regular way
} }
function secret_sync_gitlab_token() {
# Map the origin of the git repository /etc/bridgehead to the prefix recognized by Secret Sync
local gitlab
case "$(git -C /etc/bridgehead remote get-url origin)" in
*git.verbis.dkfz.de*) gitlab=verbis;;
*gitlab.bbmri-eric.eu*) gitlab=bbmri;;
*)
log "WARN" "Not running Secret Sync because the git repository /etc/bridgehead has unknown origin"
return
;;
esac
if [ "$PROJECT" == "bbmri" ]; then
# If the project is BBMRI, use the BBMRI-ERIC broker and not the GBN broker
proxy_id=$ERIC_PROXY_ID
broker_url=$ERIC_BROKER_URL
broker_id=$ERIC_BROKER_ID
root_crt_file="/srv/docker/bridgehead/bbmri/modules/${ERIC_ROOT_CERT}.root.crt.pem"
else
proxy_id=$PROXY_ID
broker_url=$BROKER_URL
broker_id=$BROKER_ID
root_crt_file="/srv/docker/bridgehead/$PROJECT/root.crt.pem"
fi
# Create a temporary directory for Secret Sync that is valid per boot
secret_sync_tempdir="/tmp/bridgehead/secret-sync.boot-$(cat /proc/sys/kernel/random/boot_id)"
mkdir -p $secret_sync_tempdir
# Use Secret Sync to validate the GitLab token in $secret_sync_tempdir/cache.
# If it is missing or expired, Secret Sync will create a new token and write it to the file.
# The git credential helper reads the token from the file during git pull.
log "INFO" "Running Secret Sync for the GitLab token (gitlab=$gitlab)"
docker pull docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest # make sure we have the latest image
docker run --rm \
-v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
-v $root_crt_file:/run/secrets/root.crt.pem:ro \
-v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
-v $secret_sync_tempdir:/secret-sync/ \
-e CACHE_PATH=/secret-sync/gitlab-token \
-e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
-e NO_PROXY=localhost,127.0.0.1 \
-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
-e PROXY_ID=$proxy_id \
-e BROKER_URL=$broker_url \
-e GITLAB_PROJECT_ACCESS_TOKEN_PROVIDER=secret-sync-central.central-secret-sync.$broker_id \
-e SECRET_DEFINITIONS=GitLabProjectAccessToken:BRIDGEHEAD_CONFIG_REPO_TOKEN:$gitlab \
docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
if [ $? -eq 0 ]; then
log "INFO" "Secret Sync was successful"
# In the past we used to hardcode tokens into the repository URL. We have to remove those now for the git credential helper to become effective.
CLEAN_REPO="$(git -C /etc/bridgehead remote get-url origin | sed -E 's|https://[^@]+@|https://|')"
git -C /etc/bridgehead remote set-url origin "$CLEAN_REPO"
# Set the git credential helper
git -C /etc/bridgehead config credential.helper /srv/docker/bridgehead/lib/gitlab-token-helper.sh
else
log "WARN" "Secret Sync failed"
# Remove the git credential helper
git -C /etc/bridgehead config --unset credential.helper
fi
# In the past the git credential helper was also set for /srv/docker/bridgehead but never used.
# Let's remove it to avoid confusion. This line can be removed at some point the future when we
# believe that it was removed on all/most production servers.
git -C /srv/docker/bridgehead config --unset credential.helper
}
capitalize_first_letter() { capitalize_first_letter() {
input="$1" input="$1"
capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}" capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}"

View File

@ -1,11 +0,0 @@
#!/bin/bash
[ "$1" = "get" ] || exit
source "/tmp/bridgehead/secret-sync.boot-$(cat /proc/sys/kernel/random/boot_id)/gitlab-token"
# Any non-empty username works, only the token matters
cat << EOF
username=bk
password=$BRIDGEHEAD_CONFIG_REPO_TOKEN
EOF

lib/gitpassword.sh Executable file (41 lines added)
View File

@ -0,0 +1,41 @@
#!/bin/bash
if [ "$1" != "get" ]; then
echo "Usage: $0 get"
exit 1
fi
baseDir() {
# see https://stackoverflow.com/questions/59895
SOURCE=${BASH_SOURCE[0]}
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR=$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )
SOURCE=$(readlink "$SOURCE")
[[ $SOURCE != /* ]] && SOURCE=$DIR/$SOURCE # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR=$( cd -P "$( dirname "$SOURCE" )/.." >/dev/null 2>&1 && pwd )
echo $DIR
}
BASE=$(baseDir)
cd $BASE
source lib/functions.sh
assertVarsNotEmpty SITE_ID || fail_and_report 1 "gitpassword.sh failed: SITE_ID is empty."
PARAMS="$(cat)"
GITHOST=$(echo "$PARAMS" | grep "^host=" | sed 's/host=\(.*\)/\1/g')
fetchVarsFromVault GIT_PASSWORD
if [ -z "${GIT_PASSWORD}" ]; then
fail_and_report 1 "gitpassword.sh failed: Git password not found."
fi
cat <<EOF
protocol=https
host=$GITHOST
username=bk-${SITE_ID}
password=${GIT_PASSWORD}
EOF
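
For context on how a helper like `gitpassword.sh` is used: git invokes the configured credential helper with the action (`get`) as an argument, feeds it key=value pairs on stdin, and expects credentials back as key=value pairs on stdout. A minimal sketch of registering and manually exercising the helper, using paths that appear in this diff (the host value is only an example, and `SITE_ID` plus the vault configuration are assumed to be available in the environment):

```bash
# Register the helper for the configuration repository (same path the update script uses)
git -C /etc/bridgehead config credential.helper /srv/docker/bridgehead/lib/gitpassword.sh

# Simulate the input git would send; the helper answers with protocol/host/username/password lines
printf 'protocol=https\nhost=git.verbis.dkfz.de\n' | /srv/docker/bridgehead/lib/gitpassword.sh get
```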

View File

@ -41,14 +41,6 @@ if [ ! -z "$NNGM_CTS_APIKEY" ] && [ -z "$NNGM_AUTH" ]; then
add_basic_auth_user "nngm" $generated_passwd "NNGM_AUTH" $PROJECT add_basic_auth_user "nngm" $generated_passwd "NNGM_AUTH" $PROJECT
fi fi
if [ -z "$TRANSFAIR_AUTH" ]; then
if [[ -n "$TTP_URL" || -n "$EXCHANGE_ID_SYSTEM" ]]; then
log "INFO" "Now generating basic auth user for transfair API (see adduser in bridgehead for more information). "
generated_passwd="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 32)"
add_basic_auth_user "transfair" $generated_passwd "TRANSFAIR_AUTH" $PROJECT
fi
fi
log "INFO" "Registering system units for bridgehead and bridgehead-update" log "INFO" "Registering system units for bridgehead and bridgehead-update"
cp -v \ cp -v \
lib/systemd/bridgehead\@.service \ lib/systemd/bridgehead\@.service \

View File

@ -55,9 +55,6 @@ case "$PROJECT" in
cce) cce)
site_configuration_repository_middle="git.verbis.dkfz.de/cce-sites/" site_configuration_repository_middle="git.verbis.dkfz.de/cce-sites/"
;; ;;
pscc)
site_configuration_repository_middle="git.verbis.dkfz.de/pscc-sites/"
;;
itcc) itcc)
site_configuration_repository_middle="git.verbis.dkfz.de/itcc-sites/" site_configuration_repository_middle="git.verbis.dkfz.de/itcc-sites/"
;; ;;

View File

@ -19,7 +19,7 @@ fi
hc_send log "Checking for bridgehead updates ..." hc_send log "Checking for bridgehead updates ..."
CONFFILE=/etc/bridgehead/$PROJECT.conf CONFFILE=/etc/bridgehead/$1.conf
if [ ! -e $CONFFILE ]; then if [ ! -e $CONFFILE ]; then
fail_and_report 1 "Configuration file $CONFFILE not found." fail_and_report 1 "Configuration file $CONFFILE not found."
@ -33,7 +33,7 @@ export SITE_ID
checkOwner /srv/docker/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /srv/docker/bridgehead" checkOwner /srv/docker/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /srv/docker/bridgehead"
checkOwner /etc/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /etc/bridgehead" checkOwner /etc/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /etc/bridgehead"
secret_sync_gitlab_token CREDHELPER="/srv/docker/bridgehead/lib/gitpassword.sh"
CHANGES="" CHANGES=""
@ -45,6 +45,10 @@ for DIR in /etc/bridgehead $(pwd); do
if [ -n "$OUT" ]; then if [ -n "$OUT" ]; then
report_error log "The working directory $DIR is modified. Changed files: $OUT" report_error log "The working directory $DIR is modified. Changed files: $OUT"
fi fi
if [ "$(git -C $DIR config --get credential.helper)" != "$CREDHELPER" ]; then
log "INFO" "Configuring repo to use bridgehead git credential helper."
git -C $DIR config credential.helper "$CREDHELPER"
fi
old_git_hash="$(git -C $DIR rev-parse --verify HEAD)" old_git_hash="$(git -C $DIR rev-parse --verify HEAD)"
if [ -z "$HTTPS_PROXY_FULL_URL" ]; then if [ -z "$HTTPS_PROXY_FULL_URL" ]; then
log "INFO" "Git is using no proxy!" log "INFO" "Git is using no proxy!"
@ -54,8 +58,7 @@ for DIR in /etc/bridgehead $(pwd); do
OUT=$(retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR fetch 2>&1 && retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR pull 2>&1) OUT=$(retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR fetch 2>&1 && retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR pull 2>&1)
fi fi
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
OUT_SAN=$(echo $OUT | sed -E 's|://[^:]+:[^@]+@|://credentials@|g') report_error log "Unable to update git $DIR: $OUT"
report_error log "Unable to update git $DIR: $OUT_SAN"
fi fi
new_git_hash="$(git -C $DIR rev-parse --verify HEAD)" new_git_hash="$(git -C $DIR rev-parse --verify HEAD)"

View File

@ -16,7 +16,7 @@ services:
- --entrypoints.web.http.redirections.entrypoint.scheme=https - --entrypoints.web.http.redirections.entrypoint.scheme=https
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.dashboard.rule=PathPrefix(`/dashboard/`)" - "traefik.http.routers.dashboard.rule=PathPrefix(`/api`) || PathPrefix(`/dashboard/`)"
- "traefik.http.routers.dashboard.entrypoints=websecure" - "traefik.http.routers.dashboard.entrypoints=websecure"
- "traefik.http.routers.dashboard.service=api@internal" - "traefik.http.routers.dashboard.service=api@internal"
- "traefik.http.routers.dashboard.tls=true" - "traefik.http.routers.dashboard.tls=true"

View File

@ -1,142 +0,0 @@
{
"sites": [
{
"id": "UKFR",
"name": "Freiburg",
"virtualhost": "ukfr.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKHD",
"name": "Heidelberg",
"virtualhost": "ukhd.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKT",
"name": "Tübingen",
"virtualhost": "ukt.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKU",
"name": "Ulm",
"virtualhost": "uku.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UM",
"name": "Mainz",
"virtualhost": "um.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKMR",
"name": "Marburg",
"virtualhost": "ukmr.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKE",
"name": "Hamburg",
"virtualhost": "uke.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKA",
"name": "Aachen",
"virtualhost": "uka.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "Charite",
"name": "Berlin",
"virtualhost": "charite.dnpm.de",
"beamconnect": "dnpm-connect.berlin-test.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "MRI",
"name": "Muenchen-tum",
"virtualhost": "mri.dnpm.de",
"beamconnect": "dnpm-connect.muenchen-tum.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "KUM",
"name": "Muenchen-lmu",
"virtualhost": "kum.dnpm.de",
"beamconnect": "dnpm-connect.muenchen-lmu.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "MHH",
"name": "Hannover",
"virtualhost": "mhh.dnpm.de",
"beamconnect": "dnpm-connect.hannover.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKDD",
"name": "dresden-dnpm",
"virtualhost": "ukdd.dnpm.de",
"beamconnect": "dnpm-connect.dresden-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKB",
"name": "Bonn",
"virtualhost": "ukb.dnpm.de",
"beamconnect": "dnpm-connect.bonn-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKD",
"name": "Duesseldorf",
"virtualhost": "ukd.dnpm.de",
"beamconnect": "dnpm-connect.duesseldorf-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKK",
"name": "Koeln",
"virtualhost": "ukk.dnpm.de",
"beamconnect": "dnpm-connect.dnpm-bridge.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UME",
"name": "Essen",
"virtualhost": "ume.dnpm.de",
"beamconnect": "dnpm-connect.essen.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKM",
"name": "Muenster",
"virtualhost": "ukm.dnpm.de",
"beamconnect": "dnpm-connect.muenster-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKF",
"name": "Frankfurt",
"virtualhost": "ukf.dnpm.de",
"beamconnect": "dnpm-connect.frankfurt.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UMG",
"name": "Goettingen",
"virtualhost": "umg.dnpm.de",
"beamconnect": "dnpm-connect.goettingen.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKW",
"name": "Würzburg",
"virtualhost": "ukw.dnpm.de",
"beamconnect": "dnpm-connect.wuerzburg-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "UKSH",
"name": "Schleswig-Holstein",
"virtualhost": "uksh.dnpm.de",
"beamconnect": "dnpm-connect.uksh-dnpm.broker.ccp-it.dktk.dkfz.de"
},
{
"id": "TKT",
"name": "Test",
"virtualhost": "tkt.dnpm.de",
"beamconnect": "dnpm-connect.tobias-develop.broker.ccp-it.dktk.dkfz.de"
}
]
}

View File

@ -29,7 +29,7 @@ services:
PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT} PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT}
APP_ID: dnpm-connect.${DNPM_PROXY_ID} APP_ID: dnpm-connect.${DNPM_PROXY_ID}
DISCOVERY_URL: "./conf/central_targets.json" DISCOVERY_URL: "./conf/central_targets.json"
LOCAL_TARGETS_FILE: "/conf/connect_targets.json" LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
HTTP_PROXY: http://forward_proxy:3128 HTTP_PROXY: http://forward_proxy:3128
HTTPS_PROXY: http://forward_proxy:3128 HTTPS_PROXY: http://forward_proxy:3128
NO_PROXY: dnpm-beam-proxy,dnpm-backend, host.docker.internal${DNPM_ADDITIONAL_NO_PROXY} NO_PROXY: dnpm-beam-proxy,dnpm-backend, host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
@ -41,7 +41,7 @@ services:
volumes: volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
- /srv/docker/bridgehead/minimal/modules/dnpm-central-targets.json:/conf/central_targets.json:ro - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)" - "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)"

View File

@ -1,99 +1,34 @@
version: "3.7" version: "3.7"
services: services:
dnpm-mysql:
image: mysql:9
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
interval: 3s
timeout: 5s
retries: 5
environment:
MYSQL_ROOT_HOST: "%"
MYSQL_ROOT_PASSWORD: ${DNPM_MYSQL_ROOT_PASSWORD}
volumes:
- /var/cache/bridgehead/dnpm/mysql:/var/lib/mysql
dnpm-authup:
image: authup/authup:latest
container_name: bridgehead-dnpm-authup
volumes:
- /var/cache/bridgehead/dnpm/authup:/usr/src/app/writable
depends_on:
dnpm-mysql:
condition: service_healthy
command: server/core start
environment:
- PUBLIC_URL=https://${HOST}/auth/
- AUTHORIZE_REDIRECT_URL=https://${HOST}
- ROBOT_ADMIN_ENABLED=true
- ROBOT_ADMIN_SECRET=${DNPM_AUTHUP_SECRET}
- ROBOT_ADMIN_SECRET_RESET=true
- DB_TYPE=mysql
- DB_HOST=dnpm-mysql
- DB_USERNAME=root
- DB_PASSWORD=${DNPM_MYSQL_ROOT_PASSWORD}
- DB_DATABASE=auth
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.authup-strip.stripprefix.prefixes=/auth/"
- "traefik.http.routers.dnpm-auth.middlewares=authup-strip"
- "traefik.http.routers.dnpm-auth.rule=PathPrefix(`/auth`)"
- "traefik.http.services.dnpm-auth.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-auth.tls=true"
dnpm-portal:
image: ghcr.io/dnpm-dip/portal:latest
container_name: bridgehead-dnpm-portal
environment:
- NUXT_API_URL=http://dnpm-backend:9000/
- NUXT_PUBLIC_API_URL=https://${HOST}/api/
- NUXT_AUTHUP_URL=http://dnpm-authup:3000/
- NUXT_PUBLIC_AUTHUP_URL=https://${HOST}/auth/
labels:
- "traefik.enable=true"
- "traefik.http.routers.dnpm-frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.dnpm-frontend.loadbalancer.server.port=3000"
- "traefik.http.routers.dnpm-frontend.tls=true"
dnpm-backend: dnpm-backend:
image: ghcr.io/kohlbacherlab/bwhc-backend:1.0-snapshot-broker-connector
container_name: bridgehead-dnpm-backend container_name: bridgehead-dnpm-backend
image: ghcr.io/dnpm-dip/backend:latest
environment: environment:
- LOCAL_SITE=${ZPM_SITE}:${SITE_NAME} # Format: {Site-ID}:{Site-name}, e.g. UKT:Tübingen - ZPM_SITE=${ZPM_SITE}
- RD_RANDOM_DATA=${DNPM_SYNTH_NUM:--1} - N_RANDOM_FILES=${DNPM_SYNTH_NUM}
- MTB_RANDOM_DATA=${DNPM_SYNTH_NUM:--1}
- HATEOAS_HOST=https://${HOST}
- CONNECTOR_TYPE=broker
- AUTHUP_URL=robot://system:${DNPM_AUTHUP_SECRET}@http://dnpm-authup:3000
volumes: volumes:
- /etc/bridgehead/dnpm/config:/dnpm_config - /etc/bridgehead/dnpm:/bwhc_config:ro
- /var/cache/bridgehead/dnpm/backend-data:/dnpm_data - ${DNPM_DATA_DIR}:/bwhc_data
depends_on:
dnpm-authup:
condition: service_healthy
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.services.dnpm-backend.loadbalancer.server.port=9000" - "traefik.http.routers.bwhc-backend.rule=PathPrefix(`/bwhc`)"
# expose everything - "traefik.http.services.bwhc-backend.loadbalancer.server.port=9000"
- "traefik.http.routers.dnpm-backend.rule=PathPrefix(`/api`)" - "traefik.http.routers.bwhc-backend.tls=true"
- "traefik.http.routers.dnpm-backend.tls=true"
- "traefik.http.routers.dnpm-backend.service=dnpm-backend"
# except ETL
- "traefik.http.routers.dnpm-backend-etl.rule=PathRegexp(`^/api(/.*)?etl(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-etl.tls=true"
- "traefik.http.routers.dnpm-backend-etl.service=dnpm-backend"
# this needs an ETL processor with support for basic auth
- "traefik.http.routers.dnpm-backend-etl.middlewares=auth"
# except peer-to-peer
- "traefik.http.routers.dnpm-backend-peer.rule=PathRegexp(`^/api(/.*)?/peer2peer(/.*)?$`)"
- "traefik.http.routers.dnpm-backend-peer.tls=true"
- "traefik.http.routers.dnpm-backend-peer.service=dnpm-backend"
- "traefik.http.routers.dnpm-backend-peer.middlewares=dnpm-backend-peer"
# this effectively denies all requests
# this is okay, because requests from peers don't go through Traefik
- "traefik.http.middlewares.dnpm-backend-peer.ipWhiteList.sourceRange=0.0.0.0/32"
landing: dnpm-frontend:
image: ghcr.io/kohlbacherlab/bwhc-frontend:2209
container_name: bridgehead-dnpm-frontend
links:
- dnpm-backend
environment:
- NUXT_HOST=0.0.0.0
- NUXT_PORT=8080
- BACKEND_PROTOCOL=https
- BACKEND_HOSTNAME=$HOST
- BACKEND_PORT=443
labels: labels:
- "traefik.http.routers.landing.rule=PathPrefix(`/landing`)" - "traefik.enable=true"
- "traefik.http.routers.bwhc-frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.bwhc-frontend.loadbalancer.server.port=8080"
- "traefik.http.routers.bwhc-frontend.tls=true"

View File

@ -1,16 +1,28 @@
#!/bin/bash #!/bin/bash
if [ -n "${ENABLE_DNPM_NODE}" ]; then if [ -n "${ENABLE_DNPM_NODE}" ]; then
log INFO "DNPM setup detected -- will start DNPM:DIP node." log INFO "DNPM setup detected (BwHC Node) -- will start BwHC node."
OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml" OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"
# Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf # Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
if [ -z "${ZPM_SITE+x}" ]; then if [ -z "${ZPM_SITE+x}" ]; then
log ERROR "Mandatory variable ZPM_SITE not defined!" log ERROR "Mandatory variable ZPM_SITE not defined!"
exit 1 exit 1
fi fi
mkdir -p /var/cache/bridgehead/dnpm/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/dnpm/'. Please run sudo './bridgehead install $PROJECT' again to fix the permissions." if [ -z "${DNPM_DATA_DIR+x}" ]; then
DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:--1} log ERROR "Mandatory variable DNPM_DATA_DIR not defined!"
DNPM_MYSQL_ROOT_PASSWORD="$(generate_simple_password 'dnpm mysql')" exit 1
DNPM_AUTHUP_SECRET="$(generate_simple_password 'dnpm authup')" fi
DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:-0}
if grep -q 'traefik.http.routers.landing.rule=PathPrefix(`/landing`)' /srv/docker/bridgehead/minimal/docker-compose.override.yml 2>/dev/null; then
echo "Override of landing page url already in place"
else
echo "Adding override of landing page url"
if [ -f /srv/docker/bridgehead/minimal/docker-compose.override.yml ]; then
echo -e ' landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
else
echo -e 'version: "3.7"\nservices:\n landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
fi
fi
fi fi

View File

@ -11,6 +11,7 @@ services:
CTS_API_KEY: ${NNGM_CTS_APIKEY} CTS_API_KEY: ${NNGM_CTS_APIKEY}
CRYPT_KEY: ${NNGM_CRYPTKEY} CRYPT_KEY: ${NNGM_CRYPTKEY}
#CTS_MAGICPL_SITE: ${SITE_ID}TODO #CTS_MAGICPL_SITE: ${SITE_ID}TODO
restart: always
labels: labels:
- "traefik.enable=true" - "traefik.enable=true"
- "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)" - "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)"

View File

@ -1,17 +0,0 @@
version: "3.7"
services:
ssh-tunnel:
image: docker.verbis.dkfz.de/cache/samply/ssh-tunnel
container_name: bridgehead-ccp-ssh-tunnel
environment:
SSH_TUNNEL_USERNAME: "${SSH_TUNNEL_USERNAME}"
SSH_TUNNEL_HOST: "${SSH_TUNNEL_HOST}"
SSH_TUNNEL_PORT: "${SSH_TUNNEL_PORT:-22}"
volumes:
- "/etc/bridgehead/ssh-tunnel.conf:/ssh-tunnel.conf:ro"
secrets:
- privkey
secrets:
privkey:
file: /etc/bridgehead/pki/ssh-tunnel.priv.pem

View File

@ -1,6 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_SSH_TUNNEL" ]; then
log INFO "SSH Tunnel setup detected -- will start SSH Tunnel."
OVERRIDE+=" -f ./modules/ssh-tunnel-compose.yml"
fi

View File

@ -1,19 +0,0 @@
# SSH Tunnel Module
This module enables SSH tunneling capabilities for the Bridgehead installation.
The primary use case for this is to connect bridgehead components that are hosted externally due to security concerns.
To connect such externally hosted components to the locally running Bridgehead infrastructure, write a docker-compose.override.yml that points the relevant URLs at the corresponding forwarded port of the ssh-tunnel container (see the sketch after the lists below).
## Configuration Variables
- `ENABLE_SSH_TUNNEL`: Required to enable the module
- `SSH_TUNNEL_USERNAME`: Username for SSH connection
- `SSH_TUNNEL_HOST`: Target host for SSH tunnel
- `SSH_TUNNEL_PORT`: SSH port (defaults to 22)
## Configuration Files
The module requires the following files to be present:
- `/etc/bridgehead/ssh-tunnel.conf`: SSH tunnel configuration file. Detailed information can be found [here](https://github.com/samply/ssh-tunnel?tab=readme-ov-file#configuration).
- `/etc/bridgehead/pki/ssh-tunnel.priv.pem`: The SSH private key used to connect to the `SSH_TUNNEL_HOST`. **Passphrases for the key are not supported!**
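
As a purely illustrative sketch of the override described above, assuming a hypothetical service name, environment variable, and forwarded port (none of which are part of this module):

```yaml
# Hypothetical docker-compose.override.yml: repoint a local service at a port
# forwarded by the ssh-tunnel container. Service name, variable and port are examples.
services:
  some-local-service:
    environment:
      EXTERNAL_COMPONENT_URL: "http://ssh-tunnel:8080"
```

Here `ssh-tunnel` is the compose service name defined by this module; the port must match a forward configured in `/etc/bridgehead/ssh-tunnel.conf`.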

View File

@ -1,86 +0,0 @@
services:
transfair:
image: docker.verbis.dkfz.de/cache/samply/transfair:latest
container_name: bridgehead-transfair
environment:
# NOTE: Those 3 variables need only to be passed if their set, otherwise transfair will complain about empty url values
- TTP_URL
- TTP_ML_API_KEY
- TTP_GW_SOURCE
- TTP_GW_EPIX_DOMAIN
- TTP_GW_GPAS_DOMAIN
- TTP_TYPE
- TTP_AUTH
- PROJECT_ID_SYSTEM
- FHIR_REQUEST_URL=${FHIR_REQUEST_URL}
- FHIR_INPUT_URL=${FHIR_INPUT_URL}
- FHIR_OUTPUT_URL=${FHIR_OUTPUT_URL:-http://blaze:8080}
- FHIR_REQUEST_CREDENTIALS=${FHIR_REQUEST_CREDENTIALS}
- FHIR_INPUT_CREDENTIALS=${FHIR_INPUT_CREDENTIALS}
- FHIR_OUTPUT_CREDENTIALS=${FHIR_OUTPUT_CREDENTIALS}
- EXCHANGE_ID_SYSTEM=${EXCHANGE_ID_SYSTEM:-SESSION_ID}
- DATABASE_URL=sqlite://transfair/data_requests.sql?mode=rwc
- RUST_LOG=${RUST_LOG:-info}
- TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs
- TLS_DISABLE=${TRANSFAIR_TLS_DISABLE:-false}
- NO_PROXY=${TRANSFAIR_NO_PROXIES}
- ALL_PROXY=http://forward_proxy:3128
volumes:
- /var/cache/bridgehead/${PROJECT}/transfair:/transfair
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.transfair-strip.stripprefix.prefixes=/transfair"
- "traefik.http.routers.transfair.middlewares=transfair-strip,transfair-auth"
- "traefik.http.routers.transfair.rule=PathPrefix(`/transfair`)"
- "traefik.http.services.transfair.loadbalancer.server.port=8080"
- "traefik.http.routers.transfair.tls=true"
traefik:
labels:
- "traefik.http.middlewares.transfair-auth.basicauth.users=${TRANSFAIR_AUTH}"
transfair-input-blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-transfair-input-blaze
environment:
BASE_URL: "http://bridgehead-transfair-input-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx1024m"
DB_BLOCK_CACHE_SIZE: 1024
CQL_EXPR_CACHE_SIZE: 8
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "transfair-input-blaze-data:/app/data"
profiles: ["transfair-input-blaze"]
labels:
- "traefik.enable=true"
- "traefik.http.routers.transfair-input-blaze.rule=PathPrefix(`/data-delivery`)"
- "traefik.http.middlewares.transfair-input-strip.stripprefix.prefixes=/data-delivery"
- "traefik.http.services.transfair-input-blaze.loadbalancer.server.port=8080"
- "traefik.http.routers.transfair-input-blaze.middlewares=transfair-input-strip,transfair-auth"
- "traefik.http.routers.transfair-input-blaze.tls=true"
transfair-request-blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-transfair-request-blaze
environment:
BASE_URL: "http://bridgehead-transfair-request-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx1024m"
DB_BLOCK_CACHE_SIZE: 1024
CQL_EXPR_CACHE_SIZE: 8
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "transfair-request-blaze-data:/app/data"
profiles: ["transfair-request-blaze"]
labels:
- "traefik.enable=true"
- "traefik.http.routers.transfair-request-blaze.rule=PathPrefix(`/data-requests`)"
- "traefik.http.middlewares.transfair-request-strip.stripprefix.prefixes=/data-requests"
- "traefik.http.services.transfair-request-blaze.loadbalancer.server.port=8080"
- "traefik.http.routers.transfair-request-blaze.middlewares=transfair-request-strip,transfair-auth"
- "traefik.http.routers.transfair-request-blaze.tls=true"
volumes:
transfair-input-blaze-data:
transfair-request-blaze-data:

View File

@ -1,35 +0,0 @@
#!/bin/bash -e
function transfairSetup() {
if [[ -n "$TTP_URL" || -n "$EXCHANGE_ID_SYSTEM" ]]; then
echo "Starting transfair."
OVERRIDE+=" -f ./modules/transfair-compose.yml"
if [ -n "$FHIR_INPUT_URL" ]; then
log INFO "TransFAIR input fhir store set to external $FHIR_INPUT_URL"
else
log INFO "TransFAIR input fhir store not set writing to internal blaze"
FHIR_INPUT_URL="http://transfair-input-blaze:8080"
OVERRIDE+=" --profile transfair-input-blaze"
fi
if [ -n "$FHIR_REQUEST_URL" ]; then
log INFO "TransFAIR request fhir store set to external $FHIR_REQUEST_URL"
else
log INFO "TransFAIR request fhir store not set writing to internal blaze"
FHIR_REQUEST_URL="http://transfair-request-blaze:8080"
OVERRIDE+=" --profile transfair-request-blaze"
fi
if [ -n "$TTP_GW_SOURCE" ]; then
log INFO "TransFAIR configured with greifswald as ttp"
TTP_TYPE="greifswald"
elif [ -n "$TTP_ML_API_KEY" ]; then
log INFO "TransFAIR configured with mainzelliste as ttp"
TTP_TYPE="mainzelliste"
else
log INFO "TransFAIR configured without ttp"
fi
TRANSFAIR_NO_PROXIES="transfair-input-blaze,blaze,transfair-requests-blaze"
if [ -n "${TRANSFAIR_NO_PROXY}" ]; then
TRANSFAIR_NO_PROXIES+=",${TRANSFAIR_NO_PROXY}"
fi
fi
}

View File

@ -1,65 +0,0 @@
version: "3.7"
services:
blaze:
image: docker.verbis.dkfz.de/cache/samply/blaze:${BLAZE_TAG}
container_name: bridgehead-pscc-blaze
environment:
BASE_URL: "http://bridgehead-pscc-blaze:8080"
JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
DB_BLOCK_CACHE_SIZE: ${BLAZE_MEMORY_CAP}
CQL_EXPR_CACHE_SIZE: ${BLAZE_CQL_CACHE_CAP:-32}
ENFORCE_REFERENTIAL_INTEGRITY: "false"
volumes:
- "blaze-data:/app/data"
labels:
- "traefik.enable=true"
- "traefik.http.routers.blaze_pscc.rule=PathPrefix(`/pscc-localdatamanagement`)"
- "traefik.http.middlewares.pscc_b_strip.stripprefix.prefixes=/pscc-localdatamanagement"
- "traefik.http.services.blaze_pscc.loadbalancer.server.port=8080"
- "traefik.http.routers.blaze_pscc.middlewares=pscc_b_strip"
- "traefik.http.routers.blaze_pscc.tls=true"
focus:
image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
container_name: bridgehead-focus
environment:
API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
BEAM_APP_ID_LONG: focus.${PROXY_ID}
PROXY_ID: ${PROXY_ID}
BLAZE_URL: "http://bridgehead-pscc-blaze:8080/fhir/"
BEAM_PROXY_URL: http://beam-proxy:8081
RETRY_COUNT: ${FOCUS_RETRY_COUNT}
EPSILON: 0.28
ENDPOINT_TYPE: ${FOCUS_ENDPOINT_TYPE:-blaze}
depends_on:
- "beam-proxy"
- "blaze"
beam-proxy:
image: docker.verbis.dkfz.de/cache/samply/beam-proxy:${BEAM_TAG}
container_name: bridgehead-beam-proxy
environment:
BROKER_URL: ${BROKER_URL}
PROXY_ID: ${PROXY_ID}
APP_focus_KEY: ${FOCUS_BEAM_SECRET_SHORT}
PRIVKEY_FILE: /run/secrets/proxy.pem
ALL_PROXY: http://forward_proxy:3128
TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
ROOTCERT_FILE: /conf/root.crt.pem
secrets:
- proxy.pem
depends_on:
- "forward_proxy"
volumes:
- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
- /srv/docker/bridgehead/pscc/root.crt.pem:/conf/root.crt.pem:ro
volumes:
blaze-data:
secrets:
proxy.pem:
file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

View File

@ -1,34 +0,0 @@
version: "3.7"
services:
landing:
container_name: lens_federated-search
image: docker.verbis.dkfz.de/dashboard/pscc-explorer
labels:
- "traefik.enable=true"
- "traefik.http.routers.landing.rule=PathPrefix(`/`)"
- "traefik.http.services.landing.loadbalancer.server.port=5173"
- "traefik.http.routers.landing.middlewares=auth"
- "traefik.http.routers.landing.tls=true"
# spot:
# image: docker.verbis.dkfz.de/ccp-private/central-spot
# environment:
# BEAM_SECRET: "${FOCUS_BEAM_SECRET_SHORT}"
# BEAM_URL: http://beam-proxy:8081
# BEAM_PROXY_ID: ${SITE_ID}
# BEAM_BROKER_ID: ${BROKER_ID}
# BEAM_APP_ID: "focus"
# PROJECT_METADATA: "cce_supervisors"
# depends_on:
# - "beam-proxy"
# labels:
# - "traefik.enable=true"
# - "traefik.http.services.spot.loadbalancer.server.port=8080"
# - "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowmethods=GET,OPTIONS,POST"
# - "traefik.http.middlewares.corsheaders2.headers.accesscontrolalloworiginlist=https://${HOST}"
# - "traefik.http.middlewares.corsheaders2.headers.accesscontrolallowcredentials=true"
# - "traefik.http.middlewares.corsheaders2.headers.accesscontrolmaxage=-1"
# - "traefik.http.routers.spot.rule=Host(`${HOST}`) && PathPrefix(`/backend`)"
# - "traefik.http.middlewares.stripprefix_spot.stripprefix.prefixes=/backend"
# - "traefik.http.routers.spot.tls=true"
# - "traefik.http.routers.spot.middlewares=corsheaders2,stripprefix_spot"

View File

@ -1,5 +0,0 @@
#!/bin/bash
if [ -n "$ENABLE_LENS" ];then
OVERRIDE+=" -f ./$PROJECT/modules/lens-compose.yml"
fi

View File

@ -1,20 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUW34NEb7bl0+Ywx+I1VKtY5vpAOowDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjQwMTIyMTMzNzEzWhcNMzQw
MTE5MTMzNzQzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAL5UegLXTlq3XRRj8LyFs3aF0tpRPVoW9RXp5kFI
TnBvyO6qjNbMDT/xK+4iDtEX4QQUvsxAKxfXbe9i1jpdwjgH7JHaSGm2IjAiKLqO
OXQQtguWwfNmmp96Ql13ArLj458YH08xMO/w2NFWGwB/hfARa4z/T0afFuc/tKJf
XbGCG9xzJ9tmcG45QN8NChGhVvaTweNdVxGWlpHxmi0Mn8OM9CEuB7nPtTTiBuiu
pRC2zVVmNjVp4ktkAqL7IHOz+/F5nhiz6tOika9oD3376Xj055lPznLcTQn2+4d7
K7ZrBopCFxIQPjkgmYRLfPejbpdUjK1UVJw7hbWkqWqH7JMCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGjvRcaIP4HM
poIguUAK9YL2n7fbMB8GA1UdIwQYMBaAFGjvRcaIP4HMpoIguUAK9YL2n7fbMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCbzycJSaDm
AXXNJqQ88djrKs5MDXS8RIjS/cu2ayuLaYDe+BzVmUXNA0Vt9nZGdaz63SLLcjpU
fNSxBfKbwmf7s30AK8Cnfj9q4W/BlBeVizUHQsg1+RQpDIdMrRQrwkXv8mfLw+w5
3oaXNW6W/8KpBp/H8TBZ6myl6jCbeR3T8EMXBwipMGop/1zkbF01i98Xpqmhx2+l
n+80ofPsSspOo5XmgCZym8CD/m/oFHmjcvOfpOCvDh4PZ+i37pmbSlCYoMpla3u/
7MJMP5lugfLBYNDN2p+V4KbHP/cApCDT5UWLOeAWjgiZQtHH5ilDeYqEc1oPjyJt
Rtup0MTxSJtN
-----END CERTIFICATE-----

View File

@ -1,14 +0,0 @@
BROKER_ID=test-no-real-data.broker.samply.de
BROKER_URL=https://${BROKER_ID}
PROXY_ID=${SITE_ID}.${BROKER_ID}
FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
FOCUS_RETRY_COUNT=${FOCUS_RETRY_COUNT:-64}
SUPPORT_EMAIL=denis.koether@dkfz-heidelberg.de
PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
BROKER_URL_FOR_PREREQ=$BROKER_URL
for module in $PROJECT/modules/*.sh
do
log DEBUG "sourcing $module"
source $module
done

View File

@ -1,3 +0,0 @@
FOCUS_TAG=develop
BEAM_TAG=develop
BLAZE_TAG=main

View File

@ -1,3 +0,0 @@
FOCUS_TAG=main
BEAM_TAG=main
BLAZE_TAG=0.32

View File

@ -1,3 +0,0 @@
FOCUS_TAG=develop
BEAM_TAG=develop
BLAZE_TAG=main