Mirror of https://github.com/samply/bridgehead.git
Synced 2025-06-16 20:40:15 +02:00

Compare commits: test-switc...feature/fe (374 commits)
.gitignore (vendored) — 2 changed lines

@@ -1,7 +1,7 @@
 ##Ignore site configuration
 .gitmodules
 site-config/*
+.idea
 ## Ignore site configuration
 */docker-compose.override.yml
README.md — 118 changed lines

@@ -1,5 +1,5 @@
 # Bridgehead
 
 The Bridgehead is a secure, low-effort solution to connect your research institution to a federated research network. It bundles interoperable, open-source software components into a turnkey package for installation on one of your secure servers. The Bridgehead is pre-configured with sane defaults, centrally monitored and with an absolute minimum of "moving parts" on your side, making it an extremely low-maintenance gateway to data sharing.
 
 This repository is the starting point for any information and tools you will need to deploy a Bridgehead. If you have questions, please [contact us](mailto:verbis-support@dkfz-heidelberg.de).
@@ -21,7 +21,8 @@ This repository is the starting point for any information and tools you will nee
 - [HTTPS Access](#https-access)
 - [TLS terminating proxies](#tls-terminating-proxies)
 - [File structure](#file-structure)
-- [BBMRI-ERIC Directory](#bbmri-eric-directory)
+- [BBMRI-ERIC Directory entry needed](#bbmri-eric-directory-entry-needed)
+- [Loading data](#loading-data)
 4. [Things you should know](#things-you-should-know)
 - [Auto-Updates](#auto-updates)
 - [Auto-Backups](#auto-backups)
@@ -33,6 +34,10 @@ This repository is the starting point for any information and tools you will nee
 
 ## Requirements
 
+The data protection group at your site will probably want to know exactly what our software does with patient data, and you may need to get their approval before you are allowed to install a Bridgehead. To help you with this, we have provided some data protection concepts:
+
+- [Germany](https://www.bbmri.de/biobanking/it/infrastruktur/datenschutzkonzept/)
+
 ### Hardware
 
 Hardware requirements strongly depend on the specific use-cases of your network as well as on the data it is going to serve. Most use-cases are well-served with the following configuration:
@@ -51,22 +56,41 @@ Ensure the following software (or newer) is installed:
 - docker >= 20.10.1
 - docker-compose >= 2.xx (`docker-compose` and `docker compose` are both supported).
 - systemd
+- curl
 
 We recommend to install Docker(-compose) from its official sources as described on the [Docker website](https://docs.docker.com).
 
-Note for Ubuntu: Please note that snap versions of Docker are not supported.
+> 📝 Note for Ubuntu: Snap versions of Docker are not supported.
 
 ### Network
 
-A running Bridgehead requires an outgoing HTTPS proxy to communicate with the central components.
+A Bridgehead communicates to all central components via outgoing HTTPS connections.
 
-Additionally, your site might use its own proxy. You should discuss this with your local systems administration. If a proxy is being used, you will need to note down the URL of the proxy. If it is a secure proxy, then you will also need to make a note of its username and password. This information will be used later on during the installation process.
+Your site might require an outgoing proxy (i.e. HTTPS forward proxy) to connect to external servers; you should discuss this with your local systems administration. In that case, you will need to note down the URL of the proxy. If the proxy requires authentication, you will also need to make a note of its username and password. This information will be used later on during the installation process. TLS terminating proxies are also supported, see [here](#tls-terminating-proxies). Apart from the Bridgehead itself, you may also need to configure the proxy server in [git](https://gist.github.com/evantoli/f8c23a37eb3558ab8765) and [docker](https://docs.docker.com/network/proxy/).
 
-Note that git and Docker may also need to be configured to use this proxy. This is a job for your systems administrators.
+The following URLs need to be accessible (prefix with `https://`):
+* To fetch code and configuration from git repositories
+  * github.com
+  * git.verbis.dkfz.de
+* To fetch docker images
+  * docker.verbis.dkfz.de
+  * Official Docker, Inc. URLs (subject to change, see [official list](https://docs.docker.com/desktop/all))
+    * hub.docker.com
+    * registry-1.docker.io
+    * production.cloudflare.docker.com
+* To report bridgeheads operational status
+  * healthchecks.verbis.dkfz.de
+* only for DKTK/CCP
+  * broker.ccp-it.dktk.dkfz.de
+* only for BBMRI-ERIC
+  * broker.bbmri.samply.de
+  * gitlab.bbmri-eric.eu
+* only for German Biobank Node
+  * broker.bbmri.de
 
-If there is a site firewall, this needs to be configured so that outgoing calls to the following URLs are allowed: *.dkfz.de, github.com, docker.io, *.docker.io, *.samply.de.
+> 📝 This URL list is subject to change. Instead of the individual names, we highly recommend whitelisting wildcard domains: *.dkfz.de, github.com, *.docker.com, *.docker.io, *.samply.de, *.bbmri.de.
 
-Note for Ubuntu: Please note that the uncomplicated firewall (ufw) is known to conflict with Docker [here](https://github.com/chaifeng/ufw-docker).
+> 📝 Ubuntu's pre-installed uncomplicated firewall (ufw) is known to conflict with Docker, more info [here](https://github.com/chaifeng/ufw-docker).
 
 ## Deployment
 
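Before installing, it can save time to verify that the endpoints listed above are actually reachable from the designated server. The snippet below is a minimal, hypothetical check and is not part of the Bridgehead scripts; trim the host list to the networks you actually join, and the exported proxy variable is an assumption about your local setup.

```shell
# Optional pre-flight connectivity check (not part of the repository).
# If your site uses a forward proxy, export it first, e.g.:
#   export https_proxy=http://proxy.example.com:3128
for host in github.com git.verbis.dkfz.de docker.verbis.dkfz.de hub.docker.com \
            registry-1.docker.io production.cloudflare.docker.com \
            healthchecks.verbis.dkfz.de broker.bbmri.samply.de; do
  # A completed TLS connection (any HTTP status) counts as reachable.
  if curl -sS -o /dev/null --connect-timeout 10 "https://$host"; then
    echo "OK      $host"
  else
    echo "FAILED  $host"   # DNS, firewall, TLS or proxy problem
  fi
done
```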
@@ -99,7 +123,7 @@ Mention:
 We will set the repository up for you. We will then send you:
 
 - A Repository Short Name (RSN). Beware: this is distinct from your site name.
-- Repository URL containing the acces token eg. https://BH_Dummy:dummy_token@git.verbis.dkfz.de/bbmri-bridgehead-configs/dummy.git
+- Repository URL containing the acces token eg. https://BH_Dummy:dummy_token@git.verbis.dkfz.de/<project>-bridgehead-configs/dummy.git
 
 During the installation, your Bridgehead will download your site's configuration from GitLab and you can review the details provided to us by email.
 
@@ -235,6 +259,21 @@ Even within your internal network, the Bridgehead enforces HTTPS for all service
 
 All of the Bridgehead's outgoing connections are secured by transport encryption (TLS) and a Bridgehead will refuse to connect if certificate verification fails. If your local forward proxy server performs TLS termination, please place its CA certificate in `/etc/bridgehead/trusted-ca-certs` as a `.pem` file, e.g. `/etc/bridgehead/trusted-ca-certs/mylocalca.pem`. Then, all Bridgehead components will pick up this certificate and trust it for outgoing connections.
 
+To find the certificate file, first run the following:
+
+```
+curl -v https://broker.bbmri.samply.de/v1/health
+```
+
+In the output, look out for the line:
+
+```
+successfully set certificate verify locations:
+```
+
+Here a file will be mentioned, perhaps in the directory /etc/ssl/certs. The exact location will depend on your operating system. This is the file that you need to copy.
+
 ### File structure
 
 - `/srv/docker/bridgehead` contains this git repository with the shell scripts and *project-specific configuration*. In here, all files are identical for all sites. You should not make any changes here.
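To make the TLS-terminating-proxy step above concrete, here is a hedged sketch of dropping the proxy's CA certificate into the trusted directory and checking it afterwards; `mylocalca.pem` is the example file name used above, and the exact way you obtain it from your proxy administrator will differ per site.

```shell
# Hypothetical example: trust a TLS-terminating proxy's CA for all Bridgehead
# components (the source file name mylocalca.pem is only the example from above).
sudo mkdir -p /etc/bridgehead/trusted-ca-certs
sudo cp mylocalca.pem /etc/bridgehead/trusted-ca-certs/mylocalca.pem

# Quick sanity check that the chain presented through the proxy verifies:
curl --cacert /etc/bridgehead/trusted-ca-certs/mylocalca.pem -v https://broker.bbmri.samply.de/v1/health
```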
@@ -247,28 +286,29 @@ All of the Bridgehead's outgoing connections are secured by transport encryption
 
 Your Bridgehead's actual data is not stored in the above directories, but in named docker volumes, see `docker volume ls` and `docker volume inspect <volume_name>`.
 
-### BBMRI-ERIC Directory
+### BBMRI-ERIC Directory entry needed
 
-If you run a biobank, you should register with the [Directory](https://directory.bbmri-eric.eu), a BBMRI-ERIC project that catalogs biobanks.
+If you run a biobank, you should be listed together with your collections with in the [Directory](https://directory.bbmri-eric.eu), a BBMRI-ERIC project that catalogs biobanks.
 
 To do this, contact the BBMRI-ERIC national node for the country where your biobank is based, see [the list of nodes](http://www.bbmri-eric.eu/national-nodes/).
 
-Once you have registered, **you should choose one of your sample collections as a default collection for your biobank**. This is the collection that will be automatically used to label any samples that have not been assigned a collection ID in your ETL process. Make a note of this ID, you will need it later on in the installation process.
+Once you have added your biobank to the Directory you got persistent identifier (PID) for your biobank and unique identifiers (IDs) for your collections. The collection IDs are necessary for the biospecimens assigning to the collections and later in the data flows between BBMRI-ERIC tools. In case you cannot distribute all your biospecimens within collections via assigning the collection IDs, **you should choose one of your sample collections as a default collection for your biobank**. This collection will be automatically used to label any samples that have not been assigned a collection ID in your ETL process. Make a note of this default collection ID, you will need it later on in the installation process.
 
-The Bridgehead's **Directory Sync** is an optional feature that keeps the Directory up to date with your local data, e.g. number of samples. Conversely, it also updates the local FHIR store with the latest contact details etc. from the Directory. You must explicitly set your country specific directory url, username and password to enable this feature.
+### Directory sync tool
+
+The Bridgehead's **Directory Sync** is an optional feature that keeps the Directory up to date with your local data, e.g. number of samples. Conversely, it also updates the local FHIR store with the latest contact details etc. from the Directory. You must explicitly set your country specific directory URL, username and password to enable this feature.
 
 Full details can be found in [directory_sync_service](https://github.com/samply/directory_sync_service).
 
 To enable it, you will need to set these variables to the ```bbmri.conf``` file of your GitLab repository. Here is an example config:
 
 ```
-### Directory sync service
 DS_DIRECTORY_URL=https://directory.bbmri-eric.eu
 DS_DIRECTORY_USER_NAME=your_directory_username
 DS_DIRECTORY_USER_PASS=qwdnqwswdvqHBVGFR9887
 DS_TIMER_CRON="0 22 * * *"
 ```
-You must contact the Directory for your national node to find the URL, and to register as a user.
+You must contact the Directory team for your national node to find the URL, and to register as a user.
 
 Additionally, you should choose when you want Directory sync to run. In the example above, this is set to happen at 10 pm every evening. You can modify this to suit your requirements. The timer specification should follow the [cron](https://crontab.guru) convention.
 
@@ -276,6 +316,32 @@ Once you edited the gitlab config, the bridgehead will autoupdate the config wit
 
 There will be a delay before the effects of Directory sync become visible. First, you will need to wait until the time you have specified in ```TIMER_CRON```. Second, the information will then be synchronized from your national node with the central European Directory. This can take up to 24 hours.
 
+### Loading data
+
+The data accessed by the federated search is held in the Bridgehead in a FHIR store (we use Blaze).
+
+You can load data into this store by using its FHIR API:
+
+```
+https://<Name of your server>/bbmri-localdatamanagement/fhir
+```
+The name of your server will generally be the full name of the VM that the Bridgehead runs on. You can alternatively supply an IP address.
+
+The FHIR API uses basic auth. You can find the credentials in `/etc/bridgehead/<project>.local.conf`.
+
+Note that if you don't have a DNS certificate for the Bridgehead, you will need to allow an insecure connection. E.g. with curl, use the `-k` flag.
+
+The storage space on your hard drive will depend on the number of FHIR resources that you intend to generate. This will be the sum of the number of patients/subjects, the number of samples, the number of conditions/diseases and the number of observations. As a general rule of thumb, you can assume that each resource will consume about 2 kilobytes of disk space.
+
+For more information on Blaze performance, please refer to [import performance](https://github.com/samply/blaze/blob/master/docs/performance/import.md).
+
+#### ETL for BBMRI and GBA
+
+Normally, you will need to build your own ETL to feed the Bridgehead. However, there is one case where a short cut might be available:
+- If you are using CentraXX as a BIMS and you have a FHIR-Export License, then you can employ standard mapping scripts that access the CentraXX-internal data structures and map the data onto the BBMRI FHIR profile. It may be necessary to adjust a few parameters, but this is nonetheless significantly easier than writing your own ETL.
+
+You can find the profiles for generating FHIR in [Simplifier](https://simplifier.net/bbmri.de/~resources?category=Profile).
+
 ## Things you should know
 
 ### Auto-Updates
 
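As a worked example of the "Loading data" instructions above, a FHIR transaction bundle can be posted to the store with curl using basic auth; the file name `bundle.json` and the credential placeholders are assumptions for illustration, and `-k` is only needed when the Bridgehead has no trusted certificate. With the 2-kilobyte rule of thumb, 10,000 patients with roughly 10 resources each would need on the order of 200 MB of storage.

```shell
# Hypothetical upload of a FHIR transaction bundle (bundle.json is an assumed
# file name). User and password come from /etc/bridgehead/<project>.local.conf.
curl -k -u "<user>:<password>" \
     -X POST \
     -H "Content-Type: application/fhir+json" \
     --data-binary @bundle.json \
     "https://<Name of your server>/bbmri-localdatamanagement/fhir"
```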
@@ -319,8 +385,28 @@ Installation under WSL ought to work, but we have not tested this.
 
 ### Docker Daemon Proxy Configuration
 
-Docker has a background daemon, responsible for downloading images and starting them. Sometimes, proxy configuration from your system won't carry over and it will fail to download images. In that case, configure the proxy for this daemon as described in the [official documentation](https://docs.docker.com).
+Docker has a background daemon, responsible for downloading images and starting them. Sometimes, proxy configuration from your system won't carry over and it will fail to download images. In that case, you'll need to configure the proxy inside the system unit of docker by creating the file `/etc/systemd/system/docker.service.d/proxy.conf` with the following content:
+
+``` ini
+[Service]
+Environment="HTTP_PROXY=http://proxy.example.com:3128"
+Environment="HTTPS_PROXY=https://proxy.example.com:3128"
+Environment="NO_PROXY=localhost,127.0.0.1,some-local-docker-registry.example.com,.corp"
+```
+
+After saving the configuration file, you'll need to reload the system daemon for the changes to take effect:
+
+``` shell
+sudo systemctl daemon-reload
+```
+
+and restart the docker daemon:
+
+``` shell
+sudo systemctl restart docker
+```
+
+For more information, please consult the [official documentation](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
 
 ### Monitoring
 
@@ -1,6 +1,6 @@
 version: "3.7"
 
-# This includes only the shared persistence for BBMRI-ERIC and GBN. Federation components are included as modules, see vars.
+# This includes only the shared persistence for BBMRI-ERIC and GBN. Federation components are included as modules, see ccp vars.
 
 services:
   blaze:
@@ -8,8 +8,9 @@ services:
     container_name: bridgehead-bbmri-blaze
     environment:
       BASE_URL: "http://bridgehead-bbmri-blaze:8080"
-      JAVA_TOOL_OPTIONS: "-Xmx4g"
-      LOG_LEVEL: "debug"
+      JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
+      DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
+      DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
       ENFORCE_REFERENTIAL_INTEGRITY: "false"
     volumes:
       - "blaze-data:/app/data"
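This hunk wires Blaze's heap and cache sizes to `BLAZE_MEMORY_CAP` (megabytes) and `BLAZE_RESOURCE_CACHE_CAP`, with defaults of 4096 and 2500000, alongside the new `optimizeBlazeMemoryUsage` step in the `bridgehead` script. Purely as a hedged illustration — the diff does not show whether a manually set value survives that optimization step — site-specific values would look like this in the sourced site configuration:

```shell
# Illustrative override, e.g. in /etc/bridgehead/bbmri.local.conf (assumed location)
BLAZE_MEMORY_CAP=8192              # Blaze JVM heap in MB -> -Xmx8192m
BLAZE_RESOURCE_CACHE_CAP=5000000   # resources kept in Blaze's in-memory cache
```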
@@ -1,3 +1,5 @@
+version: "3.7"
+
 services:
   directory_sync_service:
     image: "docker.verbis.dkfz.de/cache/samply/directory_sync_service"
Deleted file (50 lines) — DNPM Beam.Connect compose definition:

version: "3.7"

services:
  dnpm-beam-proxy:
    image: docker.verbis.dkfz.de/cache/samply/beam-proxy:develop
    container_name: bridgehead-dnpm-beam-proxy
    environment:
      BROKER_URL: ${DNPM_BROKER_URL}
      PROXY_ID: ${DNPM_PROXY_ID}
      APP_dnpm-connect_KEY: ${DNPM_BEAM_SECRET_SHORT}
      PRIVKEY_FILE: /run/secrets/proxy.pem
      ALL_PROXY: http://forward_proxy:3128
      TLS_CA_CERTIFICATES_DIR: /conf/trusted-ca-certs
      ROOTCERT_FILE: /conf/root.crt.pem
    secrets:
      - proxy.pem
    depends_on:
      - "forward_proxy"
    volumes:
      - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
      - /srv/docker/bridgehead/ccp/root-new.crt.pem:/conf/root.crt.pem:ro

  dnpm-beam-connect:
    depends_on: [ dnpm-beam-proxy ]
    image: docker.verbis.dkfz.de/cache/samply/beam-connect:dnpm
    container_name: bridgehead-dnpm-beam-connect
    environment:
      PROXY_URL: http://dnpm-beam-proxy:8081
      PROXY_APIKEY: ${DNPM_BEAM_SECRET_SHORT}
      APP_ID: dnpm-connect.${DNPM_PROXY_ID}
      DISCOVERY_URL: "./conf/central_targets.json"
      LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
      HTTP_PROXY: http://forward_proxy:3128
      HTTPS_PROXY: http://forward_proxy:3128
      NO_PROXY: dnpm-beam-proxy,dnpm-backend
      RUST_LOG: ${RUST_LOG:-info}
    volumes:
      - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
      - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.dnpm-connect.rule=PathPrefix(`/dnpm-connect`)"
      - "traefik.http.middlewares.dnpm-connect-strip.stripprefix.prefixes=/dnpm-connect"
      - "traefik.http.routers.dnpm-connect.middlewares=dnpm-connect-strip"
      - "traefik.http.services.dnpm-connect.loadbalancer.server.port=8062"
      - "traefik.http.routers.dnpm-connect.tls=true"

secrets:
  proxy.pem:
    file: /etc/bridgehead/pki/${SITE_ID}.priv.pem
Deleted file (13 lines) — DNPM setup script:

#!/bin/bash

if [ -n "${ENABLE_DNPM}" ]; then
  log INFO "DNPM setup detected (Beam.Connect) -- will start Beam and Beam.Connect for DNPM."
  OVERRIDE+=" -f ./$PROJECT/modules/dnpm-compose.yml"

  # Set variables required for Beam-Connect
  DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
  DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
  DNPM_BROKER_ID="broker.ccp-it.dktk.dkfz.de"
  DNPM_BROKER_URL="https://${DNPM_BROKER_ID}"
  DNPM_PROXY_ID="${SITE_ID}.${DNPM_BROKER_ID}"
fi
@@ -2,7 +2,7 @@ version: "3.7"
 
 services:
   focus-eric:
-    image: docker.verbis.dkfz.de/cache/samply/focus:main
+    image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
     container_name: bridgehead-focus-eric
     environment:
       API_KEY: ${ERIC_FOCUS_BEAM_SECRET_SHORT}
@@ -32,5 +32,5 @@ services:
       - "forward_proxy"
     volumes:
       - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
-      - /srv/docker/bridgehead/bbmri/modules/eric.root.crt.pem:/conf/root.crt.pem:ro
+      - /srv/docker/bridgehead/bbmri/modules/${ERIC_ROOT_CERT}.root.crt.pem:/conf/root.crt.pem:ro
@@ -4,8 +4,23 @@ if [ "${ENABLE_ERIC}" == "true" ]; then
   log INFO "BBMRI-ERIC setup detected -- will start services for BBMRI-ERIC."
   OVERRIDE+=" -f ./$PROJECT/modules/eric-compose.yml"
 
-  # Set required variables
-  ERIC_BROKER_ID=broker.bbmri.samply.de
+  # The environment needs to be defined in /etc/bridgehead
+  case "$ENVIRONMENT" in
+    "production")
+      export ERIC_BROKER_ID=broker.bbmri.samply.de
+      export ERIC_ROOT_CERT=eric
+      ;;
+    "test")
+      export ERIC_BROKER_ID=broker-test.bbmri-test.samply.de
+      export ERIC_ROOT_CERT=eric.test
+      ;;
+    *)
+      report_error 6 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
+      export ERIC_BROKER_ID=broker.bbmri.samply.de
+      export ERIC_ROOT_CERT=eric
+      ;;
+  esac
+
   ERIC_BROKER_URL=https://${ERIC_BROKER_ID}
   ERIC_PROXY_ID=${SITE_ID}.${ERIC_BROKER_ID}
   ERIC_FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
bbmri/modules/eric.test.root.crt.pem — new file (20 lines):

-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUJ0g7k2vrdAwNTU38S1/mU8NO26MwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwNzEwMTIyMzQxWhcNMzMw
NzA3MTIyNDExWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALMvc/fApbsAl+/NXDszNgffNR5llAb9CfxzdnRn
ryoBqZdPevBYZZfKBARRKjFbXRDdPWbE7erDeo1LiCM6PObXCuT9wmGWJtvfkmqW
3Z/a75e4r360kceMEGVn4kWpi9dz8s7+oXVZURjW2r13h6pq6xQNZDNlXmpR8wHG
58TSrQC4n1vzdSwMWdptgOA8Sw8adR7ZJI1yNZpmynB2QolKKNESI7FcSKC/+b+H
LoPkseAwQG9yJo23qEw1GZS67B47iKIqX2wp9VLQobHw7ncrhKXQLSWq973k/Swp
7lBdfOsTouf72flLiF1HbdOLcFDmWgIbf5scj2HaQe8b/UcCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHYxBJiJZieW
e6G1vwn6Q36/crgNMB8GA1UdIwQYMBaAFHYxBJiJZieWe6G1vwn6Q36/crgNMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCN6WVNYpWJ
6Z1Ee+otLZYMXhjyR6NUQ5s0aHiug97gB8mTiNlgXiiTgipCbofEmENgh1inYrPC
WfdXxqOaekSXCQW6nSO1KtBzEYtkN5LrN1cjKqt51P2DbkllinK37wwCS2Kfup1+
yjhTRxrehSIfsMVK6bTUeSoc8etkgwErZpORhlpqZKWhmOwcMpgsYJJOLhUetqc1
UNe/254bc0vqHEPT6VI/86c7qAmk1xR0RUfrnKAEqZtUeuoj2fe1L/6yOB16fxt5
3V3oim7EO6eZCTjDo9fU5DaFiqSMe7WVdr03Na0cWet60XKRH/xaiC6gMWdHWcbh
vZdXnV1qjlM2
-----END CERTIFICATE-----
bbmri/modules/exporter-compose.yml — new file (67 lines):

version: "3.7"

services:
  exporter:
    image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
    container_name: bridgehead-ccp-exporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      CROSS_ORIGINS: "https://${HOST}"
      EXPORTER_DB_USER: "exporter"
      EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
      HTTP_RELATIVE_PATH: "/ccp-exporter"
      SITE: "${SITE_ID}"
      HTTP_SERVLET_REQUEST_SCHEME: "https"
      OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
      - "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
      - "traefik.http.routers.exporter_ccp.tls=true"
      - "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
      - "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
    volumes:
      - "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"

  exporter-db:
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    container_name: bridgehead-ccp-exporter-db
    environment:
      POSTGRES_USER: "exporter"
      POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      POSTGRES_DB: "exporter"
    volumes:
      # Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
      - "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"

  reporter:
    image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
    container_name: bridgehead-ccp-reporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      CROSS_ORIGINS: "https://${HOST}"
      HTTP_RELATIVE_PATH: "/ccp-reporter"
      SITE: "${SITE_ID}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      EXPORTER_URL: "http://exporter:8092"
      LOG_FHIR_VALIDATION: "false"
      HTTP_SERVLET_REQUEST_SCHEME: "https"

    # In this initial development state of the bridgehead, we are trying to have so many volumes as possible.
    # However, in the first executions in the CCP sites, this volume seems to be very important. A report is
    # a process that can take several hours, because it depends on the exporter.
    # There is a risk that the bridgehead restarts, losing the already created export.

    volumes:
      - "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
      - "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
      - "traefik.http.routers.reporter_ccp.tls=true"
      - "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
      - "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
bbmri/modules/exporter-setup.sh — new file (8 lines):

#!/bin/bash -e

if [ "$ENABLE_EXPORTER" == true ]; then
  log INFO "Exporter setup detected -- will start Exporter service."
  OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
  EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
  EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi
bbmri/modules/exporter.md — new file (15 lines):

# Exporter and Reporter

## Exporter
The exporter is a REST API that exports the data of the different databases of the bridgehead in a set of tables.
It can accept different output formats as CSV, Excel, JSON or XML. It can also export data into Opal.

## Exporter-DB
It is a database to save queries for its execution in the exporter.
The exporter manages also the different executions of the same query in through the database.

## Reporter
This component is a plugin of the exporter that allows to create more complex Excel reports described in templates.
It is compatible with different template engines as Groovy, Thymeleaf,...
It is perfect to generate a document as our traditional CCP quality report.
@@ -2,7 +2,7 @@ version: "3.7"
 
 services:
   focus-gbn:
-    image: docker.verbis.dkfz.de/cache/samply/focus:main
+    image: docker.verbis.dkfz.de/cache/samply/focus:${FOCUS_TAG}
     container_name: bridgehead-focus-gbn
     environment:
       API_KEY: ${GBN_FOCUS_BEAM_SECRET_SHORT}
@@ -32,5 +32,5 @@ services:
      - "forward_proxy"
     volumes:
       - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
-      - /srv/docker/bridgehead/bbmri/modules/gbn.root.crt.pem:/conf/root.crt.pem:ro
+      - /srv/docker/bridgehead/bbmri/modules/${GBN_ROOT_CERT}.root.crt.pem:/conf/root.crt.pem:ro
@@ -4,10 +4,25 @@ if [ "${ENABLE_GBN}" == "true" ]; then
   log INFO "GBN setup detected -- will start services for German Biobank Node."
   OVERRIDE+=" -f ./$PROJECT/modules/gbn-compose.yml"
 
-  # Set required variables
-  GBN_BROKER_ID='#TODO#'
+  # The environment needs to be defined in /etc/bridgehead
+  case "$ENVIRONMENT" in
+    "production")
+      export GBN_BROKER_ID=broker.bbmri.de
+      export GBN_ROOT_CERT=gbn
+      ;;
+    "test")
+      export GBN_BROKER_ID=broker.test.bbmri.de
+      export GBN_ROOT_CERT=gbn.test
+      ;;
+    *)
+      report_error 6 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
+      export GBN_BROKER_ID=broker.bbmri.de
+      export GBN_ROOT_CERT=gbn
+      ;;
+  esac
+
   GBN_BROKER_URL=https://${GBN_BROKER_ID}
   GBN_PROXY_ID=${SITE_ID}.${GBN_BROKER_ID}
   GBN_FOCUS_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
-  GBN_SUPPORT_EMAIL=todo@verbis.dkfz.de
+  GBN_SUPPORT_EMAIL=feedback@germanbiobanknode.de
 fi
@@ -1 +1,20 @@
-#TODO#
+-----BEGIN CERTIFICATE-----
+MIIDNTCCAh2gAwIBAgIUckVOQQWZBTC0pWhn1X3lPxAWricwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwOTA0MDkwMTQ0WhcNMzMw
+OTAxMDkwMjEzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAOOD+CVvteBmu1hKV1QlfbHmiLCnuf6F+9k+1u/b
+6as6k7BURn8KZAxVLWSIwC6x2C7n9CHN9Jieb4DWpS0XmXQVUEpT1/yiLGBdxp2x
+nrbzm7caOunsWsPlGOcXPJKJpzAhcg58RDzXZ+2+shulSmsgPNlWBaLhNL5wj0sQ
+MzbwGVlGIJg18Ye/9WgQkO2ZcnTGb5cRsChKs4H43ZC34ZSSk7wqWg6P3e2xFam1
+YKXBOZzhwHoI4AxUQ+gd6upz5dqcwbaNZm10VP8fMac2dMLw9cOCS0ueDCS4viLd
+A69yds19AndBPMZhoEY1UHafjJ1uITRJQpaaB4vNliX+1rECAwEAAaN7MHkwDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFC74YIorSwWD
+/s5ozz3xvqUMDJ3qMB8GA1UdIwQYMBaAFC74YIorSwWD/s5ozz3xvqUMDJ3qMBYG
+A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCzcIccBzYr
+sHCGTGsSyLGBYsuI5yl+hvFOitYTha/mC+XBxq2R6By2WzbfSZtyZkUtC/+FqdCY
+VtMSjbDVXtBgsabfqODBobHmPyOEmNUX4IGcyn06rdM+rHQRah98lF+PhiPPO42F
+9Wj8dkq4/Gf+Yarq31ZbY0sed2sEPZ/bV26Og8Ft9qip5gKwklyakAiCnDIq+QBd
+ltvng3g08AQM0o5KIphP2/WU0UoSk1YPVMjRxuLiFg8xvr2EdCQQ9oA7xbhrmAXe
+242HVW/7KokjmowyWTQlIUGnuGdCOtTl8h74eHTID0YWO68hHkA0J5Ox2j4dZxvw
+HRFTxAR1gGKX
+-----END CERTIFICATE-----
bbmri/modules/gbn.test.root.crt.pem — new file (20 lines):

-----BEGIN CERTIFICATE-----
MIIDNTCCAh2gAwIBAgIUQJjusHYR89Xas+kRbg41aHZxfmcwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwODIxMDk1MDI1WhcNMzMw
ODE4MDk1MDU1WjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMP0jt2tSk23Bu+QeogqlFwjbMnqwRcWGKAOF4ch
aOK2B5u/BnpqIZDZbhfSIJTv8DPe3+nA2VqRfSiW3HbV0auqxx1ii2ZmHYbvO2P/
Jj6hyIiYYGqCMRVXk7iB+DfMysQEaSJO/7lJSprlVQCl0u7MAQ4q/szVNwcCm2Xi
iE00Wlota2xTYjnJHYjeaLZL4kQsjqW2aCWHG4q77Z4NXT+lXN9XXedgoXLhuwWl
UyHhXPjyCVu1iFzsXwSTodPAETGoInRYMqMA7PrbHZu1b2Jz0BwCQ+bark1td+Mf
l3uP0QduhZnH6zGO0KyUFRzeiesgabv5bgUeSSsIOVjnLJUCAwEAAaN7MHkwDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFME99nPh1Vuo
7eRaymL2Ps7qGxIdMB8GA1UdIwQYMBaAFME99nPh1Vuo7eRaymL2Ps7qGxIdMBYG
A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQB0WG0xT00R
5CA0tVHaNo8bQuAXytu566TspKc5vVd3r6mglj/MiSSQG2MVz+GUU6LnnApgln1P
pvZuyaldB0QdTTLeJVMr/eFtZonlxqcxkj+VW2Y7mRHT7Xx9GQvzKYvSK5m/+xzH
pAQl8AirgkoZ5b+ltlzM0pDAH204xj3/skmGqM/o0FKzRtpetHYkZPiquHCmO2Cp
nTMkv7c2qu5t2Dm5q0Tmb7ZRoA1yIYhDn/UfhTAVWQnoMfXK8oB9nkRRb7pAfOXo
W1K4A+oWqKrJwfIH/Ycnw7hu8hPuGOyIN/PLnLpJp9M2I67vywp5lIvFib4UukyJ
wJw6/iTienIA
-----END CERTIFICATE-----
bbmri/modules/teiler-compose.yml — new file (81 lines):

version: "3.7"

services:

  teiler-orchestrator:
    image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
    container_name: bridgehead-teiler-orchestrator
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
      - "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
      - "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
      - "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
      - "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
    environment:
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
      HTTP_RELATIVE_PATH: "/ccp-teiler"

  teiler-dashboard:
    image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
    container_name: bridgehead-teiler-dashboard
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
      - "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
      - "traefik.http.routers.teiler_dashboard_ccp.tls=true"
      - "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
      - "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
    environment:
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
      OIDC_URL: "${OIDC_URL}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
      OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
      TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
      TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
      TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
      TEILER_PROJECT: "${PROJECT}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_USER: "${OIDC_USER_GROUP}"
      TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
      REPORTER_DEFAULT_TEMPLATE_ID: "bbmri-qb"
      EXPORTER_DEFAULT_TEMPLATE_ID: "bbmri"

  teiler-backend:
    image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
    container_name: bridgehead-teiler-backend
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
      - "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
      - "traefik.http.routers.teiler_backend_ccp.tls=true"
      - "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
      - "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
    environment:
      LOG_LEVEL: "INFO"
      APPLICATION_PORT: "8085"
      APPLICATION_ADDRESS: "${HOST}"
      DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
      CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
      TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
      TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
      TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
      TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
      CENTRAX_URL: "${CENTRAXX_URL}"
      HTTP_PROXY: "http://forward_proxy:3128"
      ENABLE_MTBA: "${ENABLE_MTBA}"
      ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
    secrets:
      - ccp.conf

secrets:
  ccp.conf:
    file: /etc/bridgehead/ccp.conf
bbmri/modules/teiler-setup.sh — new file (9 lines):

#!/bin/bash -e

if [ "$ENABLE_TEILER" == true ];then
  log INFO "Teiler setup detected -- will start Teiler services."
  OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
  TEILER_DEFAULT_LANGUAGE=DE
  TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
  add_public_oidc_redirect_url "/ccp-teiler/*"
fi
bbmri/modules/teiler.md — new file (19 lines):

# Teiler
This module orchestrates the different microfrontends of the bridgehead as a single page application.

## Teiler Orchestrator
Single SPA component that consists on the root HTML site of the single page application and a javascript code that
gets the information about the microfrontend calling the teiler backend and is responsible for registering them. With the
resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.

The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React,...)
This microfrontends can run as single alone but need an extension with Single-SPA (https://single-spa.js.org/docs/ecosystem).
There are also available three templates (Angular, Vue, React) to be directly extended to be used directly in the teiler.

## Teiler Dashboard
It consists on the main dashboard and a set of embedded services.
### Login
user and password in ccp.local.conf

## Teiler Backend
In this component, the microfrontends are configured.
bbmri/vars — 16 changed lines

@@ -7,6 +7,18 @@
 FOCUS_RETRY_COUNT=32
 PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
 
+OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
+OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"
+OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
+OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
+# Use "test-realm-01" for testing
+OIDC_REALM="${OIDC_REALM:-master}"
+OIDC_URL="https://login.verbis.dkfz.de"
+OIDC_ISSUER_URL="${OIDC_URL}/realms/${OIDC_REALM}"
+OIDC_GROUP_CLAIM="groups"
+
+POSTGRES_TAG=15.6-alpine
+
 for module in $PROJECT/modules/*.sh
 do
   log DEBUG "sourcing $module"
@@ -14,7 +26,7 @@ do
 done
 
 SUPPORT_EMAIL=$ERIC_SUPPORT_EMAIL
-BROKER_URL_FOR_PREREQ=$ERIC_BROKER_URL
+BROKER_URL_FOR_PREREQ="${ERIC_BROKER_URL:-$GBN_BROKER_URL}"
 
 if [ -n "$GBN_SUPPORT_EMAIL" ]; then
   SUPPORT_EMAIL=$GBN_SUPPORT_EMAIL
@@ -34,4 +46,4 @@ function do_enroll {
   echo
   echo "You just received $COUNT certificate signing requests (CSR). Please send $COUNT e-mails, with 1 CSR each, to the respective e-mail address."
   fi
 }
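For orientation, the new OIDC variables expand per site. Assuming `capitalize_first_letter` upper-cases only the first character of the site ID (its name suggests this, but the helper itself is not shown in this diff), a hypothetical site with `SITE_ID=dummy` would end up with values like the following.

```shell
# Illustrative expansion for SITE_ID=dummy (assumes capitalize_first_letter
# turns "dummy" into "Dummy"); values are examples, not part of the diff.
OIDC_USER_GROUP="DKTK_CCP_Dummy"
OIDC_ADMIN_GROUP="DKTK_CCP_Dummy_Verwalter"
OIDC_PRIVATE_CLIENT_ID=dummy-private
OIDC_PUBLIC_CLIENT_ID=dummy-public
OIDC_ISSUER_URL="https://login.verbis.dkfz.de/realms/master"
```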
28
bridgehead
28
bridgehead
@ -50,6 +50,8 @@ loadVars() {
|
|||||||
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
|
source /etc/bridgehead/$PROJECT.local.conf || fail_and_report 1 "Found /etc/bridgehead/$PROJECT.local.conf but failed to import"
|
||||||
fi
|
fi
|
||||||
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
|
fetchVarsFromVaultByFile /etc/bridgehead/$PROJECT.conf || fail_and_report 1 "Unable to fetchVarsFromVaultByFile"
|
||||||
|
setHostname
|
||||||
|
optimizeBlazeMemoryUsage
|
||||||
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
|
[ -e ./$PROJECT/vars ] && source ./$PROJECT/vars
|
||||||
set +a
|
set +a
|
||||||
|
|
||||||
@ -64,7 +66,23 @@ loadVars() {
|
|||||||
OVERRIDE+=" -f ./$PROJECT/docker-compose.override.yml"
|
OVERRIDE+=" -f ./$PROJECT/docker-compose.override.yml"
|
||||||
fi
|
fi
|
||||||
detectCompose
|
detectCompose
|
||||||
setHostname
|
setupProxy
|
||||||
|
|
||||||
|
# Set some project-independent default values
|
||||||
|
: ${ENVIRONMENT:=production}
|
||||||
|
|
||||||
|
case "$ENVIRONMENT" in
|
||||||
|
"production")
|
||||||
|
export FOCUS_TAG=main
|
||||||
|
;;
|
||||||
|
"test")
|
||||||
|
export FOCUS_TAG=develop
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
report_error 7 "Environment \"$ENVIRONMENT\" is unknown. Assuming production. FIX THIS!"
|
||||||
|
export FOCUS_TAG=main
|
||||||
|
;;
|
||||||
|
esac
|
||||||
}
|
}
|
||||||
|
|
||||||
case "$ACTION" in
|
case "$ACTION" in
|
||||||
@@ -72,11 +90,14 @@ case "$ACTION" in
 		loadVars
 		hc_send log "Bridgehead $PROJECT startup: Checking requirements ..."
 		checkRequirements
+		sync_secrets
 		hc_send log "Bridgehead $PROJECT startup: Requirements checked out. Now starting bridgehead ..."
 		exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE up --abort-on-container-exit
 		;;
 	stop)
 		loadVars
+		# Kill stale secret-sync instances if present
+		docker kill $(docker ps -q --filter ancestor=docker.verbis.dkfz.de/cache/samply/secret-sync-local) 2>/dev/null || true
 		# HACK: This is temporarily to properly shut down false bridgehead instances (bridgehead-ccp instead ccp)
 		$COMPOSE -p bridgehead-$PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
 		exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE down
@@ -85,6 +106,11 @@ case "$ACTION" in
 		bk_is_running
 		exit $?
 		;;
+	logs)
+		loadVars
+		shift 2
+		exec $COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE logs -f $@
+		;;
 	update)
 		loadVars
 		exec ./lib/update-bridgehead.sh $PROJECT
@@ -6,7 +6,9 @@ services:
     container_name: bridgehead-ccp-blaze
     environment:
       BASE_URL: "http://bridgehead-ccp-blaze:8080"
-      JAVA_TOOL_OPTIONS: "-Xmx4g"
+      JAVA_TOOL_OPTIONS: "-Xmx${BLAZE_MEMORY_CAP:-4096}m"
+      DB_RESOURCE_CACHE_SIZE: ${BLAZE_RESOURCE_CACHE_CAP:-2500000}
+      DB_BLOCK_CACHE_SIZE: $BLAZE_MEMORY_CAP
       ENFORCE_REFERENTIAL_INTEGRITY: "false"
     volumes:
       - "blaze-data:/app/data"
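
The Blaze heap is now driven by BLAZE_MEMORY_CAP (in megabytes, 4096 by default) and the FHIR resource cache by BLAZE_RESOURCE_CACHE_CAP, presumably filled in by the new optimizeBlazeMemoryUsage call in the main script. A hedged sketch of pinning both manually on a well-provisioned host; the file name and numbers are illustrative only:

    # Hypothetical manual caps in the site-local config sourced by loadVars:
    # roughly an 8 GiB heap for Blaze and a larger resource cache.
    echo 'BLAZE_MEMORY_CAP=8192' | sudo tee -a /etc/bridgehead/ccp.local.conf
    echo 'BLAZE_RESOURCE_CACHE_CAP=5000000' | sudo tee -a /etc/bridgehead/ccp.local.conf
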
@@ -19,7 +21,7 @@ services:
       - "traefik.http.routers.blaze_ccp.tls=true"
 
   focus:
-    image: docker.verbis.dkfz.de/cache/samply/focus:main
+    image: docker.verbis.dkfz.de/cache/samply/focus:0.4.4
     container_name: bridgehead-focus
     environment:
       API_KEY: ${FOCUS_BEAM_SECRET_SHORT}
@@ -28,7 +30,7 @@ services:
       BLAZE_URL: "http://bridgehead-ccp-blaze:8080/fhir/"
       BEAM_PROXY_URL: http://beam-proxy:8081
       RETRY_COUNT: ${FOCUS_RETRY_COUNT}
-      OBFUSCATE: "no"
+      EPSILON: 0.28
     depends_on:
       - "beam-proxy"
       - "blaze"
ccp/modules/adt2fhir-rest-compose.yml | 18 lines (new file)
@@ -0,0 +1,18 @@
version: "3.7"

services:
  adt2fhir-rest:
    container_name: bridgehead-adt2fhir-rest
    image: docker.verbis.dkfz.de/ccp/adt2fhir-rest:main
    environment:
      IDTYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
      MAINZELLISTE_APIKEY: ${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
      SALT: ${LOCAL_SALT}
    restart: always
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.adt2fhir-rest.rule=PathPrefix(`/adt2fhir-rest`)"
      - "traefik.http.middlewares.adt2fhir-rest_strip.stripprefix.prefixes=/adt2fhir-rest"
      - "traefik.http.services.adt2fhir-rest.loadbalancer.server.port=8080"
      - "traefik.http.routers.adt2fhir-rest.tls=true"
      - "traefik.http.routers.adt2fhir-rest.middlewares=adt2fhir-rest_strip,auth"
ccp/modules/adt2fhir-rest-setup.sh | 13 lines (new file)
@@ -0,0 +1,13 @@
#!/bin/bash

function adt2fhirRestSetup() {
	if [ -n "$ENABLE_ADT2FHIR_REST" ]; then
		log INFO "ADT2FHIR-REST setup detected -- will start adt2fhir-rest API."
		if [ ! -n "$IDMANAGER_LOCAL_PATIENTLIST_APIKEY" ]; then
			log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
			exit 1;
		fi
		OVERRIDE+=" -f ./$PROJECT/modules/adt2fhir-rest-compose.yml"
		LOCAL_SALT="$(echo \"local-random-salt\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
	fi
}
ccp/modules/datashield-compose.yml | 171 lines (new file)
@@ -0,0 +1,171 @@
version: "3.7"

services:
  rstudio:
    container_name: bridgehead-rstudio
    image: docker.verbis.dkfz.de/ccp/dktk-rstudio:latest
    environment:
      #DEFAULT_USER: "rstudio" # This line is kept for informational purposes
      PASSWORD: "${RSTUDIO_ADMIN_PASSWORD}" # It is required, even if the authentication is disabled
      DISABLE_AUTH: "true" # https://rocker-project.org/images/versioned/rstudio.html#how-to-use
      HTTP_RELATIVE_PATH: "/rstudio"
      ALL_PROXY: "http://forward_proxy:3128" # https://rocker-project.org/use/networking.html
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.rstudio_ccp.rule=PathPrefix(`/rstudio`)"
      - "traefik.http.services.rstudio_ccp.loadbalancer.server.port=8787"
      - "traefik.http.middlewares.rstudio_ccp_strip.stripprefix.prefixes=/rstudio"
      - "traefik.http.routers.rstudio_ccp.tls=true"
      - "traefik.http.routers.rstudio_ccp.middlewares=oidcAuth,rstudio_ccp_strip"
    networks:
      - rstudio

  opal:
    container_name: bridgehead-opal
    image: docker.verbis.dkfz.de/ccp/dktk-opal:latest
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.opal_ccp.rule=PathPrefix(`/opal`)"
      - "traefik.http.services.opal_ccp.loadbalancer.server.port=8080"
      - "traefik.http.routers.opal_ccp.tls=true"
    links:
      - opal-rserver
      - opal-db
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC -Dhttps.proxyHost=forward_proxy -Dhttps.proxyPort=3128"
      # OPAL_ADMINISTRATOR_USER: "administrator" # This line is kept for informational purposes
      OPAL_ADMINISTRATOR_PASSWORD: "${OPAL_ADMIN_PASSWORD}"
      POSTGRESDATA_HOST: "opal-db"
      POSTGRESDATA_DATABASE: "opal"
      POSTGRESDATA_USER: "opal"
      POSTGRESDATA_PASSWORD: "${OPAL_DB_PASSWORD}"
      ROCK_HOSTS: "opal-rserver:8085"
      APP_URL: "https://${HOST}/opal"
      APP_CONTEXT_PATH: "/opal"
      OPAL_PRIVATE_KEY: "/run/secrets/opal-key.pem"
      OPAL_CERTIFICATE: "/run/secrets/opal-cert.pem"
      OIDC_URL: "${OIDC_URL}"
      OIDC_REALM: "${OIDC_REALM}"
      OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
      OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
      OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
      TOKEN_MANAGER_PASSWORD: "${TOKEN_MANAGER_OPAL_PASSWORD}"
      EXPORTER_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
      BEAM_APP_ID: token-manager.${PROXY_ID}
      BEAM_SECRET: ${TOKEN_MANAGER_SECRET}
      BEAM_DATASHIELD_PROXY: request-manager
    volumes:
      - "/var/cache/bridgehead/ccp/opal-metadata-db:/srv" # Opal metadata
    secrets:
      - opal-cert.pem
      - opal-key.pem

  opal-db:
    container_name: bridgehead-opal-db
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    environment:
      POSTGRES_PASSWORD: "${OPAL_DB_PASSWORD}" # Set in datashield-setup.sh
      POSTGRES_USER: "opal"
      POSTGRES_DB: "opal"
    volumes:
      - "/var/cache/bridgehead/ccp/opal-db:/var/lib/postgresql/data" # Opal project data (imported from exporter)

  opal-rserver:
    container_name: bridgehead-opal-rserver
    image: docker.verbis.dkfz.de/ccp/dktk-rserver # datashield/rock-base + dsCCPhos
    tmpfs:
      - /srv

  beam-connect:
    image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
    container_name: bridgehead-datashield-connect
    environment:
      PROXY_URL: "http://beam-proxy:8081"
      TLS_CA_CERTIFICATES_DIR: /run/secrets
      APP_ID: datashield-connect.${SITE_ID}.${BROKER_ID}
      PROXY_APIKEY: ${DATASHIELD_CONNECT_SECRET}
      DISCOVERY_URL: "./map/central.json"
      LOCAL_TARGETS_FILE: "./map/local.json"
      NO_AUTH: "true"
    secrets:
      - opal-cert.pem
    depends_on:
      - beam-proxy
    volumes:
      - /tmp/bridgehead/opal-map/:/map/:ro
    networks:
      - default
      - rstudio

  traefik:
    labels:
      - "traefik.http.middlewares.oidcAuth.forwardAuth.address=http://oauth2-proxy:4180/"
      - "traefik.http.middlewares.oidcAuth.forwardAuth.trustForwardHeader=true"
      - "traefik.http.middlewares.oidcAuth.forwardAuth.authResponseHeaders=X-Auth-Request-Access-Token,Authorization"
    networks:
      - default
      - rstudio

  forward_proxy:
    networks:
      - default
      - rstudio

  beam-proxy:
    environment:
      APP_datashield-connect_KEY: ${DATASHIELD_CONNECT_SECRET}
      APP_token-manager_KEY: ${TOKEN_MANAGER_SECRET}

  # TODO: Allow users of group /DataSHIELD and OIDC_USER_GROUP at the same time:
  # Maybe a solution would be (https://oauth2-proxy.github.io/oauth2-proxy/configuration/oauth_provider):
  # --allowed-groups=/DataSHIELD,OIDC_USER_GROUP
  oauth2-proxy:
    image: docker.verbis.dkfz.de/cache/oauth2-proxy/oauth2-proxy:latest
    container_name: bridgehead-oauth2proxy
    command: >-
      --allowed-group=DataSHIELD
      --oidc-groups-claim=${OIDC_GROUP_CLAIM}
      --auth-logging=true
      --whitelist-domain=${HOST}
      --http-address="0.0.0.0:4180"
      --reverse-proxy=true
      --upstream="static://202"
      --email-domain="*"
      --cookie-name="_BRIDGEHEAD_oauth2"
      --cookie-secret="${OAUTH2_PROXY_SECRET}"
      --cookie-expire="12h"
      --cookie-secure="true"
      --cookie-httponly="true"
      #OIDC settings
      --provider="keycloak-oidc"
      --provider-display-name="VerbIS Login"
      --client-id="${OIDC_PRIVATE_CLIENT_ID}"
      --client-secret="${OIDC_CLIENT_SECRET}"
      --redirect-url="https://${HOST}${OAUTH2_CALLBACK}"
      --oidc-issuer-url="${OIDC_ISSUER_URL}"
      --scope="openid email profile"
      --code-challenge-method="S256"
      --skip-provider-button=true
      #X-Forwarded-Header settings - true/false depending on your needs
      --pass-basic-auth=true
      --pass-user-headers=false
      --pass-access-token=false
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.oauth2_proxy.rule=Host(`${HOST}`) && PathPrefix(`/oauth2`, `/oauth2/callback`)"
      - "traefik.http.services.oauth2_proxy.loadbalancer.server.port=4180"
      - "traefik.http.routers.oauth2_proxy.tls=true"
    environment:
      http_proxy: "http://forward_proxy:3128"
      https_proxy: "http://forward_proxy:3128"
    depends_on:
      forward_proxy:
        condition: service_healthy

secrets:
  opal-cert.pem:
    file: /tmp/bridgehead/opal-cert.pem
  opal-key.pem:
    file: /tmp/bridgehead/opal-key.pem

networks:
  rstudio:
ccp/modules/datashield-import-template.xml | 157 lines (new file)
@@ -0,0 +1,157 @@
<template id="opal-ccp" source-id="blaze-store" opal-project="ccp-demo" target-id="opal" >

    <container csv-filename="Patient-${TIMESTAMP}.csv" opal-table="patient" opal-entity-type="Patient">
        <attribute csv-column="patient-id" opal-value-type="text" primary-key="true" val-fhir-path="Patient.id.value" anonym="Pat" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="dktk-id-global" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Global').value.value"/>
        <attribute csv-column="dktk-id-lokal" opal-value-type="text" val-fhir-path="Patient.identifier.where(type.coding.code = 'Lokal').value.value" />
        <attribute csv-column="geburtsdatum" opal-value-type="date" val-fhir-path="Patient.birthDate.value"/>
        <attribute csv-column="geschlecht" opal-value-type="text" val-fhir-path="Patient.gender.value" />
        <attribute csv-column="datum_des_letztbekannten_vitalstatus" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '75186-7').effective.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
        <attribute csv-column="vitalstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '75186-7').value.coding.code.value" join-fhir-path="/Observation.where(code.coding.code = '75186-7').subject.reference.value"/>
        <!-- missing in ADT2FHIR --><attribute csv-column="tod_tumorbedingt" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://fhir.de/CodeSystem/bfarm/icd-10-gm').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
        <!-- missing in ADT2FHIR --><attribute csv-column="todesursachen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '68343-3').value.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/JNUCS').code.value" join-fhir-path="/Observation.where(code.coding.code = '68343-3').subject.reference.value"/>
    </container>

    <container csv-filename="Diagnosis-${TIMESTAMP}.csv" opal-table="diagnosis" opal-entity-type="Diagnosis">
        <attribute csv-column="diagnosis-id" primary-key="true" opal-value-type="text" val-fhir-path="Condition.id.value" anonym="Dia" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Condition.subject.reference.value" anonym="Pat"/>
        <attribute csv-column="primaerdiagnose" opal-value-type="text" val-fhir-path="Condition.code.coding.code.value"/>
        <attribute csv-column="tumor_diagnosedatum" opal-value-type="date" val-fhir-path="Condition.onset.value"/>
        <attribute csv-column="primaertumor_diagnosetext" opal-value-type="text" val-fhir-path="Condition.code.text.value"/>
        <attribute csv-column="version_des_icd-10_katalogs" opal-value-type="integer" val-fhir-path="Condition.code.coding.version.value"/>
        <attribute csv-column="lokalisation" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').code.value"/>
        <attribute csv-column="icd-o_katalog_topographie_version" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'urn:oid:2.16.840.1.113883.6.43.1').version.value"/>
        <attribute csv-column="seitenlokalisation_nach_adt-gekid" opal-value-type="text" val-fhir-path="Condition.bodySite.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/SeitenlokalisationCS').code.value"/>
    </container>

    <container csv-filename="Progress-${TIMESTAMP}.csv" opal-table="progress" opal-entity-type="Progress">
        <!-- it would be better to generate an ID, instead of extracting the ClinicalImpression id -->
        <attribute csv-column="progress-id" primary-key="true" opal-value-type="text" val-fhir-path="ClinicalImpression.id.value" anonym="Pro" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="ClinicalImpression.problem.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="ClinicalImpression.subject.reference.value" anonym="Pat" />
        <attribute csv-column="untersuchungs-_befunddatum_im_verlauf" opal-value-type="date" val-fhir-path="ClinicalImpression.effective.value" />
        <!-- just for evaluation: redundant to Untersuchungs-, Befunddatum im Verlauf -->
        <attribute csv-column="datum_lokales_oder_regionaeres_rezidiv" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').effective.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
        <attribute csv-column="gesamtbeurteilung_tumorstatus" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21976-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
        <attribute csv-column="lokales_oder_regionaeres_rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4583-6').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value"/>
        <attribute csv-column="lymphknoten-rezidiv" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4370-8').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
        <attribute csv-column="fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = 'LA4226-2').value.coding.code.value" join-fhir-path="ClinicalImpression.finding.itemReference.reference.value" />
    </container>

    <container csv-filename="Histology-${TIMESTAMP}.csv" opal-table="histology" opal-entity-type="Histology" >
        <attribute csv-column="histology-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').id" anonym="His" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').focus.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').subject.reference.value" anonym="Pat" />
        <attribute csv-column="histologie_datum" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '59847-4').effective.value"/>
        <attribute csv-column="icd-o_katalog_morphologie_version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.version.value" />
        <attribute csv-column="morphologie" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.coding.code.value"/>
        <attribute csv-column="morphologie-freitext" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59847-4').value.text.value"/>
        <attribute csv-column="grading" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '59542-1').value.coding.code.value" join-fhir-path="Observation.where(code.coding.code = '59847-4').hasMember.reference.value"/>
    </container>

    <container csv-filename="Metastasis-${TIMESTAMP}.csv" opal-table="metastasis" opal-entity-type="Metastasis" >
        <attribute csv-column="metastasis-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').id" anonym="Met" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').focus.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').subject.reference.value" anonym="Pat" />
        <attribute csv-column="datum_fernmetastasen" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21907-1').effective.value"/>
        <attribute csv-column="fernmetastasen_vorhanden" opal-value-type="boolean" val-fhir-path="Observation.where(code.coding.code = '21907-1').value.coding.code.value"/>
        <attribute csv-column="lokalisation_fernmetastasen" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21907-1').bodySite.coding.code.value"/>
    </container>

    <container csv-filename="TNM-${TIMESTAMP}.csv" opal-table="tnm" opal-entity-type="TNM">
        <attribute csv-column="tnm-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').id" anonym="TNM" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').focus.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').subject.reference.value" anonym="Pat" />
        <attribute csv-column="datum_der_tnm_dokumentation_datum_befund" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').effective.value"/>
        <attribute csv-column="uicc_stadium" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.code.value"/>
        <attribute csv-column="tnm-t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').value.coding.code.value"/>
        <attribute csv-column="tnm-n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').value.coding.code.value"/>
        <attribute csv-column="tnm-m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').value.coding.code.value"/>
        <attribute csv-column="c_p_u_preefix_t" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21905-5' or code.coding.code = '21899-0').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
        <attribute csv-column="c_p_u_preefix_n" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21906-3' or code.coding.code = '21900-6').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
        <attribute csv-column="c_p_u_preefix_m" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21907-1' or code.coding.code = '21901-4').extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-TNMcpuPraefix').value.coding.code.value"/>
        <attribute csv-column="tnm-y-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '59479-6' or code.coding.code = '59479-6').value.coding.code.value"/>
        <attribute csv-column="tnm-r-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '21983-2' or code.coding.code = '21983-2').value.coding.code.value"/>
        <attribute csv-column="tnm-m-symbol" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').component.where(code.coding.code = '42030-7' or code.coding.code = '42030-7').value.coding.code.value"/>
        <!-- only for UICC, not in ADT2FHIR --><attribute csv-column="tnm-version" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '21908-9' or code.coding.code = '21902-2').value.coding.version.value"/>
    </container>

    <container csv-filename="System-Therapy-${TIMESTAMP}.csv" opal-table="system-therapy" opal-entity-type="SystemTherapy">
        <attribute csv-column="system-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="MedicationStatement.id" anonym="Sys" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="MedicationStatement.reasonReference.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="MedicationStatement.subject.reference.value" anonym="Pat" />
        <attribute csv-column="systemische_therapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
        <attribute csv-column="intention_chemotherapie" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value"/>
        <attribute csv-column="therapieart" opal-value-type="text" val-fhir-path="MedicationStatement.category.coding.code.value"/>
        <attribute csv-column="systemische_therapie_beginn" opal-value-type="date" val-fhir-path="MedicationStatement.effective.start.value"/>
        <attribute csv-column="systemische_therapie_ende" opal-value-type="date" val-fhir-path="MedicationStatement.effective.end.value"/>
        <attribute csv-column="systemische_therapie_protokoll" opal-value-type="text" val-fhir-path="MedicationStatement.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SystemischeTherapieProtokoll').value.text.value"/>
        <attribute csv-column="systemische_therapie_substanzen" opal-value-type="text" val-fhir-path="MedicationStatement.medication.text.value"/>
        <attribute csv-column="chemotherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'CH').exists().value" />
        <attribute csv-column="hormontherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'HO').exists().value" />
        <attribute csv-column="immuntherapie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'IM').exists().value" />
        <attribute csv-column="knochenmarktransplantation" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'KM').exists().value" />
        <attribute csv-column="abwartende_strategie" opal-value-type="boolean" val-fhir-path="MedicationStatement.where(category.coding.code = 'WS').exists().value" />
    </container>

    <container csv-filename="Surgery-${TIMESTAMP}.csv" opal-table="surgery" opal-entity-type="Surgery">
        <attribute csv-column="surgery-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').id" anonym="Sur" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').reasonReference.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').subject.reference.value" anonym="Pat" />
        <attribute csv-column="ops-code" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').code.coding.code.value"/>
        <attribute csv-column="datum_der_op" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'OP').performed.value"/>
        <attribute csv-column="intention_op" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-OPIntention').value.coding.code.value"/>
        <attribute csv-column="lokale_beurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/LokaleBeurteilungResidualstatusCS').code.value" />
        <attribute csv-column="gesamtbeurteilung_resttumor" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'OP').outcome.coding.where(system = 'http://dktk.dkfz.de/fhir/onco/core/CodeSystem/GesamtbeurteilungResidualstatusCS').code.value" />
    </container>

    <container csv-filename="Radiation-Therapy-${TIMESTAMP}.csv" opal-table="radiation-therapy" opal-entity-type="RadiationTherapy">
        <attribute csv-column="radiation-therapy-id" primary-key="true" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').id" anonym="Rad" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').reasonReference.reference.value" anonym="Dia"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Procedure.where(category.coding.code = 'ST').subject.reference.value" anonym="Pat" />
        <attribute csv-column="strahlentherapie_stellung_zu_operativer_therapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-StellungZurOp').value.coding.code.value"/>
        <attribute csv-column="intention_strahlentherapie" opal-value-type="text" val-fhir-path="Procedure.extension('http://dktk.dkfz.de/fhir/StructureDefinition/onco-core-Extension-SYSTIntention').value.coding.code.value" />
        <attribute csv-column="strahlentherapie_beginn" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.start.value"/>
        <attribute csv-column="strahlentherapie_ende" opal-value-type="date" val-fhir-path="Procedure.where(category.coding.code = 'ST').performed.end.value"/>
    </container>

    <container csv-filename="Molecular-Marker-${TIMESTAMP}.csv" opal-table="molecular-marker" opal-entity-type="MolecularMarker">
        <attribute csv-column="mol-marker-id" primary-key="true" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').id" anonym="Mol" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="diagnosis-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').focus.reference.value" anonym="Dia" />
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').subject.reference.value" anonym="Pat" />
        <attribute csv-column="datum_der_datenerhebung" opal-value-type="date" val-fhir-path="Observation.where(code.coding.code = '69548-6').effective.value"/>
        <attribute csv-column="marker" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').component.value.coding.code.value"/>
        <attribute csv-column="status_des_molekularen_markers" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.coding.code.value" />
        <attribute csv-column="zusaetzliche_alternative_dokumentation" opal-value-type="text" val-fhir-path="Observation.where(code.coding.code = '69548-6').value.text.value"/>
    </container>

    <container csv-filename="Sample-${TIMESTAMP}.csv" opal-table="sample" opal-entity-type="Sample">
        <attribute csv-column="sample-id" primary-key="true" opal-value-type="text" val-fhir-path="Specimen.id" anonym="Sam" op="EXTRACT_RELATIVE_ID"/>
        <attribute csv-column="patient-id" opal-value-type="text" val-fhir-path="Specimen.subject.reference.value" anonym="Pat" />
        <attribute csv-column="entnahmedatum" opal-value-type="date" val-fhir-path="Specimen.collection.collectedDateTime.value"/>
        <attribute csv-column="probenart" opal-value-type="text" val-fhir-path="Specimen.type.coding.code.value"/>
        <attribute csv-column="status" opal-value-type="text" val-fhir-path="Specimen.status.code.value"/>
        <attribute csv-column="projekt" opal-value-type="text" val-fhir-path="Specimen.identifier.system.value"/>
        <!-- @TODO: it is still necessary to clarify whether it would not be better to take the quantity of collection.quantity -->
        <attribute csv-column="menge" opal-value-type="integer" val-fhir-path="Specimen.container.specimenQuantity.value.value"/>
        <attribute csv-column="einheit" opal-value-type="text" val-fhir-path="Specimen.container.specimenQuantity.unit.value"/>
        <attribute csv-column="aliquot" opal-value-type="text" val-fhir-path="Specimen.parent.reference.exists().value" />
    </container>

    <fhir-rev-include>Observation:patient</fhir-rev-include>
    <fhir-rev-include>Condition:patient</fhir-rev-include>
    <fhir-rev-include>ClinicalImpression:patient</fhir-rev-include>
    <fhir-rev-include>MedicationStatement:patient</fhir-rev-include>
    <fhir-rev-include>Procedure:patient</fhir-rev-include>
    <fhir-rev-include>Specimen:patient</fhir-rev-include>

</template>
ccp/modules/datashield-setup.sh | 44 lines (new file)
@@ -0,0 +1,44 @@
#!/bin/bash -e

if [ "$ENABLE_DATASHIELD" == true ]; then
	# HACK: This only works because exporter-setup.sh and teiler-setup.sh are sourced after datashield-setup.sh
	if [ -z "${ENABLE_EXPORTER}" ] || [ "${ENABLE_EXPORTER}" != "true" ]; then
		log WARN "The ENABLE_EXPORTER variable is either not set or not set to 'true'."
	fi
	OAUTH2_CALLBACK=/oauth2/callback
	OAUTH2_PROXY_SECRET="$(echo \"This is a salt string to generate one consistent encryption key for the oauth2_proxy. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 32)"
	add_private_oidc_redirect_url "${OAUTH2_CALLBACK}"

	log INFO "DataSHIELD setup detected -- will start DataSHIELD services."
	OVERRIDE+=" -f ./$PROJECT/modules/datashield-compose.yml"
	EXPORTER_OPAL_PASSWORD="$(generate_password \"exporter in Opal\")"
	TOKEN_MANAGER_OPAL_PASSWORD="$(generate_password \"Token Manager in Opal\")"
	OPAL_DB_PASSWORD="$(echo \"Opal DB\" | generate_simple_password)"
	OPAL_ADMIN_PASSWORD="$(generate_password \"admin password for Opal\")"
	RSTUDIO_ADMIN_PASSWORD="$(generate_password \"admin password for R-Studio\")"
	DATASHIELD_CONNECT_SECRET="$(echo \"DataShield Connect\" | generate_simple_password)"
	TOKEN_MANAGER_SECRET="$(echo \"Token Manager\" | generate_simple_password)"
	if [ ! -e /tmp/bridgehead/opal-cert.pem ]; then
		mkdir -p /tmp/bridgehead/
		openssl req -x509 -newkey rsa:4096 -nodes -keyout /tmp/bridgehead/opal-key.pem -out /tmp/bridgehead/opal-cert.pem -days 3650 -subj "/CN=opal/C=DE"
	fi
	mkdir -p /tmp/bridgehead/opal-map
	sites="$(cat ./$PROJECT/modules/datashield-sites.json)"
	echo "$sites" | docker_jq -n --args '{"sites": input | map({
		"name": .,
		"id": .,
		"virtualhost": "\(.):443",
		"beamconnect": "datashield-connect.\(.).'"$BROKER_ID"'"
	})}' $sites >/tmp/bridgehead/opal-map/central.json
	echo "$sites" | docker_jq -n --args '[{
		"external": "'"$SITE_ID"':443",
		"internal": "opal:8443",
		"allowed": input | map("datashield-connect.\(.).'"$BROKER_ID"'")
	}]' >/tmp/bridgehead/opal-map/local.json
	if [ "$USER" == "root" ]; then
		chown -R bridgehead:docker /tmp/bridgehead
		chmod g+wr /tmp/bridgehead/opal-map/*
		chmod g+r /tmp/bridgehead/opal-key.pem
	fi
	add_private_oidc_redirect_url "/opal/*"
fi
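
To make the jq step above a little more concrete: it turns the site list in datashield-sites.json (below) into the two map files consumed by beam-connect. A rough sketch of the result, assuming SITE_ID=berlin and a purely illustrative BROKER_ID of broker.example.de:

    # Illustrative only; the real BROKER_ID comes from the site's Beam configuration.
    cat /tmp/bridgehead/opal-map/local.json
    # [ { "external": "berlin:443",
    #     "internal": "opal:8443",
    #     "allowed": [ "datashield-connect.berlin.broker.example.de",
    #                  "datashield-connect.dresden.broker.example.de", ... ] } ]
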
ccp/modules/datashield-sites.json | 14 lines (new file)
@@ -0,0 +1,14 @@
[
	"berlin",
	"muenchen-lmu",
	"dresden",
	"freiburg",
	"muenchen-tum",
	"tuebingen",
	"mainz",
	"frankfurt",
	"essen",
	"dktk-datashield-test",
	"dktk-test",
	"mannheim"
]
ccp/modules/datashield.md | 28 lines (new file)
@@ -0,0 +1,28 @@
# DataSHIELD

This module constitutes the infrastructure to run DataSHIELD within the bridgehead.
For more information about DataSHIELD, please visit https://www.datashield.org/

## R-Studio

To connect to the different bridgeheads of the CCP through DataSHIELD, you can use your own R-Studio environment.
However, the R-Studio integrated within the bridgehead already has the DataSHIELD libraries installed.
This can save you the extra configuration of your own R-Studio environment.

## Opal

This is the core of DataSHIELD. It is made up of Opal, a Postgres database and an R-server.
For more information about Opal, please visit https://opaldoc.obiba.org

### Opal

Opal is OBiBa’s core database application for biobanks.

### Opal-DB

Opal requires a database to import the data for DataSHIELD. We use a Postgres instance as database.
The data is imported within the bridgehead through the exporter.

### Opal-R-Server

R-Server to execute R scripts in DataSHIELD.

## Beam

### Beam-Connect

Beam-Connect is used to route http(s) traffic through Beam, enabling R-Studio to access data from other bridgeheads that have DataSHIELD enabled.

### Beam-Proxy

The usual Beam proxy used for communication.
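
For orientation (not part of the change itself): according to the Traefik labels in datashield-compose.yml above, the bundled R-Studio is published under /rstudio (guarded by the oidcAuth middleware) and Opal under /opal. A hedged sketch, with an illustrative host name standing in for your site's HOST value:

    # Illustrative only -- replace with your site's fully qualified host name.
    HOST=bridgehead.example-site.de
    echo "R-Studio (DataSHIELD client): https://${HOST}/rstudio"
    echo "Opal (DataSHIELD server):     https://${HOST}/opal"
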
@@ -6,7 +6,7 @@ services:
       APP_dnpm-connect_KEY: ${DNPM_BEAM_SECRET_SHORT}
   dnpm-beam-connect:
     depends_on: [ beam-proxy ]
-    image: docker.verbis.dkfz.de/cache/samply/beam-connect:dnpm
+    image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
     container_name: bridgehead-dnpm-beam-connect
     environment:
       PROXY_URL: http://beam-proxy:8081
@@ -16,9 +16,14 @@ services:
       LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
       HTTP_PROXY: "http://forward_proxy:3128"
       HTTPS_PROXY: "http://forward_proxy:3128"
-      NO_PROXY: beam-proxy,dnpm-backend
+      NO_PROXY: beam-proxy,dnpm-backend,host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
       RUST_LOG: ${RUST_LOG:-info}
+      NO_AUTH: "true"
+      TLS_CA_CERTIFICATES_DIR: ./conf/trusted-ca-certs
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     volumes:
+      - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
       - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
       - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
     labels:
@@ -28,3 +33,7 @@ services:
       - "traefik.http.routers.dnpm-connect.middlewares=dnpm-connect-strip"
       - "traefik.http.services.dnpm-connect.loadbalancer.server.port=8062"
       - "traefik.http.routers.dnpm-connect.tls=true"
+
+  dnpm-echo:
+    image: docker.verbis.dkfz.de/cache/samply/bridgehead-echo:latest
+    container_name: bridgehead-dnpm-echo
ccp/modules/dnpm-node-compose.yml | 34 lines (new file)
@@ -0,0 +1,34 @@
version: "3.7"

services:
  dnpm-backend:
    image: ghcr.io/kohlbacherlab/bwhc-backend:1.0-snapshot-broker-connector
    container_name: bridgehead-dnpm-backend
    environment:
      - ZPM_SITE=${ZPM_SITE}
      - N_RANDOM_FILES=${DNPM_SYNTH_NUM}
    volumes:
      - /etc/bridgehead/dnpm:/bwhc_config:ro
      - ${DNPM_DATA_DIR}:/bwhc_data
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.bwhc-backend.rule=PathPrefix(`/bwhc`)"
      - "traefik.http.services.bwhc-backend.loadbalancer.server.port=9000"
      - "traefik.http.routers.bwhc-backend.tls=true"

  dnpm-frontend:
    image: ghcr.io/kohlbacherlab/bwhc-frontend:2209
    container_name: bridgehead-dnpm-frontend
    links:
      - dnpm-backend
    environment:
      - NUXT_HOST=0.0.0.0
      - NUXT_PORT=8080
      - BACKEND_PROTOCOL=https
      - BACKEND_HOSTNAME=$HOST
      - BACKEND_PORT=443
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.bwhc-frontend.rule=PathPrefix(`/`)"
      - "traefik.http.services.bwhc-frontend.loadbalancer.server.port=8080"
      - "traefik.http.routers.bwhc-frontend.tls=true"
ccp/modules/dnpm-node-setup.sh | 28 lines (new file)
@@ -0,0 +1,28 @@
#!/bin/bash

if [ -n "${ENABLE_DNPM_NODE}" ]; then
	log INFO "DNPM setup detected (BwHC Node) -- will start BwHC node."
	OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"

	# Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
	DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
	if [ -z "${ZPM_SITE+x}" ]; then
		log ERROR "Mandatory variable ZPM_SITE not defined!"
		exit 1
	fi
	if [ -z "${DNPM_DATA_DIR+x}" ]; then
		log ERROR "Mandatory variable DNPM_DATA_DIR not defined!"
		exit 1
	fi
	DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:-0}
	if grep -q 'traefik.http.routers.landing.rule=PathPrefix(`/landing`)' /srv/docker/bridgehead/minimal/docker-compose.override.yml 2>/dev/null; then
		echo "Override of landing page url already in place"
	else
		echo "Adding override of landing page url"
		if [ -f /srv/docker/bridgehead/minimal/docker-compose.override.yml ]; then
			echo -e '  landing:\n    labels:\n      - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
		else
			echo -e 'version: "3.7"\nservices:\n  landing:\n    labels:\n      - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
		fi
	fi
fi
@@ -1,10 +1,15 @@
-#!/bin/bash
+#!/bin/bash -e
 
 if [ -n "${ENABLE_DNPM}" ]; then
 	log INFO "DNPM setup detected (Beam.Connect) -- will start Beam.Connect for DNPM."
 	OVERRIDE+=" -f ./$PROJECT/modules/dnpm-compose.yml"
 
 	# Set variables required for Beam-Connect
-	DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
 	DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
+	# If the DNPM_NO_PROXY variable is set, prefix it with a comma (as it gets added to a comma separated list)
+	if [ -n "${DNPM_NO_PROXY}" ]; then
+		DNPM_ADDITIONAL_NO_PROXY=",${DNPM_NO_PROXY}"
+	else
+		DNPM_ADDITIONAL_NO_PROXY=""
+	fi
 fi
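
The new DNPM_NO_PROXY knob lets a site keep requests to its own DNPM/BwHC backend from going through the forward proxy; whatever is set there is appended to Beam.Connect's NO_PROXY list (see the dnpm-compose.yml hunk above). A minimal sketch, with a purely illustrative host name and the usual ccp config file assumed:

    # Hypothetical entry in the site configuration sourced by the bridgehead:
    echo 'DNPM_NO_PROXY=dnpm-backend.internal.example-site.de' | sudo tee -a /etc/bridgehead/ccp.conf
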
ccp/modules/export-and-qb.curl-templates | 6 lines (new file)
@@ -0,0 +1,6 @@
# Full Excel Export
curl --location --request POST 'https://${HOST}/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL' \
--header 'x-api-key: ${EXPORT_API_KEY}'

# QB
curl --location --request POST 'https://${HOST}/ccp-reporter/generate?template-id=ccp'
ccp/modules/exporter-compose.yml | 67 lines (new file)
@@ -0,0 +1,67 @@
version: "3.7"

services:
  exporter:
    image: docker.verbis.dkfz.de/ccp/dktk-exporter:latest
    container_name: bridgehead-ccp-exporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      CROSS_ORIGINS: "https://${HOST}"
      EXPORTER_DB_USER: "exporter"
      EXPORTER_DB_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      EXPORTER_DB_URL: "jdbc:postgresql://exporter-db:5432/exporter"
      HTTP_RELATIVE_PATH: "/ccp-exporter"
      SITE: "${SITE_ID}"
      HTTP_SERVLET_REQUEST_SCHEME: "https"
      OPAL_PASSWORD: "${EXPORTER_OPAL_PASSWORD}"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.exporter_ccp.rule=PathPrefix(`/ccp-exporter`)"
      - "traefik.http.services.exporter_ccp.loadbalancer.server.port=8092"
      - "traefik.http.routers.exporter_ccp.tls=true"
      - "traefik.http.middlewares.exporter_ccp_strip.stripprefix.prefixes=/ccp-exporter"
      - "traefik.http.routers.exporter_ccp.middlewares=exporter_ccp_strip"
    volumes:
      - "/var/cache/bridgehead/ccp/exporter-files:/app/exporter-files/output"

  exporter-db:
    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
    container_name: bridgehead-ccp-exporter-db
    environment:
      POSTGRES_USER: "exporter"
      POSTGRES_PASSWORD: "${EXPORTER_DB_PASSWORD}" # Set in exporter-setup.sh
      POSTGRES_DB: "exporter"
    volumes:
      # Consider removing this volume once we find a solution to save Lens-queries to be executed in the explorer.
      - "/var/cache/bridgehead/ccp/exporter-db:/var/lib/postgresql/data"

  reporter:
    image: docker.verbis.dkfz.de/ccp/dktk-reporter:latest
    container_name: bridgehead-ccp-reporter
    environment:
      JAVA_OPTS: "-Xms1G -Xmx8G -XX:+UseG1GC"
      LOG_LEVEL: "INFO"
      CROSS_ORIGINS: "https://${HOST}"
      HTTP_RELATIVE_PATH: "/ccp-reporter"
      SITE: "${SITE_ID}"
      EXPORTER_API_KEY: "${EXPORTER_API_KEY}" # Set in exporter-setup.sh
      EXPORTER_URL: "http://exporter:8092"
      LOG_FHIR_VALIDATION: "false"
      HTTP_SERVLET_REQUEST_SCHEME: "https"

    # In this initial development state of the bridgehead, we are trying to have as few volumes as possible.
    # However, in the first executions in the CCP sites, this volume seems to be very important. A report is
    # a process that can take several hours, because it depends on the exporter.
    # There is a risk that the bridgehead restarts, losing the already created export.

    volumes:
      - "/var/cache/bridgehead/ccp/reporter-files:/app/reports"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.reporter_ccp.rule=PathPrefix(`/ccp-reporter`)"
      - "traefik.http.services.reporter_ccp.loadbalancer.server.port=8095"
      - "traefik.http.routers.reporter_ccp.tls=true"
      - "traefik.http.middlewares.reporter_ccp_strip.stripprefix.prefixes=/ccp-reporter"
      - "traefik.http.routers.reporter_ccp.middlewares=reporter_ccp_strip"
ccp/modules/exporter-setup.sh | 8 lines (new file)
@@ -0,0 +1,8 @@
#!/bin/bash -e

if [ "$ENABLE_EXPORTER" == true ]; then
	log INFO "Exporter setup detected -- will start Exporter service."
	OVERRIDE+=" -f ./$PROJECT/modules/exporter-compose.yml"
	EXPORTER_DB_PASSWORD="$(echo \"This is a salt string to generate one consistent password for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
	EXPORTER_API_KEY="$(echo \"This is a salt string to generate one consistent API KEY for the exporter. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 64)"
fi
ccp/modules/exporter.md | 15 lines (new file)
@@ -0,0 +1,15 @@
# Exporter and Reporter


## Exporter

The exporter is a REST API that exports the data of the different databases of the bridgehead as a set of tables.
It can produce different output formats such as CSV, Excel, JSON or XML, and it can also export data into Opal.

## Exporter-DB

The Exporter-DB is a database that stores queries for execution by the exporter.
The exporter also uses this database to manage the different executions of the same query.

## Reporter

This component is a plugin of the exporter that allows creating more complex Excel reports described in templates.
It is compatible with different template engines such as Groovy, Thymeleaf, ...
It is well suited to generate documents such as our traditional CCP quality report.
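
For a concrete picture of the API described above, the templates added in export-and-qb.curl-templates boil down to calls like the following sketch; the host name is a placeholder and the API key is the one generated in exporter-setup.sh:

    # Trigger a full Excel export via the /ccp-exporter route (illustrative host).
    curl --request POST \
      --header "x-api-key: ${EXPORTER_API_KEY}" \
      "https://bridgehead.example-site.de/ccp-exporter/request?query=Patient&query-format=FHIR_PATH&template-id=ccp&output-format=EXCEL"

    # Generate the quality report ("QB") via the /ccp-reporter route.
    curl --request POST "https://bridgehead.example-site.de/ccp-reporter/generate?template-id=ccp"
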
@@ -1,4 +1,5 @@
 version: "3.7"
+
 services:
   id-manager:
     image: docker.verbis.dkfz.de/bridgehead/magicpl
@@ -28,6 +29,7 @@ services:
     container_name: bridgehead-patientlist
     environment:
       - TOMCAT_REVERSEPROXY_FQDN=${HOST}
+      - TOMCAT_REVERSEPROXY_SSL=true
       - ML_SITE=${IDMANAGEMENT_FRIENDLY_ID}
       - ML_DB_PASS=${PATIENTLIST_POSTGRES_PASSWORD}
       - ML_API_KEY=${IDMANAGER_LOCAL_PATIENTLIST_APIKEY}
@@ -43,7 +45,7 @@ services:
       - patientlist-db
 
   patientlist-db:
-    image: docker.verbis.dkfz.de/cache/postgres:15.1-alpine
+    image: docker.verbis.dkfz.de/cache/postgres:${POSTGRES_TAG}
     container_name: bridgehead-patientlist-db
     environment:
       POSTGRES_USER: "mainzelliste"
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -e
 
 function idManagementSetup() {
 	if [ -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
@@ -39,6 +39,7 @@ function applySpecialCases() {
 	result="$1";
 	result="${result/Lmu/LMU}";
 	result="${result/Tum/TUM}";
+	result="${result/Dktk Test/Teststandort}";
 	echo "$result";
 }
 
@@ -11,22 +11,30 @@ services:
       ID_MANAGER_API_KEY: ${IDMANAGER_UPLOAD_APIKEY}
       ID_MANAGER_PSEUDONYM_ID_TYPE: BK_${IDMANAGEMENT_FRIENDLY_ID}_L-ID
       ID_MANAGER_URL: http://id-manager:8080/id-manager
-      PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER}
+      PATIENT_CSV_FIRST_NAME_HEADER: ${MTBA_PATIENT_CSV_FIRST_NAME_HEADER:-FIRST_NAME}
-      PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER}
+      PATIENT_CSV_LAST_NAME_HEADER: ${MTBA_PATIENT_CSV_LAST_NAME_HEADER:-LAST_NAME}
-      PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER}
+      PATIENT_CSV_GENDER_HEADER: ${MTBA_PATIENT_CSV_GENDER_HEADER:-GENDER}
-      PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER}
+      PATIENT_CSV_BIRTHDAY_HEADER: ${MTBA_PATIENT_CSV_BIRTHDAY_HEADER:-BIRTHDAY}
       CBIOPORTAL_URL: http://cbioportal:8080
-      FILE_CHARSET: ${MTBA_FILE_CHARSET}
+      FILE_CHARSET: ${MTBA_FILE_CHARSET:-UTF-8}
-      FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE}
+      FILE_END_OF_LINE: ${MTBA_FILE_END_OF_LINE:-LF}
-      CSV_DELIMITER: ${MTBA_CSV_DELIMITER}
+      CSV_DELIMITER: ${MTBA_CSV_DELIMITER:-TAB}
+      HTTP_RELATIVE_PATH: "/mtba"
+      OIDC_ADMIN_GROUP: "${OIDC_ADMIN_GROUP}"
+      OIDC_CLIENT_ID: "${OIDC_PRIVATE_CLIENT_ID}"
+      OIDC_CLIENT_SECRET: "${OIDC_CLIENT_SECRET}"
+      OIDC_REALM: "${OIDC_REALM}"
+      OIDC_URL: "${OIDC_URL}"
+
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.mtba.rule=PathPrefix(`/`)"
+      - "traefik.http.routers.mtba_ccp.rule=PathPrefix(`/mtba`)"
-      - "traefik.http.services.mtba.loadbalancer.server.port=80"
+      - "traefik.http.services.mtba_ccp.loadbalancer.server.port=8480"
-      - "traefik.http.routers.mtba.tls=true"
+      - "traefik.http.routers.mtba_ccp.tls=true"
 
     volumes:
-      - /tmp/bridgehead/mtba/input:/app/input
+      - /var/cache/bridgehead/ccp/mtba/input:/app/input
-      - /tmp/bridgehead/mtba/persist:/app/persist
+      - /var/cache/bridgehead/ccp/mtba/persist:/app/persist
 
 # TODO: Include CBioPortal in Deployment ...
 # NOTE: CBioPortal can't load data while the system is running. So after import of data bridgehead needs to be restarted!
@ -1,13 +1,13 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash -e
|
||||||
|
|
||||||
function mtbaSetup() {
|
function mtbaSetup() {
|
||||||
# TODO: Check if ID-Management Module is activated!
|
if [ -n "$ENABLE_MTBA" ];then
|
||||||
if [ -n "$ENABLE_MTBA" ];then
|
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
|
||||||
log INFO "MTBA setup detected -- will start MTBA Service and CBioPortal."
|
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
|
||||||
if [ ! -n "$IDMANAGER_UPLOAD_APIKEY" ]; then
|
log ERROR "Missing ID-Management Module! Fix this by setting up ID Management:"
|
||||||
log ERROR "Detected MTBA Module configuration but ID-Management Module seems not to be configured!"
|
exit 1;
|
||||||
exit 1;
|
fi
|
||||||
fi
|
OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
|
||||||
OVERRIDE+=" -f ./$PROJECT/modules/mtba-compose.yml"
|
add_private_oidc_redirect_url "/mtba/*"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
ccp/modules/mtba.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+# Molecular Tumor Board Alliance (MTBA)
+
+In this module, the genetic data to be imported is stored in a directory (/tmp/bridgehead/mtba/input). A process checks
+regularly whether there are files in the directory. The files are pseudonymized when the IDAT is provided. The files are
+combined with clinical data from Blaze and imported into cBioPortal. In addition, these files are also imported into
+Blaze.
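As a rough illustration of the flow described above (not part of the diff): dropping a file with genetic data into the watched input directory is all that triggers an import. The file name below is invented, the host path follows the volume mount from the compose change above, and the restart command assumes the usual bridgehead systemd unit.

```bash
# Hypothetical example: hand a CSV with genetic data to the MTBA watcher.
# The directory is the host side of the /app/input volume mount shown above.
sudo cp variants_patient42.csv /var/cache/bridgehead/ccp/mtba/input/

# Per the NOTE in the compose file, cBioPortal cannot load data while running,
# so restart the bridgehead after the import has finished.
sudo systemctl restart bridgehead@ccp.service
```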
@@ -1,4 +1,5 @@
 version: "3.7"
 
 volumes:
 nngm-rest:
 
@@ -21,9 +22,6 @@ services:
 - "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
 volumes:
 - nngm-rest:/var/log
 
 traefik:
 labels:
 - "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"
@@ -1,8 +1,6 @@
-#!/bin/bash
+#!/bin/bash -e
 
-function nngmSetup() {
 if [ -n "$NNGM_CTS_APIKEY" ]; then
 log INFO "nNGM setup detected -- will start nNGM Connector."
 OVERRIDE+=" -f ./$PROJECT/modules/nngm-compose.yml"
 fi
-}
ccp/modules/teiler-compose.yml (new file, 81 lines)
@@ -0,0 +1,81 @@
+version: "3.7"
+
+services:
+
+teiler-orchestrator:
+image: docker.verbis.dkfz.de/cache/samply/teiler-orchestrator:latest
+container_name: bridgehead-teiler-orchestrator
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.teiler_orchestrator_ccp.rule=PathPrefix(`/ccp-teiler`)"
+- "traefik.http.services.teiler_orchestrator_ccp.loadbalancer.server.port=9000"
+- "traefik.http.routers.teiler_orchestrator_ccp.tls=true"
+- "traefik.http.middlewares.teiler_orchestrator_ccp_strip.stripprefix.prefixes=/ccp-teiler"
+- "traefik.http.routers.teiler_orchestrator_ccp.middlewares=teiler_orchestrator_ccp_strip"
+environment:
+TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
+TEILER_DASHBOARD_URL: "https://${HOST}/ccp-teiler-dashboard"
+DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE_LOWER_CASE}"
+HTTP_RELATIVE_PATH: "/ccp-teiler"
+
+teiler-dashboard:
+image: docker.verbis.dkfz.de/cache/samply/teiler-dashboard:develop
+container_name: bridgehead-teiler-dashboard
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.teiler_dashboard_ccp.rule=PathPrefix(`/ccp-teiler-dashboard`)"
+- "traefik.http.services.teiler_dashboard_ccp.loadbalancer.server.port=80"
+- "traefik.http.routers.teiler_dashboard_ccp.tls=true"
+- "traefik.http.middlewares.teiler_dashboard_ccp_strip.stripprefix.prefixes=/ccp-teiler-dashboard"
+- "traefik.http.routers.teiler_dashboard_ccp.middlewares=teiler_dashboard_ccp_strip"
+environment:
+DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
+TEILER_BACKEND_URL: "https://${HOST}/ccp-teiler-backend"
+OIDC_URL: "${OIDC_URL}"
+OIDC_REALM: "${OIDC_REALM}"
+OIDC_CLIENT_ID: "${OIDC_PUBLIC_CLIENT_ID}"
+OIDC_TOKEN_GROUP: "${OIDC_GROUP_CLAIM}"
+TEILER_ADMIN_NAME: "${OPERATOR_FIRST_NAME} ${OPERATOR_LAST_NAME}"
+TEILER_ADMIN_EMAIL: "${OPERATOR_EMAIL}"
+TEILER_ADMIN_PHONE: "${OPERATOR_PHONE}"
+TEILER_PROJECT: "${PROJECT}"
+EXPORTER_API_KEY: "${EXPORTER_API_KEY}"
+TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
+TEILER_DASHBOARD_HTTP_RELATIVE_PATH: "/ccp-teiler-dashboard"
+TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
+TEILER_USER: "${OIDC_USER_GROUP}"
+TEILER_ADMIN: "${OIDC_ADMIN_GROUP}"
+REPORTER_DEFAULT_TEMPLATE_ID: "ccp-qb"
+EXPORTER_DEFAULT_TEMPLATE_ID: "ccp"
+
+teiler-backend:
+image: docker.verbis.dkfz.de/ccp/dktk-teiler-backend:latest
+container_name: bridgehead-teiler-backend
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.teiler_backend_ccp.rule=PathPrefix(`/ccp-teiler-backend`)"
+- "traefik.http.services.teiler_backend_ccp.loadbalancer.server.port=8085"
+- "traefik.http.routers.teiler_backend_ccp.tls=true"
+- "traefik.http.middlewares.teiler_backend_ccp_strip.stripprefix.prefixes=/ccp-teiler-backend"
+- "traefik.http.routers.teiler_backend_ccp.middlewares=teiler_backend_ccp_strip"
+environment:
+LOG_LEVEL: "INFO"
+APPLICATION_PORT: "8085"
+APPLICATION_ADDRESS: "${HOST}"
+DEFAULT_LANGUAGE: "${TEILER_DEFAULT_LANGUAGE}"
+CONFIG_ENV_VAR_PATH: "/run/secrets/ccp.conf"
+TEILER_ORCHESTRATOR_HTTP_RELATIVE_PATH: "/ccp-teiler"
+TEILER_ORCHESTRATOR_URL: "https://${HOST}/ccp-teiler"
+TEILER_DASHBOARD_DE_URL: "https://${HOST}/ccp-teiler-dashboard/de"
+TEILER_DASHBOARD_EN_URL: "https://${HOST}/ccp-teiler-dashboard/en"
+CENTRAX_URL: "${CENTRAXX_URL}"
+HTTP_PROXY: "http://forward_proxy:3128"
+ENABLE_MTBA: "${ENABLE_MTBA}"
+ENABLE_DATASHIELD: "${ENABLE_DATASHIELD}"
+secrets:
+- ccp.conf
+
+secrets:
+ccp.conf:
+file: /etc/bridgehead/ccp.conf
ccp/modules/teiler-setup.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+if [ "$ENABLE_TEILER" == true ];then
+log INFO "Teiler setup detected -- will start Teiler services."
+OVERRIDE+=" -f ./$PROJECT/modules/teiler-compose.yml"
+TEILER_DEFAULT_LANGUAGE=DE
+TEILER_DEFAULT_LANGUAGE_LOWER_CASE=${TEILER_DEFAULT_LANGUAGE,,}
+add_public_oidc_redirect_url "/ccp-teiler/*"
+fi
ccp/modules/teiler.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+# Teiler
+This module orchestrates the different microfrontends of the bridgehead as a single-page application.
+
+## Teiler Orchestrator
+The single-spa root component: the root HTML page of the single-page application plus the JavaScript code that
+fetches the microfrontend configuration from the Teiler backend and is responsible for registering the microfrontends. With the
+resulting mapping, it can initialize, mount and unmount the required microfrontends on the fly.
+
+The microfrontends run independently in different containers and can be based on different frameworks (Angular, Vue, React, ...).
+They can run standalone, but need to be extended with Single-SPA (https://single-spa.js.org/docs/ecosystem).
+Three templates (Angular, Vue, React) are also available that can be extended and used directly in the Teiler.
+
+## Teiler Dashboard
+It consists of the main dashboard and a set of embedded services.
+### Login
+Username and password are configured in ccp.local.conf.
+
+## Teiler Backend
+In this component, the microfrontends are configured.
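For illustration only (not part of the diff): the three components are reachable under the Traefik path prefixes defined in teiler-compose.yml above, so a quick smoke test is possible with curl. The hostname is a placeholder.

```bash
# Hypothetical smoke test for the three Teiler routes defined above.
HOST=bridgehead.example.org   # replace with your site's hostname
curl -sk -o /dev/null -w "orchestrator: %{http_code}\n" "https://$HOST/ccp-teiler/"
curl -sk -o /dev/null -w "dashboard:    %{http_code}\n" "https://$HOST/ccp-teiler-dashboard/"
curl -sk -o /dev/null -w "backend:      %{http_code}\n" "https://$HOST/ccp-teiler-backend/"
```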
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDNTCCAh2gAwIBAgIUN7yzueIZzwpe8PaPEIMY8zoH+eMwDQYJKoZIhvcNAQEL
-BQAwFjEUMBIGA1UEAxMLQnJva2VyLVJvb3QwHhcNMjMwNTIzMTAxNzIzWhcNMzMw
-NTIwMTAxNzUzWjAWMRQwEgYDVQQDEwtCcm9rZXItUm9vdDCCASIwDQYJKoZIhvcN
-AQEBBQADggEPADCCAQoCggEBAN5JAj+HydSGaxvA0AOcrXVTZ9FfsH0cMVBlQb72
-bGZgrRvkqtB011TNXZfsHl7rPxCY61DcsDJfFq3+8VHT+S9HE0qV1bEwP+oA3xc4
-Opq77av77cNNOqDC7h+jyPhHcUaE33iddmrH9Zn2ofWTSkKHHu3PAe5udCrc2QnD
-4PLRF6gqiEY1mcGknJrXj1ff/X0nRY/m6cnHNXz0Cvh8oPOtbdfGgfZjID2/fJNP
-fNoNKqN+5oJAZ+ZZ9id9rBvKj1ivW3F2EoGjZF268SgZzc5QrM/D1OpSBQf5SF/V
-qUPcQTgt9ry3YR+SZYazLkfKMEOWEa0WsqJVgXdQ6FyergcCAwEAAaN7MHkwDgYD
-VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEa70kcseqU5
-bHx2zSt4bG21HokhMB8GA1UdIwQYMBaAFEa70kcseqU5bHx2zSt4bG21HokhMBYG
-A1UdEQQPMA2CC0Jyb2tlci1Sb290MA0GCSqGSIb3DQEBCwUAA4IBAQCGmE7NXW4T
-6J4mV3b132cGEMD7grx5JeiXK5EHMlswUS+Odz0NcBNzhUHdG4WVMbrilHbI5Ua+
-6jdKx5WwnqzjQvElP0MCw6sH/35gbokWgk1provOP99WOFRsQs+9Sm8M2XtMf9HZ
-m3wABwU/O+dhZZ1OT1PjSZD0OKWKqH/KvlsoF5R6P888KpeYFiIWiUNS5z21Jm8A
-ZcllJjiRJ60EmDwSUOQVJJSMOvtr6xTZDZLtAKSN8zN08lsNGzyrFwqjDwU0WTqp
-scMXEGBsWQjlvxqDnXyljepR0oqRIjOvgrWaIgbxcnu98tK/OdBGwlAPKNUW7Crr
-vO+eHxl9iqd4
------END CERTIFICATE-----
ccp/vars (15 changes)
@@ -8,6 +8,17 @@ PRIVATEKEYFILENAME=/etc/bridgehead/pki/${SITE_ID}.priv.pem
 
 BROKER_URL_FOR_PREREQ=$BROKER_URL
+
+OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
+OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"
+OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
+OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
+# Use "test-realm-01" for testing
+OIDC_REALM="${OIDC_REALM:-master}"
+OIDC_URL="https://login.verbis.dkfz.de"
+OIDC_ISSUER_URL="${OIDC_URL}/realms/${OIDC_REALM}"
+OIDC_GROUP_CLAIM="groups"
+
+POSTGRES_TAG=15.6-alpine
 
 for module in $PROJECT/modules/*.sh
 do
@@ -16,5 +27,5 @@ do
 done
 
 idManagementSetup
-nngmSetup
 mtbaSetup
+adt2fhirRestSetup
lib/functions.sh (167 changes)
@@ -9,6 +9,33 @@ detectCompose() {
 fi
 }
 
+setupProxy() {
+### Note: As the current data protection concepts do not allow communication via HTTP,
+### we are not setting a proxy for HTTP requests.
+
+local http="no"
+local https="no"
+if [ $HTTPS_PROXY_URL ]; then
+local proto="$(echo $HTTPS_PROXY_URL | grep :// | sed -e 's,^\(.*://\).*,\1,g')"
+local fqdn="$(echo ${HTTPS_PROXY_URL/$proto/})"
+local hostport=$(echo $HTTPS_PROXY_URL | sed -e "s,$proto,,g" | cut -d/ -f1)
+HTTPS_PROXY_HOST="$(echo $hostport | sed -e 's,:.*,,g')"
+HTTPS_PROXY_PORT="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')"
+if [[ ! -z "$HTTPS_PROXY_USERNAME" && ! -z "$HTTPS_PROXY_PASSWORD" ]]; then
+local proto="$(echo $HTTPS_PROXY_URL | grep :// | sed -e 's,^\(.*://\).*,\1,g')"
+local fqdn="$(echo ${HTTPS_PROXY_URL/$proto/})"
+HTTPS_PROXY_FULL_URL="$(echo $proto$HTTPS_PROXY_USERNAME:$HTTPS_PROXY_PASSWORD@$fqdn)"
+https="authenticated"
+else
+HTTPS_PROXY_FULL_URL=$HTTPS_PROXY_URL
+https="unauthenticated"
+fi
+fi
+
+log INFO "Configuring proxy servers: $http http proxy (we're not supporting unencrypted comms), $https https proxy"
+export HTTPS_PROXY_HOST HTTPS_PROXY_PORT HTTPS_PROXY_FULL_URL
+}
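For illustration only (not part of the diff): given a typical authenticated proxy configuration, the parsing in setupProxy above yields roughly the following values. All input values are invented.

```bash
# Assumed input (example values only):
#   HTTPS_PROXY_URL=http://proxy.example.org:3128
#   HTTPS_PROXY_USERNAME=bridgehead   HTTPS_PROXY_PASSWORD=s3cret
# setupProxy then derives and exports:
#   HTTPS_PROXY_HOST=proxy.example.org
#   HTTPS_PROXY_PORT=3128
#   HTTPS_PROXY_FULL_URL=http://bridgehead:s3cret@proxy.example.org:3128
```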
 
 exitIfNotRoot() {
 if [ "$EUID" -ne 0 ]; then
 log "ERROR" "Please run as root"
@@ -26,7 +53,7 @@ checkOwner(){
 }
 
 printUsage() {
-echo "Usage: bridgehead start|stop|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
+echo "Usage: bridgehead start|stop|logs|is-running|update|install|uninstall|adduser|enroll PROJECTNAME"
 echo "PROJECTNAME should be one of ccp|bbmri"
 }
 
@@ -49,7 +76,7 @@ fetchVarsFromVault() {
 
 set +e
 
-PASS=$(BW_MASTERPASS="$BW_MASTERPASS" BW_CLIENTID="$BW_CLIENTID" BW_CLIENTSECRET="$BW_CLIENTSECRET" docker run --rm -e BW_MASTERPASS -e BW_CLIENTID -e BW_CLIENTSECRET -e http_proxy samply/bridgehead-vaultfetcher $@)
+PASS=$(BW_MASTERPASS="$BW_MASTERPASS" BW_CLIENTID="$BW_CLIENTID" BW_CLIENTSECRET="$BW_CLIENTSECRET" docker run --rm -e BW_MASTERPASS -e BW_CLIENTID -e BW_CLIENTSECRET -e http_proxy docker.verbis.dkfz.de/cache/samply/bridgehead-vaultfetcher:latest $@)
 RET=$?
 
 if [ $RET -ne 0 ]; then
@@ -128,6 +155,28 @@ setHostname() {
 fi
 }
 
+# This function optimizes the usage of memory through blaze, according to the official performance tuning guide:
+# https://github.com/samply/blaze/blob/master/docs/tuning-guide.md
+# Short summary of the adjustments made:
+# - set blaze memory cap to a quarter of the system memory
+# - set db block cache size to a quarter of the system memory
+# - limit resource count allowed in blaze to 1,25M per 4GB available system memory
+optimizeBlazeMemoryUsage() {
+if [ -z "$BLAZE_MEMORY_CAP" ]; then
+system_memory_in_mb=$(LC_ALL=C free -m | grep 'Mem:' | awk '{print $2}');
+export BLAZE_MEMORY_CAP=$(($system_memory_in_mb/4));
+fi
+if [ -z "$BLAZE_RESOURCE_CACHE_CAP" ]; then
+available_system_memory_chunks=$((BLAZE_MEMORY_CAP / 1000))
+if [ $available_system_memory_chunks -eq 0 ]; then
+log WARN "Only ${BLAZE_MEMORY_CAP} system memory available for Blaze. If your Blaze stores more than 128000 fhir ressources it will run significally slower."
+export BLAZE_RESOURCE_CACHE_CAP=128000;
+else
+export BLAZE_RESOURCE_CACHE_CAP=$((available_system_memory_chunks * 312500))
+fi
+fi
+}
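A worked example of what the sizing rule above computes (illustration only, not part of the diff), assuming a host with roughly 16 GB of RAM:

```bash
# Assumed: `free -m` reports about 16000 MB of system memory.
#   BLAZE_MEMORY_CAP               = 16000 / 4    = 4000 (MB, a quarter of system memory)
#   available_system_memory_chunks = 4000 / 1000  = 4
#   BLAZE_RESOURCE_CACHE_CAP       = 4 * 312500   = 1250000 resources
# A host with less than ~4 GB of RAM falls into the warning branch and is pinned to 128000.
```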
 
 # Takes 1) The Backup Directory Path 2) The name of the Service to be backuped
 # Creates 3 Backups: 1) For the past seven days 2) For the current month and 3) for each calendar week
 createEncryptedPostgresBackup(){
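A hypothetical invocation matching the two parameters named in the comment above (both arguments are examples, not defaults from the repository):

```bash
# 1) backup directory path, 2) name of the service (container) to back up
createEncryptedPostgresBackup "/var/backups/bridgehead" "bridgehead-ccp-db"
```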
@@ -188,7 +237,7 @@ function do_enroll_inner {
 PARAMS+="--admin-email $SUPPORT_EMAIL"
 fi
 
-docker run --rm -ti -v /etc/bridgehead/pki:/etc/bridgehead/pki samply/beam-enroll:latest --output-file $PRIVATEKEYFILENAME --proxy-id $MANUAL_PROXY_ID $PARAMS
+docker run --rm -v /etc/bridgehead/pki:/etc/bridgehead/pki docker.verbis.dkfz.de/cache/samply/beam-enroll:latest --output-file $PRIVATEKEYFILENAME --proxy-id $MANUAL_PROXY_ID $PARAMS
 chmod 600 $PRIVATEKEYFILENAME
 }
@@ -211,4 +260,114 @@ add_basic_auth_user() {
 fi
 log DEBUG "Saving clear text credentials in $FILE. If wanted, delete them manually."
 sed -i "/^$NAME/ s|$|\n# User: $USER\n# Password: $PASSWORD|" $FILE
 }
 
+OIDC_PUBLIC_REDIRECT_URLS=${OIDC_PUBLIC_REDIRECT_URLS:-""}
+OIDC_PRIVATE_REDIRECT_URLS=${OIDC_PRIVATE_REDIRECT_URLS:-""}
+
+# Add a redirect url to the public oidc client of the bridgehead
+function add_public_oidc_redirect_url() {
+if [[ $OIDC_PUBLIC_REDIRECT_URLS == "" ]]; then
+OIDC_PUBLIC_REDIRECT_URLS+="$(generate_redirect_urls $1)"
+else
+OIDC_PUBLIC_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
+fi
+}
+
+# Add a redirect url to the private oidc client of the bridgehead
+function add_private_oidc_redirect_url() {
+if [[ $OIDC_PRIVATE_REDIRECT_URLS == "" ]]; then
+OIDC_PRIVATE_REDIRECT_URLS+="$(generate_redirect_urls $1)"
+else
+OIDC_PRIVATE_REDIRECT_URLS+=",$(generate_redirect_urls $1)"
+fi
+}
+
+function sync_secrets() {
+local delimiter=$'\x1E'
+local secret_sync_args=""
+if [[ $OIDC_PRIVATE_REDIRECT_URLS != "" ]]; then
+secret_sync_args="OIDC:OIDC_CLIENT_SECRET:private;$OIDC_PRIVATE_REDIRECT_URLS"
+fi
+if [[ $OIDC_PUBLIC_REDIRECT_URLS != "" ]]; then
+if [[ $secret_sync_args == "" ]]; then
+secret_sync_args="OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
+else
+secret_sync_args+="${delimiter}OIDC:OIDC_PUBLIC:public;$OIDC_PUBLIC_REDIRECT_URLS"
+fi
+fi
+if [[ $secret_sync_args == "" ]]; then
+return
+fi
+mkdir -p /var/cache/bridgehead/secrets/ || fail_and_report 1 "Failed to create '/var/cache/bridgehead/secrets/'. Please run sudo './bridgehead install $PROJECT' again."
+touch /var/cache/bridgehead/secrets/oidc
+docker run --rm \
+-v /var/cache/bridgehead/secrets/oidc:/usr/local/cache \
+-v $PRIVATEKEYFILENAME:/run/secrets/privkey.pem:ro \
+-v /srv/docker/bridgehead/$PROJECT/root.crt.pem:/run/secrets/root.crt.pem:ro \
+-v /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro \
+-e TLS_CA_CERTIFICATES_DIR=/conf/trusted-ca-certs \
+-e NO_PROXY=localhost,127.0.0.1 \
+-e ALL_PROXY=$HTTPS_PROXY_FULL_URL \
+-e PROXY_ID=$PROXY_ID \
+-e BROKER_URL=$BROKER_URL \
+-e OIDC_PROVIDER=secret-sync-central.oidc-client-enrollment.$BROKER_ID \
+-e SECRET_DEFINITIONS=$secret_sync_args \
+docker.verbis.dkfz.de/cache/samply/secret-sync-local:latest
+
+set -a # Export variables as environment variables
+source /var/cache/bridgehead/secrets/*
+set +a # Export variables in the regular way
+}
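For illustration (not part of the diff): with both redirect-URL lists populated, sync_secrets builds the SECRET_DEFINITIONS string like this. The hostnames are invented.

```bash
# Assuming:
#   OIDC_PRIVATE_REDIRECT_URLS=https://bridgehead.example.org/mtba/*
#   OIDC_PUBLIC_REDIRECT_URLS=https://bridgehead.example.org/ccp-teiler/*
# sync_secrets concatenates (0x1E is the ASCII record separator used as delimiter):
#   OIDC:OIDC_CLIENT_SECRET:private;https://bridgehead.example.org/mtba/*
#   <0x1E>
#   OIDC:OIDC_PUBLIC:public;https://bridgehead.example.org/ccp-teiler/*
```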
+
+capitalize_first_letter() {
+input="$1"
+capitalized="$(tr '[:lower:]' '[:upper:]' <<< ${input:0:1})${input:1}"
+echo "$capitalized"
+}
+
+# Generate a string of ',' separated string of redirect urls relative to $HOST.
+# $1 will be appended to the url
+# If the host looks like dev-jan.inet.dkfz-heidelberg.de it will generate urls with dev-jan and the original $HOST as url Authorities
+function generate_redirect_urls(){
+local redirect_urls="https://${HOST}$1"
+local host_without_proxy="$(echo "$HOST" | cut -d '.' -f1)"
+# Only append second url if its different and the host is not an ip address
+if [[ "$HOST" != "$host_without_proxy" && ! "$HOST" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+redirect_urls+=",https://$host_without_proxy$1"
+fi
+echo "$redirect_urls"
+}
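An example of what the helper returns (illustration only; the hostname is the one already used in the comment above):

```bash
# With HOST=dev-jan.inet.dkfz-heidelberg.de:
generate_redirect_urls "/mtba/*"
# -> https://dev-jan.inet.dkfz-heidelberg.de/mtba/*,https://dev-jan/mtba/*
# With an IP address or a single-label hostname, only the first URL is produced.
```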
+
+# This password contains at least one special char, a random number and a random upper and lower case letter
+generate_password(){
+local seed_text="$1"
+local seed_num=$(awk 'BEGIN{FS=""} NR==1{print $10}' /etc/bridgehead/pki/${SITE_ID}.priv.pem | od -An -tuC)
+local nums="1234567890"
+local n=$(echo "$seed_num" | awk '{print $1 % 10}')
+local random_digit=${nums:$n:1}
+local n=$(echo "$seed_num" | awk '{print $1 % 26}')
+local upper="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+local lower="abcdefghijklmnopqrstuvwxyz"
+local random_upper=${upper:$n:1}
+local random_lower=${lower:$n:1}
+local n=$(echo "$seed_num" | awk '{print $1 % 8}')
+local special='@#$%^&+='
+local random_special=${special:$n:1}
+
+local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
+local main_password=$(echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/\//A/g')
+
+echo "${main_password}${random_digit}${random_upper}${random_lower}${random_special}"
+}
+
+# This password only contains alphanumeric characters
+generate_simple_password(){
+local seed_text="$1"
+local combined_text="This is a salt string to generate one consistent password for ${seed_text}. It is not required to be secret."
+echo "${combined_text}" | sha1sum | openssl pkeyutl -sign -inkey "/etc/bridgehead/pki/${SITE_ID}.priv.pem" 2> /dev/null | base64 | head -c 26 | sed 's/[+\/]/A/g'
+}
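Both helpers derive a deterministic password from the site's private key plus a caller-chosen seed string, so the same seed always yields the same password. A hypothetical use (the seed strings and variable names are invented):

```bash
# Re-running these produces identical values as long as the site key is unchanged,
# so containers can be recreated without rotating credentials.
POSTGRES_PASSWORD="$(generate_simple_password 'ccp postgres')"
SERVICE_ADMIN_PASSWORD="$(generate_password 'service admin login')"
```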
+
+docker_jq() {
+docker run --rm -i docker.verbis.dkfz.de/cache/jqlang/jq:latest "$@"
+}
@@ -47,8 +47,8 @@ function hc_send(){
 
 if [ -n "$2" ]; then
 MSG="$2\n\nDocker stats:\n$UPTIME"
-echo -e "$MSG" | https_proxy=$HTTPS_PROXY_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null -X POST --data-binary @- "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
+echo -e "$MSG" | https_proxy=$HTTPS_PROXY_FULL_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null -X POST --data-binary @- "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
 else
-https_proxy=$HTTPS_PROXY_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
+https_proxy=$HTTPS_PROXY_FULL_URL curl --max-time 5 -A "$USER_AGENT" -s -o /dev/null "$HCURL"/"$1" || log WARN "Monitoring failed: Unable to send data to $HCURL/$1"
 fi
 }
@@ -89,6 +89,9 @@ elif [[ "$DEV_MODE" == "DEV" ]]; then
 fi
 
 chown -R bridgehead /etc/bridgehead /srv/docker/bridgehead
+mkdir -p /tmp/bridgehead /var/cache/bridgehead
+chown -R bridgehead:docker /tmp/bridgehead /var/cache/bridgehead
+chmod -R g+wr /var/cache/bridgehead /tmp/bridgehead
 
 log INFO "System preparation is completed and configuration is present."
@@ -14,7 +14,7 @@ checkOwner /etc/bridgehead bridgehead || exit 1
 
 ## Check if user is a su
 log INFO "Checking if all prerequisites are met ..."
-prerequisites="git docker"
+prerequisites="git docker curl"
 for prerequisite in $prerequisites; do
 $prerequisite --version 2>&1
 is_available=$?
@@ -67,29 +67,30 @@ log INFO "Checking network access ($BROKER_URL_FOR_PREREQ) ..."
 source /etc/bridgehead/${PROJECT}.conf
 source ${PROJECT}/vars
 
+if [ "${PROJECT}" != "minimal" ]; then
 set +e
-SERVERTIME="$(https_proxy=$HTTPS_PROXY_URL curl -m 5 -s -I $BROKER_URL_FOR_PREREQ 2>&1 | grep -i -e '^Date: ' | sed -e 's/^Date: //i')"
+SERVERTIME="$(https_proxy=$HTTPS_PROXY_FULL_URL curl -m 5 -s -I $BROKER_URL_FOR_PREREQ 2>&1 | grep -i -e '^Date: ' | sed -e 's/^Date: //i')"
 RET=$?
 set -e
 if [ $RET -ne 0 ]; then
 log WARN "Unable to connect to Samply.Beam broker at $BROKER_URL_FOR_PREREQ. Please check your proxy settings.\nThe currently configured proxy was \"$HTTPS_PROXY_URL\". This error is normal when using proxy authentication."
 log WARN "Unable to check clock skew due to previous error."
 else
 log INFO "Checking clock skew ..."
 
 SERVERTIME_AS_TIMESTAMP=$(date --date="$SERVERTIME" +%s)
 MYTIME=$(date +%s)
 SKEW=$(($SERVERTIME_AS_TIMESTAMP - $MYTIME))
 SKEW=$(echo $SKEW | awk -F- '{print $NF}')
 SYNCTEXT="For example, consider entering a correct NTP server (e.g. your institution's Active Directory Domain Controller in /etc/systemd/timesyncd.conf (option NTP=) and restart systemd-timesyncd."
 if [ $SKEW -ge 300 ]; then
 report_error 5 "Your clock is not synchronized (${SKEW}s off). This will cause Samply.Beam's certificate will fail. Please setup time synchronization. $SYNCTEXT"
 exit 1
 elif [ $SKEW -ge 60 ]; then
 log WARN "Your clock is more than a minute off (${SKEW}s). Consider syncing to a time server. $SYNCTEXT"
 fi
 fi
+fi
 
 checkPrivKey() {
 if [ -e /etc/bridgehead/pki/${SITE_ID}.priv.pem ]; then
 log INFO "Success - private key found."
@@ -100,7 +101,7 @@ checkPrivKey() {
 return 0
 }
 
-if [[ "$@" =~ "noprivkey" ]]; then
+if [[ "$@" =~ "noprivkey" || "${PROJECT}" != "minimal" ]]; then
 log INFO "Skipping check for private key for now."
 else
 checkPrivKey || exit 1
@@ -30,7 +30,7 @@ source $CONFFILE
 assertVarsNotEmpty SITE_ID || fail_and_report 1 "Update failed: SITE_ID empty"
 export SITE_ID
 
-checkOwner . bridgehead || fail_and_report 1 "Update failed: Wrong permissions in $(pwd)"
+checkOwner /srv/docker/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /srv/docker/bridgehead"
 checkOwner /etc/bridgehead bridgehead || fail_and_report 1 "Update failed: Wrong permissions in /etc/bridgehead"
 
 CREDHELPER="/srv/docker/bridgehead/lib/gitpassword.sh"
@@ -50,12 +50,12 @@ for DIR in /etc/bridgehead $(pwd); do
 git -C $DIR config credential.helper "$CREDHELPER"
 fi
 old_git_hash="$(git -C $DIR rev-parse --verify HEAD)"
-if [ -z "$HTTP_PROXY_URL" ]; then
+if [ -z "$HTTPS_PROXY_FULL_URL" ]; then
 log "INFO" "Git is using no proxy!"
 OUT=$(retry 5 git -C $DIR fetch 2>&1 && retry 5 git -C $DIR pull 2>&1)
 else
-log "INFO" "Git is using proxy ${HTTP_PROXY_URL} from ${CONFFILE}"
+log "INFO" "Git is using proxy ${HTTPS_PROXY_URL} from ${CONFFILE}"
-OUT=$(retry 5 git -c http.proxy=$HTTP_PROXY_URL -c https.proxy=$HTTPS_PROXY_URL -C $DIR fetch 2>&1 && retry 5 git -c http.proxy=$HTTP_PROXY_URL -c https.proxy=$HTTPS_PROXY_URL -C $DIR pull 2>&1)
+OUT=$(retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR fetch 2>&1 && retry 5 git -c http.proxy=$HTTPS_PROXY_FULL_URL -c https.proxy=$HTTPS_PROXY_FULL_URL -C $DIR pull 2>&1)
 fi
 if [ $? -ne 0 ]; then
 report_error log "Unable to update git $DIR: $OUT"
@@ -80,19 +80,13 @@ for DIR in /etc/bridgehead $(pwd); do
 log "INFO" "You can review all changes on the repository with $git_repository_url/compare?from=$old_git_hash&to=$new_git_hash"
 fi
 git_updated="true"
-if [ "$DIR" == "/srv/docker/bridgehead" ]; then
-git -C "$DIR" checkout main
-REPORT_BRANCH_NAME=$(git -C "$DIR" branch --show-current)
-REPORT_STATUS_CHECK=$(git -C "$DIR" status --porcelain)
-report_error 7 "MAINTENANCE: Switched Branch to main, you are now on branch $REPORT_BRANCH_NAME \n see status \n $REPORT_STATUS_CHECK"
-fi
 fi
 done
 
 # Check docker updates
 log "INFO" "Checking for updates to running docker images ..."
 docker_updated="false"
-for IMAGE in $(cat $PROJECT/docker-compose.yml ${OVERRIDE//-f/} minimal/docker-compose.yml | grep -v "^#" | grep "image:" | sed -e 's_^.*image: \(.*\).*$_\1_g; s_\"__g'); do
+for IMAGE in $($COMPOSE -p $PROJECT -f ./minimal/docker-compose.yml -f ./$PROJECT/docker-compose.yml $OVERRIDE config | grep "image:" | sed -e 's_^.*image: \(.*\).*$_\1_g; s_\"__g'); do
 log "INFO" "Checking for Updates of Image: $IMAGE"
 if docker pull $IMAGE | grep "Downloaded newer image"; then
 CHANGE="Image $IMAGE updated."
@@ -122,7 +116,7 @@ if [ -n "${BACKUP_DIRECTORY}" ]; then
 mkdir -p "$BACKUP_DIRECTORY"
 chown -R "$BACKUP_DIRECTORY" bridgehead;
 fi
-checkOwner "$BACKUP_DIRECTORY" bridgehead || fail_and_report 1 "Automatic maintenance failed: Wrong permissions for backup directory $(pwd)"
+checkOwner "$BACKUP_DIRECTORY" bridgehead || fail_and_report 1 "Automatic maintenance failed: Wrong permissions for backup directory $BACKUP_DIRECTORY"
 # Collect all container names that contain '-db'
 BACKUP_SERVICES="$(docker ps --filter name=-db --format "{{.Names}}" | tr "\n" "\ ")"
 log INFO "Performing automatic maintenance: Creating Backups for $BACKUP_SERVICES";
@@ -35,17 +35,20 @@ services:
 image: docker.verbis.dkfz.de/cache/samply/bridgehead-forward-proxy:latest
 environment:
 HTTPS_PROXY: ${HTTPS_PROXY_URL}
-USERNAME: ${HTTPS_PROXY_USERNAME}
+HTTPS_PROXY_USERNAME: ${HTTPS_PROXY_USERNAME}
-PASSWORD: ${HTTPS_PROXY_PASSWORD}
+HTTPS_PROXY_PASSWORD: ${HTTPS_PROXY_PASSWORD}
 tmpfs:
 - /var/log/squid
 - /var/spool/squid
 volumes:
 - /etc/bridgehead/trusted-ca-certs:/docker/custom-certs/:ro
+healthcheck:
+# Wait 1s before marking this service healthy. Required for the oauth2-proxy to talk to the OIDC provider on startup which will fail if the forward proxy is not started yet.
+test: ["CMD", "sleep", "1"]
 
 landing:
 container_name: bridgehead-landingpage
-image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:master
+image: docker.verbis.dkfz.de/cache/samply/bridgehead-landingpage:main
 labels:
 - "traefik.enable=true"
 - "traefik.http.routers.landing.rule=PathPrefix(`/`)"
@@ -55,5 +58,3 @@ services:
 HOST: ${HOST}
 PROJECT: ${PROJECT}
 SITE_NAME: ${SITE_NAME}

minimal/modules/bbmri vars (new file, 2 lines)
@@ -0,0 +1,2 @@
+OIDC_USER_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})"
+OIDC_ADMIN_GROUP="DKTK_CCP_$(capitalize_first_letter ${SITE_ID})_Verwalter"

minimal/modules/ccp vars (new file, 7 lines)
@@ -0,0 +1,7 @@
+OIDC_PRIVATE_CLIENT_ID=${SITE_ID}-private
+OIDC_PUBLIC_CLIENT_ID=${SITE_ID}-public
+# Use "test-realm-01" for testing
+OIDC_REALM="${OIDC_REALM:-master}"
+OIDC_URL="https://login.verbis.dkfz.de"
+OIDC_ISSUER_URL="${OIDC_URL}/realms/${OIDC_REALM}"
+OIDC_GROUP_CLAIM="groups"
@@ -18,11 +18,11 @@ services:
 - "forward_proxy"
 volumes:
 - /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
-- /srv/docker/bridgehead/ccp/root-new.crt.pem:/conf/root.crt.pem:ro
+- /srv/docker/bridgehead/ccp/root.crt.pem:/conf/root.crt.pem:ro
 
 dnpm-beam-connect:
 depends_on: [ dnpm-beam-proxy ]
-image: docker.verbis.dkfz.de/cache/samply/beam-connect:dnpm
+image: docker.verbis.dkfz.de/cache/samply/beam-connect:develop
 container_name: bridgehead-dnpm-beam-connect
 environment:
 PROXY_URL: http://dnpm-beam-proxy:8081
@@ -32,9 +32,14 @@ services:
 LOCAL_TARGETS_FILE: "./conf/connect_targets.json"
 HTTP_PROXY: http://forward_proxy:3128
 HTTPS_PROXY: http://forward_proxy:3128
-NO_PROXY: dnpm-beam-proxy,dnpm-backend
+NO_PROXY: dnpm-beam-proxy,dnpm-backend, host.docker.internal${DNPM_ADDITIONAL_NO_PROXY}
 RUST_LOG: ${RUST_LOG:-info}
+NO_AUTH: "true"
+TLS_CA_CERTIFICATES_DIR: ./conf/trusted-ca-certs
+extra_hosts:
+- "host.docker.internal:host-gateway"
 volumes:
+- /etc/bridgehead/trusted-ca-certs:/conf/trusted-ca-certs:ro
 - /etc/bridgehead/dnpm/local_targets.json:/conf/connect_targets.json:ro
 - /etc/bridgehead/dnpm/central_targets.json:/conf/central_targets.json:ro
 labels:
@@ -45,6 +50,10 @@ services:
 - "traefik.http.services.dnpm-connect.loadbalancer.server.port=8062"
 - "traefik.http.routers.dnpm-connect.tls=true"
 
+dnpm-echo:
+image: docker.verbis.dkfz.de/cache/samply/bridgehead-echo:latest
+container_name: bridgehead-dnpm-echo
 
 secrets:
 proxy.pem:
 file: /etc/bridgehead/pki/${SITE_ID}.priv.pem

minimal/modules/dnpm-node-compose.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
+version: "3.7"
+
+services:
+dnpm-backend:
+image: ghcr.io/kohlbacherlab/bwhc-backend:1.0-snapshot-broker-connector
+container_name: bridgehead-dnpm-backend
+environment:
+- ZPM_SITE=${ZPM_SITE}
+- N_RANDOM_FILES=${DNPM_SYNTH_NUM}
+volumes:
+- /etc/bridgehead/dnpm:/bwhc_config:ro
+- ${DNPM_DATA_DIR}:/bwhc_data
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.bwhc-backend.rule=PathPrefix(`/bwhc`)"
+- "traefik.http.services.bwhc-backend.loadbalancer.server.port=9000"
+- "traefik.http.routers.bwhc-backend.tls=true"
+
+dnpm-frontend:
+image: ghcr.io/kohlbacherlab/bwhc-frontend:2209
+container_name: bridgehead-dnpm-frontend
+links:
+- dnpm-backend
+environment:
+- NUXT_HOST=0.0.0.0
+- NUXT_PORT=8080
+- BACKEND_PROTOCOL=https
+- BACKEND_HOSTNAME=$HOST
+- BACKEND_PORT=443
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.bwhc-frontend.rule=PathPrefix(`/`)"
+- "traefik.http.services.bwhc-frontend.loadbalancer.server.port=8080"
+- "traefik.http.routers.bwhc-frontend.tls=true"

minimal/modules/dnpm-node-setup.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+if [ -n "${ENABLE_DNPM_NODE}" ]; then
+log INFO "DNPM setup detected (BwHC Node) -- will start BwHC node."
+OVERRIDE+=" -f ./$PROJECT/modules/dnpm-node-compose.yml"
+
+# Set variables required for BwHC Node. ZPM_SITE is assumed to be set in /etc/bridgehead/<project>.conf
+DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | sha1sum | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
+if [ -z "${ZPM_SITE+x}" ]; then
+log ERROR "Mandatory variable ZPM_SITE not defined!"
+exit 1
+fi
+if [ -z "${DNPM_DATA_DIR+x}" ]; then
+log ERROR "Mandatory variable DNPM_DATA_DIR not defined!"
+exit 1
+fi
+DNPM_SYNTH_NUM=${DNPM_SYNTH_NUM:-0}
+if grep -q 'traefik.http.routers.landing.rule=PathPrefix(`/landing`)' /srv/docker/bridgehead/minimal/docker-compose.override.yml 2>/dev/null; then
+echo "Override of landing page url already in place"
+else
+echo "Adding override of landing page url"
+if [ -f /srv/docker/bridgehead/minimal/docker-compose.override.yml ]; then
+echo -e ' landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
+else
+echo -e 'version: "3.7"\nservices:\n landing:\n labels:\n - "traefik.http.routers.landing.rule=PathPrefix(`/landing`)"' >> /srv/docker/bridgehead/minimal/docker-compose.override.yml
+fi
+fi
+fi
@@ -5,9 +5,18 @@ if [ -n "${ENABLE_DNPM}" ]; then
 OVERRIDE+=" -f ./$PROJECT/modules/dnpm-compose.yml"
 
 # Set variables required for Beam-Connect
-DNPM_APPLICATION_SECRET="$(echo \"This is a salt string to generate one consistent password for DNPM. It is not required to be secret.\" | openssl pkeyutl -sign -inkey /etc/bridgehead/pki/${SITE_ID}.priv.pem | base64 | head -c 30)"
 DNPM_BEAM_SECRET_SHORT="$(cat /proc/sys/kernel/random/uuid | sed 's/[-]//g' | head -c 20)"
 DNPM_BROKER_ID="broker.ccp-it.dktk.dkfz.de"
 DNPM_BROKER_URL="https://${DNPM_BROKER_ID}"
+if [ -z ${BROKER_URL_FOR_PREREQ+x} ]; then
+BROKER_URL_FOR_PREREQ=$DNPM_BROKER_URL
+log DEBUG "No Broker for clock check set; using $DNPM_BROKER_URL"
+fi
 DNPM_PROXY_ID="${SITE_ID}.${DNPM_BROKER_ID}"
+# If the DNPM_NO_PROXY variable is set, prefix it with a comma (as it gets added to a comma separated list)
+if [ -n "${DNPM_NO_PROXY}" ]; then
+DNPM_ADDITIONAL_NO_PROXY=",${DNPM_NO_PROXY}"
+else
+DNPM_ADDITIONAL_NO_PROXY=""
+fi
 fi

minimal/modules/nngm-compose.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+version: "3.7"
+volumes:
+nngm-rest:
+
+services:
+connector:
+container_name: bridgehead-connector
+image: docker.verbis.dkfz.de/ccp/nngm-rest:main
+environment:
+CTS_MAGICPL_API_KEY: ${NNGM_MAGICPL_APIKEY}
+CTS_API_KEY: ${NNGM_CTS_APIKEY}
+CRYPT_KEY: ${NNGM_CRYPTKEY}
+#CTS_MAGICPL_SITE: ${SITE_ID}TODO
+restart: always
+labels:
+- "traefik.enable=true"
+- "traefik.http.routers.connector.rule=PathPrefix(`/nngm-connector`)"
+- "traefik.http.middlewares.connector_strip.stripprefix.prefixes=/nngm-connector"
+- "traefik.http.services.connector.loadbalancer.server.port=8080"
+- "traefik.http.routers.connector.tls=true"
+- "traefik.http.routers.connector.middlewares=connector_strip,auth-nngm"
+volumes:
+- nngm-rest:/var/log
+
+traefik:
+labels:
+- "traefik.http.middlewares.auth-nngm.basicauth.users=${NNGM_AUTH}"

minimal/modules/nngm-setup.sh (new file, 6 lines)
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+if [ -n "$NNGM_CTS_APIKEY" ]; then
+log INFO "nNGM setup detected -- will start nNGM Connector."
+OVERRIDE+=" -f ./$PROJECT/modules/nngm-compose.yml"
+fi