forked from uaf-arctic-eco-modeling/dvm-dos-tem
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
123 lines (107 loc) · 3.62 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
version: '3.2'
# Docker Compose file for coordinating several dvmdostem containers.
#
# First you should build your images. This can be done individually or you can
# use the docker-build-wrapper.sh script.
#
# Next create a .env file where you set the following variables:
#   $ echo "DDT_INPUT_CATALOG=/some/path/to/your/input-catalog" >> .env
#   $ echo "DDT_WORKFLOWS=/some/path/to/your/workflows" >> .env
#   $ echo "V_TAG=$(git describe)" >> .env
#
# Next start the containers (detached):
#   $ docker compose up -d
#
# Then you can run various programs inside the containers, see examples below.

services:

  # Some general dev tools - not expected to be used directly.
  cpp-dev:
    build: .
    image: cpp-dev:${V_TAG}

  # The general dvmdostem dev environment. Intended to be used for
  # most development work by mounting volumes of source code into
  # the resulting container - essentially sharing the source code
  # folders from the host to the container so they can be edited
  # on the host and used w/in the container run-time.
  dvmdostem-dev:
    image: dvmdostem-dev:${V_TAG}
    build: .
    tty: true
    ports:
      - "7001:7001"
    volumes:
      - sourcecode:/work
      - inputcatalog:/data/input-catalog
      - workflows:/data/workflows

  # This is more of a stand-alone image, just meant to serve as a
  # compile time environment for creating the lean run image.
  # Copies source code into the image so that it doesn't need
  # volumes to run container and run make. Then once the dvmdostem
  # binary exists in this image, it can be copied into the lean
  # run image.
  dvmdostem-build:
    image: dvmdostem-build:${V_TAG}
    build: .
    #command: tail -F /dev/null # keeps container running

  # The lean production image.
  dvmdostem-run:
    image: dvmdostem-run:${V_TAG}
    build: .
    tty: true
    ports:
      - "7002:7002"
    volumes:
      - inputcatalog:/data/input-catalog
      - workflows:/data/workflows

  # Auxiliary stuff that can't run on other containers/images due to difficult
  # build dependencies (GDAL mainly).
  dvmdostem-mapping-support:
    image: dvmdostem-mapping-support:${V_TAG}
    build: .
    tty: true
    ports:
      - "7003:7003"
    volumes:
      - sourcecode:/work
      - inputcatalog:/data/input-catalog
      - workflows:/data/workflows

# Named volumes that bind-mount host directories. The host paths come from
# the .env file (DDT_INPUT_CATALOG, DDT_WORKFLOWS) and the current directory.
# NOTE(review): `type: none` + `o: bind` requires the host path to exist
# before `docker compose up` — confirm the .env paths are valid.
volumes:
  sourcecode:
    driver_opts:
      type: none
      device: '${PWD}'
      o: bind
  inputcatalog:
    driver_opts:
      type: none
      device: '${DDT_INPUT_CATALOG}'
      o: bind
  workflows:
    driver_opts:
      type: none
      device: '${DDT_WORKFLOWS}'
      o: bind

## EXAMPLES
# In general there are 2 ways you can operate:
#   1) run a specific command in the docker container
#   2) start a bash terminal in the container and work with an interactive
#      prompt in the container
#
# Runs the model in the service. The service is not a constantly running
# process, so the container starts, runs, and exits. Doesn't handle Ctrl-C.
#   $ docker-compose run dvmdostem-run dvmdostem --help
#
# Runs a basic ls in the service:
#   $ docker-compose run dvmdostem-run ls /data/input-catalog
#
# Run in existing container (using exec as opposed to run does this):
#   $ docker-compose exec dvmdostem-run dvmdostem --help
#
# Several ways to compile:
#   1) The parallel option seems very finicky...
#      Mac Docker desktop seems to freeze a lot.
#      $ docker-compose exec dvmdostem-run make -j2
#
#   2) Sign into an interactive session and run make:
#      $ docker-compose exec dvmdostem-run bash
#      $ develop@256519d33e7c:/work$ make
#
# Run the io-view tool:
#   $ docker compose exec dvmdostem-mapping-support bokeh serve scripts/io_view.py