diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 534998eb8..9a7d9df3e 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -45,7 +45,7 @@ RUN apt-get -y install snmp
#-------------------Install Kafka----------------------------------
RUN mkdir ~/Downloads
-RUN curl "https://archive.apache.org/dist/kafka/3.6.0/kafka_2.12-3.6.0.tgz" -o ~/Downloads/kafka.tgz
+RUN curl -fSL "https://archive.apache.org/dist/kafka/3.6.1/kafka_2.12-3.6.1.tgz" -o ~/Downloads/kafka.tgz
RUN mkdir ~/kafka \
&& cd ~/kafka \
&& tar -xvzf ~/Downloads/kafka.tgz --strip 1
diff --git a/.devcontainer/kafka b/.devcontainer/kafka
index b00dec3dd..be2be9883 100644
--- a/.devcontainer/kafka
+++ b/.devcontainer/kafka
@@ -8,8 +8,6 @@ export LOG_DIR=/var/log/kafka
case "$1" in
start)
# Start daemon.
- echo "Starting Zookeeper";
- $DAEMON_PATH/bin/zookeeper-server-start.sh -daemon $DAEMON_PATH/config/zookeeper.properties
echo "Starting Kafka";
$DAEMON_PATH/bin/kafka-server-start.sh -daemon $DAEMON_PATH/config/server.properties
;;
@@ -17,9 +15,6 @@ case "$1" in
# Stop daemons.
echo "Shutting down Kafka";
$DAEMON_PATH/bin/kafka-server-stop.sh
- sleep 2
- echo "Shutting down Zookeeper";
- $DAEMON_PATH/bin/zookeeper-server-stop.sh
;;
restart)
$0 stop
@@ -27,13 +22,6 @@ case "$1" in
$0 start
;;
status)
- pid=`ps ax | grep -i 'org.apache.zookeeper.server' | grep -v grep | awk '{print $1}'`
- if [ -n "$pid" ]
- then
- echo "Zookeeper is Running as PID: $pid"
- else
- echo "Zookeeper is not Running"
- fi
pid=`ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]
then
diff --git a/.devcontainer/post-create.sh b/.devcontainer/post-create.sh
index 38abca974..21a448f81 100644
--- a/.devcontainer/post-create.sh
+++ b/.devcontainer/post-create.sh
@@ -1,8 +1,10 @@
cd ~/kafka/
-# start zookeeper
-bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
+KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
+
+bin/kafka-storage.sh format -t "$KAFKA_CLUSTER_ID" -c config/kraft/server.properties
+
# start kafka
-bin/kafka-server-start.sh -daemon config/server.properties
+bin/kafka-server-start.sh -daemon config/kraft/server.properties
# wait 2 seconds for the server to start and be able to add partitions
sleep 2s
# add topics
diff --git a/.gitignore b/.gitignore
index daaaca405..dcf38f29c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ settings.xml
/.settings
.metadata
*.pyc
+/kafka/
####################
### jpo-ode-svcs ###
diff --git a/docker-compose-ppm-nsv.yml b/docker-compose-ppm-nsv.yml
index 06739d438..d3b62a2b1 100644
--- a/docker-compose-ppm-nsv.yml
+++ b/docker-compose-ppm-nsv.yml
@@ -4,35 +4,32 @@
version: '3'
services:
- zookeeper:
- image: wurstmeister/zookeeper
- ports:
- - "2181:2181"
-
kafka:
- image: wurstmeister/kafka
+ image: bitnami/kafka:latest
+ hostname: kafka
ports:
- "9092:9092"
+ volumes:
+ - "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
- DOCKER_HOST_IP: ${DOCKER_HOST_IP}
- ZK: ${DOCKER_HOST_IP}:2181
- KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ENABLE_KRAFT: "yes"
+ KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+ KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+ KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+ ALLOW_PLAINTEXT_LISTENER: "yes"
+ KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
- KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1"
- KAFKA_DELETE_TOPIC_ENABLED: "true"
- KAFKA_CLEANUP_POLICY: "delete" # delete old logs
- KAFKA_LOG_RETENTION_HOURS: 2
- KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
- KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
- KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
- # This configuration controls the period of time after
- # which Kafka will force the log to roll even if the segment
- # file isn't full to ensure that retention can delete or compact old data.
- depends_on:
- - zookeeper
- volumes:
- - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
+ KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
+ KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+ KAFKA_CFG_LOG_RETENTION_HOURS: 2
+ logging:
+ options:
+ max-size: "10m"
+ max-file: "5"
ode:
build: .
@@ -193,7 +190,6 @@ services:
SDW_PASSWORD: ${SDW_PASSWORD}
depends_on:
- kafka
- - zookeeper
- ode
sec:
diff --git a/docker-compose.yml b/docker-compose.yml
index 91551538e..88f208cc0 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,41 +1,30 @@
version: '3'
services:
- zookeeper:
- image: wurstmeister/zookeeper
- ports:
- - "2181:2181"
- logging:
- options:
- max-size: "10m"
- max-file: "5"
-
kafka:
- image: wurstmeister/kafka
+ image: bitnami/kafka:latest
+ hostname: kafka
ports:
- "9092:9092"
+ volumes:
+ - "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
- DOCKER_HOST_IP: ${DOCKER_HOST_IP}
- ZK: ${DOCKER_HOST_IP}:2181
- KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ENABLE_KRAFT: "yes"
+ KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+ KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+ KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+ ALLOW_PLAINTEXT_LISTENER: "yes"
+ KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
- KAFKA_DELETE_TOPIC_ENABLED: "true"
- KAFKA_CLEANUP_POLICY: "delete" # delete old logs
- KAFKA_LOG_RETENTION_HOURS: 2
- KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
- KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
- KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
- # This configuration controls the period of time after
- # which Kafka will force the log to roll even if the segment
- # file isn't full to ensure that retention can delete or compact old data.
- depends_on:
- - zookeeper
- volumes:
- - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
+ KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+ KAFKA_CFG_LOG_RETENTION_HOURS: 2
logging:
options:
- max-size: "10m"
+ max-size: "10m"
max-file: "5"
ode:
@@ -254,7 +243,6 @@ services:
SDW_API_KEY: ${SDW_API_KEY}
depends_on:
- kafka
- - zookeeper
- ode
logging:
options:
diff --git a/docs/Architecture.md b/docs/Architecture.md
index 2ac9b4d86..2fea46037 100644
--- a/docs/Architecture.md
+++ b/docs/Architecture.md
@@ -371,7 +371,7 @@ Docker is utilized as the primary deployment mechanism to
compartmentalize each of the designed micro-services into separate
containers. Docker is used to package all components in a composite of
containers each running a distinct service. The ODE application runs in
-one container and other major frameworks such as ZooKeeper and Kafka run
+one container and other major frameworks such as Kafka run
in their own separate containers.
@@ -391,4 +391,3 @@ in their own separate containers.
| SCP | Secure Copy |
| US DOT | Unites States Department of Transportation |
| WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
-| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |
diff --git a/docs/UserGuide.md b/docs/UserGuide.md
index f65d16f71..e8028116e 100644
--- a/docs/UserGuide.md
+++ b/docs/UserGuide.md
@@ -205,7 +205,6 @@ This document is intended for use by the ODE client applications.
| TIM | Traveler Information Message |
| US DOT | Unites States Department of Transportation |
| WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
-| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |
@@ -579,7 +578,7 @@ ODE uses Logback logging framework to log application and data events.
#### 7.2.3 - Steps to turn on/off logging during application runtime.
-1. Start ode, Kafka, and Zookeeper as normal.
+1. Start ode and Kafka as normal.
2. In a new terminal window run \"jconsole\".
diff --git a/docs/dockerhub.md b/docs/dockerhub.md
index d8cedd07f..7ac5bf2cc 100644
--- a/docs/dockerhub.md
+++ b/docs/dockerhub.md
@@ -18,7 +18,6 @@ The image expects the following environment variables to be set:
## Direct Dependencies
The ODE will fail to start up if the following containers/services are not already present:
- Kafka or Confluent & related requirements
-- Zookeeper (relied on by Kafka when run locally)
## Indirect Dependencies
Some functionality will be unreachable without the participation of the following programs (except by directly pushing to kafka topics):
@@ -35,42 +34,31 @@ For further configuration options, see the [GitHub repository](https://github.co
```
version: '3'
services:
- zookeeper:
- image: wurstmeister/zookeeper
- ports:
- - "2181:2181"
- logging:
- options:
- max-size: "10m"
- max-file: "5"
-
kafka:
- image: wurstmeister/kafka
+ image: bitnami/kafka:latest
+ hostname: kafka
ports:
- "9092:9092"
+ volumes:
+ - "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
- DOCKER_HOST_IP: ${DOCKER_HOST_IP}
- ZK: ${DOCKER_HOST_IP}:2181
- KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ENABLE_KRAFT: "yes"
+ KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+ KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+ KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+ ALLOW_PLAINTEXT_LISTENER: "yes"
+ KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
- KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1"
- KAFKA_DELETE_TOPIC_ENABLED: "true"
- KAFKA_CLEANUP_POLICY: "delete" # delete old logs
- KAFKA_LOG_RETENTION_HOURS: 2
- KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
- KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
- KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
- # This configuration controls the period of time after
- # which Kafka will force the log to roll even if the segment
- # file isn't full to ensure that retention can delete or compact old data.
- depends_on:
- - zookeeper
- volumes:
- - ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock
+ KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
+ KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+ KAFKA_CFG_LOG_RETENTION_HOURS: 2
logging:
options:
- max-size: "10m"
+ max-size: "10m"
max-file: "5"
ode:
diff --git a/jpo-ode-consumer-example/README.md b/jpo-ode-consumer-example/README.md
index 311d14ad4..a23d49453 100644
--- a/jpo-ode-consumer-example/README.md
+++ b/jpo-ode-consumer-example/README.md
@@ -49,7 +49,7 @@ The IP used is the location of the Kafka endpoints.
####Create, alter, list, and describe topics.
```
-kafka-topics --zookeeper 192.168.1.151:2181 --list
+kafka-topics --bootstrap-server=192.168.1.151:9092 --list
sink1
t1
t2
@@ -58,11 +58,11 @@ t2
####Read data from a Kafka topic and write it to standard output.
```
-kafka-console-consumer --zookeeper 192.168.1.151:2181 --topic topic.J2735Bsm
+kafka-console-consumer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
```
####Read data from standard output and write it to a Kafka topic.
```
-kafka-console-producer --broker-list 192.168.1.151:9092 --topic topic.J2735Bsm
+kafka-console-producer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
```
diff --git a/quickstart-compose.yml b/quickstart-compose.yml
index d131d27bd..725e06303 100644
--- a/quickstart-compose.yml
+++ b/quickstart-compose.yml
@@ -2,35 +2,33 @@
version: '3'
services:
- zookeeper:
- image: wurstmeister/zookeeper
- ports:
- - "2181:2181"
-
kafka:
- image: wurstmeister/kafka
+ image: bitnami/kafka:latest
+ hostname: kafka
ports:
- "9092:9092"
+ volumes:
+ - "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
- DOCKER_HOST_IP: ${DOCKER_HOST_IP}
- ZK: ${DOCKER_HOST_IP}:2181
- KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ENABLE_KRAFT: "yes"
+ KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+ KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+ KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+ ALLOW_PLAINTEXT_LISTENER: "yes"
+ KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
- KAFKA_DELETE_TOPIC_ENABLED: "true"
- KAFKA_CLEANUP_POLICY: "delete" # delete old logs
- KAFKA_LOG_RETENTION_HOURS: 2
- KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
- KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
- KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
- # This configuration controls the period of time after
- # which Kafka will force the log to roll even if the segment
- # file isn't full to ensure that retention can delete or compact old data.
- depends_on:
- - zookeeper
- volumes:
- - ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock
-
+ KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
+ KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+ KAFKA_CFG_LOG_RETENTION_HOURS: 2
+ logging:
+ options:
+ max-size: "10m"
+ max-file: "5"
+
ode:
build: .
image: jpoode_ode:latest