diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index e9f167f03..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,56 +0,0 @@ -version: 2.1 - -# Copyright (C) 2018-2020 LEIDOS. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -jobs: - build: - docker: - - image: 'cimg/openjdk:21.0.2-node' - steps: - - checkout - - run: - name: Initialize Submodules - command: | - set -x - pwd - ls - git submodule init - git submodule update - cd asn1_codec - git submodule init - git submodule update - - run: - name: Analyze on SonarCloud - command: | - echo "URL ${CIRCLE_PULL_REQUEST}" - if [ -z "${CIRCLE_PULL_REQUEST}" ]; then - echo "Non-PR Build Detected. Running analysis on ${CIRCLE_BRANCH}" - mvn -e -X clean org.jacoco:jacoco-maven-plugin:prepare-agent package sonar:sonar - exit 0; - fi - echo "PR branch ${CIRCLE_BRANCH}" - echo "Repo name ${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" - echo "URL ${CIRCLE_PULL_REQUEST}" - export PR_NUM=`echo ${CIRCLE_PULL_REQUEST} | cut -d'/' -f7` - echo "PR number ${PR_NUM}" - export BASE_BRANCH_URL="https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUM}" - export TARGET_BRANCH=$(curl "$BASE_BRANCH_URL" | jq '.base.ref' | tr -d '"') - echo "Target Branch = ${TARGET_BRANCH}" - mvn -e -X clean org.jacoco:jacoco-maven-plugin:prepare-agent package sonar:sonar -Dsonar.pullrequest.base=${TARGET_BRANCH} -Dsonar.pullrequest.branch=${CIRCLE_BRANCH} -Dsonar.pullrequest.key=${PR_NUM} -workflows: - main: - jobs: - - build: - context: SonarCloud diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 534998eb8..9a7d9df3e 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -45,7 +45,7 @@ RUN apt-get -y install snmp #-------------------Install Kafka---------------------------------- RUN mkdir ~/Downloads -RUN curl "https://archive.apache.org/dist/kafka/3.6.0/kafka_2.12-3.6.0.tgz" -o ~/Downloads/kafka.tgz +RUN curl "https://archive.apache.org/dist/kafka/3.6.1/kafka_2.12-3.6.1.tgz" -o ~/Downloads/kafka.tgz RUN mkdir ~/kafka \ && cd ~/kafka \ && tar -xvzf ~/Downloads/kafka.tgz --strip 1 diff --git a/.devcontainer/kafka b/.devcontainer/kafka index b00dec3dd..be2be9883 100644 --- a/.devcontainer/kafka +++ b/.devcontainer/kafka @@ -8,8 +8,6 @@ export LOG_DIR=/var/log/kafka case "$1" in start) # Start daemon. - echo "Starting Zookeeper"; - $DAEMON_PATH/bin/zookeeper-server-start.sh -daemon $DAEMON_PATH/config/zookeeper.properties echo "Starting Kafka"; $DAEMON_PATH/bin/kafka-server-start.sh -daemon $DAEMON_PATH/config/server.properties ;; @@ -17,9 +15,6 @@ case "$1" in # Stop daemons. 
echo "Shutting down Kafka"; $DAEMON_PATH/bin/kafka-server-stop.sh - sleep 2 - echo "Shutting down Zookeeper"; - $DAEMON_PATH/bin/zookeeper-server-stop.sh ;; restart) $0 stop @@ -27,13 +22,6 @@ case "$1" in $0 start ;; status) - pid=`ps ax | grep -i 'org.apache.zookeeper.server' | grep -v grep | awk '{print $1}'` - if [ -n "$pid" ] - then - echo "Zookeeper is Running as PID: $pid" - else - echo "Zookeeper is not Running" - fi pid=`ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}'` if [ -n "$pid" ] then diff --git a/.devcontainer/post-create.sh b/.devcontainer/post-create.sh index 38abca974..21a448f81 100644 --- a/.devcontainer/post-create.sh +++ b/.devcontainer/post-create.sh @@ -1,8 +1,10 @@ cd ~/kafka/ -# start zookeeper -bin/zookeeper-server-start.sh -daemon config/zookeeper.properties +KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)" + +bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties + # start kafka -bin/kafka-server-start.sh -daemon config/server.properties +bin/kafka-server-start.sh -daemon config/kraft/server.properties # wait 2 seconds for the server to start and be able to add partitions sleep 2s # add topics diff --git a/.github/workflows/dockerhub.yml b/.github/workflows/dockerhub.yml index 04eb9c2ab..5db9c2cd2 100644 --- a/.github/workflows/dockerhub.yml +++ b/.github/workflows/dockerhub.yml @@ -20,8 +20,13 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Replcae Docker tag + id: set_tag + run: echo "TAG=$(echo ${GITHUB_REF##*/} | sed 's/\//-/g')" >> $GITHUB_ENV + - name: Build uses: docker/build-push-action@v3 with: push: true - tags: usdotjpoode/jpo-ode:${{ github.ref_name }} + tags: usdotjpoode/jpo-ode:${{ env.TAG }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index daaaca405..dcf38f29c 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ settings.xml /.settings .metadata *.pyc +/kafka/ #################### ### jpo-ode-svcs ### diff --git a/README.md b/README.md index a39f562b6..45731e912 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -| Travis Build Status | Sonar Code Quality | Sonar Code Coverage | -|---------------------|---------------------|---------------------| - [![Build Status](https://travis-ci.org/usdot-jpo-ode/jpo-ode.svg?branch=master)](https://travis-ci.org/usdot-jpo-ode/jpo-ode) | [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=usdot.jpo.ode%3Ajpo-ode&metric=alert_status)](https://sonarcloud.io/dashboard?id=usdot.jpo.ode%3Ajpo-ode) | [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=usdot.jpo.ode%3Ajpo-ode&metric=coverage)](https://sonarcloud.io/dashboard?id=usdot.jpo.ode%3Ajpo-ode) | +| Sonar Code Quality | Sonar Code Coverage | +|---------------------|---------------------| +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=usdot.jpo.ode%3Ajpo-ode&metric=alert_status)](https://sonarcloud.io/dashboard?id=usdot.jpo.ode%3Ajpo-ode) | [![Coverage](https://sonarcloud.io/api/project_badges/measure?project=usdot.jpo.ode%3Ajpo-ode&metric=coverage)](https://sonarcloud.io/dashboard?id=usdot.jpo.ode%3Ajpo-ode) | # jpo-ode @@ -15,36 +15,38 @@ _Figure 1: ODE Dataflows_ **Documentation:** 1. [ODE Architecture](docs/Architecture.md) -2. [ODE User Guide](docs/UserGuide.md) -3. [ODE Output Schema Reference Guide](docs/ODE_Output_Schema_Reference.docx) -4. [ODE REST API Guide](https://usdot-jpo-ode.github.io/) -5. 
[ODE Smoke Tests](https://github.com/usdot-jpo-ode/jpo-ode/wiki/JPO-ODE-QA-Documents) +1. [ODE User Guide](docs/UserGuide.md) +1. [ODE Output Schema Reference Guide](docs/ODE_Output_Schema_Reference.docx) +1. [ODE REST API Guide](https://usdot-jpo-ode.github.io/) +1. [ODE Smoke Tests](https://github.com/usdot-jpo-ode/jpo-ode/wiki/JPO-ODE-QA-Documents) All stakeholders are invited to provide input to these documents. To provide feedback, we recommend that you create an "issue" in this repository (). You will need a GitHub account to create an issue. If you don’t have an account, a dialog will be presented to you to create one at no cost. --- - + + +**Release Notes** -## Release Notes The current version and release history of the JPO-ODE: [ODE Release Notes]() -## Table of Contents - -1. [Usage Example](#usage-example) -2. [Configuration](#configuration) -3. [Installation](#installation) -4. [File Manifest](#file-manifest) -5. [Development Setup](#development-setup) -6. [Release History](#release-history) -7. [Contact Information](#contact-information) -8. [Contributing](#contributing) -9. [Known Bugs](#known-bugs) -10. [Credits and Acknowledgement](#credits-and-acknowledgement) -11. [Code.gov Registration Info](#codegov-registration-info) -12. [Kubernetes](#kubernetes) -13. [Sonar Cloud](#sonar-token-configuration) ([Documentation](https://sonarcloud.io/documentation/user-guide/user-token/)) -14. [SNMP](#snmp) +**Table of Contents** + +1. [Usage Example](#usage-example) +1. [Configuration](#configuration) +1. [Installation](#installation) +1. [File Manifest](#file-manifest) +1. [Development Setup](#development-setup) +1. [Release History](#release-history) +1. [Contact Information](#contact-information) +1. [Contributing](#contributing) +1. [Credits and Acknowledgement](#credits-and-acknowledgement) +1. [Code.gov Registration Info](#codegov-registration-info) +1. [Kubernetes](#kubernetes) +1. [Sonar Cloud](#sonar-token-configuration) ([Documentation](https://sonarcloud.io/documentation/user-guide/user-token/)) +1. [SNMP](#snmp) + + - + ## 1. Usage Example @@ -105,13 +107,15 @@ Supported message types: [Back to top](#toc) + + - + ## 2. Configuration @@ -124,10 +128,10 @@ Supported message types: - Windows 10/11 Professional (Professional version required for Docker virtualization) - OSX 13 -The ODE software can run on most standard Window, Mac, or Linux based computers with +The ODE software can run on most standard Windows, Mac, or Linux based computers with Pentium core processors. Performance of the software will be based on the computing power and available RAM in the system. Larger data flows can require much larger space requirements depending on the -amount of data being processed by the software. The ODE software application was developed using the open source programming language Java. If running the ODE outside of Docker, the application requires the Java 8 runtime environment. +amount of data being processed by the software. The ODE software application was developed using the open source programming language Java. If running the ODE outside of Docker, the application requires the Java 21 runtime environment. ### Software Prerequisites @@ -153,7 +157,7 @@ The following guide contains information about the data flow diagrams for the OD [See here](https://github.com/usdot-jpo-ode/jpo-ode/wiki/Docker-fix-for-SSL-issues-due-to-corporate-network) for instructions to fix this. 
- Additionally `git` commands may fail for similar reasons, you can fix this by running `export GIT_SSL_NO_VERIFY=1`. - Windows users may find more information on installing and using Docker [here](https://github.com/usdot-jpo-ode/jpo-ode/wiki/Docker-management). -- Users interested in Kafka may find more guidance and configuration options [here](docker/kafka/README.md). +- A compatibility guide containing recommendations for suitable versions of submodules for each main module version can be found [here](docs/compatibility.md). **Configuration:** @@ -162,17 +166,20 @@ If you wish to change the application properties, such as change the location of ODE configuration can be customized for every deployment environment using environment variables. These variables can either be set locally or using the [sample.env](sample.env) file. Instructions for how to use this file can be found [here](https://github.com/usdot-jpo-ode/jpo-ode/wiki/Using-the-.env-configuration-file). **Important!** + You must rename `sample.env` to `.env` for Docker to automatically read the file. This file will contain AWS access keys and other private information. Do not push this file to source control. [Back to top](#toc) + + - + ## 3. Installation @@ -243,6 +250,7 @@ cp ./J2735_201603DA.ASN asn1_codec/asn1c_combined/ #### Step 2 - Build and run the application **Notes:** + - Docker builds may fail if you are on a corporate network due to DNS resolution errors. [See here](https://github.com/usdot-jpo-ode/jpo-ode/wiki/Docker-fix-for-SSL-issues-due-to-corporate-network) for instructions to fix this. - In order for Docker to automatically read the environment variable file, you must rename it from `sample.env` to `.env`. **This file will contain private keys, do not put add it to version control.** @@ -256,34 +264,34 @@ Copy the following files from `jpo-ode` directory into your DOCKER_SHARED_VOLUME Navigate to the root directory of the jpo-ode project and run the following command: ```bash -docker-compose up --build -d -docker-compose ps +docker compose up --build -d +docker compose ps ``` To bring down the services and remove the running containers run the following command: ```bash -docker-compose down +docker compose down ``` For a fresh restart, run: ```bash -docker-compose down -docker-compose up --build -d -docker-compose ps +docker compose down +docker compose up --build -d +docker compose ps ``` To completely rebuild from scratch, run: ```bash -docker-compose down -docker-compose rm -fvs -docker-compose up --build -d -docker-compose ps +docker compose down +docker compose rm -fvs +docker compose up --build -d +docker compose ps ``` -Check the deployment by running `docker-compose ps`. You can start and stop containers using `docker-compose start` and `docker-compose stop` commands. -If using the multi-broker docker-compose file, you can change the scaling by running `docker-compose scale =n` where container is the container you would like to scale and n is the number of instances. For example, `docker-compose scale kafka=3`. +Check the deployment by running `docker compose ps`. You can start and stop containers using `docker compose start` and `docker compose stop` commands. +If using the multi-broker docker compose file, you can change the scaling by running `docker compose scale =n` where container is the container you would like to scale and n is the number of instances. For example, `docker compose scale kafka=3`. 
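Note: some Docker Compose releases may not provide a standalone `scale` command; in that case the same effect can typically be achieved with the `--scale` flag on `up`. A minimal sketch, assuming the service to scale is named `kafka` as in the example above:

```bash
# Bring the stack up (or adjust a running stack) with three kafka instances.
# The service name and instance count are illustrative; match them to your compose file.
docker compose up --build -d --scale kafka=3

# Confirm how many containers are running for each service
docker compose ps
```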
#### asn1_codec Module (ASN.1 Encoder and Decoder) @@ -313,15 +321,15 @@ cd $BASE_PPM_DIR/jpo-cvdp/build $ ./bsmjson_privacy -c ../config/ppm.properties ``` -# Confluent Cloud Integration +### Confluent Cloud Integration Rather than using a local kafka instance, the ODE can utilize an instance of kafka hosted by Confluent Cloud via SASL. -## Environment variables +#### Environment variables -### Purpose & Usage +##### Purpose & Usage - The DOCKER_HOST_IP environment variable is used to communicate with the bootstrap server that the instance of Kafka is running on. @@ -331,7 +339,7 @@ Rather than using a local kafka instance, the ODE can utilize an instance of kaf -### Values +##### Values In order to utilize Confluent Cloud: - DOCKER_HOST_IP must be set to the bootstrap server address (excluding the port) @@ -344,25 +352,90 @@ In order to utilize Confluent Cloud: -## CC Docker Compose File +#### CC Docker Compose File There is a provided docker-compose file (docker-compose-confluent-cloud.yml) that passes the above environment variables into the container that gets created. Further, this file doesn't spin up a local kafka instance since it is not required. -## Note +#### Note This has only been tested with Confluent Cloud but technically all SASL authenticated Kafka brokers can be reached using this method. [Back to top](#toc) - + +# MongoDB Integration + +## Description and Configuration + +To sink streamed kafka topic data to a MongoDB database, a kafka connect and MongoDB instance can be deployed for the ODE. By running the provided docker compose [file](./docker-compose-mongo.yml) the following topics will be streamed to MongoDB: + +- OdeRawEncodedBSMJson +- OdeBsmJson +- OdeRawEncodedMAPJson +- OdeMapJson +- OdeRawEncodedSPATJson +- OdeSpatJson +- OdeRawEncodedTIMJson +- OdeTimJson +- OdeRawEncodedPsmJson +- OdePsmJson + +The configuration that defines this is in the jpo-s3-deposit submodule [here](jpo-s3-deposit\mongo-connector\connect_start.sh). This script is attached to the `connect` container as a volume and if you would like to sink different topics then feel free to make a copy of the `connect_start.sh` script and attach it to the `connect` container to the following path: `/scripts/connect_start.sh`. + +## Environment variables + +### Purpose & Usage + +- The `MONGO_IP` environment variable is used to define the IP address of the MongoDB container. This can be configured to use a remote MongoDB instance instead of using the provided docker deployed container. + +- The `MONGO_DB_NAME` environmental variable defines the name of the DB created in MongoDB. This variable is used for both configuring user permission access as well as a destination for the connectors defined in the `connect` container. + +- The `MONGO_ADMIN_DB_USER` and `MONGO_ADMIN_DB_PASS` define the credentials for the `admin` MongoDB user. This user has full control of the cluster and the password must be securely set for production deployments. + +- The `MONGO_ODE_DB_USER` and `MONGO_ODE_DB_PASS` define the credentials for the `ode` MongoDB user. This user has `readWrite` permissions to the `MONGO_DB_NAME` database. + +- The `MONGO_URI` environmental variable contains the complete connection string used to connect to the MongoDB when creating connectors in the `connect` container. + +- The `MONGO_COLLECTION_TTL` environmental variable configures the Time To Live (TTL) for created TTL indexes. Setting this value too high will result in much more storage usage. 
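As a hedged illustration of what such a TTL index looks like, the snippet below creates one by hand with `mongosh`. The connection string, collection name, timestamp field, and expiry value are all assumptions for illustration only; the actual indexes are created by the provided `scripts/mongo/create_indexes.js` script.

```bash
# Illustrative only: create a TTL index on a sunk collection so documents expire
# automatically. Credentials, DB/collection names, the field name, and the 7-day
# expiry are placeholders -- the deployed indexes come from scripts/mongo/create_indexes.js.
mongosh "mongodb://ode:changeme@localhost:27017/ode" --eval '
  db.OdeBsmJson.createIndex(
    { "recordGeneratedAt": 1 },     // timestamp field assumed for illustration
    { expireAfterSeconds: 604800 }  // 7 days, analogous to MONGO_COLLECTION_TTL
  )'
```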
+ +### Values +In order to utilize MongoDB: + +- `MONGO_IP` must be set to the IP address of the MongoDB container. This can be left as `${DOCKER_HOST_IP}` for deployments using the provided MongoDB instance included in the docker-compose file. + +- `MONGO_DB_NAME` configures the created DB name in MongoDB. + +- `MONGO_ADMIN_DB_USER` configures the MongoDB admin user's name. + +- `MONGO_ADMIN_DB_PASS` configures the MongoDB admin user's password. This must be changed to a more secure password for production deployments. + +- `MONGO_ODE_DB_USER` configures the username of the initialized user with `readWrite` access to the initialized database. + +- `MONGO_ODE_DB_PASS` configures the password of the initialized user with `readWrite` access to the initialized database. + +- `MONGO_URI` defines the connection URI used by the kafka connect instance. MongoDB connection URI options are documented [here](https://www.mongodb.com/docs/manual/reference/connection-string/). + +- `MONGO_COLLECTION_TTL` sets the Time To Live (TTL) for the created TTL indexes. + + +## Mongo Docker Compose File + +There is a provided docker-compose [file](docker-compose-mongo.yml) that spins up a MongoDB instance with a kafka connect service. There is also an initialization container that configures the RBAC and replica set of the MongoDB container. + +## Note + +Kafka Connect is being used for MongoDB in this implementation, but it can interact with many other types of databases. Further documentation for Kafka Connect is available [here](https://docs.confluent.io/platform/current/connect/index.html). + +[Back to top](#toc) + - + ## 4. File Manifest @@ -383,11 +456,11 @@ This section outlines the software technology stacks of the ODE. ### Continuous Integration -- [TravisCI](https://travis-ci.org/) +- [GitHub Workflows](https://docs.github.com/en/actions/using-workflows) ### ODE Code -- [Java 8](https://openjdk.java.net/) +- [Java 21](https://openjdk.java.net/) - [Maven](https://maven.apache.org/) - [Spring Boot](http://spring.io/projects/spring-boot) - [Logback](https://logback.qos.ch/) @@ -405,13 +478,15 @@ This section outlines the software technology stacks of the ODE. [Back to top](#toc) + + - + ## 5. Development Setup @@ -422,23 +497,26 @@ Install the IDE of your choice: * Eclipse: [https://eclipse.org/](https://eclipse.org/) * STS: [https://spring.io/tools/sts/all](https://spring.io/tools/sts/all) * IntelliJ: [https://www.jetbrains.com/idea/](https://www.jetbrains.com/idea/) +* VSCode: [https://code.visualstudio.com/](https://code.visualstudio.com/) ### Continuous Integration -* TravisCI: +See the [GitHub Workflows](.github/workflows/) defined for this project. ### Dev Container Environment The project can be reopened inside of a dev container in VSCode. This environment should have all of the necessary dependencies to debug the ODE and its submodules. When attempting to run scripts in this environment, it may be necessary to make them executable with "chmod +x" first. [Back to top](#toc) + + - + ## 6. Release History @@ -446,13 +524,15 @@ The project can be reopened inside of a dev container in VSCode. This environmen [Back to top](#toc) + + - + ## 7. Contact Information @@ -471,13 +551,15 @@ permissions and limitations under the [License](http://www.apache.org/licenses/L [Back to top](#toc) + + - + ## 8.
Contributing @@ -493,67 +575,23 @@ Please read our [contributing guide](docs/contributing_guide.md) to learn about - - Security services repository on GitHub (public) - +- SDW Depositor Module on GitHub (public) + - - ODE Output Validatory Library (public) - https://github.com/usdot-jpo-ode/ode-output-validator-library -### Agile Project Management - Jira - - -### Wiki - Confluence - - -### Continuous Integration and Delivery - - -
Using Travis for your build - - -To allow Travis run your build when you push your changes to your public fork of the jpo-ode repository, you must define the following secure environment variable using Travis CLI (). - -Run: - -``` -travis login --org -``` -Enter personal github account credentials. - -In order to allow Sonar to run, personal key must be added with this command: -(Key can be obtained from the JPO-ODE development team) - -``` -travis env set SONAR_SECURITY_TOKEN -pr / -``` -
-
- ### Static Code Analysis -[Back to top](#toc) - - - ### Quality Assurance Code quality assurance is reported through the [usdot-jpo-ode SonarCloud organization](https://sonarcloud.io/organizations/usdot-jpo-ode/projects). Code quality reports are generated by the [JaCoCo plugin for Maven](https://www.eclemma.org/jacoco/trunk/doc/maven.html) during the ODE's [webhook-triggered TravisCI build](https://github.com/usdot-jpo-ode/jpo-ode/blob/dev/.travis.yml#L16). After a successful build, the [SonarQube scanner plugin for Maven](https://docs.sonarqube.org/display/SCAN/Analyzing+with+SonarQube+Scanner+for+Maven) creates and uploads a code quality report to SonarCloud. -For regression and user acceptance testing, ODE provides an automated test harness. The test harness is pprovided in the [qa/test-harness](ga/test-harness) directory under jpo-ode root folder. The test harness uses the ODE [Validator Library](https://github.com/usdot-jpo-ode/ode-output-validator-library) repository as a submodule. +For regression and user acceptance testing, ODE provides an automated test harness. The test harness is provided in the [qa/test-harness](ga/test-harness) directory under jpo-ode root folder. The test harness uses the ODE [Validator Library](https://github.com/usdot-jpo-ode/ode-output-validator-library) repository as a submodule. For more information, please see: https://github.com/usdot-jpo-ode/jpo-ode/wiki/Using-the-ODE-test-harness - - -## 9. Known Bugs - -Date: 07/2017 - -In its current state, the ODE has been developed to accomplish the goals of data transfer, security, and modularity working with the J2735 and 1609.2 security. The system has been designed to support multiple services orchestrated through the Apache Kafka streaming data pipelines, services built and supported as separate applications and described with each service's repository. As a modular system, each component has been built for functionality first, and additional performance testing is needed to understand the limits of the system with large volumes of data. - ### Troubleshooting Please read our [Wiki](https://github.com/usdot-jpo-ode/jpo-ode/wiki) for more information, or check the [User Guide](docs/UserGuide.md). @@ -562,29 +600,33 @@ Application Support for the ODE currently managed via GitHub's native issue trac [Back to top](#toc) + + - + -## 10. Credits and Acknowledgement +## 9. Credits and Acknowledgement [Attribution](ATTRIBUTION.md) [Back to top](#toc) + + - + -## 11. Code.gov Registration Info +## 10. Code.gov Registration Info Agency: DOT @@ -600,17 +642,30 @@ Contact Name: James Lieu Contact Phone: (202) 366-3000 - -## 12. Kubernetes -The ODE can be run in a k8s environment. -See [this document](./docs/Kubernetes.md) for more details about this. + + + +## 11. Kubernetes +The ODE can be run in a Kubernetes (k8s) environment. +See [the Kubernetes document](./docs/Kubernetes.md) for more details about this. [Back to top](#toc) - -## 13. Sonar Token Configuration + + + + +## 12. Sonar Token Configuration Generating and Using Tokens Users can generate tokens that can be used to run analyses or invoke web services without access to the user's actual credentials. @@ -633,7 +688,7 @@ Sonar Host URL: In GitHub, create a new repository secret with SONAR_HOST_URL as Configure your workflow YAML file as below: - 1. Add GitHub Secrets in ci.yml workflow as SONAR_TOKEN: ${{ secrets.SONAR_TOKEN } + 1. Add GitHub Secrets in ci.yml workflow as SONAR_TOKEN: ${ secrets.SONAR_TOKEN } 2. 
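To reproduce the coverage and static-analysis report described under Quality Assurance above on a local machine, the same Maven goals used by the CI build can be invoked directly. A minimal sketch, in which the Sonar authentication property and token value are assumptions that must match your own SonarCloud setup:

```bash
# Build, gather JaCoCo coverage, and push an analysis to SonarCloud.
# SONAR_TOKEN is assumed to be exported in the environment; the sonar.login
# property name may differ on newer scanners (which use sonar.token).
mvn clean org.jacoco:jacoco-maven-plugin:prepare-agent package sonar:sonar \
    -Dsonar.login="$SONAR_TOKEN"
```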
Update the sonar properties in Sonar scan step (- name: Run Sonar) with new sonar project properties. Commit and push your code to start the analysis. @@ -641,16 +696,32 @@ Commit and push your code to start the analysis. ### Revoking a token You can revoke an existing token at User > My Account > Security by clicking the Revoke button next to the token. - +[Back to top](#toc) + -## 14. SNMP + + + + +## 13. SNMP The ODE is capable of communicating with RSUs to: - Query TIMs - Deposit TIMs - Delete TIMs +By default, the ODE will not sign TIMs that are delivered to RSUs. This can be changed by setting the value of the DATA_SIGNING_ENABLED_RSU environment variable found in the provided sample.env file. Additionally, signing of SDX-delivery TIMs can be configured by setting the value of the environment variable DATA_SIGNING_ENABLED_SDW found in sample.env. + The following SNMP protocols are supported for communication with RSUs: - DSRC 4.1 (defined in 'Dedicated Short-Range Communications Roadside Unit Specifications') - NTCIP1218 (defined in 'National Transportation Communications for ITS Protocol') -Additionally, the ODE supports the execution of PDM operations on RSUs. PDM operations are not defined in NTCIP1218, but are defined DSRC 4.1. \ No newline at end of file +If no protocol is specified in a request containing RSUs, the ODE will communicate with RSUs via the NTCIP1218 protocol by default. +This can be changed by setting the value of the DEFAULT_SNMP_PROTOCOL environment variable. + +Additionally, the ODE supports the execution of PDM operations on RSUs. PDM operations are not defined in NTCIP1218, but are defined DSRC 4.1. + +[Back to top](#toc) diff --git a/docker-compose-confluent-cloud.yml b/docker-compose-confluent-cloud.yml index 2bb3e5b06..6d729451d 100644 --- a/docker-compose-confluent-cloud.yml +++ b/docker-compose-confluent-cloud.yml @@ -23,10 +23,9 @@ services: KAFKA_TYPE: ${KAFKA_TYPE} CONFLUENT_KEY: ${CONFLUENT_KEY} CONFLUENT_SECRET: ${CONFLUENT_SECRET} - # Commented out, will use SDW depositor module by default - #ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET: ${ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET} - #ODE_DDS_CAS_USERNAME: ${ODE_DDS_CAS_USERNAME} - #ODE_DDS_CAS_PASSWORD: ${ODE_DDS_CAS_PASSWORD} + DATA_SIGNING_ENABLED_RSU: ${DATA_SIGNING_ENABLED_RSU} + DATA_SIGNING_ENABLED_SDW: ${DATA_SIGNING_ENABLED_SDW} + DEFAULT_SNMP_PROTOCOL: ${DEFAULT_SNMP_PROTOCOL} volumes: - ${DOCKER_SHARED_VOLUME}:/jpo-ode - ${DOCKER_SHARED_VOLUME}/uploads:/home/uploads diff --git a/docker-compose-mongo.yml b/docker-compose-mongo.yml new file mode 100644 index 000000000..6ae0d0d21 --- /dev/null +++ b/docker-compose-mongo.yml @@ -0,0 +1,92 @@ +version: '3' + +include: + - docker-compose.yml + +services: + mongo: + image: mongo:7 + container_name: mongo + restart: always + ports: + - "27017:27017" + environment: + MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_DB_USER} + MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_DB_PASS} + MONGO_INITDB_DATABASE: admin + entrypoint: + - bash + - -c + - | + openssl rand -base64 741 > /mongo_keyfile + chmod 400 /mongo_keyfile + chown 999:999 /mongo_keyfile + exec docker-entrypoint.sh $$@ + command: "mongod --bind_ip_all --replSet rs0 --keyFile /mongo_keyfile" + volumes: + - mongo_data:/data/db + healthcheck: + test: | + echo 'db.runCommand("ping").ok' | mongosh localhost:27017/test --quiet + interval: 10s + start_period: 30s + + mongo-setup: + image: mongo:7 + container_name: mongo_setup + depends_on: + - mongo + restart: on-failure + environment: + MONGO_ADMIN_DB_USER: 
${MONGO_ADMIN_DB_USER} + MONGO_ADMIN_DB_PASS: ${MONGO_ADMIN_DB_PASS} + MONGO_DB_NAME: ${MONGO_DB_NAME} + MONGO_ODE_DB_USER: ${MONGO_ODE_DB_USER} + MONGO_ODE_DB_PASS: ${MONGO_ODE_DB_PASS} + MONGO_COLLECTION_TTL: ${MONGO_COLLECTION_TTL} + entrypoint: ["/bin/bash", "setup_mongo.sh"] + volumes: + - ./scripts/mongo/setup_mongo.sh:/setup_mongo.sh + - ./scripts/mongo/create_indexes.js:/create_indexes.js + + + connect: + image: kafka-connect:latest + restart: always + build: + context: ./jpo-s3-deposit/mongo-connector + dockerfile: Dockerfile + ports: + - "8083:8083" + depends_on: + mongo: + condition: service_healthy + environment: + MONGO_URI: ${MONGO_URI} + MONGO_DB_NAME: ${MONGO_DB_NAME} + CONNECT_BOOTSTRAP_SERVERS: ${DOCKER_HOST_IP}:9092 + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_REST_PORT: 8083 + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: topic.kafka-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_CONFIG_STORAGE_CLEANUP_POLICY: compact + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: topic.kafka-connect-offsets + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_STORAGE_CLEANUP_POLICY: compact + CONNECT_STATUS_STORAGE_TOPIC: topic.kafka-connect-status + CONNECT_STATUS_STORAGE_CLEANUP_POLICY: compact + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_LOG4J_ROOT_LOGLEVEL: "ERROR" + CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=ERROR,org.reflections=ERROR,com.mongodb.kafka=ERROR" + CONNECT_PLUGIN_PATH: /usr/share/confluent-hub-components + volumes: + - ./jpo-s3-deposit/mongo-connector/connect_start.sh:/scripts/connect_start.sh + +volumes: + mongo_data: \ No newline at end of file diff --git a/docker-compose-ppm-nsv.yml b/docker-compose-ppm-nsv.yml index 06739d438..f5937b3b9 100644 --- a/docker-compose-ppm-nsv.yml +++ b/docker-compose-ppm-nsv.yml @@ -4,35 +4,39 @@ version: '3' services: - zookeeper: - image: wurstmeister/zookeeper - ports: - - "2181:2181" - kafka: - image: wurstmeister/kafka + image: bitnami/kafka:latest + hostname: kafka ports: - "9092:9092" + volumes: + - "${DOCKER_SHARED_VOLUME}:/bitnami" environment: - DOCKER_HOST_IP: ${DOCKER_HOST_IP} - ZK: ${DOCKER_HOST_IP}:2181 - KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP} - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1" - KAFKA_DELETE_TOPIC_ENABLED: "true" - KAFKA_CLEANUP_POLICY: "delete" # delete old logs - KAFKA_LOG_RETENTION_HOURS: 2 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000 - KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours - KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours. - # This configuration controls the period of time after - # which Kafka will force the log to roll even if the segment - # file isn't full to ensure that retention can delete or compact old data. 
+ KAFKA_ENABLE_KRAFT: "yes" + KAFKA_CFG_PROCESS_ROLES: "broker,controller" + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" + KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092" + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT" + KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092" + KAFKA_BROKER_ID: "1" + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093" + ALLOW_PLAINTEXT_LISTENER: "yes" + KAFKA_CFG_NODE_ID: "1" + KAFKA_CFG_DELETE_TOPIC_ENABLE: "true" + KAFKA_CFG_LOG_RETENTION_HOURS: 2 + logging: + options: + max-size: "10m" + max-file: "5" + + kafka_init: + image: bitnami/kafka:latest depends_on: - - zookeeper + kafka: + condition: service_started volumes: - - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock + - ./scripts/kafka/kafka_init.sh:/kafka_init.sh + entrypoint: ["/bin/sh", "kafka_init.sh"] ode: build: . @@ -51,10 +55,9 @@ services: ODE_SECURITY_SVCS_SIGNATURE_URI: ${ODE_SECURITY_SVCS_SIGNATURE_URI} ODE_RSU_USERNAME: ${ODE_RSU_USERNAME} ODE_RSU_PASSWORD: ${ODE_RSU_PASSWORD} - # Commented out, will use SDW depositor module by default - #ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET: ${ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET} - #ODE_DDS_CAS_USERNAME: ${ODE_DDS_CAS_USERNAME} - #ODE_DDS_CAS_PASSWORD: ${ODE_DDS_CAS_PASSWORD} + DATA_SIGNING_ENABLED_RSU: ${DATA_SIGNING_ENABLED_RSU} + DATA_SIGNING_ENABLED_SDW: ${DATA_SIGNING_ENABLED_SDW} + DEFAULT_SNMP_PROTOCOL: ${DEFAULT_SNMP_PROTOCOL} depends_on: - kafka volumes: @@ -73,6 +76,7 @@ services: - kafka volumes: - ${DOCKER_SHARED_VOLUME}/adm:/asn1_codec_share + restart: on-failure aem: build: ./asn1_codec @@ -107,6 +111,7 @@ services: - ${DOCKER_SHARED_VOLUME}/ppm_bsm:/ppm_data depends_on: - kafka + restart: on-failure ppm_tim: build: @@ -193,7 +198,6 @@ services: SDW_PASSWORD: ${SDW_PASSWORD} depends_on: - kafka - - zookeeper - ode sec: diff --git a/docker-compose.yml b/docker-compose.yml index 7ca7edcc1..f61839bff 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,43 +1,39 @@ version: '3' services: - zookeeper: - image: wurstmeister/zookeeper - ports: - - "2181:2181" - logging: - options: - max-size: "10m" - max-file: "5" - kafka: - image: wurstmeister/kafka + image: bitnami/kafka:latest + hostname: kafka ports: - "9092:9092" - environment: - DOCKER_HOST_IP: ${DOCKER_HOST_IP} - ZK: ${DOCKER_HOST_IP}:2181 - KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP} - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1" - KAFKA_DELETE_TOPIC_ENABLED: "true" - 
KAFKA_CLEANUP_POLICY: "delete" # delete old logs - KAFKA_LOG_RETENTION_HOURS: 2 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000 - KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours - KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours. - # This configuration controls the period of time after - # which Kafka will force the log to roll even if the segment - # file isn't full to ensure that retention can delete or compact old data. - depends_on: - - zookeeper volumes: - - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock + - kafka:/bitnami + environment: + KAFKA_ENABLE_KRAFT: "yes" + KAFKA_CFG_PROCESS_ROLES: "broker,controller" + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" + KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092" + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT" + KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092" + KAFKA_BROKER_ID: "1" + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093" + ALLOW_PLAINTEXT_LISTENER: "yes" + KAFKA_CFG_NODE_ID: "1" + KAFKA_CFG_DELETE_TOPIC_ENABLE: "true" + KAFKA_CFG_LOG_RETENTION_HOURS: 2 logging: options: - max-size: "10m" + max-size: "10m" max-file: "5" + kafka_init: + image: bitnami/kafka:latest + depends_on: + kafka: + condition: service_started + volumes: + - ./scripts/kafka/kafka_init.sh:/kafka_init.sh + entrypoint: ["/bin/sh", "kafka_init.sh"] + ode: build: . image: jpoode_ode:latest @@ -60,10 +56,9 @@ services: ODE_SECURITY_SVCS_SIGNATURE_URI: ${ODE_SECURITY_SVCS_SIGNATURE_URI} ODE_RSU_USERNAME: ${ODE_RSU_USERNAME} ODE_RSU_PASSWORD: ${ODE_RSU_PASSWORD} - # Commented out, will use SDW depositor module by default - #ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET: ${ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET} - #ODE_DDS_CAS_USERNAME: ${ODE_DDS_CAS_USERNAME} - #ODE_DDS_CAS_PASSWORD: ${ODE_DDS_CAS_PASSWORD} + DATA_SIGNING_ENABLED_RSU: ${DATA_SIGNING_ENABLED_RSU} + DATA_SIGNING_ENABLED_SDW: ${DATA_SIGNING_ENABLED_SDW} + DEFAULT_SNMP_PROTOCOL: ${DEFAULT_SNMP_PROTOCOL} depends_on: - kafka volumes: @@ -91,3 +86,189 @@ services: options: max-size: "10m" max-file: "5" + restart: on-failure + + aem: + build: ./asn1_codec + image: jpoode_acm:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + ACM_CONFIG_FILE: aem.properties + ACM_LOG_TO_CONSOLE: ${AEM_LOG_TO_CONSOLE} + ACM_LOG_TO_FILE: ${AEM_LOG_TO_FILE} + ACM_LOG_LEVEL: ${AEM_LOG_LEVEL} + depends_on: + - kafka + volumes: + - ${DOCKER_SHARED_VOLUME}:/asn1_codec_share + logging: + options: + max-size: "10m" + max-file: "5" + restart: on-failure + + ppm_bsm: + build: + context: ./jpo-cvdp + dockerfile: Dockerfile + image: jpoode_ppm:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + PPM_CONFIG_FILE: ppmBsm.properties + PPM_LOG_TO_CONSOLE: ${PPM_BSM_LOG_TO_CONSOLE} + PPM_LOG_TO_FILE: ${PPM_BSM_LOG_TO_FILE} + PPM_LOG_LEVEL: ${PPM_BSM_LOG_LEVEL} + volumes: + - ${DOCKER_SHARED_VOLUME}:/ppm_data + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + ppm_tim: + build: + context: ./jpo-cvdp + dockerfile: Dockerfile + image: jpoode_ppm:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + PPM_CONFIG_FILE: ppmTim.properties + PPM_LOG_TO_CONSOLE: ${PPM_TIM_LOG_TO_CONSOLE} + PPM_LOG_TO_FILE: ${PPM_TIM_LOG_TO_FILE} + PPM_LOG_LEVEL: ${PPM_TIM_LOG_LEVEL} + volumes: + - ${DOCKER_SHARED_VOLUME}:/ppm_data + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + cvpep_bsm_depositor: + build: 
./jpo-s3-deposit + image: jpoode_s3dep:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + DEPOSIT_BUCKET_NAME: ${CVPEP_BSM_BUCKET_NAME} + DEPOSIT_KEY_NAME: ${CVPEP_BSM_DEPOSIT_KEY} + DEPOSIT_TOPIC: ${CVPEP_BSM_TOPIC} + K_AWS_ACCESS_KEY_ID: ${CVPEP_BSM_K_AWS_ACCESS_KEY_ID} + K_AWS_SECRET_ACCESS_SECRET: ${CVPEP_BSM_K_AWS_SECRET_ACCESS_SECRET} + K_AWS_SESSION_TOKEN: ${CVPEP_BSM_K_AWS_SESSION_TOKEN} + K_AWS_EXPIRATION: ${CVPEP_BSM_K_AWS_EXPIRATION} + API_ENDPOINT: ${CVPEP_BSM_API_ENDPOINT} + HEADER_ACCEPT: ${CVPEP_BSM_HEADER_ACCEPT} + HEADER_X_API_KEY: ${CVPEP_BSM_HEADER_X_API_KEY} + DEPOSIT_GROUP: ${CVPEP_BSM_GROUP} + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + rde_bsm_depositor: + build: ./jpo-s3-deposit + image: jpoode_s3dep:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + DEPOSIT_BUCKET_NAME: ${RDE_BSM_BUCKET_NAME} + DEPOSIT_KEY_NAME: ${RDE_BSM_DEPOSIT_KEY} + DEPOSIT_TOPIC: ${RDE_BSM_TOPIC} + K_AWS_ACCESS_KEY_ID: ${RDE_BSM_K_AWS_ACCESS_KEY_ID} + K_AWS_SECRET_ACCESS_SECRET: ${RDE_BSM_K_AWS_SECRET_ACCESS_SECRET} + K_AWS_SESSION_TOKEN: ${RDE_BSM_K_AWS_SESSION_TOKEN} + K_AWS_EXPIRATION: ${RDE_BSM_K_AWS_EXPIRATION} + API_ENDPOINT: ${RDE_BSM_API_ENDPOINT} + HEADER_ACCEPT: ${RDE_BSM_HEADER_ACCEPT} + HEADER_X_API_KEY: ${RDE_BSM_HEADER_X_API_KEY} + DEPOSIT_GROUP: ${RDE_BSM_GROUP} + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + cvpep_tim_depositor: + build: ./jpo-s3-deposit + image: jpoode_s3dep:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + DEPOSIT_BUCKET_NAME: ${CVPEP_TIM_BUCKET_NAME} + DEPOSIT_KEY_NAME: ${CVPEP_TIM_DEPOSIT_KEY} + DEPOSIT_TOPIC: ${CVPEP_TIM_TOPIC} + K_AWS_ACCESS_KEY_ID: ${CVPEP_BSM_K_AWS_ACCESS_KEY_ID} + K_AWS_SECRET_ACCESS_SECRET: ${CVPEP_BSM_K_AWS_SECRET_ACCESS_SECRET} + K_AWS_SESSION_TOKEN: ${CVPEP_BSM_K_AWS_SESSION_TOKEN} + K_AWS_EXPIRATION: ${CVPEP_BSM_K_AWS_EXPIRATION} + API_ENDPOINT: ${CVPEP_BSM_API_ENDPOINT} + HEADER_ACCEPT: ${CVPEP_BSM_HEADER_ACCEPT} + HEADER_X_API_KEY: ${CVPEP_BSM_HEADER_X_API_KEY} + DEPOSIT_GROUP: ${CVPEP_TIM_GROUP} + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + rde_tim_depositor: + build: ./jpo-s3-deposit + image: jpoode_s3dep:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + DEPOSIT_BUCKET_NAME: ${RDE_TIM_BUCKET_NAME} + DEPOSIT_KEY_NAME: ${RDE_TIM_DEPOSIT_KEY} + DEPOSIT_TOPIC: ${RDE_TIM_TOPIC} + K_AWS_ACCESS_KEY_ID: ${CVPEP_BSM_K_AWS_ACCESS_KEY_ID} + K_AWS_SECRET_ACCESS_SECRET: ${RDE_TIM_K_AWS_SECRET_ACCESS_SECRET} + K_AWS_SESSION_TOKEN: ${RDE_TIM_K_AWS_SESSION_TOKEN} + K_AWS_EXPIRATION: ${RDE_TIM_K_AWS_EXPIRATION} + API_ENDPOINT: ${RDE_TIM_API_ENDPOINT} + HEADER_ACCEPT: ${RDE_TIM_HEADER_ACCEPT} + HEADER_X_API_KEY: ${RDE_TIM_HEADER_X_API_KEY} + DEPOSIT_GROUP: ${RDE_TIM_GROUP} + depends_on: + - kafka + logging: + options: + max-size: "10m" + max-file: "5" + + sdw_depositor: + build: ./jpo-sdw-depositor + image: jpoode_sdwdep:latest + environment: + DOCKER_HOST_IP: ${DOCKER_HOST_IP} + #SDW_GROUP_ID: ${SDW_GROUP_ID} + #SDW_KAFKA_PORT: ${SDW_KAFKA_PORT} + #SDW_SUBSCRIPTION_TOPICS: ${SDW_SUBSCRIPTION_TOPICS} + #SDW_DESTINATION_URL: ${SDW_DESTINATION_URL} + SDW_API_KEY: ${SDW_API_KEY} + depends_on: + - kafka + - ode + logging: + options: + max-size: "10m" + max-file: "5" + + sec: + build: ./jpo-security-svcs + image: jpoode_sec:latest + ports: + - "8090:8090" + environment: + SEC_CRYPTO_SERVICE_BASE_URI: ${SEC_CRYPTO_SERVICE_BASE_URI} + logging: + options: + max-size: "10m" + max-file: "5" + +volumes: + 
kafka: + {} \ No newline at end of file diff --git a/docs/Architecture.md b/docs/Architecture.md index 2ac9b4d86..d51dc8985 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -12,7 +12,7 @@ Booz Allen Hamilton\ 8283 Greensboro Drive\ McLean, VA 22102 -_Last updated June 11, 2021_ +_Last updated April 26th, 2024_ # Contents @@ -32,7 +32,7 @@ _Last updated June 11, 2021_ - [6 - Appendix](#appendix) - [6.1 - Glossary](#glossary) - + # Version History @@ -43,7 +43,7 @@ _Last updated June 11, 2021_ | 1.1 | Hamid Musavi | 4/17/2017 | Updated to reflect ORNL De-identification service | | 1.2 | Hamid Musavi | 12/21/2018 | General Update | - + # 1 - Introduction @@ -62,7 +62,7 @@ This document is a living document and will be updated throughout the life of the JPO ODE project to reflect the most recent changes in the ODE design and stakeholder feedback. - + # 2 - Project Overview @@ -79,13 +79,13 @@ brokering, processing and routing data from various data sources, including connected vehicles, field devices, Transportation Management Center (TMC) applications and a variety of other data users. Data users include but not limited to transportation software applications, -Research Data Exchange (RDE), US DOT Situation Data Warehouse. +Research Data Exchange (RDE), and the [Situational Data Exchange (SDX)](https://sdx.trihydro.com/). As a data provisioning service, the ODE can provision data from disparate data sources to software applications that have placed data subscription requests to the ODE. On the other direction, the ODE can accept data from CV applications and broadcast them to field devices -through Road Side Units (RSU) and US DOT Situation Data Warehouse which +through Road Side Units (RSU)s and the Situational Data Exchange (SDX) which in turn will transmit the data to Sirius XM satellites for delivery to the connected vehicles in the field. @@ -102,7 +102,7 @@ validation and sanitization. compromise the privacy of the individual(s) that might be linked to the data. - + # 3 - System Overview @@ -112,7 +112,7 @@ operational, monitoring, and research purposes. The system will enable applications to submit data through a variety standard interfaces as illustrated in the figure below. - + ### 3.1 - ODE Technology Stack @@ -121,10 +121,10 @@ each technology layer. ![](images/architecture/figure1.png) -Figure 1 - ODE Technology Stack +_Figure 1 - ODE Technology Stack_ Data Integration later of JPO-ODE supports the producers and consumers -of CV data as illustrated in Figure 2 below. Not all components or +of CV data as illustrated in Figure 2 below. It is possible that not all components or services shown in this diagram have been implemented. *The implementation timeline for the identified interfaces will depend on the needs of the JPO ODE customers and the priority of these capabilities to @@ -132,9 +132,9 @@ the JPO-ODE product owner.* ![](images/architecture/figure2.png) -Figure 2 - ODE Data Integration Clients +_Figure 2 - ODE Data Integration Clients_ - + ### 3.2 - Producer Mechanisms @@ -174,12 +174,12 @@ Notation (JSON), environmental and various other system logs. and process new records. *This interface is suitable only to applications residing in the private network domain.* - + ### 3.3 - Consumer Mechanisms The JPO-ODE is designed to support the following mechanisms for -outputting decoded BSM, Map and Signal Phase and Timing (SPaT) data as +outputting decoded BSM, Map, and Signal Phase & Timing (SPaT) data as well as encoded TIM data. 
- **Streaming Data Consumers (Direct):** Applications can subscribe @@ -191,25 +191,12 @@ well as encoded TIM data. to the messaging service through the use of a standard WebSocket API. *This interface is suitable to all applications whether residing in the private network domain or in the cloud. For cloud - applications Secure WebSocket (wss) protocol should be utillized.* - -- **RESTful API Data Consumers:** Applications can connect directly - with a RESTful API and submit messages to the messaging service - through HTTP commands. *This interface is suitable to all - applications whether residing in the private network domain or in - the cloud. For cloud applications Secure HTTP (https) protocol - should be utilized.* - -- **File System Data Consumers:** Through the use of a shared file - repository, applications can monitor collection of data messages. - *This interface is suitable to applications residing in the private - network domain or in the cloud. This interface should be utilized - through Secure Copy (scp) protocol.* + applications Secure WebSocket (wss) protocol should be utilized.* - **Database Data Consumers**: Data messages can be directly inserted into a shared application database and made available for queries. - + ### 3.4 - ODE Management Console @@ -217,7 +204,7 @@ ODE is envisioned to provide a management console for functions such as SNMP device management and provisioning. Other configuration functions can be included in a future management console. - + # 4 - Architecture Pattern @@ -228,7 +215,7 @@ and service-oriented architectures. 1 _In software engineering, a **monolithic application** describes a single-tiered software **application** in which the user interface and data access code are combined into a single program from a single platform. A **monolithic application** is self-contained, and independent from other computing **applications**._ - + ### 4.1 - Pattern Description @@ -257,7 +244,7 @@ The micro-services pattern consists of three major concepts: _Figure 3 - Basic Micro-services architecture pattern_ - + ### 4.2 - Pattern Topology @@ -351,7 +338,7 @@ designed as depicted in Figure 6. _Figure 6 - Kafka Publish/Subscribe Model_ - + # 5 - JPO ODE Micro-services Topology @@ -363,7 +350,7 @@ the major architectural components with which these services interact. _Figure 7 - JPO ODE Micro-services Topology_ - + ### 5.1 - Deployments @@ -371,14 +358,14 @@ Docker is utilized as the primary deployment mechanism to compartmentalize each of the designed micro-services into separate containers. Docker is used to package all components in a composite of containers each running a distinct service. The ODE application runs in -one container and other major frameworks such as ZooKeeper and Kafka run -in their own separate containers. +one container, its submodules run in separate containers and other major +frameworks such as Kafka run in their own separate containers. - + # 6 - Appendix - + ### 6.1 - Glossary @@ -391,4 +378,3 @@ in their own separate containers. | SCP | Secure Copy | | US DOT | Unites States Department of Transportation | | WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. 
| -| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. | diff --git a/docs/Release_notes.md b/docs/Release_notes.md index 8611f4026..5b109cf0c 100644 --- a/docs/Release_notes.md +++ b/docs/Release_notes.md @@ -1,6 +1,33 @@ JPO-ODE Release Notes ---------------------------- +Version 2.1.0, released June 2024 +---------------------------------------- +### **Summary** +The updates for the jpo-ode 2.1.0 release include several key improvements and fixes. These updates address issues with PSM and log offloading and enhance the continuous integration processes. The Kafka version has been upgraded and a bug related to log processing has been resolved. Nanoseconds are now trimmed from timestamps and 1609.2 headers are now stripped from unsigned messages. A submodule compatibility guide has been added, along with making the default SNMP protocol configurable. Configurable signing is now possible independently for Road-Side Units (RSUs) and the SDX. The Dockerhub documentation now includes a link to the submodule compatibility guide. Maven plugin versions have been updated and the Kafka topic creation process has been improved. A timestamp precision bug has been fixed and the documentation has been revised for accuracy. Additionally, the NTCIP1218 msgRepeatOptions value is now set based on context and SnmpSession now reports failures to retrieve authoritative engine IDs only if a response is received. Finally, the TimDeleteController has been updated to log message deletion failures as errors. + +Enhancements in this release: +CDOT PR 57: Fixes for PSM & Log Offloading +CDOT PR 58: Updated CI +CDOT PR 59: Updated Kafka version +CDOT PR 61: Fixed bug with log processing +CDOT PR 62: Trimmed nanoseconds from snmpTimeStampFromIso +CDOT PR 63: Stripped 1609.2 headers from unsigned messages +CDOT PR 64: Added submodule compatibility guide +CDOT PR 65: Added support for retaining IEEE 1609.2 security headers +CDOT PR 66: Made default SNMP protocol configurable +CDOT PR 67: Added configurable signing independently for RSUs and the SDX +CDOT PR 69: Added link to submodule compatibility guide to Docker Hub documentation +CDOT PR 70: Updated maven plugin versions +CDOT PR 71: Updated Kafka topic creation +CDOT PR 72: Fixed timestamp precision bug +CDOT PR 73: Revised documentation for accuracy +CDOT PR 74: Kafka Connect & MongoDB Database +CDOT PR 75: Set NTCIP1218 msgRepeatOptions value based on context +CDOT PR 76: Updated SnmpSession to report failures to retrieve authoritative engine IDs only if a response is received +CDOT PR 78: UDP/Log Ingestion Updates +CDOT PR 79: Updated TimDeleteController to log failures to delete messages as errors + Version 2.0.2, released April 2024 ---------------------------------------- ### **Summary** diff --git a/docs/UserGuide.md b/docs/UserGuide.md index f65d16f71..872e73b17 100644 --- a/docs/UserGuide.md +++ b/docs/UserGuide.md @@ -9,7 +9,7 @@ Booz Allen Hamilton\ 8283 Greensboro Drive\ McLean, VA 22102 -_Last updated February 7, 2019_ +_Last updated April 26th, 2024_ # Table of Contents - [Version History](#version-history) @@ -48,7 +48,7 @@ _Last updated February 7, 2019_ - [7.4 - Probe Data Management](#probe-data-management) - [7.4.1 - PDM Broadcast Request Quick Start Guide](#pdm-broadcast-request-quick-start-guide) - [7.5 - Outbound TIM Broadcast](#outbound-tim-broadcast) - - [7.5.1 - Outbound TIM to SDW Websocket 
Setup](#outbound-tim-to-sdw-websocket-setup) + - [7.5.1 - Outbound TIM to SDX Setup](#outbound-tim-to-sdx-setup) - [7.5.2 - Outbound TIM to S3 Bucket Setup](#outbound-tim-to-s3-bucket-setup) - [7.5.3 - TIM Broadcast Request Quick Start Guide](#tim-broadcast-request-quick-start-guide) - [7.6 - Privacy Protection Module (PPM)](#privacy-protection-module-ppm) @@ -67,7 +67,7 @@ _Last updated February 7, 2019_ - [8.3.2 - ODE Output Schema Reference](#ode-output-schema-reference) - [9 - References](#references) - + # Version History @@ -91,7 +91,7 @@ _Last updated February 7, 2019_ | 0.16 | ODE Team | 2/4/2019 | Removed deprecated properties. Added ode.kafkaDisabledTopics | | 0.17 | ODE Team | 2/6/2019 | Added SDW depositor submodule instructions. Removed deprecated properties and capabilities (VSD deposit to SDC). | - + # 1 - Introduction @@ -113,7 +113,7 @@ in the project's GitHub repository GitHub account to create an issue. If you don't have an account, a dialog will be presented to you to create one at no cost. - + # 2 - Project Overview @@ -130,13 +130,13 @@ brokering, processing and routing data from various data sources, including connected vehicles, field devices, Transportation Management Center (TMC) applications and a variety of other data users. Data users include but not limited to transportation software applications, -Research Data Exchange (RDE), US DOT Situation Data Warehouse. +Research Data Exchange (RDE), and the [Situational Data Exchange (SDX)](https://sdx.trihydro.com/). As a data provisioning service, the ODE can provision data from disparate data sources to software applications that have placed data subscription requests to the ODE. On the other direction, the ODE can accept data from CV applications and broadcast them to field devices -through Road Side Units (RSU) and US DOT Situation Data Warehouse which +through Road Side Units (RSU)s and the Situational Data Exchange (SDX) which in turn will transmit the data to Sirius XM satellites for delivery to the connected vehicles in the field. @@ -153,11 +153,11 @@ validation and sanitization. compromise the privacy of the individual(s) that might be linked to the data. - + # 3 - System Overview -JPO ODE is an open-sourced software application that will enable the +JPO ODE is an open-source software application that will enable the transfer of data between field devices and backend TMC systems for operational, monitoring, and research purposes. The system will enable applications to submit data through a variety standard interfaces as @@ -175,15 +175,15 @@ capabilities to the JPO-ODE product owner._ ![](images/userguide/figure1.png) -Figure 1 - ODE System Data Producers and Consumers +_Figure 1 - ODE System Data Producers and Consumers_ - + # 4 - Audience -This document is intended for use by the ODE client applications. +This document is intended for use by the ODE client applications that will be interfacing with the ODE. - + # 5 - Glossary @@ -201,17 +201,17 @@ This document is intended for use by the ODE client applications. | SAE | SAE International is a global association of more than 128,000 engineers and related technical experts in the aerospace, automotive and commercial-vehicle industries. | | J2735 | This SAE Standard specifies a message set, and its data frames and data elements specifically for use by applications intended to utilize the 5.9 GHz Dedicated Short Range Communications for Wireless Access in Vehicular Environments (DSRC/WAVE, referenced in this document simply as “DSRC”), communications systems. 
(SAE International 2016) | | SCP | Secure Copy | -| SDW | Situation Data Warehouse | +| SDX | Situational Data Exchange | +| SDW | Situational Data Warehouse (the old name for the SDX) | | TIM | Traveler Information Message | | US DOT | Unites States Department of Transportation | | WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. | -| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. | - + # 6 - ODE Development Environment - + ### 6.1 - Java Development Tools @@ -225,15 +225,15 @@ Tools: - Maven - GitHub: - + ### 6.2 - Java -Install Java Development Kit (JDK) 1.8 +Install Java Development Kit (JDK) 21 - + - + ### 6.3 - Eclipse IDE @@ -241,11 +241,11 @@ Download and install Eclipse. -Configure Eclipse to use Java 1.8 JDK. Local installation of Tomcat can +Configure Eclipse to use Java 21 JDK. Local installation of Tomcat can integrate with Eclipse and can help with prototyping or debugging the application. - + ### 6.4 - Maven @@ -256,7 +256,7 @@ pre-installed with a Maven plug-in. Download and install Maven: - + ### 6.5 - Git Version Control @@ -274,7 +274,7 @@ It is recommended that GIT plug-ins are installed with your IDE so that your IDE is Git "aware". Newer versions of eclipse (Luna and later versions) comes pre-installed with a Git plug-in. - + ### 6.6 - Building ODE Software Artifacts @@ -288,12 +288,14 @@ be found in the jpo-ode/README.md document: | jpo-s3-deposit | public | S3 depositor service | | | jpo-cvdp | public | PII sanitization module | | | asn1\_codec | public | ASN.1 encoder/decoder | | +| jpo-sdw-depositor | public | SDX depositor service | | +| jpo-security-svcs | public | Security services module | | - + #### 6.6.1 - Open-Source Repository -The ODE deployment artifact consists of one of more jar files that make +The ODE deployment artifact consists of one or more jar files that make up the collection of software modules and service components. Initially, there will be only one executable jar file (one micros service) but in the future as the ODE functionality expands it is envisioned that @@ -323,29 +325,29 @@ The following components make up the JPO ODE software: ODE and other applications. This module will replace the private repository jpo-ode-private. - + #### 6.6.2 - ASN.1 Java API The data uploaded or deposited to the ODE from the connected vehicles -(CV) and the road-side units (RSU) is encoded in ASN.1 format. In order +(CV) and the road-side units (RSU)s is encoded in ASN.1 format. In order for the ODE to utilize the data, it must be able to decode the data from ASN.1 format into a more generic format, in this case Plain Old Java -Objects (POJOs). ODE utilizes an open-source ASN.1 codec library -provided on GitHub at . ODE team has +Objects (POJOs). ODE utilizes a fork of an open-source ASN.1 codec library +provided on GitHub at . ODE team has built a standalone C/C++ module that uses this library to perform all required encoding and decoding needs of the application. 
The module is a submodule of ODE, also provided on GitHub: - + #### 6.6.3 - Build and Deploy Procedure -Follow the steps in jpo-ode/README.md Getting Started guide for building +Follow the steps in jpo-ode/README.md [Installation](../README.md#3-installation) section for building and deploying the JPO-ODE services. - + #### 6.6.4 - ODE Application Properties @@ -368,7 +370,7 @@ For example, add ``` 3. _You may_ specify properties as system environment variables in the - form of _ode.DdsCasUsername=fred.flintstone\@stone.age._ + form of _ode.sdwApiKey=12345_ Other properties not specific to the ODE can also be defined in a similar way but without the _ode_ prefix. @@ -389,10 +391,6 @@ _Table 1 - ODE Application Properties_ | ode.uploadLocationObuLog | ./uploads/bsmlog | | Specific location for OBU log files with header fields to specify direction, UTC timestamp, and other metadata | | ode.pluginsLocations | ./plugins | | Location of the jar files for ODE plugins. | | ode.kafkaProducerType | async | | Specifies whether publishing to Kafka will be synchronous (i.e. blocking until the data has been persisted) or asynchronous (i.e. publish and forget). Valid values are: sync or async. Sync will generally be slower but more reliable, async is faster with the risk of losing data if kafka crashes during the write operation. | -| ode.ddsCasUsername | null | X | Username to be used for authentication when interfacing with Situation Data Warehouse | -| ode.ddsCasPassword | null | X | Password to be used for authentication when interfacing with Situation Data Warehouse (SDW) | -| ode.ddsCasUrl | | | URL of the US DOT security server. | -| ode.ddsWebsocketUrl | wss://webapp.cvmvp.com/whtools/websocket | | URL of the US DOT SDW WebSockets API | | ode.sdcIp | 104.130.170.234 | | IPv4 address of SDC | | ode.sdcPort | 46753 | | Destination port of SDC | | ode.bsmReceiverPort | 46800 | | The UDP port that ODE will use to listen to BSM messages. | @@ -420,7 +418,7 @@ _Table 1 - ODE Application Properties_ | ode.rsuPassword | null | If not present in JSON | The SNMP password used to authenticate with an RSU when depositing, deleting, or querying TIMs. | | ode.kafkaTopicsDisabled | topic.OdeBsmRxPojo, topic.OdeBsmTxPojo, topic.OdeBsmDuringEventPojo,topic.OdeTimBroadcastPojo | | List of topics to be disabled from publishing. | - + #### 6.6.5 - ODE Logging Properties @@ -439,7 +437,7 @@ source _src/main/resources/logback.xml_ file before building the software or place a different _logback.xml_ file with the modified values in the working directory of the application. - + # 7 - ODE Features @@ -468,7 +466,7 @@ applications: 11. Data Sanitization - + ### 7.1 - Managing SNMP Devices @@ -477,7 +475,7 @@ existing Road Side Unit to ensure the system is up and running. To trigger a specific heartbeat call, the ODE provides two separate interfaces to deploy a message to an RSU. - + ##### 7.1.1 - Query Parameters @@ -493,7 +491,7 @@ _The OIDs for the RSUs are specified in the DSRC Roadside Unit (RSU) Specifications Document v4.1. The units also respond to ISO standard OIDs, as demonstrated in the screenshot below._ - + ##### 7.1.2 - API Details @@ -510,7 +508,7 @@ was last powered on) \[1.3.6.1.2.1.1.3.0 = 0:05:12.59\] - + #### 7.1.3 - Web Based View @@ -519,7 +517,7 @@ through the existing web interface located at the root of the application. On it, a user will see a section for RSU SNMP Query and may enter in the same IP and OID information as the API Endpoint. 
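As an illustration of the RSU query interface described above, a request can be issued with any standard HTTP client. The endpoint path, port, and parameter names shown below are assumptions for a default local deployment and are not authoritative; consult docs/ODESwagger.yaml for the actual interface definition.

```bash
# Illustrative RSU SNMP query against a locally running ODE.
# The path (/rsuHeartbeat), port (8080), and parameter names (ip, oid) are
# assumptions for a default deployment -- verify against docs/ODESwagger.yaml.
curl -X GET "http://localhost:8080/rsuHeartbeat?ip=192.168.1.100&oid=1.3.6.1.2.1.1.3.0"

# A successful response reports the requested variable binding, e.g. the
# RSU uptime style shown above: [1.3.6.1.2.1.1.3.0 = 0:05:12.59]
```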
- + #### 7.1.4 - Additional Features/ Discussion Points @@ -530,13 +528,13 @@ enter in the same IP and OID information as the API Endpoint. - Should the responses from the application be in a standard format? (JSON) - + ### 7.2 - Logging Events ODE uses Logback logging framework to log application and data events. - + #### 7.2.1 - Log Levels @@ -558,14 +556,14 @@ ODE uses Logback logging framework to log application and data events. 8. WARN - Logger reports application warnings - + #### 7.2.2 - Logging setup - As it stands, the current logging framework has two separate log files. The first log file is for application output called ode.log. Application debug information and backend service messages are - output to this file. The second log file, Events.log contains + output to this file. The second log file, Events.log, contains informational messages pertaining to the services a message goes through inside of the system. @@ -575,11 +573,11 @@ ODE uses Logback logging framework to log application and data events. deletion, and rolling archive file naming. For the full list of features visit this URL: - + #### 7.2.3 - Steps to turn on/off logging during application runtime. -1. Start ode, Kafka, and Zookeeper as normal. +1. Start ode and Kafka as normal. 2. In a new terminal window run \"jconsole\". @@ -611,7 +609,7 @@ ODE uses Logback logging framework to log application and data events. 13. Save the file and go back to the jconsole and click the button reloadbyfilename to submit changes. - + ### 7.3 Inbound Data Distribution @@ -631,7 +629,7 @@ as POJO). ODE uses Kryo serializer for serializing POJOs before publishing. See section 8.3.1 for the topic names to which applications can subscribe. - + #### 7.3.1 - Inbound BSM Log File Processing and Distribution @@ -648,7 +646,7 @@ can subscribe. 1. Received messages (purge third) 1. Received BSMs from nearby OBUs are logged and deposited to the ODE via the file copy interface. - + #### 7.3.2 - Inbound TIM Log File Processing and Distribution @@ -663,7 +661,7 @@ can subscribe. 1. We have a log for driver's alerts, it will need to flag alerts that were not given because of a higher priority alert (purge ninth) 1. Location, time, alert (FCW, TIM, not DNM) - + #### 7.3.3 Inbound Other Log File Processing and Distribution @@ -686,13 +684,13 @@ can subscribe. 1. Log success/fail of firmware updates 2. Log availability of firmware updates - + #### 7.3.4 - Inbound BSM - Text File Processing (HEX and JSON) HEX and JSON file processing is no longer supported - + ### 7.4 - Probe Data Management @@ -701,7 +699,7 @@ PDM messages via the REST API interface. The ODE accepts data elements in JSON which are then sent via SNMP to an array of Roadside Units (RSUs) which are also specified in that same JSON string. - + #### 7.4.1 - PDM Broadcast Request Quick Start Guide @@ -735,7 +733,7 @@ instructions. } ``` - + ### 7.5 - Outbound TIM Broadcast @@ -745,100 +743,57 @@ in JSON format from which a fully formed ASN.1 compliant J2735 TravelerInformation message will be constructed and sent to an array of RSUs. The RSUs must be specified in the TIM broadcast message received by the ODE. In addition to the RSU devices, the TIM message is also -deposited to the US DOT Situation Data Warehouse (SDW) from which the +deposited to the Situational Data Exchange (SDX) from which the SiriusXM satellites will pull from and broadcast to vehicles that are -not within range of RSUs. SDW parameters are also specified in the TIM +not within range of RSUs. 
SDX parameters are also specified in the TIM REST interface. Please refer to the Swagger file documentation for details of a TIM REST interface. - + -#### 7.5.1 Outbound TIM to SDW Setup +#### 7.5.1 Outbound TIM to SDX Setup -Traveler Information Messages may be distributed to RSUs, the SDW, or both by including certain objects in the JSON message sent to the `/tim` endpoint: +Traveler Information Messages may be distributed to RSUs, the SDX, or both by including certain objects in the JSON message sent to the `/tim` endpoint: - **RSU Distribution**: The /tim REST service will send the TIM messages to RSUs if both "rsus" and "snmp" elements of the request body are defined and valid. If either "rsus" or "snmp" are missing, the request will not be sent to the RSUs. -- **SDW Enablement**: /tim REST service sends the TIM messages to SDW +- **SDX Enablement**: /tim REST service sends the TIM messages to SDX if the "sdw" element of the request body is defined and valid. If - "sdw" element is missing, the request will not be sent to the SDW. + "sdw" element is missing, the request will not be sent to the SDX. -**Option 1: Websocket Interface** +**SDW Depositor Submodule** -ODE **Configuration**: Update the effective application.properties file -with username and password for Webapp2/sdw. Substitute your username and -password for `` and ``, respectively. - -```bash -ode.ddsCasUsername= -ode.ddsCasPassword= -ode.depositSdwMessagesOverWebsocket=true -``` - -(OR) - -Define the following command line arguments while launching the ODE through the jpo-ode-svcs JAR: - -```bash ---ode.ddsCasUsername=, ---ode.ddsCasPassword=, ---ode.depositSdwMessagesOverWebsocket=true -``` - -(OR) - -Define the following environment variables in the environment file: - -```bash -SDW_USERNAME= -SDWPASSWORD= -ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET=true -``` - -**Note**: This option uses the ODE's built-in SDW depositor and does not -require a SDW service to be running. Therefore, jpo-sdw-depositor -service should be removed from docker-compose.yml. - -**Option 2 (Recommended): SDW Depositor Submodule** - -Depositing a TIM message to the Situation Data Warehouse can be done +Depositing a TIM message to the Situational Data Exchange can be done using the pre-built jpo-sdw-depositor repository. To set this service up: 1. Follow the steps in the ODE README.md to clone and compile the SDW depositor service. If you used the `--recurse-submodules` option to clone, it will automatically be cloned. -2. Set the following environment variable to false OR comment it out using the \# symbol: - -``` -ode.depositSdwMessagesOverWebsocket=false -#ode.depositSdwMessagesOverWebsocket -``` - -3. Set the following environment variables in the _.env_ file: +2. Set the following environment variables in the _.env_ file: ```bash SDW_API_KEY=myApiKey ``` -4. Follow the rest of the ODE setup steps. The SDW depositor service +3. Follow the rest of the ODE setup steps. The SDW depositor service containers will be automatically created by docker-compose. -5. Verify arrival of messages in SDW by verifying response status +4. Verify arrival of messages in SDX by verifying response status messages in the logs. - + #### 7.5.2 - Outbound TIM to S3 Bucket Setup Depositing a TIM message to an S3 bucket can be done using the pre-built -jpo-s3-depositor repository. To set this service up: +jpo-s3-deposit repository. To set this service up: 1. Follow the steps in the ODE README.md to clone and compile the S3 - depositor service. + deposit service. 2. 
Set the following environment variables (and/or use the RDE prefixed variables, these prefixes are for guidance only and do not @@ -854,50 +809,34 @@ jpo-s3-depositor repository. To set this service up: - CVPEP\_TIM\_S3\_TOPIC -3. Follow the rest of the ODE setup steps. The S3 depositor service +3. Follow the rest of the ODE setup steps. The S3 deposit service containers will be automatically created by docker-compose. 4. Verify arrival of messages in S3 by visiting the AWS UI or an S3 client application. - + #### 7.5.3 - TIM Broadcast Request Quick Start Guide To run a local test of the TIM Message API, please follow these instructions: -1. Start the ODE with valid ode.ddsCasUsername and ode.ddsCasPassword - in the effective application.properties file. - -2. Reference the Swagger documentation located in the /docs folder of +1. Reference the Swagger documentation located in the /docs folder of the repo or at to view the specifications for the API call. -3. Copy the curl command, run the python script, or use a web based +2. Copy the curl command, run the python script, or use a web based REST tool such as Postman to send the TIM broadcast request to the ODE. Make sure the REST request body contains the "snmp" and "rsus" elements with valid IP addresses of the RSUs that you intend to send - the message to as well as the required SDW parameters. + the message to as well as the required SDX parameters. -4. The REST interface will return a response indicating the deposit +3. The REST interface will return a response indicating the deposit success ("success":"true") or failure ("success":"false") for each - RSU and the SDW deposit: - -```json -{ - "rsu_responses": [{ - "target": "192.168.1.100", - "success": "true", - "message": "Success." - }], - "dds_deposit": { - "success": "true" - } -} -``` + RSU and the SDX deposit: - + ### 7.6 Privacy Protection Module (PPM) @@ -909,24 +848,24 @@ details. For instructions about configuration and integration of the PPM with ODE, please refer to the ODE README file at the root of the GitHub page . - + ### 7.7 - Data validation TBD - + ### 7.8 - String S3 Depositor The ODE has the capability to deposit any string messages to any S3 -buckets using the application in the jpo-s3-depositor repository. To +buckets using the application in the jpo-s3-deposit repository. To obtain and build this service, follow the instructions in the ODE README.md document. Once downloaded and compiled, all the user must do is set the relevant environment variables, the rest is managed automatically by docker-compose. -Four example S3 depositor configurations are provided in the +Four example S3 deposit configurations are provided in the docker-compose.yml file in the root of the jpo-ode directory, a BSM and TIM depositor for both CVPEP and RDE: cvpep\_bsm\_s3dep, rde\_bsm\_s3dep, cvpep\_tim\_s3dep, and rde\_tim\_s3dep. These example @@ -934,27 +873,25 @@ templates are provided for convenience and guidance but may be removed/commented out by adding a \# symbol to the front of each line, or copied to create new a new S3 depositor. - + ### 7.9 - Security Services Module ODE integrates with the [jpo-security-svcs](https://github.com/usdot-jpo-ode/jpo-security-svcs) -(JSS) module for performing message signing, verification, encryption -and decryption. ODE sends TIM messages to JSS module to be signed before -broadcasting the message to RSUs and SDW. No new configuration +(SEC) module for performing message signing, verification, encryption +and decryption. 
ODE sends TIM messages to SEC module to be signed before +broadcasting the message to RSUs and SDX. No new configuration properties need to be set if the module and ODE run in Docker containers -on the same server. However, if they are running o different host +on the same server. However, if they are running on different host machines the property _ode.securitySvcsSignatureUri_ must be set to -point to the JSS domain name or IP:Port number. The JSS module must, +point to the SEC domain name or IP:Port number. The SEC module must, however, be configured with the DNS name or IP:Port of the Green Hills HSM security service URI. This property can be defined using the environment variable _SEC\_CRYPTO\_SERVICE\_BASE\_URI_. It must be set -to [http://ip:port](http://ip:port) of the Green Hills appliance. If you -do not want to sign the data set -_SEC\_CRYPTO\_SERVICE\_BASE\_URI=UNSECURED_ +to [http://ip:port](http://ip:port) of the Green Hills appliance. By default, the ODE will not sign TIMs that are delivered to RSUs. This can be changed by setting the value of the DATA_SIGNING_ENABLED_RSU environment variable found in the provided sample.env file. Additionally, signing of SDX-delivery TIMs can be configured by setting the value of the environment variable DATA_SIGNING_ENABLED_SDW found in sample.env. - + # 8 - Appendix A: ODE Interface Specification @@ -976,7 +913,7 @@ application) to send and receive data to and from the ODE All of the above interfaces can be secured using SSL encryption. - + ### 8.1 - File Copy Data Deposit @@ -1026,7 +963,7 @@ field of the output messages if and only if the payload is not signed with a valid signature. If the payload contains a valid 1609.2 signature, the generationTime from 1609.2 header will be used. - + #### 8.1.1 - Messages and Alerts @@ -1075,7 +1012,7 @@ Table 2 - File Copy Data Deposit Messages and Alerts | “IMPORTER - Failed to open or process file: {}” SecurityException | Application log file | When a data file is copied into one of the ODE upload folders, ODE will try to open the file and process its content. This error message is logged when ODE fails to read the file due to lack of Java security privileges. | If a security manager exists and its checkRead method denies read access to the file, a message will be logged to the application log file.” | | “IMPORTER - Failed to open or process file: {}” Error decoding data. | Application log file | When a data file is copied into one of the ODE upload folders, ODE will try to open the file and process its content. This error message is logged when ODE fails to decode the data from ASN.1 format. | If the message is not encoded to the expected ASN.1 encoding, ODE will raise this error to indicate failure to decode the data. | - + ### 8.2 - ODE REST API @@ -1097,7 +1034,7 @@ the repository at docs/ODESwagger.yml. Figure 3 - ODE REST API Editor Tool - + #### 8.2.1 - Upload BSM File @@ -1105,7 +1042,7 @@ ODE provides a REST API interface to upload a file to the ODE. Refer to [ODE REST API](https://usdot-jpo-ode.github.io/) online documentation () for details. - + #### 8.2.2 - Traveler Information Message (TIM) Interface @@ -1113,7 +1050,7 @@ Refer to the [ODESwagger.yaml](https://github.com/usdot-jpo-ode/jpo-ode/blob/develop/docs/ODESwagger.yaml) for details of the TIM interface. - + #### 8.2.3 - Probe Data Management Messages (PDM) Interface @@ -1121,7 +1058,7 @@ Refer to the [ODESwagger.yaml](https://github.com/usdot-jpo-ode/jpo-ode/blob/develop/docs/ODESwagger.yaml) for details of the PDM interface. 
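As a concrete illustration of the REST API described in section 8.2, a TIM broadcast request (section 8.2.2) can be submitted with any HTTP client. The sketch below is abbreviated and hedged: the host, port, and the contents of the request body are assumptions for a default local deployment, and the "snmp", "rsus", "sdw", and "tim" objects must be populated according to the authoritative schema in docs/ODESwagger.yaml.

```bash
# Minimal sketch of a TIM deposit to a local ODE instance.
# The body is deliberately abbreviated; populate each object per ODESwagger.yaml.
curl -X POST "http://localhost:8080/tim" \
  -H "Content-Type: application/json" \
  -d '{
        "request": {
          "rsus": [ { "rsuTarget": "192.168.1.100" } ],
          "snmp": { },
          "sdw":  { }
        },
        "tim": { }
      }'
```

The response indicates per-RSU and SDX deposit success or failure, as described in section 7.5.3.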
- + ### 8.3 - ODE Streaming API @@ -1136,7 +1073,7 @@ two distinct but dependent interfaces. ( ). See section 7.3.2 for details. - + #### 8.3.1 - Direct Kafka Interface @@ -1162,7 +1099,7 @@ For a complete list and description of ODE publish/subscribe topics, refer to [ODE Output Schema Reference Document](#references). (Booz Allen Hamilton 2018) - + #### 8.3.2 - ODE Output Schema Reference @@ -1170,7 +1107,7 @@ Full details of ODE streaming interface schemas are provided in the [ODE Output Schema Reference Document](#references). (Booz Allen Hamilton 2018) - + # 9 - References diff --git a/docs/WYDOT.md b/docs/WYDOT.md index 981ab021c..3bdd273be 100644 --- a/docs/WYDOT.md +++ b/docs/WYDOT.md @@ -11,7 +11,7 @@ The current project goals for the ODE have been developed specifically for the u - **Collect CV Data:** Connected vehicle data from field may be collected from vehicle OBUs directly or through RSUs. Data collected include Basic Safety Messages Part I and Part 2, Event Logs and other probe data (weather sensors, etc.). These messages are ingested into the operational data environment (ODE) where the data is then further channeled to other subsystems. - **Support Data Brokerage:** The WYDOT Data Broker is a sub-system that is responsible for interfacing with various WYDOT Transportation Management Center (TMC) systems gathering information on current traffic conditions, incidents, construction, operator actions and road conditions. The data broker then distributes information from PikAlert, the ODE and the WYDOT interfaces based on business rules. The data broker develops a traveler information message (TIM) for segments on I-80, and provide event or condition information back to the WYDOT interfaces -- **Distribute traveler information messages (TIM):** The data broker distributes the TIM message to the operational data environment (ODE) which will then communicate the message back to the OBUs, RSUs and the situational data warehouse (SDW) +- **Distribute traveler information messages (TIM):** The data broker distributes the TIM message to the operational data environment (ODE) which will then communicate the message back to the OBUs, RSUs and the situational data exchange (SDX) - **Store data:** Data generated by the system (both from the field and the back-office sub-systems) are stored in the WYDOT data warehouse. diff --git a/docs/compatibility.md b/docs/compatibility.md new file mode 100644 index 000000000..451264e6e --- /dev/null +++ b/docs/compatibility.md @@ -0,0 +1,13 @@ +# Submodule Compatibility Guide +This table serves as a guide, suggesting which versions of individual submodules are best suited to accompany each version of the main module. It helps users ensure compatibility and smooth integration by recommending specific submodule versions for their chosen main module version. 
+ +| [ODE (this project)](https://github.com/usdot-jpo-ode/jpo-ode/releases) | [ACM](https://github.com/usdot-jpo-ode/asn1_codec/releases) | [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp/releases) | [SEC](https://github.com/usdot-jpo-ode/jpo-security-svcs/releases) | [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor/releases) | [S3D](https://github.com/usdot-jpo-ode/jpo-s3-deposit/releases) | [GJConverter](https://github.com/usdot-jpo-ode/jpo-geojsonconverter/releases) | [CMonitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor/releases) | [CVisualizer](https://github.com/usdot-jpo-ode/jpo-conflictvisualizer/releases) | [CVManager](https://github.com/usdot-jpo-ode/jpo-cvmanager/releases) | +| ----------------- | --- | --- | --- | ---- | --- | ----------- | -------- | ----------- | ----------- | +| 2.1.0 | 2.1.0 | 1.3.0 | 1.4.0 | 1.7.0 | 1.5.0 | 1.3.0 | 1.3.0 | 1.3.0 | 1.3.0 | +| 2.0.x | 2.0.0 | 1.3.0 | 1.4.0 | 1.6.0 | 1.4.0 | 1.2.0 | 1.2.0 | 1.2.0 | 1.2.0 | +| 1.5.1 | 1.5.0 | 1.2.0 | 1.3.0 | 1.5.0 | 1.3.0 | 1.1.0 | 1.1.0 | 1.1.0 | 1.1.0 | +| 1.4.1 | 1.4.1 | 1.1.1 | 1.2.1 | 1.4.1 | 1.2.1 | 1.0.0 | 1.0.1 | 1.0.1 | 1.0.1 | +| 1.4.0 | 1.4.0 | 1.1.0 | 1.2.0 | 1.4.0 | 1.2.0 | N/A | N/A | N/A | N/A | +| 1.3.0 | 1.3.0 | 1.0.0 | 1.0.1 | 1.3.0 | 1.1.0 | N/A | N/A | N/A | N/A | + +For example, if you're using ODE version 2.0.1, it's recommended to use ACM 2.0.0, PPM 1.3.0, SEC 1.4.0, SDWD 1.6.0, S3D 1.4.0, GJConverter 1.2.0, CMonitor 1.2.0, CVisualizer 1.2.0, and CVManager 1.2.0. While other combinations may work, these versions are suggested for the best compatibility. \ No newline at end of file diff --git a/docs/contributing_guide.md b/docs/contributing_guide.md index a2cf60505..d5dfcd55f 100644 --- a/docs/contributing_guide.md +++ b/docs/contributing_guide.md @@ -17,13 +17,11 @@ By contributing to the US Department of Transportation Joint Program office (JPO [JPO-ODE GitHub page](https://github.com/usdot-jpo-ode/jpo-ode) -[Confluence Wiki](https://usdotjpoode.atlassian.net/wiki/) - [ODE Architecture](https://github.com/usdot-jpo-ode/jpo-ode/blob/develop/docs/JPO%20ODE%20Architecture.docx) [ODE User Guide](https://github.com/usdot-jpo-ode/jpo-ode/blob/develop/docs/JPO_ODE_UserGuide.docx) -[ODE REST API Guide](usdot-jpo-ode.github.io) +[ODE REST API Guide](https://usdot-jpo-ode.github.io) [ODE Smoke Tests](https://github.com/usdot-jpo-ode/jpo-ode/wiki/JPO-ODE-QA-Documents) @@ -50,9 +48,20 @@ All pull requests will be reviewed by the JPO-ODE team. The team member will eit 5. Update the documentation. - User QA procedures are documented within the Github Wiki - Architecture and user guide documentation should be included in the word document under the `docs/` folder - - Please contact the ODE with qny questions + - Please contact the ODE team with any questions 6. Format your code as outlined in the style guide +## Release Processes +### Quarterly Release +At the end of each quarter, a 'release/version' branch will be created from the develop branch for all release changes. This branch should be used to stabilize the code and prepare for a release. Any non-release changes continue to be pushed to the develop branch as part of the ongoing development process. Validation testing will be conducted on the release branch to ensure that the code is stable and ready for release. If hotfixes are necessary during validation, a hotfix branch is created from the release branch. Once the release branch is stable, it will be merged into the master branch and tagged with the release version. 
To conclude the release process, changes from the master branch are merged back into the develop branch. + +For more information on the quarterly release process, please refer to the [Release Process](./release_process.md#quarterly-release-process) documentation. + +### Standalone Hotfix Release +If a critical bug or issue is found in the master branch, a hotfix branch should be created from the master branch. The hotfix branch should be used to fix the critical bug or issue. Once the hotfix branch is stable, it can be merged into the master branch and tagged with the release version. The master branch will then be merged back into the develop branch to ensure that the develop branch is up to date with the latest changes. + +For more information on the standalone hotfix release process, please refer to the [Release Process](./release_process.md#standalone-hotfix-release-process) documentation. + ## Contributor Covenant Code of Conduct ### Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. diff --git a/docs/data-flow-diagrams/ODE Data Flow Overview.drawio b/docs/data-flow-diagrams/ODE Data Flow Overview.drawio index 37725264d..1209b5cac 100644 --- a/docs/data-flow-diagrams/ODE Data Flow Overview.drawio +++ b/docs/data-flow-diagrams/ODE Data Flow Overview.drawio @@ -1,6 +1,6 @@ - + - + @@ -45,13 +45,13 @@ - + - + @@ -110,7 +110,7 @@ - + @@ -140,7 +140,7 @@ - + @@ -170,7 +170,7 @@ - + @@ -203,13 +203,13 @@ - + - + @@ -224,43 +224,40 @@ - + - - - - + - - - - + - + - + - + - + + + + - + - + @@ -306,7 +303,7 @@ - + @@ -323,6 +320,12 @@ + + + + + + diff --git a/docs/data-flow-diagrams/ODE Data Flow Overview.drawio.png b/docs/data-flow-diagrams/ODE Data Flow Overview.drawio.png index 915576aaf..4e4335470 100644 Binary files a/docs/data-flow-diagrams/ODE Data Flow Overview.drawio.png and b/docs/data-flow-diagrams/ODE Data Flow Overview.drawio.png differ diff --git a/docs/data-flow-diagrams/README.md b/docs/data-flow-diagrams/README.md index 6be5d3549..ce5f4aadc 100644 --- a/docs/data-flow-diagrams/README.md +++ b/docs/data-flow-diagrams/README.md @@ -13,28 +13,27 @@ The purpose of these diagrams is to show: ## Data Flow Explanations ### Overview Data Flow 1 (Tim Depositor Controller) -1. Messages come in through the receiver classes and are pushed to the Raw Encoded Messages group of topics. -1. The AsnCodecMessageServiceController pulls these raw encoded messages and passes them over to the Asn1Decode classes. -1. These classes push the message to the Asn1DecoderInput topic. -1. The [ACM](https://github.com/usdot-jpo-ode/asn1_codec) pulls from that topic and pushes decoded messages to the Asn1DecoderOutput topic. -1. The Asn1DecodeDataRouter class pulls from the Asn1DecodeOutput topic and deposits messages into the Pojo Messages group of topics and the Json Messages group of topics. -1. The [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp) pulls from the Json Messages group of topics and pushes filtered messages to the Filtered Json Messages group of topics. -1. 
The [GeoJSON Converter](https://github.com/usdot-jpo-ode/jpo-geojsonconverter) pulls from the Json Messages group of topics, converts the messages and pushes them to the Processed Spat/Map group of topics. -1. The [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) pulls from the Processed Map/Spat group of topics and pushes to the [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) Output Topics group. - -### Overview Data Flow 2 (Receiver Classes) 1. Messages come in through the TimDepositorController class and are pushed to the Broadcast Messages and Json Messages groups of topics, as well as the AsnEncoderInput topic. 1. The [ACM](https://github.com/usdot-jpo-ode/asn1_codec) pulls from the Asn1EncoderInput and pushes encoded messages to the Asn1EncoderOutput topic. 1. The AsnEncodedDataRouter class pulls from the Asn1EncoderOutput topic and pushes it to the AsnCommandManager class. 1. If the message is not signed, it is sent to the SignatureController class to be signed. 1. If the message is signed and meant for the RSU, it will be passed to the RsuDepositor class which sends the message to the RSUs. -1. If the message is signed and not meant for the RSU, it is meant for the SDX. -1. If the message has not been double-encoded, yet, it will be sent back to the Asn1EncoderInput topic for encoding. -1. If the message has been double-encoded, it will be passed to the SDWDepositorInput, pulled into the [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor) and sent to the SDX. +1. If the message is signed, is meant for the SDX and the message has not been double-encoded, yet, it will be sent back to the Asn1EncoderInput topic for encoding. +1. If the message is signed, is meant for the SDX and the message has been double-encoded, it will be passed to the SDWDepositorInput, pulled into the [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor) and sent to the SDX. 1. The [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp) pulls from the Json Messages group of topics and sends filtered messages to the Filtered Json Messages group of topics. 1. The [GeoJSON Converter](https://github.com/usdot-jpo-ode/jpo-geojsonconverter) pulls from the Json Messages group of topics, converts the messages and pushes them to the Processed Spat/Map group of topics. 1. The [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) pulls from the Processed Map/Spat group of topics and pushes to the [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) Output Topics group. +### Overview Data Flow 2 (Receiver Classes) +1. Messages come in through the receiver classes and are pushed to the Raw Encoded Messages group of topics. +1. The AsnCodecMessageServiceController pulls these raw encoded messages and passes them over to the Asn1Decode classes. +1. These classes push the message to the Asn1DecoderInput topic. +1. The [ACM](https://github.com/usdot-jpo-ode/asn1_codec) pulls from that topic and pushes decoded messages to the Asn1DecoderOutput topic. +1. The Asn1DecodeDataRouter class pulls from the Asn1DecodeOutput topic and deposits messages into the Pojo Messages group of topics and the Json Messages group of topics. +1. The [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp) pulls from the Json Messages group of topics and pushes filtered messages to the Filtered Json Messages group of topics. +1. 
The [GeoJSON Converter](https://github.com/usdot-jpo-ode/jpo-geojsonconverter) pulls from the Json Messages group of topics, converts the messages and pushes them to the Processed Spat/Map group of topics. +1. The [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) pulls from the Processed Map/Spat group of topics and pushes to the [Conflict Monitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor) Output Topics group. + ### Overview Data Flow 3 (Offloaded Files) 1. Messages are offloaded onto a directory referenced by the FileUploadController class. 1. The FileUploadController class indirectly invokes the LogFileToAsn1CodecPublisher class, which handles the offloaded messages. @@ -75,9 +74,8 @@ The purpose of these diagrams is to show: 1. The Asn1EncodedDataRouter class pulls from the Asn1EncoderOutput topic and passes the TIM to the Asn1CommandManager class. 1. If the message is not signed, it is sent to the SignatureController class to be signed. 1. If the message is signed and meant for the RSU, it will be passed to the RsuDepositor class which sends the message to the RSUs. -1. If the message is signed and not meant for the RSU, it is meant for the SDX. -1. If the message has not been double-encoded, yet, it will be sent back to the Asn1EncoderInput topic for encoding. -1. If the message has been double-encoded, it will be passed to the SDWDepositorInput, pulled into the [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor) and sent to the SDX. +1. If the message is signed, is meant for the SDX and the message has not been double-encoded, yet, it will be sent back to the Asn1EncoderInput topic for encoding. +1. If the message is signed, is meant for the SDX and the message has been double-encoded, it will be passed to the SDWDepositorInput, pulled into the [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor) and sent to the SDX. 1. The [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp) pulls from the OdeTimJson topic, filters the TIM and pushes it to the FilteredOdeTimJson topic. 
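To observe this flow in a running deployment, one can subscribe directly to the Kafka topics named above. A minimal sketch using the stock console consumer is shown below; the broker address is assumed to be a default local listener and the topic name reflects the default topic configuration, so both may differ in your environment.

```bash
# Watch decoded TIM JSON as it moves through the flow described above.
# Broker address and topic name are placeholders for a default local deployment.
kafka-console-consumer.sh --bootstrap-server localhost:9092 \
  --topic topic.OdeTimJson --from-beginning
```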
### TIM Data Flow 2 (Receiver Classes) diff --git a/docs/data-flow-diagrams/tim/TIM Data Flow.drawio b/docs/data-flow-diagrams/tim/TIM Data Flow.drawio index 15e7443ac..cebd15488 100644 --- a/docs/data-flow-diagrams/tim/TIM Data Flow.drawio +++ b/docs/data-flow-diagrams/tim/TIM Data Flow.drawio @@ -1,6 +1,6 @@ - + - + @@ -52,7 +52,7 @@ - + @@ -116,8 +116,10 @@ - - + + + + @@ -199,115 +201,135 @@ - + + + + + + + + + + + + + + + + + + - - + + - + - - + + - + - - + + - + - - + + - - + + - - + + - + - - + + - + - + + + + + + + + + + - - + + - + - + + + + - - + + - - + + - - + + - - + + - - + + - + - + - - + + - - + + - - + + - + - - + + - - - - - - - - + + - + - - - - - + + - - + + diff --git a/docs/data-flow-diagrams/tim/TIM Data Flow.drawio.png b/docs/data-flow-diagrams/tim/TIM Data Flow.drawio.png index 529846eb3..a1ad71aac 100644 Binary files a/docs/data-flow-diagrams/tim/TIM Data Flow.drawio.png and b/docs/data-flow-diagrams/tim/TIM Data Flow.drawio.png differ diff --git a/docs/dockerhub.md b/docs/dockerhub.md index d8cedd07f..3a5cbb581 100644 --- a/docs/dockerhub.md +++ b/docs/dockerhub.md @@ -18,7 +18,6 @@ The image expects the following environment variables to be set: ## Direct Dependencies The ODE will fail to start up if the following containers/services are not already present: - Kafka or Confluent & related requirements -- Zookeeper (relied on by Kafka when run locally) ## Indirect Dependencies Some functionality will be unreachable without the participation of the following programs (except by directly pushing to kafka topics): @@ -28,6 +27,9 @@ Some functionality will be unreachable without the participation of the followin - jpo-sdw-depositor - jpo-s3-deposit +### Submodule Compatibility +To find the compatible submodule versions, please refer to the [Submodule Compatibility Guide](https://github.com/CDOT-CV/jpo-ode/blob/dev/docs/compatibility.md). Based on your ODE version, you can find the compatible submodule versions by looking at the corresponding row of the provided table. + ## Configuration For further configuration options, see the [GitHub repository](https://github.com/usdot-jpo-ode/jpo-ode). 
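When pinning a deployment to a specific row of the Submodule Compatibility Guide, the corresponding images can be pulled explicitly rather than relying on `latest`. The image names and tags below are illustrative only; confirm them against the compatibility guide and the usdotjpoode DockerHub organization before use.

```bash
# Pull a mutually compatible set of images (tags follow the 2.1.0 row of the
# compatibility guide as an example; adjust to the row matching your ODE version).
docker pull usdotjpoode/jpo-ode:2.1.0
docker pull usdotjpoode/asn1_codec:2.1.0
docker pull usdotjpoode/jpo-cvdp:1.3.0
docker pull usdotjpoode/jpo-security-svcs:1.4.0
docker pull usdotjpoode/jpo-sdw-depositor:1.7.0
docker pull usdotjpoode/jpo-s3-deposit:1.5.0
```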
@@ -35,44 +37,40 @@ For further configuration options, see the [GitHub repository](https://github.co ``` version: '3' services: - zookeeper: - image: wurstmeister/zookeeper - ports: - - "2181:2181" - logging: - options: - max-size: "10m" - max-file: "5" - kafka: - image: wurstmeister/kafka + image: bitnami/kafka:latest + hostname: kafka ports: - "9092:9092" - environment: - DOCKER_HOST_IP: ${DOCKER_HOST_IP} - ZK: ${DOCKER_HOST_IP}:2181 - KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP} - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1" - KAFKA_DELETE_TOPIC_ENABLED: "true" - KAFKA_CLEANUP_POLICY: "delete" # delete old logs - KAFKA_LOG_RETENTION_HOURS: 2 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000 - KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours - KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours. - # This configuration controls the period of time after - # which Kafka will force the log to roll even if the segment - # file isn't full to ensure that retention can delete or compact old data. 
- depends_on: - - zookeeper volumes: - - ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock + - kafka:/bitnami + environment: + KAFKA_ENABLE_KRAFT: "yes" + KAFKA_CFG_PROCESS_ROLES: "broker,controller" + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" + KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092" + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT" + KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092" + KAFKA_BROKER_ID: "1" + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093" + ALLOW_PLAINTEXT_LISTENER: "yes" + KAFKA_CFG_NODE_ID: "1" + KAFKA_CFG_DELETE_TOPIC_ENABLE: "true" + KAFKA_CFG_LOG_RETENTION_HOURS: 2 logging: options: - max-size: "10m" + max-size: "10m" max-file: "5" + kafka_init: + image: bitnami/kafka:latest + depends_on: + kafka: + condition: service_started + volumes: + - ./scripts/kafka/kafka_init.sh:/kafka_init.sh + entrypoint: ["/bin/sh", "kafka_init.sh"] + ode: image: usdotjpoode/jpo-ode:release_q3 ports: @@ -85,6 +83,7 @@ services: - "44910:44910/udp" - "44920:44920/udp" - "44930:44930/udp" + - "44940:44940/udp" - "5555:5555/udp" - "6666:6666/udp" environment: @@ -93,6 +92,9 @@ services: ODE_SECURITY_SVCS_SIGNATURE_URI: ${ODE_SECURITY_SVCS_SIGNATURE_URI} ODE_RSU_USERNAME: ${ODE_RSU_USERNAME} ODE_RSU_PASSWORD: ${ODE_RSU_PASSWORD} + DATA_SIGNING_ENABLED_RSU: ${DATA_SIGNING_ENABLED_RSU} + DATA_SIGNING_ENABLED_SDW: ${DATA_SIGNING_ENABLED_SDW} + DEFAULT_SNMP_PROTOCOL: ${DEFAULT_SNMP_PROTOCOL} depends_on: - kafka volumes: diff --git a/docs/release_process.md b/docs/release_process.md new file mode 100644 index 000000000..f58d10b45 --- /dev/null +++ b/docs/release_process.md @@ -0,0 +1,321 @@ +# Quarterly Release Process +The quarterly release process is used to prepare the code for a new release at the end of each quarter. This process includes creating a new release branch, stabilizing the code, updating project references, creating the release, and testing the release. + +There are four over-arching steps to the quarterly release: +1. Code Ready & Release Notes +2. Preliminary Testing +3. Project Reference Updates & Release Creation +4. DockerHub Image Testing + +## 1. Code Ready & Release Notes +### Description +The first step in the quarterly release process is to ensure that the code is ready for release and that the release notes have been created. This includes ensuring that all features and bug fixes that are intended for the release are complete and have been merged into the `develop` branch. A new branch `release_(year)-(quarter)` should be created from the `develop` branch to stabilize the code and prepare for the release. Release notes should be drafted and added to the `Release_notes.md` file in the `docs` directory of the repository. 
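A sketch of the branch creation itself is shown below, assuming a clean local clone with an up-to-date `develop` branch; the quarter used in the branch name is an example only.

```bash
# Cut the quarterly release branch from develop (year/quarter shown is an example).
git checkout develop
git pull origin develop
git checkout -b release_2024-q3
git push -u origin release_2024-q3
```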
+ +### Acceptance Criteria + - [ ] jpo-ode + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] asn1_codec + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-cvdp + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-security-svcs + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-sdw-depositor + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-s3-deposit + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-geojsonconverter + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-conflictmonitor + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-conflictvisualizer + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + - [ ] jpo-cvmanager + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for release are merged into `develop` + - [ ] A new branch `release_(year)-(quarter)` is created from `develop` + +## 2. Preliminary Testing +### Description +After the release branches are created, preliminary testing should be conducted to ensure that the code is stable and ready for release. This includes running unit tests, integration tests, and any other relevant tests to ensure that the code is functioning as expected. 
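For the Java-based projects, the "code compiles", "unit tests pass", and "program starts up correctly" items in the checklist that follows can typically be exercised from the repository root. The commands below are a sketch that assumes a standard Maven and Docker toolchain on the release branch; individual projects may require additional setup described in their own READMEs.

```bash
# Sketch of per-project verification on the release branch (branch name is an example).
git checkout release_2024-q3
mvn clean verify              # compile and run unit tests
docker-compose up --build -d  # confirm the services start up correctly
docker-compose logs -f ode    # watch startup output for errors
```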
+ +### Acceptance Criteria + - [ ] jpo-ode + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] http endpoint is reachable + - [ ] tims can be successfully pushed to http endpoint + - [ ] capable of ingesting messages via udp (see scripts in `scripts/tests` directory) + - [ ] tims + - [ ] bsms + - [ ] ssms + - [ ] srms + - [ ] spats + - [ ] maps + - [ ] psms + - [ ] asn1_codec + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] program can be configured for decoding successfully + - [ ] program can be configured for encoding successfully + - [ ] messages get decoded as expected + - [ ] messages get encoded as expected + - [ ] jpo-cvdp + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] messages get consumed as expected + - [ ] BSMs inside geofence are retained + - [ ] BSMs with a partII section are retained + - [ ] BSMs outside geofence are suppressed + - [ ] BSMs above speed range are suppressed + - [ ] BSMs below speed range are suppressed + - [ ] jpo-security-svcs + - [ ] code compiles + - [ ] program starts up correctly + - [ ] program can be successfully configured + - [ ] messages can be successfully signed + - [ ] jpo-sdw-depositor + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] messages are consumed successfully + - [ ] messages are submitted to the SDX successfully + - [ ] jpo-s3-deposit + - [ ] code compiles + - [ ] program starts up correctly + - [ ] deposits can be made to one of the destinations successfully + - [ ] jpo-geojsonconverter + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] program can be configured successfully + - [ ] MAP & SPaT messages are consumed successfully + - [ ] valid ProcessedMaps & ProcessedSpats are outputted + - [ ] jpo-conflictmonitor + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] program processes SpAT/MAP/BSM messages and generates events as expected (see https://github.com/usdot-jpo-ode/jpo-conflictmonitor/wiki/Integration-Tests) + - [ ] test BSM events + - [ ] test connection of travel event + - [ ] test intersection reference alignment events + - [ ] test lane direction of travel event + - [ ] test MAP broadcast rate event + - [ ] test MAP minimum data event + - [ ] test signal group alignment events + - [ ] test signal state conflict events + - [ ] test SPaT broadcast rate event + - [ ] test SPaT minimum data event + - [ ] test SPaT time change details event + - [ ] test stop line passage events + - [ ] test stop line stop events + - [ ] jpo-cvmanager + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] webapp can be signed into successfully + - [ ] jpo-conflictvisualizer + - [ ] code compiles + - [ ] unit tests pass + - [ ] program starts up correctly + - [ ] GUI functions & can display messages + +## 3. Project Reference Updates & Release Creation +### Description +After preliminary testing is complete, project reference updates should be made to ensure that all projects are referencing the correct versions of other projects. Once project references are updated, the release should be created by merging the `release_(year)-(quarter)` branch into the `master` branch and tagging the release with the appropriate version number. Images should be built and pushed to DockerHub for testing. + +### Steps +#### Merging release branches & updating project references +1. 
Merge ‘release_(year)-(quarter)’ branch into ‘master/main’ branch for the following projects: + - asn1_codec + - jpo-cvdp + - jpo-security-svcs + - jpo-sdw-depositor + - jpo-s3-deposit + + 1a. Tag the master/main branch of each application with a git tag that includes the version number of each app. + +2. Update git submodule references for the ‘jpo-ode’ project to point to tagged commits in projects with updated `master/main` branches. Also update the version numbers within the pom.xmls of each of the ode subprojects (jpo-ode-common, jpo-ode-plugins, jpo-ode-svcs) to be self-consistent. + + 2a. (These changes will need to pass CI/CD checks & make it into the `release_(year)-(quarter)` branch before continuing.) + +3. Merge `release_(year)-(quarter)` branch into `master/main` branch for the jpo-ode project, and add a git tag with the ode version number. + +4. Update git submodule references for the ‘jpo-geojsonconverter’ project to point to the tagged commit in jpo-ode master/main branch. + +5. Update pom.xml references for the 'jpo-geojsonconverter' project to version used in the tagged commit in jpo-ode master/main branch. + + 5a. (These changes (steps 4 & 5) will need to pass CI/CD checks & make it into the `release_(year)-(quarter)` branch before continuing.) + +6. Merge `release_(year)-(quarter)` branch into `master/main` branch for the jpo-geojsonconverter project, and add a git tag with the geojsonconverter version number. + +7. Update git submodule references for the `jpo-conflictmonitor` project to point to tagged commit in jpo-geojsonconverter master/main branch. + +8. Update pom.xml references for the 'jpo-conflictmonitor' project to version used in tagged commit in jpo-geojsonconverter master/main branch. This change will be necessary in the jpo-conflictmonitor/pom.xml, jpo-deduplicator/pom.xml and message-sender/pom.xml files. + +9. Update pom.xml references for the 'jpo-conflictmonitor' project to version used in tagged commit in jpo-ode master/main branch. This change will be necessary in the jpo-conflictmonitor/pom.xml, jpo-deduplicator/pom.xml and message-sender/pom.xml files. + + 9a. (These changes (steps 7-9) will need to pass CI/CD checks & make it into the `release_(year)-(quarter)` branch before continuing.) + +10. Merge `release_(year)-(quarter)` branch into `master/main` branch for the jpo-conflictmonitor project, and add a git tag with the conflictmonitor version number. + +11. Update git submodule references for the `jpo-conflictvisualizer` project to point to tagged commit in jpo-conflictmonitor master/main branch. + +12. Update pom.xml references for the 'jpo-conflictvisualizer' project to version used in tagged commit in jpo-conflictmonitor master/main branch. + +13. Update pom.xml references for the 'jpo-conflictvisualizer' project to version used in tagged commit in jpo-geojsonconverter master/main branch. + +14. Update pom.xml references for the 'jpo-conflictvisualizer' project to version used in tagged commit in jpo-ode master/main branch. + + 14a. (These changes (steps 11-14) will need to pass CI/CD checks & make it into the `release_(year)-(quarter)` branch before continuing.) + +15. Merge `release_(year)-(quarter)` branch into `master/main` branch for the jpo-conflictvisualizer project, and add a git tag with the visualizer version number. +16. Merge `release_(year)-(quarter)` branch into `master/main` branch for the jpo-cvmanager project, and add a git tag with the cvmanager version number. + +#### Create Releases & Docker Images +1. 
Within the github CI/CD release process, use the release tags for each application to produce releases and docker images with the same tag name, containing the version number of each app. + + 1a. The Conflict Visualizer will need two separate images to be created: one for the API and one for Keycloak. + +2. Upload docker images to [DockerHub](https://hub.docker.com/u/usdotjpoode). +3. Tag docker images with the version number of each app. (e.g. 1.0.0) +4. Tag docker images with year and quarter of release. (e.g. 2024-Q2) +5. Tag docker images with 'latest' tag for the most recent release. + +#### Housekeeping +1. Merge master branches into develop branches for each project & verify that CI/CD passes. + +## 4. DockerHub Image Testing +### Description +After the docker images have been built and pushed to DockerHub, they should be tested to ensure that they are functioning as expected. This includes running the docker images locally and verifying that the applications are working correctly. + +### Acceptance Criteria + - [ ] jpo-ode + - [ ] image starts up correctly + - [ ] http endpoint is reachable + - [ ] tims can be successfully pushed to http endpoint + - [ ] capable of ingesting messages via udp (see scripts in `scripts/tests` directory) + - [ ] tims + - [ ] bsms + - [ ] ssms + - [ ] srms + - [ ] spats + - [ ] maps + - [ ] psms + - [ ] asn1_codec + - [ ] image starts up correctly + - [ ] program can be configured for decoding successfully + - [ ] program can be configured for encoding successfully + - [ ] messages get decoded as expected + - [ ] messages get encoded as expected + - [ ] jpo-cvdp + - [ ] image starts up correctly + - [ ] messages get consumed as expected + - [ ] BSMs inside geofence are retained + - [ ] BSMs with a partII section are retained + - [ ] BSMs outside geofence are suppressed + - [ ] BSMs above speed range are suppressed + - [ ] BSMs below speed range are suppressed + - [ ] jpo-security-svcs + - [ ] image starts up correctly + - [ ] program can be successfully configured + - [ ] messages can be successfully signed + - [ ] jpo-sdw-depositor + - [ ] image starts up correctly + - [ ] messages are consumed successfully + - [ ] messages are submitted to the SDX successfully + - [ ] jpo-s3-deposit + - [ ] image starts up correctly + - [ ] deposits can be made to one of the destinations successfully + - [ ] jpo-geojsonconverter + - [ ] image starts up correctly + - [ ] program can be configured successfully + - [ ] MAP & SPaT messages are consumed successfully + - [ ] valid ProcessedMaps & ProcessedSpats are outputted + - [ ] jpo-conflictmonitor + - [ ] image starts up correctly + - [ ] program processes SpAT/MAP/BSM messages and generates events as expected (see https://github.com/usdot-jpo-ode/jpo-conflictmonitor/wiki/Integration-Tests) + - [ ] test BSM events + - [ ] test connection of travel event + - [ ] test intersection reference alignment events + - [ ] test lane direction of travel event + - [ ] test MAP broadcast rate event + - [ ] test MAP minimum data event + - [ ] test signal group alignment events + - [ ] test signal state conflict events + - [ ] test SPaT broadcast rate event + - [ ] test SPaT minimum data event + - [ ] test SPaT time change details event + - [ ] test stop line passage events + - [ ] test stop line stop events + - [ ] jpo-conflictvisualizer-api + - [ ] image starts up correctly + - [ ] GUI functions & can display messages + - [ ] jpo-conflictvisualizer-keycloak + - [ ] image starts up correctly + - [ ] authentication verified to work + 
+At this point the quarterly release process is complete. + +# Standalone Hotfix Release Process +The standalone hotfix release process is used to address critical issues that require immediate attention. This process is similar to the quarterly release process, but is expedited to address the critical issue as quickly as possible. + +It should be noted that not all projects will be necessarily affected by a hotfix. The dependent projects that are affected by the hotfix should be updated and released, while the other projects should remain unchanged. + +There are two over-arching steps to the standalone hotfix release: +1. Code Ready & Release Notes +2. Project Reference Updates & Release Creation + +## 1. Code Ready & Release Notes +### Description +The first step in the standalone hotfix release process is to create a new branch from the `master` branch to address the critical issue. The code changes should be merged into the hotfix branch and release notes should be drafted and added to the `Release_notes.md` file in the `docs` directory of the repository. + +### Acceptance Criteria + - [ ] A new branch `hotfix_(year)-(month)-(day)` is created from `master` for the project requiring the hotfix + - [ ] Patch version number is updated in the `pom.xml` file of the project requiring the hotfix + - [ ] Release notes drafted & added to `Release_notes.md` file in `docs` directory + - [ ] Code changes for hotfix are merged into `hotfix_(year)-(month)-(day)` + +## 2. Project Reference Updates & Release Creation +### Description +After the hotfix branch is created and the code changes are merged, project reference updates should be made to ensure that all projects are referencing the correct versions of other projects. Once project references are updated, the release should be created by merging the `hotfix_(year)-(month)-(day)` branch into the `master` branch and tagging the release with the appropriate version number. Images should be built and pushed to DockerHub for testing. + +### Steps +#### Merging hotfix branches & updating project references +1. Merge `hotfix_(year)-(month)-(day)` branch into `master/main` branch for the project requiring the hotfix. +2. Tag the master/main branch of the project with a git tag that includes the version number of the hotfix. +3. Update git submodule references & pom.xml references for dependent projects to point to tagged commits in projects with updated `master/main` branches. +4. Merge `hotfix_(year)-(month)-(day)` branch into `master/main` branch for the dependent projects, and add a git tag with the version number of the hotfix. + +#### Create Releases & Docker Images +1. Within the github CI/CD release process, use the release tags for each affected application to produce releases and docker images with the same tag name, containing the version number of each app. +2. Upload docker images to [DockerHub](https://hub.docker.com/u/usdotjpoode). +3. Tag docker images with the version number of each app. (e.g. 1.0.0) +4. Tag docker images with year, month, and day of hotfix. (e.g. 2024-04-01) +5. Tag docker images with 'latest' tag for the most recent release. + +#### Housekeeping +1. Merge master branches into develop branches for each affected project & verify that CI/CD passes. + +At this point the standalone hotfix release process is complete. 
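The image tagging described in both release flows amounts to applying several tags to the same build before pushing. A hedged sketch using jpo-ode with example version and date values is shown below; substitute the actual version number and release identifier for the application being released.

```bash
# Example of the multi-tag scheme described above (all values are illustrative).
docker tag usdotjpoode/jpo-ode:2.1.0 usdotjpoode/jpo-ode:2024-Q2
docker tag usdotjpoode/jpo-ode:2.1.0 usdotjpoode/jpo-ode:latest
docker push usdotjpoode/jpo-ode:2.1.0
docker push usdotjpoode/jpo-ode:2024-Q2
docker push usdotjpoode/jpo-ode:latest
```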
diff --git a/jpo-ode-common/pom.xml b/jpo-ode-common/pom.xml index 3af52d5c3..9f2eb545d 100644 --- a/jpo-ode-common/pom.xml +++ b/jpo-ode-common/pom.xml @@ -5,7 +5,7 @@ usdot.jpo.ode jpo-ode - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT jpo-ode-common diff --git a/jpo-ode-common/src/main/java/us/dot/its/jpo/ode/util/DateTimeUtils.java b/jpo-ode-common/src/main/java/us/dot/its/jpo/ode/util/DateTimeUtils.java index 40a1ab62f..7dea6140e 100644 --- a/jpo-ode-common/src/main/java/us/dot/its/jpo/ode/util/DateTimeUtils.java +++ b/jpo-ode-common/src/main/java/us/dot/its/jpo/ode/util/DateTimeUtils.java @@ -29,7 +29,7 @@ private DateTimeUtils() { } public static String now() { - return nowZDT().format(DateTimeFormatter.ISO_INSTANT); + return nowZDT().format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); } public static ZonedDateTime nowZDT() { @@ -37,7 +37,7 @@ public static ZonedDateTime nowZDT() { } public static String isoDateTime(ZonedDateTime zonedDateTime) { - return zonedDateTime.format(DateTimeFormatter.ISO_INSTANT); + return zonedDateTime.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); } public static ZonedDateTime diff --git a/jpo-ode-common/src/test/java/us/dot/its/jpo/ode/util/DateTimeUtilsTest.java b/jpo-ode-common/src/test/java/us/dot/its/jpo/ode/util/DateTimeUtilsTest.java index 079048d21..fa8e4bca4 100644 --- a/jpo-ode-common/src/test/java/us/dot/its/jpo/ode/util/DateTimeUtilsTest.java +++ b/jpo-ode-common/src/test/java/us/dot/its/jpo/ode/util/DateTimeUtilsTest.java @@ -23,7 +23,6 @@ import org.junit.jupiter.api.Test; import junit.framework.TestCase; -import us.dot.its.jpo.ode.util.DateTimeUtils; public class DateTimeUtilsTest extends TestCase { @@ -38,7 +37,7 @@ public void testIsoDateTime() throws ParseException { expectedDate.getHour(), expectedDate.getMinute(), expectedDate.getSecond(), - expectedDate.getNano()/1000000).format(DateTimeFormatter.ISO_INSTANT); + expectedDate.getNano()/1000000).format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); assertEquals(sExpectedDate.substring(0, 18), sdate.substring(0, 18)); ZonedDateTime date2 = DateTimeUtils.isoDateTime("2015-11-30T16:06:15.679Z"); diff --git a/jpo-ode-consumer-example/README.md b/jpo-ode-consumer-example/README.md index 311d14ad4..5ea5af8ad 100644 --- a/jpo-ode-consumer-example/README.md +++ b/jpo-ode-consumer-example/README.md @@ -49,7 +49,7 @@ The IP used is the location of the Kafka endpoints. ####Create, alter, list, and describe topics. ``` -kafka-topics --zookeeper 192.168.1.151:2181 --list +kafka-topics --bootstrap-server 192.168.1.151:9092 --list sink1 t1 t2 @@ -58,11 +58,11 @@ t2 ####Read data from a Kafka topic and write it to standard output. ``` -kafka-console-consumer --zookeeper 192.168.1.151:2181 --topic topic.J2735Bsm +kafka-console-consumer --bootstrap-server 192.168.1.151:9092 --topic topic.J2735Bsm ``` ####Read data from standard output and write it to a Kafka topic. 
``` -kafka-console-producer --broker-list 192.168.1.151:9092 --topic topic.J2735Bsm +kafka-console-producer --bootstrap-server 192.168.1.151:9092 --topic topic.J2735Bsm ``` diff --git a/jpo-ode-core/pom.xml b/jpo-ode-core/pom.xml index e9e7cde15..3db2c79cd 100644 --- a/jpo-ode-core/pom.xml +++ b/jpo-ode-core/pom.xml @@ -5,7 +5,7 @@ usdot.jpo.ode jpo-ode - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT jpo-ode-core @@ -23,12 +23,12 @@ usdot.jpo.ode jpo-ode-common - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT usdot.jpo.ode jpo-ode-plugins - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT org.apache.httpcomponents diff --git a/jpo-ode-core/src/test/java/us/dot/its/jpo/ode/snmp/SNMPTest.java b/jpo-ode-core/src/test/java/us/dot/its/jpo/ode/snmp/SNMPTest.java index 383cb9995..9a755cb07 100644 --- a/jpo-ode-core/src/test/java/us/dot/its/jpo/ode/snmp/SNMPTest.java +++ b/jpo-ode-core/src/test/java/us/dot/its/jpo/ode/snmp/SNMPTest.java @@ -76,4 +76,10 @@ public void testSnmpTimestampFromIso() throws ParseException { String snmpTS = SNMP.snmpTimestampFromIso("2017-05-04T21:55:00-05:00"); assertEquals("07E1050415370000", snmpTS); } + + @Test + public void testSnmpTimestampFromIsoNanosecondFormat() throws ParseException { + String snmpTS = SNMP.snmpTimestampFromIso("2024-03-01T20:29:33.033Z"); + assertEquals("07E80301141D211F", snmpTS); + } } diff --git a/jpo-ode-plugins/pom.xml b/jpo-ode-plugins/pom.xml index 7471e970e..8403a85f7 100644 --- a/jpo-ode-plugins/pom.xml +++ b/jpo-ode-plugins/pom.xml @@ -11,7 +11,7 @@ usdot.jpo.ode jpo-ode - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT @@ -27,7 +27,7 @@ usdot.jpo.ode jpo-ode-common - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT 1.3.6.1.4.1.1206.4.2.18.3.2.1.9.3 = 4 // rsuMsgRepeatPriority.3 = 6 // --> 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6 - // rsuMsgRepeatOptions.3 = "C0" - // --> 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = "C0" + // rsuMsgRepeatOptions.3 = "00" + // --> 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = "00" ////////////////////////////// VariableBinding rsuMsgRepeatPsid = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatPsid(index, snmp.getRsuid()); @@ -391,7 +397,14 @@ private static ScopedPDU createPDUWithNTCIP1218Protocol(SNMP snmp, String payloa VariableBinding rsuMsgRepeatEnable = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatEnable(index, snmp.getEnable()); VariableBinding rsuMsgRepeatStatus = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatStatus(index, snmp.getStatus()); VariableBinding rsuMsgRepeatPriority = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatPriority(index); - VariableBinding rsuMsgRepeatOptions = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatOptions(index); + VariableBinding rsuMsgRepeatOptions; + if (dataSigningEnabledRSU) { + // set options to 0x00 to tell RSU to broadcast message without signing or attaching a 1609.2 header + rsuMsgRepeatOptions = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatOptions(index, 0x00); + } else { + // set options to 0x80 to tell RSU to sign & attach a 1609.2 header before broadcasting + rsuMsgRepeatOptions = SnmpNTCIP1218Protocol.getVbRsuMsgRepeatOptions(index, 0x80); + } ScopedPDU pdu = new ScopedPDU(); pdu.add(rsuMsgRepeatPsid); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDeleteController.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDeleteController.java index 2111a113f..57397d1a5 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDeleteController.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDeleteController.java @@ -118,34 +118,43 @@ else if (snmpProtocol.equals(SnmpProtocol.NTCIP1218)) { .body(JsonUtils.jsonKeyValue(ERRSTR, 
e.getMessage())); } - // Try to explain common errors - HttpStatus returnCode = null; - String bodyMsg = ""; + // Provide error codes/text returned from RSU and our interpretation of them + HttpStatus httpResponseReturnCode = null; + String httpResponseBodyMessage = ""; + String rsuIpAddress = queryTarget.getRsuTarget(); if (null == rsuResponse || null == rsuResponse.getResponse()) { // Timeout - returnCode = HttpStatus.REQUEST_TIMEOUT; - bodyMsg = JsonUtils.jsonKeyValue(ERRSTR, "Timeout."); + httpResponseReturnCode = HttpStatus.REQUEST_TIMEOUT; + String timeoutMessage = "Timeout. No response from RSU."; + httpResponseBodyMessage = JsonUtils.jsonKeyValue(ERRSTR, timeoutMessage); + logger.error("Failed to delete message at index {} for RSU {}: {}", index, rsuIpAddress, timeoutMessage); } else if (rsuResponse.getResponse().getErrorStatus() == 0) { // Success - returnCode = HttpStatus.OK; - bodyMsg = JsonUtils.jsonKeyValue("deleted_msg", Integer.toString(index)); - } else if (rsuResponse.getResponse().getErrorStatus() == 12) { - // Message previously deleted or doesn't exist - returnCode = HttpStatus.BAD_REQUEST; - bodyMsg = JsonUtils.jsonKeyValue(ERRSTR, "No message at index ".concat(Integer.toString(index))); - } else if (rsuResponse.getResponse().getErrorStatus() == 10) { - // Invalid index - returnCode = HttpStatus.BAD_REQUEST; - bodyMsg = JsonUtils.jsonKeyValue(ERRSTR, "Invalid index ".concat(Integer.toString(index))); + httpResponseReturnCode = HttpStatus.OK; + httpResponseBodyMessage = JsonUtils.jsonKeyValue("deleted_msg", Integer.toString(index)); + logger.info("Successfully deleted message at index {} for RSU {}", index, rsuIpAddress); } else { - // Misc error - returnCode = HttpStatus.BAD_REQUEST; - bodyMsg = JsonUtils.jsonKeyValue(ERRSTR, rsuResponse.getResponse().getErrorStatusText()); + // Error + httpResponseReturnCode = HttpStatus.BAD_REQUEST; + int errorCodeReturnedByRSU = rsuResponse.getResponse().getErrorStatus(); + String errorTextReturnedByRSU = rsuResponse.getResponse().getErrorStatusText(); + String givenReason = "Error code " + Integer.toString(errorCodeReturnedByRSU) + ": " + errorTextReturnedByRSU; + String interpretation = interpretErrorCode(errorCodeReturnedByRSU, index); + httpResponseBodyMessage = JsonUtils.jsonKeyValue(ERRSTR, givenReason + " => Interpretation: " + interpretation); + logger.error("Failed to delete message at index {} for RSU {} due to error: {} => Interpretation: {}", index, rsuIpAddress, givenReason, interpretation); } - logger.info("Delete call response code: {}, message: {}", returnCode, bodyMsg); + return ResponseEntity.status(httpResponseReturnCode).body(httpResponseBodyMessage); + } - return ResponseEntity.status(returnCode).body(bodyMsg); + private String interpretErrorCode(int errorCodeReturnedByRSU, int index) { + if (errorCodeReturnedByRSU == 12) { + return "Message previously deleted or doesn't exist at index " + Integer.toString(index); + } else if (errorCodeReturnedByRSU == 10) { + return "Invalid index " + Integer.toString(index); + } else { + return "Unknown error"; + } } } diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDepositController.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDepositController.java index d000af2c4..56dea7112 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDepositController.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/traveler/TimDepositController.java @@ -75,6 +75,8 @@ public class TimDepositController { private MessageProducer 
stringMsgProducer; private MessageProducer timProducer; + private boolean dataSigningEnabledSDW; + public static class TimDepositControllerException extends Exception { private static final long serialVersionUID = 1L; @@ -99,6 +101,10 @@ public TimDepositController(OdeProperties odeProperties) { this.timProducer = new MessageProducer<>(odeProperties.getKafkaBrokers(), odeProperties.getKafkaProducerType(), null, OdeTimSerializer.class.getName(), odeProperties.getKafkaTopicsDisabledSet()); + this.dataSigningEnabledSDW = System.getenv("DATA_SIGNING_ENABLED_SDW") != null && !System.getenv("DATA_SIGNING_ENABLED_SDW").isEmpty() + ? Boolean.parseBoolean(System.getenv("DATA_SIGNING_ENABLED_SDW")) + : true; + } /** @@ -226,7 +232,7 @@ public synchronized ResponseEntity depositTim(String jsonString, Request logger.debug("securitySvcsSignatureUri = {}", odeProperties.getSecuritySvcsSignatureUri()); String xmlMsg; DdsAdvisorySituationData asd = null; - if (!odeProperties.dataSigningEnabled()) { + if (!this.dataSigningEnabledSDW) { // We need to send data UNSECURED, so we should try to build the ASD as well as // MessageFrame asd = TimTransmogrifier.buildASD(odeTID.getRequest()); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/AbstractUdpReceiverPublisher.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/AbstractUdpReceiverPublisher.java index 77067c2c4..96e373ce6 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/AbstractUdpReceiverPublisher.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/AbstractUdpReceiverPublisher.java @@ -1,13 +1,17 @@ package us.dot.its.jpo.ode.udp; import java.net.DatagramSocket; +import java.net.DatagramPacket; import java.net.SocketException; +import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import us.dot.its.jpo.ode.OdeProperties; +import us.dot.its.jpo.ode.model.OdeAsn1Payload; +import us.dot.its.jpo.ode.uper.UperUtil; public abstract class AbstractUdpReceiverPublisher implements Runnable { @@ -53,18 +57,24 @@ public AbstractUdpReceiverPublisher(OdeProperties odeProps, int port, int buffer } } - /* Strips the 1609.3 and unsigned 1609.2 headers if they are present. - Will return the payload with a signed 1609.2 header if it is present. - Otherwise, returns just the payload. 
*/ - protected String stripDot3Header(String hexString, String payload_start_flag) { - int payloadStartIndex = hexString.indexOf(payload_start_flag); - String headers = hexString.substring(0, payloadStartIndex); - String payload = hexString.substring(payloadStartIndex, hexString.length()); - // Look for the index of the start flag of a signed 1609.2 header - int signedDot2StartIndex = headers.indexOf("038100"); - if (signedDot2StartIndex == -1) - return payload; - else - return headers.substring(signedDot2StartIndex, headers.length()) + payload; + public OdeAsn1Payload getPayloadHexString(DatagramPacket packet, UperUtil.SupportedMessageTypes msgType) { + String startFlag = UperUtil.getStartFlag(msgType); + // extract the actual packet from the buffer + byte[] payload = packet.getData(); + if (payload == null) + return null; + // convert bytes to hex string and verify identity + String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); + if (payloadHexString.indexOf(startFlag) == -1) + return null; + + logger.debug("Full {} packet: {}", msgType, payloadHexString); + payloadHexString = UperUtil.stripDot3Header(payloadHexString, startFlag); + logger.debug("Stripped {} packet: {}", msgType, payloadHexString); + + OdeAsn1Payload timPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); + + return timPayload; } + } \ No newline at end of file diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/bsm/BsmReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/bsm/BsmReceiver.java index 6088c0c28..b8b252f48 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/bsm/BsmReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/bsm/BsmReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -23,6 +22,7 @@ import us.dot.its.jpo.ode.model.ReceivedMessageDetails; import us.dot.its.jpo.ode.model.RxSource; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class BsmReceiver extends AbstractUdpReceiverPublisher { @@ -62,26 +62,15 @@ public void run() { senderPort = packet.getPort(); logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; - - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getBsmStartFlag()) == -1) - continue; - logger.debug("Full BSM packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getBsmStartFlag()); - logger.debug("Stripped BSM packet: {}", payloadHexString); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload bsmPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); + OdeAsn1Payload bsmPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.BSM); + if (bsmPayload == null) + continue; OdeBsmMetadata bsmMetadata = new OdeBsmMetadata(bsmPayload); // Set BSM Metadata values that can be assumed from the UDP endpoint ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); + String 
timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); bsmMetadata.setOdeReceivedAt(timestamp); ReceivedMessageDetails receivedMessageDetails = new ReceivedMessageDetails(); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/map/MapReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/map/MapReceiver.java index c39efc181..d77b15cf2 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/map/MapReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/map/MapReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -20,6 +19,7 @@ import us.dot.its.jpo.ode.model.OdeMapMetadata; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class MapReceiver extends AbstractUdpReceiverPublisher { @@ -58,26 +58,15 @@ public void run() { senderPort = packet.getPort(); logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; - - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getMapStartFlag()) == -1) - continue; - logger.debug("Full Map packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getMapStartFlag()); - logger.debug("Stripped Map packet: {}", payloadHexString); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload mapPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); + OdeAsn1Payload mapPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.MAP); + if (mapPayload == null) + continue; OdeMapMetadata mapMetadata = new OdeMapMetadata(mapPayload); // Add header data for the decoding process ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); mapMetadata.setOdeReceivedAt(timestamp); mapMetadata.setOriginIp(senderIp); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/psm/PsmReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/psm/PsmReceiver.java index dfdcf557d..7bd56c95c 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/psm/PsmReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/psm/PsmReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -20,6 +19,7 @@ import us.dot.its.jpo.ode.model.OdePsmMetadata; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class PsmReceiver extends AbstractUdpReceiverPublisher { @@ -58,26 +58,15 @@ public void run() { senderPort = packet.getPort(); logger.debug("Packet received from {}:{}", senderIp, senderPort); - // 
extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; - - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getPsmStartFlag()) == -1) - continue; - logger.debug("Full PSM packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getPsmStartFlag()); - logger.debug("Stripped PSM packet: {}", payloadHexString); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload psmPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); + OdeAsn1Payload psmPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.PSM); + if (psmPayload == null) + continue; OdePsmMetadata psmMetadata = new OdePsmMetadata(psmPayload); // Add header data for the decoding process ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); psmMetadata.setOdeReceivedAt(timestamp); psmMetadata.setOriginIp(senderIp); @@ -88,7 +77,7 @@ public void run() { // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic psmPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(psmMetadata, psmPayload), false), - psmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedPSMJson()); + psmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedPSMJson()); } } catch (Exception e) { logger.error("Error receiving packet", e); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/spat/SpatReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/spat/SpatReceiver.java index b03a7d50b..5bb7bd8ef 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/spat/SpatReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/spat/SpatReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -20,79 +19,69 @@ import us.dot.its.jpo.ode.model.OdeSpatMetadata.SpatSource; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class SpatReceiver extends AbstractUdpReceiverPublisher { - private static Logger logger = LoggerFactory.getLogger(SpatReceiver.class); + private static Logger logger = LoggerFactory.getLogger(SpatReceiver.class); - private StringPublisher spatPublisher; + private StringPublisher spatPublisher; - @Autowired - public SpatReceiver(OdeProperties odeProps) { - this(odeProps, odeProps.getSpatReceiverPort(), odeProps.getSpatBufferSize()); + @Autowired + public SpatReceiver(OdeProperties odeProps) { + this(odeProps, odeProps.getSpatReceiverPort(), odeProps.getSpatBufferSize()); - this.spatPublisher = new StringPublisher(odeProps); - } + this.spatPublisher = new StringPublisher(odeProps); + } - public SpatReceiver(OdeProperties odeProps, int port, int bufferSize) { - super(odeProps, port, bufferSize); + public SpatReceiver(OdeProperties odeProps, int port, int bufferSize) { + super(odeProps, port, bufferSize); - this.spatPublisher = new StringPublisher(odeProps); - } + this.spatPublisher = new StringPublisher(odeProps); + } - 
@Override - public void run() { + @Override + public void run() { - logger.debug("SPaT UDP Receiver Service started."); + logger.debug("SPaT UDP Receiver Service started."); - byte[] buffer = new byte[bufferSize]; + byte[] buffer = new byte[bufferSize]; - DatagramPacket packet = new DatagramPacket(buffer, buffer.length); + DatagramPacket packet = new DatagramPacket(buffer, buffer.length); - do { - try { - logger.debug("Waiting for UDP SPaT packets..."); - socket.receive(packet); - if (packet.getLength() > 0) { - senderIp = packet.getAddress().getHostAddress(); - senderPort = packet.getPort(); - logger.debug("Packet received from {}:{}", senderIp, senderPort); + do { + try { + logger.debug("Waiting for UDP SPaT packets..."); + socket.receive(packet); + if (packet.getLength() > 0) { + senderIp = packet.getAddress().getHostAddress(); + senderPort = packet.getPort(); + logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; + // Create OdeMsgPayload and OdeLogMetadata objects and populate them + OdeAsn1Payload spatPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.SPAT); + if (spatPayload == null) + continue; + OdeSpatMetadata spatMetadata = new OdeSpatMetadata(spatPayload); - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getSpatStartFlag()) == -1) - continue; - logger.debug("Full SPaT packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getSpatStartFlag()); - logger.debug("Stripped SPaT packet: {}", payloadHexString); + // Add header data for the decoding process + ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); + spatMetadata.setOdeReceivedAt(timestamp); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload spatPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); - OdeSpatMetadata spatMetadata = new OdeSpatMetadata(spatPayload); + spatMetadata.setOriginIp(senderIp); + spatMetadata.setSpatSource(SpatSource.RSU); + spatMetadata.setRecordType(RecordType.spatTx); + spatMetadata.setRecordGeneratedBy(GeneratedBy.RSU); + spatMetadata.setSecurityResultCode(SecurityResultCode.success); - // Add header data for the decoding process - ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); - spatMetadata.setOdeReceivedAt(timestamp); - - spatMetadata.setOriginIp(senderIp); - spatMetadata.setSpatSource(SpatSource.RSU); - spatMetadata.setRecordType(RecordType.spatTx); - spatMetadata.setRecordGeneratedBy(GeneratedBy.RSU); - spatMetadata.setSecurityResultCode(SecurityResultCode.success); - - // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic - spatPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(spatMetadata, spatPayload), false), - spatPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSPATJson()); + // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic + spatPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(spatMetadata, spatPayload), false), + spatPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSPATJson()); + } + } catch (Exception e) { + logger.error("Error receiving packet", e); } - } catch (Exception e) { - logger.error("Error receiving 
packet", e); - } - } while (!isStopped()); - } + } while (!isStopped()); + } } diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/srm/SrmReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/srm/SrmReceiver.java index 22917447e..b9d9121bd 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/srm/SrmReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/srm/SrmReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -20,79 +19,69 @@ import us.dot.its.jpo.ode.model.OdeSrmMetadata.SrmSource; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class SrmReceiver extends AbstractUdpReceiverPublisher { - private static Logger logger = LoggerFactory.getLogger(SrmReceiver.class); + private static Logger logger = LoggerFactory.getLogger(SrmReceiver.class); - private StringPublisher srmPublisher; + private StringPublisher srmPublisher; - @Autowired - public SrmReceiver(OdeProperties odeProps) { - this(odeProps, odeProps.getSrmReceiverPort(), odeProps.getSrmBufferSize()); + @Autowired + public SrmReceiver(OdeProperties odeProps) { + this(odeProps, odeProps.getSrmReceiverPort(), odeProps.getSrmBufferSize()); - this.srmPublisher = new StringPublisher(odeProps); - } + this.srmPublisher = new StringPublisher(odeProps); + } - public SrmReceiver(OdeProperties odeProps, int port, int bufferSize) { - super(odeProps, port, bufferSize); + public SrmReceiver(OdeProperties odeProps, int port, int bufferSize) { + super(odeProps, port, bufferSize); - this.srmPublisher = new StringPublisher(odeProps); - } + this.srmPublisher = new StringPublisher(odeProps); + } - @Override - public void run() { + @Override + public void run() { - logger.debug("SRM UDP Receiver Service started."); + logger.debug("SRM UDP Receiver Service started."); - byte[] buffer = new byte[bufferSize]; + byte[] buffer = new byte[bufferSize]; - DatagramPacket packet = new DatagramPacket(buffer, buffer.length); + DatagramPacket packet = new DatagramPacket(buffer, buffer.length); - do { - try { - logger.debug("Waiting for UDP SRM packets..."); - socket.receive(packet); - if (packet.getLength() > 0) { - senderIp = packet.getAddress().getHostAddress(); - senderPort = packet.getPort(); - logger.debug("Packet received from {}:{}", senderIp, senderPort); + do { + try { + logger.debug("Waiting for UDP SRM packets..."); + socket.receive(packet); + if (packet.getLength() > 0) { + senderIp = packet.getAddress().getHostAddress(); + senderPort = packet.getPort(); + logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; + // Create OdeMsgPayload and OdeLogMetadata objects and populate them + OdeAsn1Payload srmPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.SRM); + if (srmPayload == null) + continue; + OdeSrmMetadata srmMetadata = new OdeSrmMetadata(srmPayload); - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getSrmStartFlag()) == -1) - continue; - logger.debug("Full SRM packet: {}", 
payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getSrmStartFlag()); - logger.debug("Stripped SRM packet: {}", payloadHexString); + // Add header data for the decoding process + ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); + srmMetadata.setOdeReceivedAt(timestamp); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload srmPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); - OdeSrmMetadata srmMetadata = new OdeSrmMetadata(srmPayload); + srmMetadata.setOriginIp(senderIp); + srmMetadata.setSrmSource(SrmSource.RSU); + srmMetadata.setRecordType(RecordType.srmTx); + srmMetadata.setRecordGeneratedBy(GeneratedBy.OBU); + srmMetadata.setSecurityResultCode(SecurityResultCode.success); - // Add header data for the decoding process - ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); - srmMetadata.setOdeReceivedAt(timestamp); - - srmMetadata.setOriginIp(senderIp); - srmMetadata.setSrmSource(SrmSource.RSU); - srmMetadata.setRecordType(RecordType.srmTx); - srmMetadata.setRecordGeneratedBy(GeneratedBy.OBU); - srmMetadata.setSecurityResultCode(SecurityResultCode.success); - - // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic - srmPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(srmMetadata, srmPayload), false), - srmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSRMJson()); + // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic + srmPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(srmMetadata, srmPayload), false), + srmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSRMJson()); + } + } catch (Exception e) { + logger.error("Error receiving packet", e); } - } catch (Exception e) { - logger.error("Error receiving packet", e); - } - } while (!isStopped()); - } + } while (!isStopped()); + } } diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/ssm/SsmReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/ssm/SsmReceiver.java index c5a7774a8..27108eec7 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/ssm/SsmReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/ssm/SsmReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -20,79 +19,69 @@ import us.dot.its.jpo.ode.model.OdeSsmMetadata.SsmSource; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class SsmReceiver extends AbstractUdpReceiverPublisher { - private static Logger logger = LoggerFactory.getLogger(SsmReceiver.class); + private static Logger logger = LoggerFactory.getLogger(SsmReceiver.class); - private StringPublisher ssmPublisher; + private StringPublisher ssmPublisher; - @Autowired - public SsmReceiver(OdeProperties odeProps) { - this(odeProps, odeProps.getSsmReceiverPort(), odeProps.getSsmBufferSize()); + @Autowired + public SsmReceiver(OdeProperties odeProps) { + this(odeProps, odeProps.getSsmReceiverPort(), odeProps.getSsmBufferSize()); - this.ssmPublisher = new StringPublisher(odeProps); - } + this.ssmPublisher = new StringPublisher(odeProps); + 
} - public SsmReceiver(OdeProperties odeProps, int port, int bufferSize) { - super(odeProps, port, bufferSize); + public SsmReceiver(OdeProperties odeProps, int port, int bufferSize) { + super(odeProps, port, bufferSize); - this.ssmPublisher = new StringPublisher(odeProps); - } + this.ssmPublisher = new StringPublisher(odeProps); + } - @Override - public void run() { + @Override + public void run() { - logger.debug("SSM UDP Receiver Service started."); + logger.debug("SSM UDP Receiver Service started."); - byte[] buffer = new byte[bufferSize]; + byte[] buffer = new byte[bufferSize]; - DatagramPacket packet = new DatagramPacket(buffer, buffer.length); + DatagramPacket packet = new DatagramPacket(buffer, buffer.length); - do { - try { - logger.debug("Waiting for UDP SSM packets..."); - socket.receive(packet); - if (packet.getLength() > 0) { - senderIp = packet.getAddress().getHostAddress(); - senderPort = packet.getPort(); - logger.debug("Packet received from {}:{}", senderIp, senderPort); + do { + try { + logger.debug("Waiting for UDP SSM packets..."); + socket.receive(packet); + if (packet.getLength() > 0) { + senderIp = packet.getAddress().getHostAddress(); + senderPort = packet.getPort(); + logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; + // Create OdeMsgPayload and OdeLogMetadata objects and populate them + OdeAsn1Payload ssmPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.SSM); + if (ssmPayload == null) + continue; + OdeSsmMetadata ssmMetadata = new OdeSsmMetadata(ssmPayload); - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getSsmStartFlag()) == -1) - continue; - logger.debug("Full SSM packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getSsmStartFlag()); - logger.debug("Stripped SSM packet: {}", payloadHexString); + // Add header data for the decoding process + ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); + ssmMetadata.setOdeReceivedAt(timestamp); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload ssmPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); - OdeSsmMetadata ssmMetadata = new OdeSsmMetadata(ssmPayload); + ssmMetadata.setOriginIp(senderIp); + ssmMetadata.setSsmSource(SsmSource.RSU); + ssmMetadata.setRecordType(RecordType.ssmTx); + ssmMetadata.setRecordGeneratedBy(GeneratedBy.RSU); + ssmMetadata.setSecurityResultCode(SecurityResultCode.success); - // Add header data for the decoding process - ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); - ssmMetadata.setOdeReceivedAt(timestamp); - - ssmMetadata.setOriginIp(senderIp); - ssmMetadata.setSsmSource(SsmSource.RSU); - ssmMetadata.setRecordType(RecordType.ssmTx); - ssmMetadata.setRecordGeneratedBy(GeneratedBy.RSU); - ssmMetadata.setSecurityResultCode(SecurityResultCode.success); - - // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic - ssmPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(ssmMetadata, ssmPayload), false), - ssmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSSMJson()); + // Submit JSON to the OdeRawEncodedMessageJson Kafka Topic + 
ssmPublisher.publish(JsonUtils.toJson(new OdeAsn1Data(ssmMetadata, ssmPayload), false), + ssmPublisher.getOdeProperties().getKafkaTopicOdeRawEncodedSSMJson()); + } + } catch (Exception e) { + logger.error("Error receiving packet", e); } - } catch (Exception e) { - logger.error("Error receiving packet", e); - } - } while (!isStopped()); - } + } while (!isStopped()); + } } diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/tim/TimReceiver.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/tim/TimReceiver.java index a6b190336..5524463fe 100644 --- a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/tim/TimReceiver.java +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/udp/tim/TimReceiver.java @@ -5,7 +5,6 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import org.apache.tomcat.util.buf.HexUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -19,6 +18,7 @@ import us.dot.its.jpo.ode.model.OdeTimMetadata; import us.dot.its.jpo.ode.OdeProperties; import us.dot.its.jpo.ode.udp.AbstractUdpReceiverPublisher; +import us.dot.its.jpo.ode.uper.UperUtil; import us.dot.its.jpo.ode.util.JsonUtils; public class TimReceiver extends AbstractUdpReceiverPublisher { @@ -56,26 +56,15 @@ public void run() { senderPort = packet.getPort(); logger.debug("Packet received from {}:{}", senderIp, senderPort); - // extract the actualPacket from the buffer - byte[] payload = packet.getData(); - if (payload == null) - continue; - - // convert bytes to hex string and verify identity - String payloadHexString = HexUtils.toHexString(payload).toLowerCase(); - if (payloadHexString.indexOf(odeProperties.getTimStartFlag()) == -1) - continue; - logger.debug("Full TIM packet: {}", payloadHexString); - payloadHexString = super.stripDot3Header(payloadHexString, odeProperties.getTimStartFlag()); - logger.debug("Stripped TIM packet: {}", payloadHexString); - // Create OdeMsgPayload and OdeLogMetadata objects and populate them - OdeAsn1Payload timPayload = new OdeAsn1Payload(HexUtils.fromHexString(payloadHexString)); + OdeAsn1Payload timPayload = super.getPayloadHexString(packet, UperUtil.SupportedMessageTypes.TIM); + if (timPayload == null) + continue; OdeTimMetadata timMetadata = new OdeTimMetadata(timPayload); // Add header data for the decoding process ZonedDateTime utc = ZonedDateTime.now(ZoneOffset.UTC); - String timestamp = utc.format(DateTimeFormatter.ISO_INSTANT); + String timestamp = utc.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); timMetadata.setOdeReceivedAt(timestamp); timMetadata.setOriginIp(senderIp); diff --git a/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/uper/UperUtil.java b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/uper/UperUtil.java new file mode 100644 index 000000000..06d0eb360 --- /dev/null +++ b/jpo-ode-svcs/src/main/java/us/dot/its/jpo/ode/uper/UperUtil.java @@ -0,0 +1,175 @@ +package us.dot.its.jpo.ode.uper; + +import java.util.HashMap; + +import org.apache.tomcat.util.buf.HexUtils; +import org.json.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import us.dot.its.jpo.ode.model.OdeMsgPayload; +import us.dot.its.jpo.ode.util.JsonUtils; +import us.dot.its.jpo.ode.util.JsonUtils.JsonUtilsException; + +public class UperUtil { + private static Logger logger = LoggerFactory.getLogger(UperUtil.class); + + // start flags for BSM, TIM, MAP, SPAT, SRM, SSM, and PSM + private static final String BSM_START_FLAG = "0014"; + private static final 
String TIM_START_FLAG = "001f"; + private static final String SPAT_START_FLAG = "0013"; + private static final String SSM_START_FLAG = "001e"; + private static final String SRM_START_FLAG = "001d"; + private static final String MAP_START_FLAG = "0012"; + private static final String PSM_START_FLAG = "0020"; + + public enum SupportedMessageTypes { + BSM, TIM, SPAT, SSM, SRM, MAP, PSM + } + + // Strips the IEEE 1609.2 security header (if it exists) and returns the payload + public static String stripDot2Header(String hexString, String payload_start_flag) { + hexString = hexString.toLowerCase(); + int startIndex = hexString.indexOf(payload_start_flag); + if (startIndex == -1) + return "BAD DATA"; + return hexString.substring(startIndex, hexString.length()); + } + + /* + * Strips the 1609.3 and unsigned 1609.2 headers if they are present. + * Will return the payload with a signed 1609.2 header if it is present. + * Otherwise, returns just the payload. + */ + public static byte[] stripDot3Header(byte[] packet, HashMap msgStartFlags) { + String hexString = HexUtils.toHexString(packet); + String hexPacketParsed = ""; + + for (String start_flag : msgStartFlags.values()) { + int payloadStartIndex = hexString.indexOf(start_flag); + + if (payloadStartIndex == -1) + continue; + + String headers = hexString.substring(0, payloadStartIndex); + String payload = hexString.substring(payloadStartIndex, hexString.length()); + + // Look for the index of the start flag of a signed 1609.2 header, if one exists + int signedDot2StartIndex = headers.indexOf("038100"); + if (signedDot2StartIndex == -1) + hexPacketParsed = payload; + else + hexPacketParsed = headers.substring(signedDot2StartIndex, headers.length()) + payload; + break; + } + + if (hexPacketParsed.equals("")) { + hexPacketParsed = hexString; + logger.debug("Packet is not a BSM, TIM or Map message: " + hexPacketParsed); + } + + return HexUtils.fromHexString(hexPacketParsed); + } + + /* + * Strips the 1609.3 and unsigned 1609.2 headers if they are present. + * Will return the payload with a signed 1609.2 header if it is present. + * Otherwise, returns just the payload. + */ + public static String stripDot3Header(String hexString, String payload_start_flag) { + int payloadStartIndex = hexString.indexOf(payload_start_flag); + String headers = hexString.substring(0, payloadStartIndex); + String payload = hexString.substring(payloadStartIndex, hexString.length()); + // Look for the index of the start flag of a signed 1609.2 header + int signedDot2StartIndex = headers.indexOf("038100"); + if (signedDot2StartIndex == -1) + return payload; + else + return headers.substring(signedDot2StartIndex, headers.length()) + payload; + } + + /** + * Determines the message type based off the most likely start flag + * + * @param payload The OdeMsgPayload to check the content of. 
+ */ + public static String determineMessageType(OdeMsgPayload payload) { + String messageType = ""; + try { + JSONObject payloadJson = JsonUtils.toJSONObject(payload.getData().toJson()); + String hexString = payloadJson.getString("bytes").toLowerCase(); + + HashMap flagIndexes = new HashMap(); + flagIndexes.put("MAP", hexString.indexOf(MAP_START_FLAG)); + flagIndexes.put("TIM", hexString.indexOf(TIM_START_FLAG)); + flagIndexes.put("SSM", hexString.indexOf(SSM_START_FLAG)); + flagIndexes.put("PSM", hexString.indexOf(PSM_START_FLAG)); + flagIndexes.put("SRM", hexString.indexOf(SRM_START_FLAG)); + + int lowestIndex = Integer.MAX_VALUE; + for (String key : flagIndexes.keySet()) { + if (flagIndexes.get(key) == -1) { + logger.debug("This message is not of type " + key); + continue; + } + if (flagIndexes.get(key) < lowestIndex) { + messageType = key; + lowestIndex = flagIndexes.get(key); + } + } + } catch (JsonUtilsException e) { + logger.error("JsonUtilsException while checking message header. Stacktrace: " + e.toString()); + } + return messageType; + } + + // Get methods for message start flags + public static String getBsmStartFlag() { + return BSM_START_FLAG; + } + + public static String getTimStartFlag() { + return TIM_START_FLAG; + } + + public static String getSpatStartFlag() { + return SPAT_START_FLAG; + } + + public static String getSsmStartFlag() { + return SSM_START_FLAG; + } + + public static String getSrmStartFlag() { + return SRM_START_FLAG; + } + + public static String getMapStartFlag() { + return MAP_START_FLAG; + } + + public static String getPsmStartFlag() { + return PSM_START_FLAG; + } + + public static String getStartFlag(SupportedMessageTypes msgType) { + switch (msgType) { + case SupportedMessageTypes.BSM: + return BSM_START_FLAG; + case SupportedMessageTypes.TIM: + return TIM_START_FLAG; + case SupportedMessageTypes.SPAT: + return SPAT_START_FLAG; + case SupportedMessageTypes.SSM: + return SSM_START_FLAG; + case SupportedMessageTypes.SRM: + return SRM_START_FLAG; + case SupportedMessageTypes.MAP: + return MAP_START_FLAG; + case SupportedMessageTypes.PSM: + return PSM_START_FLAG; + default: + return null; + } + } +} diff --git a/jpo-ode-svcs/src/main/resources/application.properties b/jpo-ode-svcs/src/main/resources/application.properties index 02326516c..4fe1e2faa 100644 --- a/jpo-ode-svcs/src/main/resources/application.properties +++ b/jpo-ode-svcs/src/main/resources/application.properties @@ -19,18 +19,9 @@ spring.http.multipart.max-request-size=1MB #ode.uploadLocationBsm = bsm #ode.uploadLocationMessageFrame = messageframe -#USDOT Situation Data Clearinghouse (SDC)/ Situation Data Warehouse (SDW), a.k.a Data Distribution System (DDS) Properties +#USDOT Situation Data Clearinghouse (SDC) / Situational Data Exchange (SDX) Properties #========================================================================================================================= -#DDS WebSocket Properties -#------------------------ -#ode.ddsCasUsername= or define env variable ${ODE_DDS_CAS_USERNAME} -#ode.ddsCasPassword= or define env variable ${ODE_DDS_CAS_PASSWORD} - -#Enable/disable depositing SDW messages over Websocket(true) or REST(false) -#-------------------------------------------------------------------------- -#ode.depositSdwMessagesOverWebsocket=false - #RSU Properties (note - do not include quotes) #-------------- #ode.rsuUsername = diff --git a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/coder/stream/LogFileToAsn1CodecPublisherTest.java 
b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/coder/stream/LogFileToAsn1CodecPublisherTest.java index 33fc23641..d82457bcf 100644 --- a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/coder/stream/LogFileToAsn1CodecPublisherTest.java +++ b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/coder/stream/LogFileToAsn1CodecPublisherTest.java @@ -378,15 +378,4 @@ public void testPublishRxMsgBSMLogFileNewLine() throws Exception { assertEquals(expected, data.toJson()); } } - - @Test - public void testDetermineMessageType() throws JsonUtilsException { - String mapHexString = "0012839338023000205E96094D40DF4C2CA626C8516E02DC3C2010640000000289E01C009F603F42E88039900000000A41107B027D80FD0A4200C6400000002973021C09F603DE0C16029200000080002A8A008D027D98FEE805404FB0E1085F60588200028096021200000080002AA0007D027D98FE9802E04FB1200C214456228000A02B1240005022C03240000020000D56B40BC04FB35FF655E2C09F623FB81C835FEC0DB240A0A2BFF4AEBF82C660000804B0089000000800025670034013ECD7FB9578E027D9AFF883C4E050515FFA567A41635000040258024800000400012B8F81F409F663FAC094013ECD7FC83DDB02829AFFA480BC04FB02C6E0000804B09C5000000200035EA98A9604F60DA6C7C113D505C35FFE941D409F65C05034C050500C9880004409BC800000006D2BD3CEC813C40CDE062C1FD400000200008791EA3DB3CF380A009F666F05005813D80FFE0A0588C00040092106A00000000BC75CAC009F66DB54C04A813D80A100801241ED40000000078EBAE3B6DA7A008809E2050904008811F100000000BC72389009F60ECA8002049C400000002F1B2CA3027D93A71FA813EC204BC400000002F1B2B34027B0397608880CD10000000039B8E1A51036820505080D51000000003A7461ED1036760505080DD1000000003B2F62311006260505160BCA00000080002B785E2A80A0A6C028DE728145037F1F9E456488000202B2540001022C1894000001000057058C5B81414D806DBCD4028A18F4DF23A050502C8D0000404B05A5000000800035B6471BC05053602431F380A2864087BDB0141458064AB0D6C00053FC013EC0B0680006012C15940000020000D6C06C6581414D807FB972028A1901D78DC050536020EC1800A0A6C039D639813D80B0780006012C1494000002000096AB8C6581414D8062BE32028A1B01417E04050A360172D77009E2058440003009409C200000040006B3486A480A0A1CAB7134C8117DCC02879B018FAE2C050F3601CED54809E21012720000000067FBAD0007E7E84045C80000000100661580958004041C8000000019F3658401CDFA2C0D64000002000144016C02C36DDFFF0282984ACC1EE05052C36F0AC02828669D82DA8F821480A0A10F140002C8E0001004B03190000008000519FD190C43B2E0066108B08401428C342A0CE02828258A0604A6BE959AEE0E6050502C920001004B02D90000008000459FA164404FB30A8580A00A14619C306701414C32CE10E02829659081F814141029030164B0000802E8000802000035FDB1D84C09EC6C003BA14814140B0540003012C187400040080011B13F6EDB804F115FA6DFC10AFC94FC6A57EE07DCE2BFA7BED3B5FFCD72E80A1E018C90000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; - OdeAsn1Payload mapPayload = new OdeAsn1Payload(HexUtils.fromHexString(mapHexString)); - assertEquals(testLogFileToAsn1CodecPublisher.determineMessageType(mapPayload), "MAP"); - - String timHexString = "001F79201000000000012AA366D080729B8987D859717EE22001FFFE4FD0011589D828007E537130FB0B2E2FDC440001F46FFFF002B8B2E46E926E27CE6813D862CB90EDC9B89E11CE2CB8E98F9B89BCC4050518B2E365B66E26AE3B8B2E291A66E2591D8141462CB873969B89396C62CB86AFE9B89208E00000131560018300023E43A6A1351800023E4700EFC51881010100030180C620FB90CAAD3B9C5082080E1DDC905E10168E396921000325A0D73B83279C83010180034801090001260001808001838005008001F0408001828005008001304000041020407E800320409780050080012040000320409900018780032040958005000001E0408183E7139D7B70987019B526B8A950052F5C011D3C4B992143E885C71F95DA6071658082346CC03A50D66801F65288C30AB39673D0494536C559047E457AD291C99C20A7FB1244363E993EE3EE98C78742609340541DA01545A0F7339C26A527903576D30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; - OdeAsn1Payload timPayload = new OdeAsn1Payload(HexUtils.fromHexString(timHexString)); - assertEquals(testLogFileToAsn1CodecPublisher.determineMessageType(timPayload), "TIM"); - } } diff --git a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/snmp/SnmpSessionTest.java b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/snmp/SnmpSessionTest.java index 845bdf211..16624faa9 100644 --- a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/snmp/SnmpSessionTest.java +++ b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/snmp/SnmpSessionTest.java @@ -155,22 +155,24 @@ public void shouldCreatePDUWithFourDot1Protocol() throws ParseException { SNMP testParams = new SNMP(rsuSRMPsid, rsuSRMDsrcMsgId, rsuSRMTxMode, rsuSRMTxChannel, rsuSRMTxInterval, "2017-12-02T17:47:11-05:00", "2017-12-02T17:47:11-05:00", rsuSRMEnable, rsuSRMStatus); - ScopedPDU result = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.POST, SnmpProtocol.FOURDOT1); + boolean rsuDataSigningEnabled = true; + + ScopedPDU result = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.POST, SnmpProtocol.FOURDOT1, rsuDataSigningEnabled); assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result.getType()); assertEquals(expectedResult, result.getVariableBindings().toString()); - 
ScopedPDU result2 = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.GET, SnmpProtocol.FOURDOT1); + ScopedPDU result2 = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.GET, SnmpProtocol.FOURDOT1, rsuDataSigningEnabled); assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result2.getType()); assertEquals(expectedResult2, result2.getVariableBindings().toString()); } @Test - public void shouldCreatePDUWithNTCIP1218Protocol() throws ParseException { - String expectedResult = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.9.3 = 10, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = c0]"; - String expectedResult2 = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = c0]"; - + public void shouldCreatePDUWithNTCIP1218Protocol_dataSigningEnabledRsu_True() throws ParseException { + // prepare + String expectedResult = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.9.3 = 10, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = 00]"; + String expectedResult2 = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = 00]"; String rsuSRMPsid = "00000083"; int rsuSRMTxChannel = 4; int rsuSRMTxInterval = 5; @@ -181,13 +183,45 @@ public void shouldCreatePDUWithNTCIP1218Protocol() throws ParseException { SNMP testParams = new SNMP(rsuSRMPsid, 0, 0, rsuSRMTxChannel, rsuSRMTxInterval, "2017-12-02T17:47:11-05:00", "2017-12-02T17:47:11-05:00", rsuSRMEnable, rsuSRMStatus); - ScopedPDU result = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.POST, SnmpProtocol.NTCIP1218); + boolean rsuDataSigningEnabled = true; + + // execute + ScopedPDU result = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.POST, SnmpProtocol.NTCIP1218, true); + ScopedPDU result2 = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.GET, SnmpProtocol.NTCIP1218, true); + // verify assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result.getType()); assertEquals(expectedResult, result.getVariableBindings().toString()); + assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result2.getType()); + assertEquals(expectedResult2, result2.getVariableBindings().toString()); + } - ScopedPDU result2 = 
SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.GET, SnmpProtocol.NTCIP1218); + @Test + public void shouldCreatePDUWithNTCIP1218Protocol_dataSigningEnabledRsu_False() throws ParseException { + // prepare + String expectedResult = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.9.3 = 10, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = 80]"; + String expectedResult2 = "[1.3.6.1.4.1.1206.4.2.18.3.2.1.2.3 = 80:03, 1.3.6.1.4.1.1206.4.2.18.3.2.1.3.3 = 4, 1.3.6.1.4.1.1206.4.2.18.3.2.1.4.3 = 5, 1.3.6.1.4.1.1206.4.2.18.3.2.1.5.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.6.3 = 07:e1:0c:02:11:2f:0b:00, 1.3.6.1.4.1.1206.4.2.18.3.2.1.7.3 = 88, 1.3.6.1.4.1.1206.4.2.18.3.2.1.8.3 = 9, 1.3.6.1.4.1.1206.4.2.18.3.2.1.10.3 = 6, 1.3.6.1.4.1.1206.4.2.18.3.2.1.11.3 = 80]"; + String rsuSRMPsid = "00000083"; + int rsuSRMTxChannel = 4; + int rsuSRMTxInterval = 5; + String rsuSRMPayload = "88"; + int rsuSRMEnable = 9; + int rsuSRMStatus = 10; + + SNMP testParams = new SNMP(rsuSRMPsid, 0, 0, rsuSRMTxChannel, rsuSRMTxInterval, "2017-12-02T17:47:11-05:00", + "2017-12-02T17:47:11-05:00", rsuSRMEnable, rsuSRMStatus); + System.setProperty("DATA_SIGNING_ENABLED_RSU", "false"); + + boolean rsuDataSigningEnabled = false; + + // execute + ScopedPDU result = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.POST, SnmpProtocol.NTCIP1218, rsuDataSigningEnabled); + ScopedPDU result2 = SnmpSession.createPDU(testParams, rsuSRMPayload, 3, RequestVerb.GET, SnmpProtocol.NTCIP1218, rsuDataSigningEnabled); + + // verify + assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result.getType()); + assertEquals(expectedResult, result.getVariableBindings().toString()); assertEquals("Incorrect type, expected PDU.SET (-93)", -93, result2.getType()); assertEquals(expectedResult2, result2.getVariableBindings().toString()); } diff --git a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/traveler/TimDepositControllerTest.java b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/traveler/TimDepositControllerTest.java index 5af303706..911a77919 100644 --- a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/traveler/TimDepositControllerTest.java +++ b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/traveler/TimDepositControllerTest.java @@ -167,4 +167,11 @@ public void testDepositingTimWithExtraProperties(@Capturing TimTransmogrifier ca assertEquals("{\"success\":\"true\"}", actualResponse.getBody()); } + @Test + public void testSuccessfulRsuMessageReturnsSuccessMessagePost(@Capturing TimTransmogrifier capturingTimTransmogrifier, @Capturing XmlUtils capturingXmlUtils) { + String timToSubmit = "{\"request\": {\"rsus\": [{\"latitude\": 30.123456, \"longitude\": -100.12345, \"rsuId\": 123, \"route\": \"myroute\", \"milepost\": 10, \"rsuTarget\": \"172.0.0.1\", \"rsuRetries\": 3, \"rsuTimeout\": 5000, \"rsuIndex\": 7, \"rsuUsername\": \"myusername\", \"rsuPassword\": \"mypassword\"}], \"snmp\": {\"rsuid\": \"83\", \"msgid\": 31, \"mode\": 1, \"channel\": 183, \"interval\": 2000, \"deliverystart\": \"2024-05-13T14:30:00Z\", \"deliverystop\": \"2024-05-13T22:30:00Z\", \"enable\": 1, \"status\": 4}}, \"tim\": {\"msgCnt\": \"1\", \"timeStamp\": \"2024-05-10T19:01:22Z\", \"packetID\": 
\"123451234512345123\", \"urlB\": \"null\", \"dataframes\": [{\"startDateTime\": \"2024-05-13T20:30:05.014Z\", \"durationTime\": \"30\", \"sspTimRights\": \"1\", \"frameType\": \"advisory\", \"msgId\": {\"roadSignID\": {\"mutcdCode\": \"warning\", \"viewAngle\": \"1111111111111111\", \"position\": {\"latitude\": 30.123456, \"longitude\": -100.12345}}}, \"priority\": \"5\", \"sspLocationRights\": \"1\", \"regions\": [{\"name\": \"I_myroute_RSU_172.0.0.1\", \"anchorPosition\": {\"latitude\": 30.123456, \"longitude\": -100.12345}, \"laneWidth\": \"50\", \"directionality\": \"3\", \"closedPath\": \"false\", \"description\": \"path\", \"path\": {\"scale\": 0, \"nodes\": [{\"delta\": \"node-LL\", \"nodeLat\": 0.0, \"nodeLong\": 0.0}, {\"delta\": \"node-LL\", \"nodeLat\": 0.0, \"nodeLong\": 0.0}], \"type\": \"ll\"}, \"direction\": \"0000000000010000\"}], \"sspMsgTypes\": \"1\", \"sspMsgContent\": \"1\", \"content\": \"workZone\", \"items\": [\"771\"], \"url\": \"null\"}]}}"; + ResponseEntity actualResponse = testTimDepositController.postTim(timToSubmit); + assertEquals("{\"success\":\"true\"}", actualResponse.getBody()); + } + } diff --git a/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/uper/UperUtilTest.java b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/uper/UperUtilTest.java new file mode 100644 index 000000000..c75a08dd2 --- /dev/null +++ b/jpo-ode-svcs/src/test/java/us/dot/its/jpo/ode/uper/UperUtilTest.java @@ -0,0 +1,80 @@ +package us.dot.its.jpo.ode.uper; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +import java.util.HashMap; + +import org.apache.tomcat.util.buf.HexUtils; +import org.junit.jupiter.api.Test; + +import us.dot.its.jpo.ode.model.OdeAsn1Payload; +import us.dot.its.jpo.ode.util.JsonUtils.JsonUtilsException; + +public class UperUtilTest { + UperUtil testUperUtil; + + @Test + public void testStripDot2Header() { + String testHexString = "10110014000000"; + String testPayloadStartFlag = "0014"; + String expectedValue = "0014000000"; + assertEquals(expectedValue, UperUtil.stripDot2Header(testHexString, testPayloadStartFlag)); + } + + @Test + public void testStripDot2HeaderBadData() { + String testHexString = "0014"; + String testPayloadStartFlag = "0015"; + String expectedValue = "BAD DATA"; + assertEquals(expectedValue, UperUtil.stripDot2Header(testHexString, testPayloadStartFlag)); + } + + @Test + public void testStripDot3Header() { + byte[] testPacket = { 0x10, 0x20, 0x00, 0x1f, 0x00, 0x00 }; + byte[] testExpected = { 0x00, 0x1f, 0x00, 0x00 }; + HashMap testMsgStartFlag = new HashMap<>(); + testMsgStartFlag.put("TIM", "001f"); + byte[] testResult = UperUtil.stripDot3Header(testPacket, testMsgStartFlag); + assertArrayEquals(testExpected, testResult); + } + + @Test + public void testStripDot3HeaderWithDot2StartIndex() { + byte[] testPacket = { 0x0, 0x01, 0x03, (byte) 0x81, 0x00, 0x00, 0x1f, 0x00 }; + byte[] testExpected = { 0x03, (byte) 0x81, 0x00, 0x00, 0x1f, 0x00 }; + HashMap testMsgStartFlag = new HashMap<>(); + testMsgStartFlag.put("TIM", "001f"); + byte[] testResult = UperUtil.stripDot3Header(testPacket, testMsgStartFlag); + assertArrayEquals(testExpected, testResult); + } + + @Test + public void testStripDot3HeaderString() { + String testPacketString = "0102001f0000"; + String testExpectedString = "001f0000"; + String testMsgStartFlag = "001f"; + assertEquals(testExpectedString, UperUtil.stripDot3Header(testPacketString, testMsgStartFlag)); + } + + @Test + public void testStripDot3HeaderStringWithDot2StartIndex() { + 
String testPacketString = "0001038100001f00"; + String testExpectedString = "038100001f00"; + String testMsgStartFlag = "001f"; + assertEquals(testExpectedString, UperUtil.stripDot3Header(testPacketString, testMsgStartFlag)); + } + + @Test + public void testDetermineMessageType() throws JsonUtilsException { + String mapHexString = "0012839338023000205E96094D40DF4C2CA626C8516E02DC3C2010640000000289E01C009F603F42E88039900000000A41107B027D80FD0A4200C6400000002973021C09F603DE0C16029200000080002A8A008D027D98FEE805404FB0E1085F60588200028096021200000080002AA0007D027D98FE9802E04FB1200C214456228000A02B1240005022C03240000020000D56B40BC04FB35FF655E2C09F623FB81C835FEC0DB240A0A2BFF4AEBF82C660000804B0089000000800025670034013ECD7FB9578E027D9AFF883C4E050515FFA567A41635000040258024800000400012B8F81F409F663FAC094013ECD7FC83DDB02829AFFA480BC04FB02C6E0000804B09C5000000200035EA98A9604F60DA6C7C113D505C35FFE941D409F65C05034C050500C9880004409BC800000006D2BD3CEC813C40CDE062C1FD400000200008791EA3DB3CF380A009F666F05005813D80FFE0A0588C00040092106A00000000BC75CAC009F66DB54C04A813D80A100801241ED40000000078EBAE3B6DA7A008809E2050904008811F100000000BC72389009F60ECA8002049C400000002F1B2CA3027D93A71FA813EC204BC400000002F1B2B34027B0397608880CD10000000039B8E1A51036820505080D51000000003A7461ED1036760505080DD1000000003B2F62311006260505160BCA00000080002B785E2A80A0A6C028DE728145037F1F9E456488000202B2540001022C1894000001000057058C5B81414D806DBCD4028A18F4DF23A050502C8D0000404B05A5000000800035B6471BC05053602431F380A2864087BDB0141458064AB0D6C00053FC013EC0B0680006012C15940000020000D6C06C6581414D807FB972028A1901D78DC050536020EC1800A0A6C039D639813D80B0780006012C1494000002000096AB8C6581414D8062BE32028A1B01417E04050A360172D77009E2058440003009409C200000040006B3486A480A0A1CAB7134C8117DCC02879B018FAE2C050F3601CED54809E21012720000000067FBAD0007E7E84045C80000000100661580958004041C8000000019F3658401CDFA2C0D64000002000144016C02C36DDFFF0282984ACC1EE05052C36F0AC02828669D82DA8F821480A0A10F140002C8E0001004B03190000008000519FD190C43B2E0066108B08401428C342A0CE02828258A0604A6BE959AEE0E6050502C920001004B02D90000008000459FA164404FB30A8580A00A14619C306701414C32CE10E02829659081F814141029030164B0000802E8000802000035FDB1D84C09EC6C003BA14814140B0540003012C187400040080011B13F6EDB804F115FA6DFC10AFC94FC6A57EE07DCE2BFA7BED3B5FFCD72E80A1E018C90000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + OdeAsn1Payload mapPayload = new OdeAsn1Payload(HexUtils.fromHexString(mapHexString)); + assertEquals(UperUtil.determineMessageType(mapPayload), "MAP"); + + String timHexString = "001F79201000000000012AA366D080729B8987D859717EE22001FFFE4FD0011589D828007E537130FB0B2E2FDC440001F46FFFF002B8B2E46E926E27CE6813D862CB90EDC9B89E11CE2CB8E98F9B89BCC4050518B2E365B66E26AE3B8B2E291A66E2591D8141462CB873969B89396C62CB86AFE9B89208E00000131560018300023E43A6A1351800023E4700EFC51881010100030180C620FB90CAAD3B9C5082080E1DDC905E10168E396921000325A0D73B83279C83010180034801090001260001808001838005008001F0408001828005008001304000041020407E800320409780050080012040000320409900018780032040958005000001E0408183E7139D7B70987019B526B8A950052F5C011D3C4B992143E885C71F95DA6071658082346CC03A50D66801F65288C30AB39673D0494536C559047E457AD291C99C20A7FB1244363E993EE3EE98C78742609340541DA01545A0F7339C26A527903576D30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + OdeAsn1Payload timPayload = new OdeAsn1Payload(HexUtils.fromHexString(timHexString)); + assertEquals(UperUtil.determineMessageType(timPayload), "TIM"); + } + +} diff --git a/kafka.md b/kafka.md index 1a56238c1..28c6f1756 100644 --- a/kafka.md +++ b/kafka.md @@ -41,16 +41,7 @@ If you don't specify a broker id in your docker-compose file, it will automatica ### Automatically create topics -If you want to have kafka-docker automatically create topics in Kafka during -creation, a ```KAFKA_CREATE_TOPICS``` environment variable can be -added in ```docker-compose.yml```. - -Here is an example snippet from ```docker-compose.yml```: - - environment: - KAFKA_CREATE_TOPICS: "Topic1:1:3,Topic2:1:1" - -```Topic 1``` will have 1 partition and 3 replicas, ```Topic 2``` will have 1 partition and 1 replica. +If you want to have Kafka automatically create topics during creation, modify the `scripts\kafka\kafka_init.sh` script to include the topics you want to create. The script is run by a kafka init container upon startup. The default script creates all necessary topics with a replication factor of 1. If you want to change the replication factor, modify the script accordingly. 
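As a concrete sketch of that kafka.md guidance: adding a topic amounts to appending one more `kafka-topics.sh` invocation to `scripts/kafka/kafka_init.sh`, following the same pattern the script already uses. The topic name below is illustrative only (not one of the defaults created by the script); the broker address and binary path are the ones the compose setup in this change assumes.

```bash
# Hypothetical addition to scripts/kafka/kafka_init.sh: create one extra topic.
# The replication factor cannot exceed the number of brokers; the default
# compose setup runs a single broker, so 1 is the practical maximum here.
/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.MyCustomTopic" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1
```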
### Advertised hostname diff --git a/pom.xml b/pom.xml index 18acbf54a..9399cbd82 100644 --- a/pom.xml +++ b/pom.xml @@ -11,12 +11,12 @@ scm:git:https://github.com/usdot-jpo-ode/jpo-ode.git - jpo-ode-2.0.0-SNAPSHOT + jpo-ode-2.1.0-SNAPSHOT usdot.jpo.ode jpo-ode - 2.0.0-SNAPSHOT + 2.1.0-SNAPSHOT pom jpo-ode-common @@ -150,7 +150,7 @@ org.apache.maven.plugins maven-surefire-plugin 3.2.5 - + diff --git a/quickstart-compose.yml b/quickstart-compose.yml index d131d27bd..b99dc802f 100644 --- a/quickstart-compose.yml +++ b/quickstart-compose.yml @@ -2,34 +2,39 @@ version: '3' services: - zookeeper: - image: wurstmeister/zookeeper - ports: - - "2181:2181" - kafka: - image: wurstmeister/kafka + image: bitnami/kafka:latest + hostname: kafka ports: - "9092:9092" + volumes: + - kafka:/bitnami environment: - DOCKER_HOST_IP: ${DOCKER_HOST_IP} - ZK: ${DOCKER_HOST_IP}:2181 - KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP} - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true" - KAFKA_DELETE_TOPIC_ENABLED: "true" - KAFKA_CLEANUP_POLICY: "delete" # delete old logs - KAFKA_LOG_RETENTION_HOURS: 2 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000 - KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours - KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours. - # This configuration controls the period of time after - # which Kafka will force the log to roll even if the segment - # file isn't full to ensure that retention can delete or compact old data. + KAFKA_ENABLE_KRAFT: "yes" + KAFKA_CFG_PROCESS_ROLES: "broker,controller" + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" + KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092" + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT" + KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092" + KAFKA_BROKER_ID: "1" + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093" + ALLOW_PLAINTEXT_LISTENER: "yes" + KAFKA_CFG_NODE_ID: "1" + KAFKA_CFG_DELETE_TOPIC_ENABLE: "true" + KAFKA_CFG_LOG_RETENTION_HOURS: 2 + logging: + options: + max-size: "10m" + max-file: "5" + + kafka_init: + image: bitnami/kafka:latest depends_on: - - zookeeper + kafka: + condition: service_started volumes: - - ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock + - ./scripts/kafka/kafka_init.sh:/kafka_init.sh + entrypoint: ["/bin/sh", "kafka_init.sh"] ode: build: . @@ -39,22 +44,32 @@ services: - "9090:9090" - "46753:46753/udp" - "46800:46800/udp" + - "47900:47900/udp" + - "44900:44900/udp" + - "44910:44910/udp" + - "44920:44920/udp" + - "44930:44930/udp" + - "44940:44940/udp" - "5555:5555/udp" - "6666:6666/udp" environment: DOCKER_HOST_IP: ${DOCKER_HOST_IP} ZK: ${DOCKER_HOST_IP}:2181 - ODE_DDS_CAS_USERNAME: ${ODE_DDS_CAS_USERNAME} - ODE_DDS_CAS_PASSWORD: ${ODE_DDS_CAS_PASSWORD} ODE_SECURITY_SVCS_SIGNATURE_URI: ${ODE_SECURITY_SVCS_SIGNATURE_URI} ODE_RSU_USERNAME: ${ODE_RSU_USERNAME} ODE_RSU_PASSWORD: ${ODE_RSU_PASSWORD} -# Commented out for latest schemaVersion. 
Uncomment to set for older schemaVersion -# ODE_OUTPUT_SCHEMA_VERSION: ${ODE_OUTPUT_SCHEMA_VERSION} + DATA_SIGNING_ENABLED_RSU: ${DATA_SIGNING_ENABLED_RSU} + DATA_SIGNING_ENABLED_SDW: ${DATA_SIGNING_ENABLED_SDW} + DEFAULT_SNMP_PROTOCOL: ${DEFAULT_SNMP_PROTOCOL} depends_on: - kafka volumes: - ${DOCKER_SHARED_VOLUME}:/jpo-ode + - ${DOCKER_SHARED_VOLUME}/uploads:/home/uploads + logging: + options: + max-size: "10m" + max-file: "5" adm: build: ./asn1_codec diff --git a/sample.env b/sample.env index 8a8669119..a055fb9b8 100644 --- a/sample.env +++ b/sample.env @@ -37,13 +37,24 @@ DOCKER_SHARED_VOLUME_WINDOWS= ODE_RSU_USERNAME= ODE_RSU_PASSWORD= +# Values to sign TIM messages delivered to RSUs/SDX (accepted values are true or false) +# If not set, DATA_SIGNING_ENABLED_RSU will default to false +# If not set, DATA_SIGNING_ENABLED_SDW will default to true +DATA_SIGNING_ENABLED_RSU= +DATA_SIGNING_ENABLED_SDW= + +# Default SNMP protocol version when not specified in the request +# Current supported values are FOURDOT1 and NTCIP1218 +# If no protocol is specified the NTCIP1218 protocol will be used +DEFAULT_SNMP_PROTOCOL= + ######################### # Kafka and Confluent Cloud Properties -# The IP address of Docker host machine which can be found by running "ifconfig" +# The type of Kafka broker to connect to. If set to "CONFLUENT", the broker will be Confluent Cloud. Otherwise, it will be a local Kafka broker. KAFKA_TYPE= -# Confluent Cloud API access credentials +# Confluent Cloud API access credentials (only required if KAFKA_TYPE is set to "CONFLUENT") CONFLUENT_KEY= CONFLUENT_SECRET= @@ -100,29 +111,27 @@ RDE_TIM_HEADER_X_API_KEY= RDE_TIM_GROUP=group_rde_tim ######################### -# SDW Depositor Properties - -# Uncomment and set this variable to `true` if you want to use the DDS depositor. 
-# Otherwise, leave it commented out or set to `false` if you want to use the SDW Depositor Module -#ODE_DEPOSIT_SDW_MESSAGES_OVER_WEBSOCKET=false - -# Note: you only need to set one of the following pairs of username/pw combinations -# The first pair is used for the SDW depositor submodule -# The second pair is used for the built-in DDS depositor +# SDX Depositor Properties ## Required if using SDX depositor module (REST interface) SDW_API_KEY= +# Required MONGODB Variables +MONGO_IP=${DOCKER_HOST_IP} +MONGO_DB_NAME=ode +MONGO_ADMIN_DB_USER=admin +MONGO_ADMIN_DB_PASS=password +MONGO_ODE_DB_USER=ode +MONGO_ODE_DB_PASS=password +MONGO_URI=mongodb://${MONGO_ODE_DB_USER}:${MONGO_ODE_DB_PASS}@${MONGO_IP}:27017/?directConnection=true +MONGO_COLLECTION_TTL=7 # days + ## Optional overrides #SDW_DESTINATION_URL= #SDW_GROUP_ID= #SDW_KAFKA_PORT= #SDW_SUBSCRIPTION_TOPIC=topic.SDWDepositorInput -## Required if using built-in DDS deposit (websocket interface) -#ODE_DDS_CAS_USERNAME= -#ODE_DDS_CAS_PASSWORD= - #jpo-security-svcs module properties SEC_CRYPTO_SERVICE_BASE_URI= ODE_SECURITY_SVCS_SIGNATURE_URI= diff --git a/scripts/kafka/kafka_init.sh b/scripts/kafka/kafka_init.sh new file mode 100644 index 000000000..9ed08f089 --- /dev/null +++ b/scripts/kafka/kafka_init.sh @@ -0,0 +1,43 @@ +sleep 2s +/opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:9092 --list +echo 'Creating kafka topics' + +# Create topics +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeBsmPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSpatTxPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSpatPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSpatJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.FilteredOdeSpatJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSpatRxJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSpatRxPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeBsmJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.FilteredOdeBsmJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeTimJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeTimBroadcastJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.J2735TimBroadcastJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeDriverAlertJson" --bootstrap-server kafka:9092 --replication-factor 1 
--partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.Asn1DecoderInput" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.Asn1DecoderOutput" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.Asn1EncoderInput" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.Asn1EncoderOutput" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.SDWDepositorInput" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeTIMCertExpirationTimeJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedBSMJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedSPATJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedTIMJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedMAPJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeMapTxPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeMapJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedSSMJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSsmPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSsmJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedSRMJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSrmTxPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeSrmJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdeRawEncodedPSMJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdePsmTxPojo" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 +/opt/bitnami/kafka/bin/kafka-topics.sh --create --if-not-exists --topic "topic.OdePsmJson" --bootstrap-server kafka:9092 --replication-factor 1 --partitions 1 + +echo 'Kafka created with the following topics:' 
+/opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:9092 --list +exit \ No newline at end of file diff --git a/scripts/mongo/create_indexes.js b/scripts/mongo/create_indexes.js new file mode 100644 index 000000000..04487a537 --- /dev/null +++ b/scripts/mongo/create_indexes.js @@ -0,0 +1,206 @@ +// Create indexes on all collections + +/* +This script is responsible for initializing the replica set, creating collections, adding indexes and TTLs +*/ +console.log("Running create_indexes.js"); + +const ode_db = process.env.MONGO_DB_NAME; +const ode_user = process.env.MONGO_ODE_DB_USER; +const ode_pass = process.env.MONGO_ODE_DB_PASS; + +const ttlInDays = process.env.MONGO_COLLECTION_TTL; // TTL in days +const expire_seconds = ttlInDays * 24 * 60 * 60; +const retry_milliseconds = 5000; + +console.log("ODE DB Name: " + ode_db); + +try { + console.log("Initializing replica set..."); + + var config = { + "_id": "rs0", + "version": 1, + "members": [ + { + "_id": 0, + "host": "mongo:27017", + "priority": 2 + }, + ] + }; + rs.initiate(config, { force: true }); + rs.status(); +} catch(e) { + rs.status().ok +} + +// name -> collection name +// ttlField -> field to perform ttl on +// timeField -> field to index for time queries + +const collections = [ + {name: "OdeBsmJson", ttlField: "recordGeneratedAt", timeField: "metadata.odeReceivedAt"}, + {name: "OdeRawEncodedBSMJson", ttlField: "recordGeneratedAt", timeField: "none"}, + + {name: "OdeMapJson", ttlField: "recordGeneratedAt", timeField: "metadata.odeReceivedAt"}, + {name: "OdeRawEncodedMAPJson", ttlField: "recordGeneratedAt", timeField: "none"}, + + {name: "OdeSpatJson", ttlField: "recordGeneratedAt", timeField: "metadata.odeReceivedAt"}, + {name: "OdeRawEncodedSPATJson", ttlField: "recordGeneratedAt", timeField: "none"}, + + {name: "OdeTimJson", ttlField: "recordGeneratedAt", timeField: "metadata.odeReceivedAt"}, + {name: "OdeRawEncodedTIMJson", ttlField: "recordGeneratedAt", timeField: "none"}, + + {name: "OdePsmJson", ttlField: "recordGeneratedAt", timeField: "metadata.odeReceivedAt"}, + {name: "OdeRawEncodedPsmJson", ttlField: "recordGeneratedAt", timeField: "none"}, +]; + +// Function to check if the replica set is ready +function isReplicaSetReady() { + let status; + try { + status = rs.status(); + } catch (error) { + console.error("Error getting replica set status: " + error); + return false; + } + + // Check if the replica set has a primary + if (!status.hasOwnProperty('myState') || status.myState !== 1) { + console.log("Replica set is not ready yet"); + return false; + } + + console.log("Replica set is ready"); + return true; +} + +try{ + + // Wait for the replica set to be ready + while (!isReplicaSetReady()) { + sleep(retry_milliseconds); + } + sleep(retry_milliseconds); + // creates another user + console.log("Creating ODE user..."); + admin = db.getSiblingDB("admin"); + // Check if user already exists + var user = admin.getUser(ode_user); + if (user == null) { + admin.createUser( + { + user: ode_user, + pwd: ode_pass, + roles: [ + { role: "readWrite", db: ode_db }, + ] + } + ); + } else { + console.log("User \"" + ode_user + "\" already exists."); + } + +} catch (error) { + print("Error connecting to the MongoDB instance: " + error); +} + + +// Wait for the collections to exist in mongo before trying to create indexes on them +let missing_collection_count; +const db = db.getSiblingDB(ode_db); +do { + try { + missing_collection_count = 0; + const collection_names = db.getCollectionNames(); + for (collection of collections) { 
+ console.log("Creating Indexes for Collection" + collection["name"]); + // Create Collection if It doesn't exist + let created = false; + if(!collection_names.includes(collection.name)){ + created = createCollection(collection); + // created = true; + }else{ + created = true; + } + + if(created){ + if (collection.hasOwnProperty('ttlField') && collection.ttlField !== 'none') { + createTTLIndex(collection); + } + + + }else{ + missing_collection_count++; + console.log("Collection " + collection.name + " does not exist yet"); + } + } + if (missing_collection_count > 0) { + print("Waiting on " + missing_collection_count + " collections to be created...will try again in " + retry_milliseconds + " ms"); + sleep(retry_milliseconds); + } + } catch (err) { + console.log("Error while setting up TTL indexes in collections"); + console.log(rs.status()); + console.error(err); + sleep(retry_milliseconds); + } +} while (missing_collection_count > 0); + +console.log("Finished Creating All TTL indexes"); + + +function createCollection(collection){ + try { + db.createCollection(collection.name); + return true; + } catch (err) { + console.log("Unable to Create Collection: " + collection.name); + console.log(err); + return false; + } +} + +// Create TTL Indexes +function createTTLIndex(collection) { + if (ttlIndexExists(collection)) { + console.log("TTL index already exists for " + collection.name); + return; + } + + const collection_name = collection.name; + const timeField = collection.ttlField; + + console.log( + "Creating TTL index for " + collection_name + " to remove documents after " + + expire_seconds + + " seconds" + ); + + try { + var index_json = {}; + index_json[timeField] = 1; + db[collection_name].createIndex(index_json, + {expireAfterSeconds: expire_seconds} + ); + console.log("Created TTL index for " + collection_name + " using the field: " + timeField + " as the timestamp"); + } catch (err) { + var pattern_json = {}; + pattern_json[timeField] = 1; + db.runCommand({ + "collMod": collection_name, + "index": { + keyPattern: pattern_json, + expireAfterSeconds: expire_seconds + } + }); + console.log("Updated TTL index for " + collection_name + " using the field: " + timeField + " as the timestamp"); + } + +} + + +function ttlIndexExists(collection) { + return db[collection.name].getIndexes().find((idx) => idx.hasOwnProperty('expireAfterSeconds')) !== undefined; +} \ No newline at end of file diff --git a/scripts/mongo/setup_mongo.sh b/scripts/mongo/setup_mongo.sh new file mode 100644 index 000000000..13cb4e7b7 --- /dev/null +++ b/scripts/mongo/setup_mongo.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +until mongosh --host mongo:27017 --eval 'quit(db.runCommand({ ping: 1 }).ok ? 0 : 2)' &>/dev/null; do + sleep 1 +done + +echo "MongoDB is up and running!" 
+ +cd / + +mongosh -u $MONGO_ADMIN_DB_USER -p $MONGO_ADMIN_DB_PASS --authenticationDatabase admin --host mongo:27017 /create_indexes.js diff --git a/scripts/start-kafka-consumer.sh b/scripts/start-kafka-consumer.sh index 9ed719e8d..d1af820cf 100644 --- a/scripts/start-kafka-consumer.sh +++ b/scripts/start-kafka-consumer.sh @@ -6,4 +6,4 @@ if [[ -z "$1" ]]; then exit 1; fi -$KAFKA_HOME/bin/kafka-console-consumer.sh --topic=$1 --bootstrap-server=`broker-list.sh` +$KAFKA_HOME/bin/kafka-console-consumer.sh --topic $1 --bootstrap-server `broker-list.sh` diff --git a/scripts/start-kafka-producer.sh b/scripts/start-kafka-producer.sh index 9f6ae463c..d82e1818b 100644 --- a/scripts/start-kafka-producer.sh +++ b/scripts/start-kafka-producer.sh @@ -6,4 +6,4 @@ if [[ -z "$1" ]]; then exit 1; fi -$KAFKA_HOME/bin/kafka-console-producer.sh --topic=$1 --broker-list=`broker-list.sh` +$KAFKA_HOME/bin/kafka-console-producer.sh --topic $1 --broker-list `broker-list.sh`
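As a usage sketch of the updated wrapper scripts above (assuming they are run in an environment where `KAFKA_HOME` is set and `broker-list.sh` is on the PATH, and that the topics were already created by `scripts/kafka/kafka_init.sh`):

```bash
# Hypothetical invocations; topic names are taken from scripts/kafka/kafka_init.sh.
./scripts/start-kafka-consumer.sh topic.OdeBsmJson        # tail decoded BSM JSON output
./scripts/start-kafka-producer.sh topic.Asn1DecoderInput  # hand-feed messages to the ASN.1 decoder input
```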