From f82c50ca6194bc4579f6251a5b9a27c973932222 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Mon, 26 Feb 2024 17:21:51 +0300 Subject: [PATCH 01/17] feat(docker): re-organize the autoware docker containers Signed-off-by: oguzkaganozt --- .devcontainer/Dockerfile | 14 ++ .devcontainer/devcontainer.json | 26 +++ .dockerignore | 17 ++ .../create-main-distro-alias/action.yaml | 49 ----- .../actions/docker-build-and-push/action.yaml | 131 ++++++++++--- ...ocker-build-and-push-main-self-hosted.yaml | 33 +++- .../workflows/docker-build-and-push-main.yaml | 35 +++- .github/workflows/setup-docker.yaml | 5 - .github/workflows/setup-universe.yaml | 5 - .github/workflows/update-docker-manifest.yaml | 36 +--- .hadolint.yaml | 5 + amd64.env | 5 +- ansible/playbooks/core.yaml | 22 --- ansible/playbooks/docker.yaml | 3 +- ansible/playbooks/openadk.yaml | 46 +++++ ansible/playbooks/universe.yaml | 20 +- .../roles/autoware_universe/meta/main.yaml | 2 - ansible/roles/ccache/README.md | 13 -- ansible/roles/ccache/tasks/main.yaml | 7 - ansible/roles/cuda/tasks/main.yaml | 82 +++++++- ansible/roles/dev_tools/README.md | 9 + .../defaults/main.yaml | 0 .../meta/main.yaml | 0 .../{git_lfs => dev_tools}/tasks/main.yaml | 19 ++ .../roles/{autoware_core => gdown}/README.md | 4 +- .../defaults/main.yaml | 0 .../{ccache/defaults => gdown/meta}/main.yaml | 0 .../{autoware_core => gdown}/tasks/main.yaml | 0 .../README.md | 4 +- .../meta => geographiclib/defaults}/main.yaml | 0 .../defaults => geographiclib/meta}/main.yaml | 0 .../tasks/main.yaml | 0 ansible/roles/git_lfs/README.md | 14 -- ansible/roles/kisak_mesa/README.md | 3 + .../meta => kisak_mesa/defaults}/main.yaml | 0 .../defaults => kisak_mesa/meta}/main.yaml | 0 ansible/roles/kisak_mesa/tasks/main.yaml | 24 +++ ansible/roles/nvidia_docker/tasks/main.yaml | 44 +++-- ansible/roles/pacmod/meta/main.yaml | 2 - ansible/roles/pacmod/tasks/main.yaml | 8 + ansible/roles/plotjuggler/README.md | 16 -- ansible/roles/plotjuggler/meta/main.yaml | 2 - ansible/roles/plotjuggler/tasks/main.yaml | 7 - ansible/roles/pre_commit/README.md | 21 --- ansible/roles/pre_commit/defaults/main.yaml | 1 - ansible/roles/pre_commit/meta/main.yaml | 0 ansible/roles/pre_commit/tasks/main.yaml | 18 -- .../roles/rmw_implementation/meta/main.yaml | 2 - ansible/roles/rocker/README.md | 25 --- ansible/roles/rocker/defaults/main.yaml | 0 ansible/roles/rocker/meta/main.yaml | 0 ansible/roles/rocker/tasks/main.yaml | 33 ---- ansible/roles/ros2_dev_tools/meta/main.yaml | 2 - ansible/roles/tensorrt/defaults/main.yaml | 1 - ansible/roles/tensorrt/tasks/main.yaml | 4 +- docker/README.md | 148 +++++---------- docker/autoware-openadk/Dockerfile | 137 ++++++++++++++ docker/autoware-openadk/docker-bake.hcl | 26 +++ docker/autoware-openadk/etc/.bash_aliases | 28 +++ docker/autoware-openadk/etc/ros_entrypoint.sh | 32 ++++ docker/autoware-universe/Dockerfile | 139 -------------- docker/autoware-universe/docker-bake.hcl | 19 -- docker/build.sh | 175 ++++++++++++------ docker/run.sh | 163 ++++++++++++++++ setup-dev-env.sh | 51 ++++- 65 files changed, 1042 insertions(+), 695 deletions(-) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .dockerignore delete mode 100644 .github/actions/create-main-distro-alias/action.yaml create mode 100644 .hadolint.yaml delete mode 100644 ansible/playbooks/core.yaml create mode 100644 ansible/playbooks/openadk.yaml delete mode 100644 ansible/roles/autoware_universe/meta/main.yaml delete 
mode 100644 ansible/roles/ccache/README.md delete mode 100644 ansible/roles/ccache/tasks/main.yaml create mode 100644 ansible/roles/dev_tools/README.md rename ansible/roles/{autoware_core => dev_tools}/defaults/main.yaml (100%) rename ansible/roles/{autoware_core => dev_tools}/meta/main.yaml (100%) rename ansible/roles/{git_lfs => dev_tools}/tasks/main.yaml (61%) rename ansible/roles/{autoware_core => gdown}/README.md (59%) rename ansible/roles/{autoware_universe => gdown}/defaults/main.yaml (100%) rename ansible/roles/{ccache/defaults => gdown/meta}/main.yaml (100%) rename ansible/roles/{autoware_core => gdown}/tasks/main.yaml (100%) rename ansible/roles/{autoware_universe => geographiclib}/README.md (64%) rename ansible/roles/{ccache/meta => geographiclib/defaults}/main.yaml (100%) rename ansible/roles/{git_lfs/defaults => geographiclib/meta}/main.yaml (100%) rename ansible/roles/{autoware_universe => geographiclib}/tasks/main.yaml (100%) delete mode 100644 ansible/roles/git_lfs/README.md create mode 100644 ansible/roles/kisak_mesa/README.md rename ansible/roles/{git_lfs/meta => kisak_mesa/defaults}/main.yaml (100%) rename ansible/roles/{plotjuggler/defaults => kisak_mesa/meta}/main.yaml (100%) create mode 100644 ansible/roles/kisak_mesa/tasks/main.yaml delete mode 100644 ansible/roles/plotjuggler/README.md delete mode 100644 ansible/roles/plotjuggler/meta/main.yaml delete mode 100644 ansible/roles/plotjuggler/tasks/main.yaml delete mode 100644 ansible/roles/pre_commit/README.md delete mode 100644 ansible/roles/pre_commit/defaults/main.yaml delete mode 100644 ansible/roles/pre_commit/meta/main.yaml delete mode 100644 ansible/roles/pre_commit/tasks/main.yaml delete mode 100644 ansible/roles/rocker/README.md delete mode 100644 ansible/roles/rocker/defaults/main.yaml delete mode 100644 ansible/roles/rocker/meta/main.yaml delete mode 100644 ansible/roles/rocker/tasks/main.yaml create mode 100644 docker/autoware-openadk/Dockerfile create mode 100644 docker/autoware-openadk/docker-bake.hcl create mode 100644 docker/autoware-openadk/etc/.bash_aliases create mode 100644 docker/autoware-openadk/etc/ros_entrypoint.sh delete mode 100644 docker/autoware-universe/Dockerfile delete mode 100644 docker/autoware-universe/docker-bake.hcl create mode 100755 docker/run.sh diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000000..045aa5c10cf --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,14 @@ +FROM ghcr.io/autowarefoundation/openadk:latest-devel + +ENV SHELL /bin/bash + +ARG USERNAME=autoware +ARG USER_UID=1000 +ARG USER_GID=$USER_UID + +RUN groupadd --gid $USER_GID $USERNAME \ + && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \ + && apt-get update \ + && apt-get install -y sudo \ + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..1fd21bcaba3 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,26 @@ +{ + "name": "Autoware", + "build": { + "dockerfile": "Dockerfile" + }, + "remoteUser": "autoware", + "hostRequirements": { + "gpu": true + }, + "runArgs": [ + "--cap-add=SYS_PTRACE", + "--security-opt", + "seccomp=unconfined", + "--net=host", + "--volume=/etc/localtime:/etc/localtime:ro", + "--gpus", + "all" + ], + "customizations": { + "vscode": { + "settings.json": { + "terminal.integrated.profiles.linux": { "bash": { "path": "/bin/bash" } } + } + } + } 
+} diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000000..8ae6e191434 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,17 @@ +# Ignore git and metadata directories +.git +.github +.vscode + +# Ignore all markdown files +*.md + +# Ignore Docker files +docker-bake.hcl + +# Etc +*.ignore +*.lint +*.lock +*.log +*.out diff --git a/.github/actions/create-main-distro-alias/action.yaml b/.github/actions/create-main-distro-alias/action.yaml deleted file mode 100644 index 40e9d53ee21..00000000000 --- a/.github/actions/create-main-distro-alias/action.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: create-main-distro-alias -description: "" - -inputs: - package-name: - description: "" - required: true - rosdistro: - description: "" - required: true - tag-name: - description: "" - required: true - -runs: - using: composite - steps: - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ github.token }} - - - name: Set image name - id: set-image-name - run: echo "image-name=ghcr.io/${{ github.repository_owner }}/${{ inputs.package-name }}" >> $GITHUB_OUTPUT - shell: bash - - - name: Create Docker manifest for latest - run: | - # Check image existence - distro_image="${{ steps.set-image-name.outputs.image-name }}:${{ inputs.rosdistro }}-${{ inputs.tag-name }}" - if docker manifest inspect "$distro_image-amd64" >/dev/null 2>&1; then - amd64_image="$distro_image-amd64" - fi - if docker manifest inspect "$distro_image-arm64" >/dev/null 2>&1; then - arm64_image="$distro_image-arm64" - fi - - echo "amd64_image: $amd64_image" - echo "arm64_image: $arm64_image" - - docker manifest create --amend ${{ steps.set-image-name.outputs.image-name }}:${{ inputs.tag-name }} \ - $amd64_image \ - $arm64_image - - docker manifest push ${{ steps.set-image-name.outputs.image-name }}:${{ inputs.tag-name }} - shell: bash diff --git a/.github/actions/docker-build-and-push/action.yaml b/.github/actions/docker-build-and-push/action.yaml index 757c7c3cead..a50e7abbd61 100644 --- a/.github/actions/docker-build-and-push/action.yaml +++ b/.github/actions/docker-build-and-push/action.yaml @@ -8,10 +8,10 @@ inputs: build-args: description: "" required: false - tag-prefix: + tag-suffix: description: "" required: false - tag-suffix: + tag-prefix: description: "" required: false allow-push: @@ -31,23 +31,18 @@ runs: sudo apt-get -y install jq shell: bash - # workflow_dispatch: latest, date - # schedule: latest, date - # tag: semver - name: Set Docker tags id: set-docker-tags run: | tags=() - if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then - tags+=("latest") - tags+=("{{date 'YYYYMMDD'}}") - else - tags+=("type=schedule,pattern=latest") - tags+=("type=schedule,pattern={{date 'YYYYMMDD'}}") - tags+=("type=semver,pattern={{version}}") - tags+=("type=match,pattern=\d+.\d+") + if [ "${{ github.event_name }}" == "push" ] && [ "${{ github.ref_type }}" == "tag" ]; then + tags+=("$(echo "${{ github.ref }}" | sed -E 's/.*([vV][0-9]+\.[0-9]+\.[0-9]+).*/\1/')") fi + tags+=("{{date 'YYYYMMDD'}}") + tags+=("latest") + tags+=("latest-${{ inputs.tag-prefix }}") + # Output multiline strings: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "tags<<$EOF" >> $GITHUB_OUTPUT @@ -55,6 +50,17 @@ runs: echo "$EOF" >> $GITHUB_OUTPUT shell: bash + - name: Docker meta for prebuilt + id: meta-prebuilt + uses: 
docker/metadata-action@v4 + with: + images: ghcr.io/${{ github.repository_owner }}/${{ inputs.bake-target }} + tags: ${{ steps.set-docker-tags.outputs.tags }} + bake-target: docker-metadata-action-prebuilt + flavor: | + latest=false + suffix=-prebuilt${{ inputs.tag-suffix }} + - name: Docker meta for devel id: meta-devel uses: docker/metadata-action@v4 @@ -64,20 +70,19 @@ runs: bake-target: docker-metadata-action-devel flavor: | latest=false - prefix=${{ inputs.tag-prefix }} - suffix=${{ inputs.tag-suffix }} + suffix=-devel${{ inputs.tag-suffix }} - - name: Docker meta for prebuilt - id: meta-prebuilt + - name: Docker meta for runtime + if: ${{ github.event_name == 'workflow_dispatch' }} || ${{ (github.event_name == 'push' && github.ref_type == 'tag') }} + id: meta-runtime uses: docker/metadata-action@v4 with: images: ghcr.io/${{ github.repository_owner }}/${{ inputs.bake-target }} tags: ${{ steps.set-docker-tags.outputs.tags }} - bake-target: docker-metadata-action-prebuilt + bake-target: docker-metadata-action-runtime flavor: | - latest=false - prefix=${{ inputs.tag-prefix }} - suffix=-prebuilt${{ inputs.tag-suffix }} + latest=${{ github.event_name == 'push' && github.ref_type == 'tag' }} + suffix=-runtime${{ inputs.tag-suffix }} - name: Login to GitHub Container Registry if: ${{ github.event_name != 'pull_request' }} @@ -87,15 +92,93 @@ runs: username: ${{ github.repository_owner }} password: ${{ github.token }} - - name: Build and push + - name: Build and Push - prebuilt and devel + if: ${{ (github.event_name == 'push' && github.ref_type == 'branch') || github.event_name == 'schedule' }} + uses: docker/bake-action@v3 + with: + push: ${{ inputs.allow-push == 'true' }} + files: | + docker/${{ inputs.bake-target }}/docker-bake.hcl + ${{ steps.meta-prebuilt.outputs.bake-file }} + ${{ steps.meta-devel.outputs.bake-file }} + targets: | + prebuilt + devel + provenance: false + set: | + ${{ inputs.build-args }} + + - name: Build and Publish to GitHub Container Registry + if: ${{ ( github.event_name == 'push' && github.ref_type == 'tag' ) || ( github.event_name == 'workflow_dispatch' && github.event.inputs.artifacts-destination == 'registry') }} uses: docker/bake-action@v3 with: - # Checking event_name for https://github.com/autowarefoundation/autoware/issues/2796 - push: ${{ (github.event_name == 'schedule' || github.ref_name == github.event.repository.default_branch || github.event_name == 'push') && inputs.allow-push == 'true' }} + push: true files: | docker/${{ inputs.bake-target }}/docker-bake.hcl ${{ steps.meta-devel.outputs.bake-file }} ${{ steps.meta-prebuilt.outputs.bake-file }} + ${{ steps.meta-runtime.outputs.bake-file }} provenance: false set: | ${{ inputs.build-args }} + + - name: Build and Save Artifacts + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.artifacts-destination == 'tarball' }} + uses: docker/bake-action@v3 + with: + push: false + files: | + docker/${{ inputs.bake-target }}/docker-bake.hcl + ${{ steps.meta-devel.outputs.bake-file }} + ${{ steps.meta-prebuilt.outputs.bake-file }} + ${{ steps.meta-runtime.outputs.bake-file }} + provenance: false + set: | + ${{ inputs.build-args }} + prebuilt.output=type=docker,dest=/tmp/prebuilt.tar + devel.output=type=docker,dest=/tmp/devel.tar + runtime.output=type=docker,dest=/tmp/runtime.tar + + - name: Upload Artifact - prebuilt + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.artifacts-destination == 'tarball' }} + id: artifact-upload-step-prebuilt + uses: actions/upload-artifact@v4 
+ with: + name: prebuilt-image-${{ inputs.tag-suffix }} + path: /tmp/prebuilt.tar + retention-days: 7 + compression-level: 6 + overwrite: true + if-no-files-found: error + + - name: Upload Artifact - devel + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.artifacts-destination == 'tarball' }} + id: artifact-upload-step-devel + uses: actions/upload-artifact@v4 + with: + name: devel-image-${{ inputs.tag-suffix }} + path: /tmp/devel.tar + retention-days: 7 + compression-level: 6 + overwrite: true + if-no-files-found: error + + - name: Upload Artifact - runtime + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.artifacts-destination == 'tarball' }} + id: artifact-upload-step-runtime + uses: actions/upload-artifact@v4 + with: + name: runtime-image-${{ inputs.tag-suffix }} + path: /tmp/runtime.tar + retention-days: 7 + compression-level: 6 + overwrite: true + if-no-files-found: error + + - name: Output artifact URLs + id: output-artifact-urls + run: | + echo 'prebuilt URL ${{ steps.artifact-upload-step-prebuilt.outputs.artifact-url }}' + echo 'devel URL ${{ steps.artifact-upload-step-devel.outputs.artifact-url }}' + echo 'runtime URL ${{ steps.artifact-upload-step-runtime.outputs.artifact-url }}' + shell: bash diff --git a/.github/workflows/docker-build-and-push-main-self-hosted.yaml b/.github/workflows/docker-build-and-push-main-self-hosted.yaml index 9774de4e5cb..315e5b1ed07 100644 --- a/.github/workflows/docker-build-and-push-main-self-hosted.yaml +++ b/.github/workflows/docker-build-and-push-main-self-hosted.yaml @@ -1,13 +1,28 @@ +# EVENTS: push, schedule, workflow_dispatch +# workflow_dispatch: Build all. No publish to registry; save as tarball. +# schedule: Build only devel and prebuilt. TAGS: date, latest +# push-branch: Build only devel and prebuilt. TAGS: date, latest +# push-tag: Build all. 
TAGS: version, date, latest + name: docker-build-and-push-main-self-hosted on: push: tags: - - v* - - "[0-9]+.[0-9]+*" + - adkit-v*.*.* + branches: + - main schedule: - cron: 0 0 1,15 * * workflow_dispatch: + inputs: + artifacts-destination: + type: choice + description: Destination for the artifacts + options: + - registry + - tarball + default: tarball jobs: docker-build-and-push-main-self-hosted: @@ -21,11 +36,12 @@ jobs: include: - name: no-cuda base_image_env: base_image + lib_dir: aarch64 setup-args: --no-nvidia additional-tag-suffix: "" - name: cuda - base_image_env: cuda_base_image - setup-args: --no-cuda-drivers + base_image_env: base_image + lib_dir: aarch64 additional-tag-suffix: -cuda steps: # https://github.com/actions/checkout/issues/211 @@ -46,18 +62,19 @@ jobs: cat arm64.env | sed -e "s/^\s*//" -e "/^#/d" >> $GITHUB_ENV fi - - name: Build 'autoware-universe' + - name: Build 'autoware-openadk' uses: ./.github/actions/docker-build-and-push with: - bake-target: autoware-universe + bake-target: autoware-openadk build-args: | *.platform=linux/arm64 *.args.ROS_DISTRO=${{ env.rosdistro }} *.args.BASE_IMAGE=${{ env[format('{0}', matrix.base_image_env)] }} - *.args.PREBUILT_BASE_IMAGE=${{ env.prebuilt_base_image }} *.args.SETUP_ARGS=${{ matrix.setup-args }} - tag-prefix: ${{ env.rosdistro }}- + *.args.LIB_DIR=${{ matrix.lib_dir }} tag-suffix: ${{ matrix.additional-tag-suffix }}-arm64 + tag-prefix: ${{ env.rosdistro }} + allow-push: true - name: Show disk space run: | diff --git a/.github/workflows/docker-build-and-push-main.yaml b/.github/workflows/docker-build-and-push-main.yaml index f5cebc8eb99..d5fe698bccf 100644 --- a/.github/workflows/docker-build-and-push-main.yaml +++ b/.github/workflows/docker-build-and-push-main.yaml @@ -1,17 +1,32 @@ +# EVENTS: push, schedule, workflow_dispatch +# workflow_dispatch: Build all. No publish to registry; save as tarball. +# schedule: Build only devel and prebuilt. TAGS: date, latest +# push-branch: Build only devel and prebuilt. TAGS: date, latest +# push-tag: Build all. 
TAGS: version, date, latest + name: docker-build-and-push-main on: push: tags: - - v* - - "[0-9]+.[0-9]+*" + - adkit-v*.*.* + branches: + - main schedule: - cron: 0 0 1,15 * * workflow_dispatch: + inputs: + artifacts-destination: + type: choice + description: Destination for the artifacts + options: + - registry + - tarball + default: tarball jobs: docker-build-and-push-main: - runs-on: [self-hosted, linux, X64] + runs-on: ubuntu-latest strategy: fail-fast: false matrix: @@ -21,11 +36,12 @@ jobs: include: - name: no-cuda base_image_env: base_image + lib_dir: x86_64 setup-args: --no-nvidia additional-tag-suffix: "" - name: cuda - base_image_env: cuda_base_image - setup-args: --no-cuda-drivers + base_image_env: base_image + lib_dir: x86_64 additional-tag-suffix: -cuda steps: - name: Check out repository @@ -41,18 +57,19 @@ jobs: cat arm64.env | sed -e "s/^\s*//" -e "/^#/d" >> $GITHUB_ENV fi - - name: Build 'autoware-universe' + - name: Build 'autoware-openadk' uses: ./.github/actions/docker-build-and-push with: - bake-target: autoware-universe + bake-target: autoware-openadk build-args: | *.platform=linux/amd64 *.args.ROS_DISTRO=${{ env.rosdistro }} *.args.BASE_IMAGE=${{ env[format('{0}', matrix.base_image_env)] }} - *.args.PREBUILT_BASE_IMAGE=${{ env.prebuilt_base_image }} *.args.SETUP_ARGS=${{ matrix.setup-args }} - tag-prefix: ${{ env.rosdistro }}- + *.args.LIB_DIR=${{ matrix.lib_dir }} tag-suffix: ${{ matrix.additional-tag-suffix }}-amd64 + tag-prefix: ${{ env.rosdistro }} + allow-push: true - name: Show disk space run: | diff --git a/.github/workflows/setup-docker.yaml b/.github/workflows/setup-docker.yaml index 515002da06a..caca7bcc9ce 100644 --- a/.github/workflows/setup-docker.yaml +++ b/.github/workflows/setup-docker.yaml @@ -4,13 +4,8 @@ on: pull_request: jobs: - load-env: - uses: ./.github/workflows/load-env.yaml - setup-docker: - needs: load-env runs-on: ubuntu-latest - container: ${{ needs.load-env.outputs.base-image }} steps: - name: Check out repository uses: actions/checkout@v4 diff --git a/.github/workflows/setup-universe.yaml b/.github/workflows/setup-universe.yaml index 1750d34b913..5840c359d52 100644 --- a/.github/workflows/setup-universe.yaml +++ b/.github/workflows/setup-universe.yaml @@ -4,13 +4,8 @@ on: pull_request: jobs: - load-env: - uses: ./.github/workflows/load-env.yaml - setup-universe: - needs: load-env runs-on: ubuntu-latest - container: ${{ needs.load-env.outputs.base-image }} steps: - name: Check out repository uses: actions/checkout@v4 diff --git a/.github/workflows/update-docker-manifest.yaml b/.github/workflows/update-docker-manifest.yaml index a82929bcdeb..7c63a54dcb5 100644 --- a/.github/workflows/update-docker-manifest.yaml +++ b/.github/workflows/update-docker-manifest.yaml @@ -6,45 +6,13 @@ on: workflow_dispatch: jobs: - load-env: - uses: ./.github/workflows/load-env.yaml - update-docker-manifest: - needs: load-env runs-on: ubuntu-latest steps: - name: Check out repository uses: actions/checkout@v4 - - name: Combine multi arch images for 'autoware-universe' + - name: Combine multi arch images for 'autoware-openadk' uses: ./.github/actions/combine-multi-arch-images with: - package-name: autoware-universe - - - name: Create alias from 'autoware-universe:{rosdistro}-latest' to 'autoware-universe:latest' - uses: ./.github/actions/create-main-distro-alias - with: - package-name: autoware-universe - rosdistro: ${{ needs.load-env.outputs.rosdistro }} - tag-name: latest - - - name: Create alias from 'autoware-universe:{rosdistro}-latest-prebuilt' to 
'autoware-universe:latest-prebuilt' - uses: ./.github/actions/create-main-distro-alias - with: - package-name: autoware-universe - rosdistro: ${{ needs.load-env.outputs.rosdistro }} - tag-name: latest-prebuilt - - - name: Create alias from 'autoware-universe:{rosdistro}-latest-cuda' to 'autoware-universe:latest-cuda' - uses: ./.github/actions/create-main-distro-alias - with: - package-name: autoware-universe - rosdistro: ${{ needs.load-env.outputs.rosdistro }} - tag-name: latest-cuda - - - name: Create alias from 'autoware-universe:{rosdistro}-latest-prebuilt-cuda' to 'autoware-universe:latest-prebuilt-cuda' - uses: ./.github/actions/create-main-distro-alias - with: - package-name: autoware-universe - rosdistro: ${{ needs.load-env.outputs.rosdistro }} - tag-name: latest-prebuilt-cuda + package-name: autoware-openadk diff --git a/.hadolint.yaml b/.hadolint.yaml new file mode 100644 index 00000000000..10c6f4532b7 --- /dev/null +++ b/.hadolint.yaml @@ -0,0 +1,5 @@ +ignored: + - DL3008 # Pin versions in apt get install. Instead of `apt-get install ` use `apt-get install =` + - DL3013 # Pin versions in pip. Instead of `pip install `, use `pip install ==` + - DL3015 # Avoid additional packages by specifying `--no-install-recommends` + - DL3009 # Delete the apt-get lists after installing something diff --git a/amd64.env b/amd64.env index 76a02c8e780..38eadee0a91 100644 --- a/amd64.env +++ b/amd64.env @@ -1,8 +1,7 @@ rosdistro=humble rmw_implementation=rmw_cyclonedds_cpp -base_image=ubuntu:22.04 -cuda_base_image=ubuntu:22.04 -prebuilt_base_image=ubuntu:22.04 +base_image=ros:humble-ros-base-jammy cuda_version=12.3 cudnn_version=8.9.5.29-1+cuda12.2 tensorrt_version=8.6.1.6-1+cuda12.0 +pre_commit_clang_format_version=17.0.5 diff --git a/ansible/playbooks/core.yaml b/ansible/playbooks/core.yaml deleted file mode 100644 index dd37be36253..00000000000 --- a/ansible/playbooks/core.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- name: Set up source development environments for Autoware Core - hosts: localhost - connection: local - pre_tasks: - - name: Verify OS - ansible.builtin.fail: - msg: Only Ubuntu 22.04 is supported for this branch. Please refer to https://autowarefoundation.github.io/autoware-documentation/main/installation/autoware/source-installation/. 
- when: ansible_distribution_version != '22.04' - - - name: Print args - ansible.builtin.debug: - msg: - - rosdistro: "{{ rosdistro }}" - - rmw_implementation: "{{ rmw_implementation }}" - roles: - - role: autoware.dev_env.autoware_core - - role: autoware.dev_env.ccache - - role: autoware.dev_env.plotjuggler - - role: autoware.dev_env.pre_commit - - role: autoware.dev_env.ros2 - - role: autoware.dev_env.ros2_dev_tools - - role: autoware.dev_env.rmw_implementation diff --git a/ansible/playbooks/docker.yaml b/ansible/playbooks/docker.yaml index 2302909c94b..0a6d5c42acc 100644 --- a/ansible/playbooks/docker.yaml +++ b/ansible/playbooks/docker.yaml @@ -21,7 +21,6 @@ when: prompt_install_nvidia != 'y' roles: - role: autoware.dev_env.cuda - when: prompt_install_nvidia == 'y' and tensorrt_install_devel == 'true' + when: prompt_install_nvidia == 'y' - role: autoware.dev_env.docker_engine - role: autoware.dev_env.nvidia_docker - - role: autoware.dev_env.rocker diff --git a/ansible/playbooks/openadk.yaml b/ansible/playbooks/openadk.yaml new file mode 100644 index 00000000000..75f39038812 --- /dev/null +++ b/ansible/playbooks/openadk.yaml @@ -0,0 +1,46 @@ +- name: Set up source development environments for Autoware Universe + hosts: localhost + connection: local + pre_tasks: + - name: Verify OS + ansible.builtin.fail: + msg: Only Ubuntu 22.04 is supported for this branch. Please refer to https://autowarefoundation.github.io/autoware-documentation/main/installation/autoware/source-installation/. + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version != '22.04' + + - name: Print args + ansible.builtin.debug: + msg: + - module: "{{ module }}" + - rosdistro: "{{ rosdistro }}" + - rmw_implementation: "{{ rmw_implementation }}" + - cuda_version: "{{ cuda_version }}" + - cudnn_version: "{{ cudnn_version }}" + - tensorrt_version: "{{ tensorrt_version }}" + roles: + # Autoware base dependencies + - role: autoware.dev_env.rmw_implementation + when: module == 'base' + - role: autoware.dev_env.gdown + when: module == 'base' + - role: autoware.dev_env.kisak_mesa + when: module == 'base' + + # Module specific dependencies + - role: autoware.dev_env.geographiclib + when: module == 'perception-localization' or module == 'all' + - role: autoware.dev_env.cuda + when: (module == 'perception-localization' or module == 'all') and prompt_install_nvidia=='y' + - role: autoware.dev_env.tensorrt + when: (module == 'perception-localization' or module == 'all') and prompt_install_nvidia=='y' + - role: autoware.dev_env.pacmod + when: module == 'planning-control' or module == 'perception-localization' or module == 'all' + + # Development environment + - role: autoware.dev_env.dev_tools + when: module == 'dev-tools' + - role: autoware.dev_env.ros2_dev_tools + when: module == 'dev-tools' + + # ONNX files and other artifacts + - role: autoware.dev_env.artifacts + when: prompt_download_artifacts == 'y' diff --git a/ansible/playbooks/universe.yaml b/ansible/playbooks/universe.yaml index 3d09c9f259f..fbc42d56577 100644 --- a/ansible/playbooks/universe.yaml +++ b/ansible/playbooks/universe.yaml @@ -34,26 +34,24 @@ [Warning] Skipping installation of NVIDIA libraries. Please manually install them if you plan to use any dependent components. 
when: prompt_install_nvidia != 'y' roles: - # Core - - role: autoware.dev_env.autoware_core - - role: autoware.dev_env.ccache - when: tensorrt_install_devel == 'true' - - role: autoware.dev_env.plotjuggler - - role: autoware.dev_env.pre_commit - when: tensorrt_install_devel == 'true' + # Autoware base dependencies - role: autoware.dev_env.ros2 - role: autoware.dev_env.ros2_dev_tools - role: autoware.dev_env.rmw_implementation + - role: autoware.dev_env.gdown - # Universe - - role: autoware.dev_env.autoware_universe + # Autoware module dependencies + - role: autoware.dev_env.geographiclib - role: autoware.dev_env.cuda - when: prompt_install_nvidia == 'y' and tensorrt_install_devel == 'true' + when: prompt_install_nvidia == 'y' - role: autoware.dev_env.pacmod when: rosdistro != 'rolling' - role: autoware.dev_env.tensorrt when: prompt_install_nvidia == 'y' - - role: autoware.dev_env.git_lfs + + # Autoware devel dependencies + - role: autoware.dev_env.dev_tools + when: install_devel == 'true' # ONNX files and other artifacts - role: autoware.dev_env.artifacts diff --git a/ansible/roles/autoware_universe/meta/main.yaml b/ansible/roles/autoware_universe/meta/main.yaml deleted file mode 100644 index 626281ada2a..00000000000 --- a/ansible/roles/autoware_universe/meta/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - role: autoware.dev_env.autoware_core diff --git a/ansible/roles/ccache/README.md b/ansible/roles/ccache/README.md deleted file mode 100644 index e77979ee4ea..00000000000 --- a/ansible/roles/ccache/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# ccache - -This role installs Ccache. You can access detailed information about Ccache with this [link](https://ccache.dev/). - -## Inputs - -None. - -## Manual Installation - -```bash -sudo apt update && sudo apt install ccache -``` diff --git a/ansible/roles/ccache/tasks/main.yaml b/ansible/roles/ccache/tasks/main.yaml deleted file mode 100644 index 3623238c6b6..00000000000 --- a/ansible/roles/ccache/tasks/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- name: Install ccache - become: true - ansible.builtin.apt: - name: - - ccache - state: latest - update_cache: true diff --git a/ansible/roles/cuda/tasks/main.yaml b/ansible/roles/cuda/tasks/main.yaml index e4a3c2f4107..0a075de1cfb 100644 --- a/ansible/roles/cuda/tasks/main.yaml +++ b/ansible/roles/cuda/tasks/main.yaml @@ -25,20 +25,28 @@ register: cuda__dash_case_cuda_version changed_when: false -- name: Install CUDA libraries except for cuda-drivers +- name: Install CUDA devel libraries except for cuda-drivers become: true ansible.builtin.apt: name: - - cuda-cudart-dev-{{ cuda__dash_case_cuda_version.stdout }} - cuda-command-line-tools-{{ cuda__dash_case_cuda_version.stdout }} - cuda-minimal-build-{{ cuda__dash_case_cuda_version.stdout }} - - cuda-libraries-dev-{{ cuda__dash_case_cuda_version.stdout }} - - cuda-nvml-dev-{{ cuda__dash_case_cuda_version.stdout }} - - libnpp-dev-{{ cuda__dash_case_cuda_version.stdout }} - libcusparse-dev-{{ cuda__dash_case_cuda_version.stdout }} - libcublas-dev-{{ cuda__dash_case_cuda_version.stdout }} - - libnccl-dev + - libcurand-dev-{{ cuda__dash_case_cuda_version.stdout }} update_cache: true + when: install_devel == 'true' + +- name: Install CUDA libraries except for cuda-drivers + become: true + ansible.builtin.apt: + name: + - cuda-minimal-build-{{ cuda__dash_case_cuda_version.stdout }} + - libcusparse-{{ cuda__dash_case_cuda_version.stdout }} + - libcublas-{{ cuda__dash_case_cuda_version.stdout }} + - libcurand-{{ cuda__dash_case_cuda_version.stdout }} + 
update_cache: true + when: install_devel == 'false' - name: Install extra CUDA libraries for x86_64 become: true @@ -71,3 +79,65 @@ state: present create: true mode: 0644 + +- name: Create Vulkan directory + become: true + ansible.builtin.file: + path: /etc/vulkan/icd.d + state: directory + mode: "0755" + +- name: Create OpenGL directory + become: true + ansible.builtin.file: + path: /etc/glvnd/egl_vendor.d + state: directory + mode: "0755" + +- name: Create OpenCL directory + become: true + ansible.builtin.file: + path: /etc/OpenCL/vendors + state: directory + mode: "0755" + +- name: Register Vulkan GPU vendors + become: true + ansible.builtin.get_url: + url: https://gitlab.com/nvidia/container-images/vulkan/raw/dc389b0445c788901fda1d85be96fd1cb9410164/nvidia_icd.json + dest: /etc/vulkan/icd.d/nvidia_icd.json + mode: "0644" + +- name: Set permissions for Vulkan GPU vendors + become: true + ansible.builtin.file: + path: /etc/vulkan/icd.d/nvidia_icd.json + mode: "0644" + +- name: Register OpenGL GPU vendors + become: true + ansible.builtin.get_url: + url: https://gitlab.com/nvidia/container-images/opengl/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json + dest: /etc/glvnd/egl_vendor.d/10_nvidia.json + mode: "0644" + +- name: Set permissions for OpenGL GPU vendors + become: true + ansible.builtin.file: + path: /etc/glvnd/egl_vendor.d/10_nvidia.json + mode: "0644" + +- name: Register OpenCL GPU vendors + become: true + ansible.builtin.file: + path: /etc/OpenCL/vendors/nvidia.icd + state: touch + mode: "0644" + +- name: Set permissions for OpenCL GPU vendors + become: true + ansible.builtin.lineinfile: + path: /etc/OpenCL/vendors/nvidia.icd + line: libnvidia-opencl.so.1 + create: true + mode: "0644" diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md new file mode 100644 index 00000000000..17fb5e1931e --- /dev/null +++ b/ansible/roles/dev_tools/README.md @@ -0,0 +1,9 @@ +# devel + +This role installs development dependencies for Autoware. + +## Inputs + +None. 
+ +## Manual Installation diff --git a/ansible/roles/autoware_core/defaults/main.yaml b/ansible/roles/dev_tools/defaults/main.yaml similarity index 100% rename from ansible/roles/autoware_core/defaults/main.yaml rename to ansible/roles/dev_tools/defaults/main.yaml diff --git a/ansible/roles/autoware_core/meta/main.yaml b/ansible/roles/dev_tools/meta/main.yaml similarity index 100% rename from ansible/roles/autoware_core/meta/main.yaml rename to ansible/roles/dev_tools/meta/main.yaml diff --git a/ansible/roles/git_lfs/tasks/main.yaml b/ansible/roles/dev_tools/tasks/main.yaml similarity index 61% rename from ansible/roles/git_lfs/tasks/main.yaml rename to ansible/roles/dev_tools/tasks/main.yaml index b416dc3237d..62201d86863 100644 --- a/ansible/roles/git_lfs/tasks/main.yaml +++ b/ansible/roles/dev_tools/tasks/main.yaml @@ -17,3 +17,22 @@ ansible.builtin.command: git lfs install when: "'filter.lfs.required' not in git_lfs__git_global_config.config_values" changed_when: true + +- name: Install pre-commit + ansible.builtin.pip: + name: pre-commit + state: latest + executable: pip3 + +- name: Install clang-format + ansible.builtin.pip: + name: clang-format + version: "{{ pre_commit_clang_format_version }}" + executable: pip3 + +- name: Install Go + become: true + ansible.builtin.apt: + name: golang + state: latest + update_cache: true diff --git a/ansible/roles/autoware_core/README.md b/ansible/roles/gdown/README.md similarity index 59% rename from ansible/roles/autoware_core/README.md rename to ansible/roles/gdown/README.md index a834adb9e79..4d5e1e5dbab 100644 --- a/ansible/roles/autoware_core/README.md +++ b/ansible/roles/gdown/README.md @@ -1,6 +1,6 @@ -# autoware_core +# Role: gdown -This role installs development/runtime dependencies for Autoware Core. +This role installs gdown to download files from CMakeLists.txt. ## Inputs diff --git a/ansible/roles/autoware_universe/defaults/main.yaml b/ansible/roles/gdown/defaults/main.yaml similarity index 100% rename from ansible/roles/autoware_universe/defaults/main.yaml rename to ansible/roles/gdown/defaults/main.yaml diff --git a/ansible/roles/ccache/defaults/main.yaml b/ansible/roles/gdown/meta/main.yaml similarity index 100% rename from ansible/roles/ccache/defaults/main.yaml rename to ansible/roles/gdown/meta/main.yaml diff --git a/ansible/roles/autoware_core/tasks/main.yaml b/ansible/roles/gdown/tasks/main.yaml similarity index 100% rename from ansible/roles/autoware_core/tasks/main.yaml rename to ansible/roles/gdown/tasks/main.yaml diff --git a/ansible/roles/autoware_universe/README.md b/ansible/roles/geographiclib/README.md similarity index 64% rename from ansible/roles/autoware_universe/README.md rename to ansible/roles/geographiclib/README.md index e3ab6300083..8ab94806be3 100644 --- a/ansible/roles/autoware_universe/README.md +++ b/ansible/roles/geographiclib/README.md @@ -1,6 +1,4 @@ -# autoware_universe - -This role installs development/runtime dependencies for Autoware Universe. 
+# geographiclib ## Inputs diff --git a/ansible/roles/ccache/meta/main.yaml b/ansible/roles/geographiclib/defaults/main.yaml similarity index 100% rename from ansible/roles/ccache/meta/main.yaml rename to ansible/roles/geographiclib/defaults/main.yaml diff --git a/ansible/roles/git_lfs/defaults/main.yaml b/ansible/roles/geographiclib/meta/main.yaml similarity index 100% rename from ansible/roles/git_lfs/defaults/main.yaml rename to ansible/roles/geographiclib/meta/main.yaml diff --git a/ansible/roles/autoware_universe/tasks/main.yaml b/ansible/roles/geographiclib/tasks/main.yaml similarity index 100% rename from ansible/roles/autoware_universe/tasks/main.yaml rename to ansible/roles/geographiclib/tasks/main.yaml diff --git a/ansible/roles/git_lfs/README.md b/ansible/roles/git_lfs/README.md deleted file mode 100644 index a6b8da6e0a4..00000000000 --- a/ansible/roles/git_lfs/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Git Large File Storage - -This role installs Git LFS. You can access detailed information about Git LFS with [this link](https://git-lfs.github.com/). - -## Inputs - -None. - -## Manual Installation - -```bash -sudo apt install git-lfs -git lfs install -``` diff --git a/ansible/roles/kisak_mesa/README.md b/ansible/roles/kisak_mesa/README.md new file mode 100644 index 00000000000..937de6afa36 --- /dev/null +++ b/ansible/roles/kisak_mesa/README.md @@ -0,0 +1,3 @@ +# Kisak Mesa Fix for Ubuntu 22.04 for Rviz2 + + diff --git a/ansible/roles/git_lfs/meta/main.yaml b/ansible/roles/kisak_mesa/defaults/main.yaml similarity index 100% rename from ansible/roles/git_lfs/meta/main.yaml rename to ansible/roles/kisak_mesa/defaults/main.yaml diff --git a/ansible/roles/plotjuggler/defaults/main.yaml b/ansible/roles/kisak_mesa/meta/main.yaml similarity index 100% rename from ansible/roles/plotjuggler/defaults/main.yaml rename to ansible/roles/kisak_mesa/meta/main.yaml diff --git a/ansible/roles/kisak_mesa/tasks/main.yaml b/ansible/roles/kisak_mesa/tasks/main.yaml new file mode 100644 index 00000000000..910c6f38b62 --- /dev/null +++ b/ansible/roles/kisak_mesa/tasks/main.yaml @@ -0,0 +1,24 @@ +- name: Install additional dependencies + become: true + ansible.builtin.apt: + name: software-properties-common + state: present + +- name: Add Kisak Mesa PPA + become: true + ansible.builtin.apt_repository: + repo: ppa:kisak/kisak-mesa + +- name: Install Mesa libraries + become: true + ansible.builtin.apt: + name: + - libegl-mesa0 + - libegl1-mesa-dev + - libgbm-dev + - libgbm1 + - libgl1-mesa-dev + - libgl1-mesa-dri + - libglapi-mesa + - libglx-mesa0 + state: present diff --git a/ansible/roles/nvidia_docker/tasks/main.yaml b/ansible/roles/nvidia_docker/tasks/main.yaml index 215354fbffe..b1197fd21cf 100644 --- a/ansible/roles/nvidia_docker/tasks/main.yaml +++ b/ansible/roles/nvidia_docker/tasks/main.yaml @@ -1,30 +1,34 @@ -- name: Authorize NVIDIA Docker GPG key +- name: Add NVIDIA container toolkit GPG key become: true ansible.builtin.apt_key: - url: https://nvidia.github.io/nvidia-docker/gpgkey + url: https://nvidia.github.io/libnvidia-container/gpgkey + state: present + keyring: /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg -- name: Save result of '. /etc/os-release;echo $ID$VERSION_ID' - ansible.builtin.shell: . 
/etc/os-release;echo $ID$VERSION_ID - register: nvidia_docker__distribution - changed_when: false - -- name: Save result of 'curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list' - ansible.builtin.uri: - url: https://nvidia.github.io/nvidia-docker/{{ nvidia_docker__distribution.stdout }}/nvidia-docker.list - return_content: true - register: nvidia_docker_list - -# curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list -- name: Add NVIDIA Docker apt repository to source list +- name: Add NVIDIA container toolkit repository become: true - ansible.builtin.copy: - dest: /etc/apt/sources.list.d/nvidia-docker.list - content: "{{ nvidia_docker_list.content }}" - mode: 0644 + ansible.builtin.apt_repository: + repo: deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) / + state: present + filename: nvidia-container-toolkit + update_cache: true - name: Install NVIDIA Container Toolkit become: true ansible.builtin.apt: name: - - nvidia-docker2 + - nvidia-container-toolkit update_cache: true + +- name: Add NVIDIA runtime support to docker engine + become: true + ansible.builtin.shell: | + nvidia-ctk runtime configure --runtime=docker + changed_when: true + +- name: Restart docker daemon + become: true + ansible.builtin.systemd: + name: docker + state: restarted + changed_when: true diff --git a/ansible/roles/pacmod/meta/main.yaml b/ansible/roles/pacmod/meta/main.yaml index 81b20fe4831..e69de29bb2d 100644 --- a/ansible/roles/pacmod/meta/main.yaml +++ b/ansible/roles/pacmod/meta/main.yaml @@ -1,2 +0,0 @@ -dependencies: - - role: autoware.dev_env.ros2 diff --git a/ansible/roles/pacmod/tasks/main.yaml b/ansible/roles/pacmod/tasks/main.yaml index ed96c249ef4..d3fc094034e 100644 --- a/ansible/roles/pacmod/tasks/main.yaml +++ b/ansible/roles/pacmod/tasks/main.yaml @@ -1,3 +1,11 @@ +- name: Install plotjuggler + become: true + ansible.builtin.apt: + name: + - ros-{{ rosdistro }}-plotjuggler-ros + state: latest + update_cache: true + - name: Install apt-transport-https become: true ansible.builtin.apt: diff --git a/ansible/roles/plotjuggler/README.md b/ansible/roles/plotjuggler/README.md deleted file mode 100644 index 0aceac25c4a..00000000000 --- a/ansible/roles/plotjuggler/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# plotjuggler - -This role installs PlotJuggler. You can access detailed information about PlotJuggler with this [link](https://www.plotjuggler.io/). - -## Inputs - -None. 
- -## Manual Installation - -```bash -wget -O /tmp/amd64.env https://raw.githubusercontent.com/autowarefoundation/autoware/main/amd64.env && source /tmp/amd64.env - -sudo apt update && sudo apt install -y \ - ros-$rosdistro-plotjuggler-ros -``` diff --git a/ansible/roles/plotjuggler/meta/main.yaml b/ansible/roles/plotjuggler/meta/main.yaml deleted file mode 100644 index 81b20fe4831..00000000000 --- a/ansible/roles/plotjuggler/meta/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - role: autoware.dev_env.ros2 diff --git a/ansible/roles/plotjuggler/tasks/main.yaml b/ansible/roles/plotjuggler/tasks/main.yaml deleted file mode 100644 index 07197044a14..00000000000 --- a/ansible/roles/plotjuggler/tasks/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- name: Install plotjuggler - become: true - ansible.builtin.apt: - name: - - ros-{{ rosdistro }}-plotjuggler-ros - state: latest - update_cache: true diff --git a/ansible/roles/pre_commit/README.md b/ansible/roles/pre_commit/README.md deleted file mode 100644 index de9fe77522b..00000000000 --- a/ansible/roles/pre_commit/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# pre_commit - -This role installs dependent tools for [pre-commit](https://pre-commit.com/). - -## Inputs - -| Name | Required | Description | -| ------------------------------- | -------- | --------------------------- | -| pre_commit_clang_format_version | false | The version of ClangFormat. | - -## Manual Installation - -The `pre_commit_clang_format_version` variable can also be found in: -[./defaults/main.yaml](./defaults/main.yaml) - -```bash -pre_commit_clang_format_version=17.0.6 -pip3 install pre-commit clang-format==${pre_commit_clang_format_version} - -sudo apt install golang -``` diff --git a/ansible/roles/pre_commit/defaults/main.yaml b/ansible/roles/pre_commit/defaults/main.yaml deleted file mode 100644 index 34550552379..00000000000 --- a/ansible/roles/pre_commit/defaults/main.yaml +++ /dev/null @@ -1 +0,0 @@ -pre_commit_clang_format_version: 17.0.6 diff --git a/ansible/roles/pre_commit/meta/main.yaml b/ansible/roles/pre_commit/meta/main.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/ansible/roles/pre_commit/tasks/main.yaml b/ansible/roles/pre_commit/tasks/main.yaml deleted file mode 100644 index bd185cc164e..00000000000 --- a/ansible/roles/pre_commit/tasks/main.yaml +++ /dev/null @@ -1,18 +0,0 @@ -- name: Install pre-commit - ansible.builtin.pip: - name: pre-commit - state: latest - executable: pip3 - -- name: Install clang-format - ansible.builtin.pip: - name: clang-format - version: "{{ pre_commit_clang_format_version }}" - executable: pip3 - -- name: Install Go - become: true - ansible.builtin.apt: - name: golang - state: latest - update_cache: true diff --git a/ansible/roles/rmw_implementation/meta/main.yaml b/ansible/roles/rmw_implementation/meta/main.yaml index 81b20fe4831..e69de29bb2d 100644 --- a/ansible/roles/rmw_implementation/meta/main.yaml +++ b/ansible/roles/rmw_implementation/meta/main.yaml @@ -1,2 +0,0 @@ -dependencies: - - role: autoware.dev_env.ros2 diff --git a/ansible/roles/rocker/README.md b/ansible/roles/rocker/README.md deleted file mode 100644 index ded68609c11..00000000000 --- a/ansible/roles/rocker/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# rocker - -This role installs [osrf/rocker](https://github.com/osrf/rocker) following the [installation guide](https://github.com/osrf/rocker/#installation). - -## Inputs - -None. 
- -## Manual Installation - -Install rocker: - -```bash -# Taken from: https://github.com/osrf/rocker#installation - -# Add the ROS 2 apt repository to your system. First authorize our GPG key with apt. -sudo apt update && sudo apt install curl gnupg lsb-release -sudo curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg - -# Then add the repository to your sources list. -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(source /etc/os-release && echo $UBUNTU_CODENAME) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null - -sudo apt update -sudo apt-get install python3-rocker -``` diff --git a/ansible/roles/rocker/defaults/main.yaml b/ansible/roles/rocker/defaults/main.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/ansible/roles/rocker/meta/main.yaml b/ansible/roles/rocker/meta/main.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/ansible/roles/rocker/tasks/main.yaml b/ansible/roles/rocker/tasks/main.yaml deleted file mode 100644 index 885d2895342..00000000000 --- a/ansible/roles/rocker/tasks/main.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# sudo curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg -- name: Authorize ROS GPG key - become: true - ansible.builtin.get_url: - url: https://raw.githubusercontent.com/ros/rosdistro/master/ros.key - dest: /usr/share/keyrings/ros-archive-keyring.gpg - mode: 644 - -- name: Save result of 'dpkg --print-architecture' - ansible.builtin.command: dpkg --print-architecture - register: rocker__deb_architecture - changed_when: false - -- name: Save result of 'lsb_release -cs' - ansible.builtin.command: lsb_release -cs - register: rocker__lsb_release_cs - changed_when: false - -# echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null -- name: Add ROS 2 apt repository to source list - become: true - ansible.builtin.apt_repository: - repo: deb [arch={{ rocker__deb_architecture.stdout }} signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu {{ rocker__lsb_release_cs.stdout }} main - filename: ros2 - state: present - update_cache: true - -- name: Install rocker - become: true - ansible.builtin.apt: - name: - - python3-rocker - update_cache: true diff --git a/ansible/roles/ros2_dev_tools/meta/main.yaml b/ansible/roles/ros2_dev_tools/meta/main.yaml index 81b20fe4831..e69de29bb2d 100644 --- a/ansible/roles/ros2_dev_tools/meta/main.yaml +++ b/ansible/roles/ros2_dev_tools/meta/main.yaml @@ -1,2 +0,0 @@ -dependencies: - - role: autoware.dev_env.ros2 diff --git a/ansible/roles/tensorrt/defaults/main.yaml b/ansible/roles/tensorrt/defaults/main.yaml index 02edd251d82..e69de29bb2d 100644 --- a/ansible/roles/tensorrt/defaults/main.yaml +++ b/ansible/roles/tensorrt/defaults/main.yaml @@ -1 +0,0 @@ -tensorrt_install_devel: true diff --git a/ansible/roles/tensorrt/tasks/main.yaml b/ansible/roles/tensorrt/tasks/main.yaml index df85ae75bb5..8e01abf1e8c 100644 --- a/ansible/roles/tensorrt/tasks/main.yaml +++ b/ansible/roles/tensorrt/tasks/main.yaml @@ -23,7 +23,7 @@ allow_change_held_packages: true allow_downgrade: true update_cache: true - when: tensorrt_install_devel | bool + when: install_devel == 'true' # 
apt-mark hold - name: Prevent CUDA-related packages from upgrading @@ -49,4 +49,4 @@ - libnvinfer-plugin-dev - libnvparsers-dev - libnvonnxparsers-dev - when: tensorrt_install_devel | bool + when: install_devel == 'true' diff --git a/docker/README.md b/docker/README.md index f9186843b22..19be1aec827 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,86 +1,74 @@ -# Docker images for Autoware +# Open AD Kit: Containerized Workloads for Autoware -We have two types of Docker image: `development` and `prebuilt`. +Open AD Kit offers two types of Docker image to let you get started with Autoware quickly: `devel` and `runtime`. -1. The `development` image enables you to develop Autoware without setting up the local development environment. -2. The `prebuilt` image contains executables and enables you to try out Autoware quickly. - - Note that the prebuilt image is not designed for deployment on a real vehicle! +1. The `devel` image contains the development environment and enables you to build and develop Autoware from source. Keep in mind that the `devel` image always includes more recent changes than the `runtime` image to provide the latest development environment. +2. The `runtime` image contains only runtime executables and enables you to try out Autoware quickly. The `runtime` image is more stable than the `devel` image and is recommended for production use. -**Note**: Before proceeding, confirm and agree with the [NVIDIA Deep Learning Container license](https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license). By pulling and using the Autoware Universe images, you accept the terms and conditions of the license. +**Note**: Before proceeding, confirm and agree with the [NVIDIA Deep Learning Container license](https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license). By pulling and using the Autoware Open AD Kit images, you accept the terms and conditions of the license. ## Prerequisites -- [Docker](https://docs.docker.com/engine/install/ubuntu/) -- [rocker](https://github.com/osrf/rocker) - - We use `rocker` to enable GUI applications such as `rviz` and `rqt` on Docker Containers. - - Refer to [here](http://wiki.ros.org/docker/Tutorials/GUI) for more details. +- Docker +- NVIDIA Container Toolkit (optional) +- NVIDIA CUDA 12 compatible GPU Driver (optional) -The [setup script](../setup-dev-env.sh) will install these dependencies through the following roles. +The [setup script](../setup-dev-env.sh) will install all required dependencies: -- [Docker](../ansible/roles/docker_engine/README.md) -- [rocker](../ansible/roles/rocker/README.md) - -## Usage +```bash +./setup-dev-env.sh --docker +``` -### Development image +To install without NVIDIA GPU support: ```bash -docker run --rm -it \ - -v {path_to_your_workspace}:/autoware \ - ghcr.io/autowarefoundation/autoware-universe:latest +./setup-dev-env.sh --docker --no-nvidia ``` -To run with `rocker`: +## Usage + +### Runtime -If you use `rocker<=0.2.9`, add an option of `--env NVIDIA_DRIVER_CAPABILITIES=""` or `--env NVIDIA_DRIVER_CAPABILITIES=compute,utility,graphics` to avoid the CUDA environment error. For more details, see [this issue](https://github.com/autowarefoundation/autoware/issues/2452). 
+You can use `run.sh` to run the Autoware runtime container with your map data: ```bash -rocker --nvidia --x11 --user \ - --volume {path_to_your_workspace} \ - -- ghcr.io/autowarefoundation/autoware-universe:latest +./docker/run.sh --map-path path_to_map_data ``` -If you locate your workspace under your home directory, you can use the `--home` option instead: +**Note**: You can use `--no-nvidia` to run without NVIDIA GPU support, and `--headless` to run without a display (no RViz visualization). + +For more launch options, you can override the launch command with `--launch-cmd`: ```bash -rocker --nvidia --x11 --user --home \ - -- ghcr.io/autowarefoundation/autoware-universe:latest +./docker/run.sh --map-path path_to_map_data --launch-cmd "ros2 launch autoware_launch autoware.launch.xml map_path:=/autoware_map vehicle_model:=sample_vehicle sensor_model:=sample_sensor_kit" ``` -To use a customized `.bashrc` for the container: +### Using Development Container ```bash -rocker --nvidia --x11 --user --home \ - --volume $HOME/.bashrc.container:$HOME/.bashrc \ - -- ghcr.io/autowarefoundation/autoware-universe:latest +./docker/run.sh --devel ``` -### Prebuilt image +**Note**: By default, the current directory is mounted into the container as the workspace; you can change the workspace path with `--workspace path_to_workspace`. For development environments without NVIDIA GPU support, use `--no-nvidia`. -```bash -docker run --rm -it \ - ghcr.io/autowarefoundation/autoware-universe:latest-prebuilt -``` +#### Using VS Code Remote Containers for Development -To run with `rocker`: +Install Visual Studio Code's [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension. +Then reopen the workspace in the container by selecting `Remote-Containers: Reopen in Container` from the Command Palette (`F1`). -```bash -rocker --nvidia --x11 --user \ - --volume {path_to_your_workspace} \ - -- ghcr.io/autowarefoundation/autoware-universe:latest-prebuilt -``` +By default, the devcontainer assumes NVIDIA GPU support; you can change this by deleting these lines within `.devcontainer/devcontainer.json`: -If you intend to use pre-existing data such as maps or Rosbags, modify the `--volume` options shown below. +```json + "hostRequirements": { + "gpu": true + }, +``` -```bash -rocker --nvidia --x11 --user \ - --volume {path_to_your_workspace} \ - --volume {path_to_your_map_data} \ - --volume {path_to_your_log_data} \ - -- ghcr.io/autowarefoundation/autoware-universe:latest-prebuilt ``` +```json + "--gpus", "all" ``` -## Building Docker images on your local machine +## Building Docker images from scratch If you want to build these images locally for development purposes, run the following command: @@ -95,67 +83,19 @@ To build without CUDA, use the `--no-cuda` option: ./docker/build.sh --no-cuda ``` -To specify the platform, use the `--platform` option: +To build only the development image, use the `--devel-only` option: ```bash -./docker/build.sh --platform linux/amd64 -./docker/build.sh --platform linux/arm64 +./docker/build.sh --devel-only ``` -## Tips - -### Precautions for not using `rocker` - -If either image is run without `rocker`, then `root` privileges will be used. 
-This can affect your local environment as below: +To specify the platform, use the `--platform` option: -```sh-session -$ docker run --rm -it -v {path_to_your_workspace}:/autoware ghcr.io/autowarefoundation/autoware-universe:latest -# colcon build -# exit -$ rm build/COLCON_IGNORE -rm: remove write-protected regular empty file 'build/COLCON_IGNORE'? y -rm: cannot remove 'build/COLCON_IGNORE': Permission denied +```bash +./docker/build.sh --platform linux/amd64 +./docker/build.sh --platform linux/arm64 ``` -To prevent this error occurring when rocker is not used, there are two suggested methods: - -1. Prepare a dedicated workspace for the docker image. -2. Use Visual Studio Code's [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension. - - To use the extension, the following settings can be used to create a user account in a similar way to `rocker. - Refer to [this document](https://code.visualstudio.com/remote/advancedcontainers/add-nonroot-user) for more details. - - ```jsonc - // .devcontainer/devcontainer.json - { - "name": "Autoware", - "build": { - "dockerfile": "Dockerfile" - }, - "remoteUser": "autoware", - "settings": { - "terminal.integrated.defaultProfile.linux": "bash" - } - } - ``` - - ```docker - # .devcontainer/Dockerfile - FROM ghcr.io/autowarefoundation/autoware-universe:latest - - ARG USERNAME=autoware - ARG USER_UID=1000 - ARG USER_GID=$USER_UID - - RUN groupadd --gid $USER_GID $USERNAME \ - && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \ - && apt-get update \ - && apt-get install -y sudo \ - && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ - && chmod 0440 /etc/sudoers.d/$USERNAME - ``` - ### Using Docker images other than `latest` There are also images versioned based on the `date` or `release tag`. 
diff --git a/docker/autoware-openadk/Dockerfile b/docker/autoware-openadk/Dockerfile new file mode 100644 index 00000000000..71f94fe373b --- /dev/null +++ b/docker/autoware-openadk/Dockerfile @@ -0,0 +1,137 @@ +# hadolint global ignore=DL3006,DL3008,DL3009,DL3015,DL3013,DL3027,DL3042 +ARG BASE_IMAGE + +FROM $BASE_IMAGE as base +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +ARG ROS_DISTRO + +# Install apt packages +# hadolint ignore=DL3008 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install --no-install-recommends \ + git \ + ssh \ + wget \ + cmake \ + curl \ + gosu \ + ccache \ + gnupg \ + vim \ + unzip \ + lsb-release \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache + +# Add GitHub to known hosts for private repositories +RUN mkdir -p ~/.ssh \ + && ssh-keyscan github.com >> ~/.ssh/known_hosts + +# Copy files +COPY setup-dev-env.sh ansible-galaxy-requirements.yaml amd64.env arm64.env /autoware/ +COPY ansible/ /autoware/ansible/ +WORKDIR /autoware + +# Set up base environment +RUN --mount=type=ssh \ + ./setup-dev-env.sh -y --module base --runtime openadk \ + && pip uninstall -y ansible ansible-core \ + && pip install vcstool \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache \ + && echo "source /opt/ros/${ROS_DISTRO}/setup.bash" > /etc/bash.bashrc + +# Create entrypoint +CMD ["/bin/bash"] + +FROM base as prebuilt +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +ARG ROS_DISTRO +ARG SETUP_ARGS +ARG ROS_DISTRO +ENV CCACHE_DIR=/ccache +ENV CC="/usr/lib/ccache/gcc" +ENV CXX="/usr/lib/ccache/g++" + +# Set up development environment +RUN --mount=type=ssh \ + ./setup-dev-env.sh -y --module all ${SETUP_ARGS} --no-cuda-drivers openadk \ + && pip uninstall -y ansible ansible-core \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache \ + && find / -name 'libcu*.a' -delete \ + && find / -name 'libnv*.a' -delete + +# Copy repository files +COPY autoware.repos /autoware/ + +# Install rosdep dependencies +RUN --mount=type=ssh \ + mkdir src \ + && vcs import src < autoware.repos \ + && apt-get update \ + && rosdep update \ + && DEBIAN_FRONTEND=noninteractive rosdep install -y --ignore-src --from-paths src --rosdistro "$ROS_DISTRO" \ + && source /opt/ros/"$ROS_DISTRO"/setup.bash \ + && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release --cmake-args \ + " -Wno-dev" \ + " --no-warn-unused-cli" \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ + && find /autoware/install -type d -exec chmod 777 {} \; \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache \ + && rm -rf /autoware/build /autoware/src + +CMD ["/bin/bash"] + +FROM prebuilt as devel +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +# Install development tools and artifacts +RUN --mount=type=ssh \ + ./setup-dev-env.sh -y --module dev-tools --download-artifacts openadk \ + && pip uninstall -y ansible ansible-core \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache + +# Change working directory to workspace +WORKDIR /workspace + +# Create entrypoint +COPY docker/autoware-openadk/etc/ros_entrypoint.sh /ros_entrypoint.sh +RUN chmod +x /ros_entrypoint.sh +ENTRYPOINT ["/ros_entrypoint.sh"] +CMD ["/bin/bash"] + +FROM base as runtime +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +ARG ROS_DISTRO +ARG LIB_DIR +ARG SETUP_ARGS + +# Set up runtime environment and artifacts +COPY autoware.repos /autoware/ +RUN 
--mount=type=ssh \ + ./setup-dev-env.sh -y --module all ${SETUP_ARGS} --download-artifacts --no-cuda-drivers --runtime openadk \ + && pip uninstall -y ansible ansible-core \ + && mkdir src \ + && vcs import src < autoware.repos \ + && rosdep update \ + && DEBIAN_FRONTEND=noninteractive rosdep install -y --dependency-types=exec --ignore-src --from-paths src --rosdistro "$ROS_DISTRO" \ + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* "$HOME"/.cache \ + && find /usr/lib/$LIB_DIR-linux-gnu -name "*.a" -type f -delete \ + && find / -name "*.o" -type f -delete \ + && find / -name "*.h" -type f -delete \ + && find / -name "*.hpp" -type f -delete \ + && rm -rf /autoware/src /autoware/ansible /autoware/autoware.repos \ + /root/.local/pipx /opt/ros/"$ROS_DISTRO"/include /etc/apt/sources.list.d/cuda*.list \ + /etc/apt/sources.list.d/docker.list /etc/apt/sources.list.d/nvidia-docker.list \ + /usr/include /usr/share/doc /usr/lib/gcc /usr/lib/jvm /usr/lib/llvm* + +# Copy prebuilt binaries +COPY --from=prebuilt /autoware/install/ /autoware/install/ + +# Copy bash aliases +COPY docker/autoware-openadk/etc/.bash_aliases /root/.bash_aliases +RUN echo "source /autoware/install/setup.bash" > /etc/bash.bashrc + +# Create entrypoint +COPY docker/autoware-openadk/etc/ros_entrypoint.sh /ros_entrypoint.sh +RUN chmod +x /ros_entrypoint.sh +ENTRYPOINT ["/ros_entrypoint.sh"] +CMD ["bash"] diff --git a/docker/autoware-openadk/docker-bake.hcl b/docker/autoware-openadk/docker-bake.hcl new file mode 100644 index 00000000000..d472a0df008 --- /dev/null +++ b/docker/autoware-openadk/docker-bake.hcl @@ -0,0 +1,26 @@ +group "default" { + targets = ["prebuilt", "devel", "runtime"] +} + +// For docker/metadata-action +target "docker-metadata-action-prebuilt" {} +target "docker-metadata-action-devel" {} +target "docker-metadata-action-runtime" {} + +target "prebuilt" { + inherits = ["docker-metadata-action-prebuilt"] + dockerfile = "docker/autoware-openadk/Dockerfile" + target = "prebuilt" +} + +target "devel" { + inherits = ["docker-metadata-action-devel"] + dockerfile = "docker/autoware-openadk/Dockerfile" + target = "devel" +} + +target "runtime" { + inherits = ["docker-metadata-action-runtime"] + dockerfile = "docker/autoware-openadk/Dockerfile" + target = "runtime" +} diff --git a/docker/autoware-openadk/etc/.bash_aliases b/docker/autoware-openadk/etc/.bash_aliases new file mode 100644 index 00000000000..f6200449b1c --- /dev/null +++ b/docker/autoware-openadk/etc/.bash_aliases @@ -0,0 +1,28 @@ +#!/bin/bash + +# planning simulation +function download_planning_map() { + if [ ! -f ~/autoware_map/sample-map-planning.zip ]; then + gdown -O ~/autoware_map/ 'https://docs.google.com/uc?export=download&id=1499_nsbUbIeturZaDj7jhUownh5fvXHd' + unzip -d ~/autoware_map ~/autoware_map/sample-map-planning.zip + fi +} +alias awf-launch-planning-sim='download_planning_map&&ros2 launch autoware_launch planning_simulator.launch.xml map_path:=$HOME/autoware_map/sample-map-planning vehicle_model:=sample_vehicle sensor_model:=sample_sensor_kit' + +# rosbag replay simulation +function download_rosbag_map() { + if [ ! 
-f ~/autoware_map/sample-map-rosbag.zip ]; then + gdown -O ~/autoware_map/ 'https://docs.google.com/uc?export=download&id=1A-8BvYRX3DhSzkAnOcGWFw5T30xTlwZI' + unzip -d ~/autoware_map/ ~/autoware_map/sample-map-rosbag.zip + fi +} +alias awf-launch-sample-rosbag-sim='download_rosbag_artifacts&&download_rosbag_map&&ros2 launch autoware_launch logging_simulator.launch.xml map_path:=$HOME/autoware_map/sample-map-rosbag vehicle_model:=sample_vehicle sensor_model:=sample_sensor_kit' + +# play sample rosbag +function download_rosbag_file() { + if [ ! -f ~/autoware_map/sample-rosbag.zip ]; then + gdown -O ~/autoware_map/ 'https://docs.google.com/uc?export=download&id=1VnwJx9tI3kI_cTLzP61ktuAJ1ChgygpG' + unzip -d ~/autoware_map/ ~/autoware_map/sample-rosbag.zip + fi +} +alias awf-play-sample-rosbag='download_rosbag_file&&ros2 bag play ~/autoware_map/sample-rosbag/sample.db3 -r 0.35 -s sqlite3' diff --git a/docker/autoware-openadk/etc/ros_entrypoint.sh b/docker/autoware-openadk/etc/ros_entrypoint.sh new file mode 100644 index 00000000000..cbe45ef3bdb --- /dev/null +++ b/docker/autoware-openadk/etc/ros_entrypoint.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# shellcheck disable=SC1090,SC1091 + +# Get the user ID and group ID of the local user +USER_ID=${LOCAL_UID} +USER_NAME=${LOCAL_USER} +GROUP_ID=${LOCAL_GID} +GROUP_NAME=${LOCAL_GROUP} + +# Check if any of the variables are empty +if [[ -z $USER_ID || -z $USER_NAME || -z $GROUP_ID || -z $GROUP_NAME ]]; then + source "/opt/ros/$ROS_DISTRO/setup.bash" + source /autoware/install/setup.bash + exec "$@" +else + echo "Starting with user: $USER_NAME >> UID $USER_ID, GID: $GROUP_ID" + + # Create group and user with GID/UID + groupadd -g "$GROUP_ID" "$GROUP_NAME" + useradd -u "$USER_ID" -g "$GROUP_ID" -s /bin/bash -m -d /home/"$USER_NAME" "$USER_NAME" + + # Add sudo privileges to the user + echo "$USER_NAME ALL=(ALL) NOPASSWD:ALL" >>/etc/sudoers + + # Source ROS2 + # hadolint ignore=SC1090 + source "/opt/ros/$ROS_DISTRO/setup.bash" + source /autoware/install/setup.bash + + # Execute the command as the user + exec /usr/sbin/gosu "$USER_NAME" "$@" +fi diff --git a/docker/autoware-universe/Dockerfile b/docker/autoware-universe/Dockerfile deleted file mode 100644 index a7e1465f0f9..00000000000 --- a/docker/autoware-universe/Dockerfile +++ /dev/null @@ -1,139 +0,0 @@ -# Image args should come at the beginning. 
-ARG BASE_IMAGE -ARG PREBUILT_BASE_IMAGE -# hadolint ignore=DL3006 -FROM $BASE_IMAGE as devel -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ARG ROS_DISTRO -ARG SETUP_ARGS - -## Install apt packages -# hadolint ignore=DL3008 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install --no-install-recommends \ - git \ - ssh \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -## Copy files -COPY autoware.repos setup-dev-env.sh ansible-galaxy-requirements.yaml amd64.env arm64.env /autoware/ -COPY ansible/ /autoware/ansible/ -WORKDIR /autoware -RUN ls /autoware - -## Add GitHub to known hosts for private repositories -RUN mkdir -p ~/.ssh \ - && ssh-keyscan github.com >> ~/.ssh/known_hosts - -## Set up development environment -RUN --mount=type=ssh \ - ./setup-dev-env.sh -y $SETUP_ARGS universe \ - && pip uninstall -y ansible ansible-core \ - && mkdir src \ - && vcs import src < autoware.repos \ - && rosdep update \ - && DEBIAN_FRONTEND=noninteractive rosdep install -y --ignore-src --from-paths src --rosdistro "$ROS_DISTRO" \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -## Clean up unnecessary files -RUN rm -rf \ - "$HOME"/.cache \ - /etc/apt/sources.list.d/cuda*.list \ - /etc/apt/sources.list.d/docker.list \ - /etc/apt/sources.list.d/nvidia-docker.list - -## Register Vulkan GPU vendors -RUN curl https://gitlab.com/nvidia/container-images/vulkan/raw/dc389b0445c788901fda1d85be96fd1cb9410164/nvidia_icd.json -o /etc/vulkan/icd.d/nvidia_icd.json \ - && chmod 644 /etc/vulkan/icd.d/nvidia_icd.json -RUN curl https://gitlab.com/nvidia/container-images/opengl/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json -o /etc/glvnd/egl_vendor.d/10_nvidia.json \ - && chmod 644 /etc/glvnd/egl_vendor.d/10_nvidia.json - -## Register OpenCL GPU vendors -RUN mkdir -p /etc/OpenCL/vendors \ - && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd \ - && chmod 644 /etc/OpenCL/vendors/nvidia.icd - -## TODO: remove/re-evaluate after Ubuntu 24.04 is released -## Fix OpenGL issues (e.g. 
black screen in rviz2) due to old mesa lib in Ubuntu 22.04 -## See https://github.com/autowarefoundation/autoware.universe/issues/2789 -# hadolint ignore=DL3008 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y software-properties-common \ - && apt-add-repository ppa:kisak/kisak-mesa \ - && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ - libegl-mesa0 libegl1-mesa-dev libgbm-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri libglapi-mesa libglx-mesa0 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -## Create entrypoint -# hadolint ignore=DL3059 -RUN echo "source /opt/ros/${ROS_DISTRO}/setup.bash" > /etc/bash.bashrc -CMD ["/bin/bash"] - -FROM devel as builder -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -## Build and change permission for runtime data conversion -RUN source /opt/ros/"$ROS_DISTRO"/setup.bash \ - && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release \ - && find /autoware/install -type d -exec chmod 777 {} \; - -# hadolint ignore=DL3006 -FROM $PREBUILT_BASE_IMAGE as prebuilt - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ARG ROS_DISTRO -ARG SETUP_ARGS - -## Install apt packages -# hadolint ignore=DL3008 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install --no-install-recommends \ - git \ - ssh \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -## Copy files -COPY autoware.repos setup-dev-env.sh ansible-galaxy-requirements.yaml amd64.env arm64.env /autoware/ -COPY ansible/ /autoware/ansible/ -WORKDIR /autoware -RUN ls /autoware - -## Add GitHub to known hosts for private repositories -RUN mkdir -p ~/.ssh \ - && ssh-keyscan github.com >> ~/.ssh/known_hosts - -## Set up runtime environment -RUN --mount=type=ssh \ - ./setup-dev-env.sh -y $SETUP_ARGS --no-cuda-drivers --runtime universe \ - && pip uninstall -y ansible ansible-core \ - && mkdir src \ - && vcs import src < autoware.repos \ - && rosdep update \ - && DEBIAN_FRONTEND=noninteractive rosdep install -y --ignore-src --from-paths src --rosdistro "$ROS_DISTRO" \ - && rm -rf src \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -## Copy install folder from builder -COPY --from=builder /autoware/install/ /autoware/install/ - -## Clean up unnecessary files -RUN rm -rf \ - "$HOME"/.cache \ - /etc/apt/sources.list.d/cuda*.list \ - /etc/apt/sources.list.d/docker.list \ - /etc/apt/sources.list.d/nvidia-docker.list - -## Register Vulkan GPU vendors -ADD "https://gitlab.com/nvidia/container-images/vulkan/raw/dc389b0445c788901fda1d85be96fd1cb9410164/nvidia_icd.json" /etc/vulkan/icd.d/nvidia_icd.json -RUN chmod 644 /etc/vulkan/icd.d/nvidia_icd.json -ADD "https://gitlab.com/nvidia/container-images/opengl/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json" /etc/glvnd/egl_vendor.d/10_nvidia.json -RUN chmod 644 /etc/glvnd/egl_vendor.d/10_nvidia.json - -## Create entrypoint -# hadolint ignore=DL3059 -RUN echo "source /autoware/install/setup.bash" > /etc/bash.bashrc -CMD ["/bin/bash"] diff --git a/docker/autoware-universe/docker-bake.hcl b/docker/autoware-universe/docker-bake.hcl deleted file mode 100644 index fa5b3dff25f..00000000000 --- a/docker/autoware-universe/docker-bake.hcl +++ /dev/null @@ -1,19 +0,0 @@ -group "default" { - targets = ["devel", "prebuilt"] -} - -// For docker/metadata-action -target "docker-metadata-action-devel" {} -target "docker-metadata-action-prebuilt" {} - -target "devel" { - inherits = ["docker-metadata-action-devel"] - dockerfile = 
"docker/autoware-universe/Dockerfile" - target = "devel" -} - -target "prebuilt" { - inherits = ["docker-metadata-action-prebuilt"] - dockerfile = "docker/autoware-universe/Dockerfile" - target = "prebuilt" -} diff --git a/docker/build.sh b/docker/build.sh index 803040ad41a..fe9917b7575 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -2,76 +2,135 @@ set -e +# Function to print help message +print_help() { + echo "Usage: build.sh [OPTIONS]" + echo "Options:" + echo " --help Display this help message" + echo " -h Display this help message" + echo " --no-cuda Disable CUDA support" + echo " --platform Specify the platform (default: current platform)" + echo " --devel-only Build devel image only" + echo "" + echo "Note: The --platform option should be one of 'linux/amd64' or 'linux/arm64'." +} + SCRIPT_DIR=$(readlink -f "$(dirname "$0")") WORKSPACE_ROOT="$SCRIPT_DIR/../" # Parse arguments -args=() -while [ "$1" != "" ]; do - case "$1" in - --no-cuda) - option_no_cuda=true - ;; - --platform) - option_platform="$2" +parse_arguments() { + while [ "$1" != "" ]; do + case "$1" in + --help | -h) + print_help + exit 1 + ;; + --no-cuda) + option_no_cuda=true + ;; + --platform) + option_platform="$2" + shift + ;; + --devel-only) + option_devel_only=true + ;; + *) + echo "Unknown option: $1" + print_help + exit 1 + ;; + esac shift - ;; - --no-prebuilt) - option_no_prebuilt=true - ;; - *) - args+=("$1") - ;; - esac - shift -done + done +} # Set CUDA options -if [ "$option_no_cuda" = "true" ]; then - setup_args="--no-nvidia" - image_name_suffix="" -else - setup_args="--no-cuda-drivers" - image_name_suffix="-cuda" -fi +set_cuda_options() { + if [ "$option_no_cuda" = "true" ]; then + setup_args="--no-nvidia" + image_name_suffix="" + else + image_name_suffix="-cuda" + fi +} -# Set prebuilt options -if [ "$option_no_prebuilt" = "true" ]; then - targets=("devel") -else - # default targets include devel and prebuilt - targets=() -fi +# Set build options +set_build_options() { + if [ "$option_devel_only" = "true" ]; then + targets=("devel") + else + targets=() + fi +} # Set platform -if [ -n "$option_platform" ]; then - platform="$option_platform" -else - platform="linux/amd64" - if [ "$(uname -m)" = "aarch64" ]; then - platform="linux/arm64" +set_platform() { + if [ -n "$option_platform" ]; then + platform="$option_platform" + else + platform="linux/amd64" + if [ "$(uname -m)" = "aarch64" ]; then + platform="linux/arm64" + fi + fi +} + +# Set arch lib dir +set_arch_lib_dir() { + if [ "$platform" = "linux/arm64" ]; then + lib_dir="aarch64" + elif [ "$platform" = "linux/amd64" ]; then + lib_dir="x86_64" + else + echo "Unsupported platform: $platform" + exit 1 fi -fi +} # Load env -source "$WORKSPACE_ROOT/amd64.env" -if [ "$platform" = "linux/arm64" ]; then - source "$WORKSPACE_ROOT/arm64.env" -fi +load_env() { + source "$WORKSPACE_ROOT/amd64.env" + if [ "$platform" = "linux/arm64" ]; then + source "$WORKSPACE_ROOT/arm64.env" + fi +} + +# Build images +build_images() { + # https://github.com/docker/buildx/issues/484 + export BUILDKIT_STEP_LOG_MAX_SIZE=10000000 + + echo "Building images for platform: $platform" + echo "ROS distro: $rosdistro" + echo "Base image: $base_image" + echo "Setup args: $setup_args" + echo "Lib dir: $lib_dir" + echo "Image name suffix: $image_name_suffix" + echo "Targets: ${targets[*]}" -# https://github.com/docker/buildx/issues/484 -export BUILDKIT_STEP_LOG_MAX_SIZE=10000000 + set -x + docker buildx bake --load --progress=plain -f "$SCRIPT_DIR/autoware-openadk/docker-bake.hcl" \ + 
--set "*.context=$WORKSPACE_ROOT" \ + --set "*.ssh=default" \ + --set "*.platform=$platform" \ + --set "*.args.ROS_DISTRO=$rosdistro" \ + --set "*.args.BASE_IMAGE=$base_image" \ + --set "*.args.SETUP_ARGS=$setup_args" \ + --set "*.args.LIB_DIR=$lib_dir" \ + --set "devel.tags=ghcr.io/autowarefoundation/autoware-openadk:latest-devel$image_name_suffix" \ + --set "prebuilt.tags=ghcr.io/autowarefoundation/autoware-openadk:latest-prebuilt$image_name_suffix" \ + --set "runtime.tags=ghcr.io/autowarefoundation/autoware-openadk:latest-runtime$image_name_suffix" \ + "${targets[@]}" + set +x +} -set -x -docker buildx bake --no-cache --load --progress=plain -f "$SCRIPT_DIR/autoware-universe/docker-bake.hcl" \ - --set "*.context=$WORKSPACE_ROOT" \ - --set "*.ssh=default" \ - --set "*.platform=$platform" \ - --set "*.args.ROS_DISTRO=$rosdistro" \ - --set "*.args.BASE_IMAGE=$base_image" \ - --set "*.args.PREBUILT_BASE_IMAGE=$prebuilt_base_image" \ - --set "*.args.SETUP_ARGS=$setup_args" \ - --set "devel.tags=ghcr.io/autowarefoundation/autoware-universe:$rosdistro-latest$image_name_suffix" \ - --set "prebuilt.tags=ghcr.io/autowarefoundation/autoware-universe:$rosdistro-latest-prebuilt$image_name_suffix" \ - "${targets[@]}" -set +x +# Main script execution +parse_arguments "$@" +set_cuda_options +set_build_options +set_platform +set_arch_lib_dir +load_env +build_images diff --git a/docker/run.sh b/docker/run.sh new file mode 100755 index 00000000000..a790892b83b --- /dev/null +++ b/docker/run.sh @@ -0,0 +1,163 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2086,SC2124 + +set -e + +# Define terminal colors +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +SCRIPT_DIR=$(readlink -f "$(dirname "$0")") +WORKSPACE_ROOT="$SCRIPT_DIR/../" +source "$WORKSPACE_ROOT/amd64.env" +if [ "$(uname -m)" = "aarch64" ]; then + source "$WORKSPACE_ROOT/arm64.env" +fi + +# Default values +option_no_nvidia=false +option_devel=false +option_headless=false +MAP_PATH="" +WORKSPACE_PATH="" +USER_ID="" +WORKSPACE="" +DEFAULT_LAUNCH_CMD="ros2 launch autoware_launch autoware.launch.xml map_path:=/autoware_map vehicle_model:=sample_vehicle sensor_model:=sample_sensor_kit" + +# Function to print help message +print_help() { + echo -e "\n------------------------------------------------------------" + echo -e "${RED}Note:${NC} The --map-path option is mandatory if not custom launch command given. Please provide exact path to the map files." 
+ echo -e " Default launch command: ${GREEN}${DEFAULT_LAUNCH_CMD}${NC}" + echo -e "------------------------------------------------------------" + echo -e "${RED}Usage:${NC} run.sh [OPTIONS] [LAUNCH_CMD](optional)" + echo -e "Options:" + echo -e " ${GREEN}--help/-h${NC} Display this help message" + echo -e " ${GREEN}--map-path${NC} Specify the path to the map files (mandatory if no custom launch command is provided)" + echo -e " ${GREEN}--no-nvidia${NC} Disable NVIDIA GPU support" + echo -e " ${GREEN}--devel${NC} Use the latest development version of Autoware" + echo -e " ${GREEN}--headless${NC} Run Autoware in headless mode (default: false)" + echo -e " ${GREEN}--workspace${NC} Specify the workspace path to mount into container" + echo "" +} + +# Parse arguments +parse_arguments() { + while [ "$1" != "" ]; do + case "$1" in + --help | -h) + print_help + exit 1 + ;; + --no-nvidia) + option_no_nvidia=true + ;; + --devel) + option_devel=true + ;; + --headless) + option_headless=true + ;; + --workspace) + WORKSPACE_PATH="$2" + shift + ;; + --map-path) + MAP_PATH="$2" + shift + ;; + --*) + echo "Unknown option: $1" + print_help + exit 1 + ;; + -*) + echo "Unknown option: $1" + print_help + exit 1 + ;; + *) + LAUNCH_CMD="$@" + break + ;; + esac + shift + done +} + +# Set image and workspace variables +set_variables() { + # Check if map path is provided for default launch command + if [ "$MAP_PATH" == "" ] && [ "$LAUNCH_CMD" == "" ]; then + print_help + exit 1 + fi + + # Mount map path if provided + MAP="-v ${MAP_PATH}:/autoware_map:ro" + + # Set default launch command if not provided + if [ "$LAUNCH_CMD" == "" ]; then + LAUNCH_CMD=${DEFAULT_LAUNCH_CMD} + fi + + # Set workspace path if provided with current user and group + if [ "$WORKSPACE_PATH" != "" ]; then + USER_ID="-e LOCAL_UID=$(id -u) -e LOCAL_GID=$(id -g) -e LOCAL_USER=$(id -un) -e LOCAL_GROUP=$(id -gn)" + WORKSPACE="-v ${WORKSPACE_PATH}:/workspace" + fi + + # Set image based on option + if [ "$option_devel" == "true" ]; then + IMAGE="ghcr.io/autowarefoundation/autoware-openadk:latest-devel" + else + IMAGE="ghcr.io/autowarefoundation/autoware-openadk:latest-runtime" + fi +} + +# Set GPU flag based on option +set_gpu_flag() { + if [ "$option_no_nvidia" = "true" ]; then + GPU_FLAG="" + else + GPU_FLAG="--gpus all" + IMAGE=${IMAGE}-cuda + fi +} + +# Set X display variables +set_x_display() { + MOUNT_X="" + if [ "$option_headless" = "false" ]; then + MOUNT_X="-e DISPLAY=$DISPLAY -v /tmp/.X11-unix/:/tmp/.X11-unix" + xhost + >/dev/null + fi +} + +# Main script execution +main() { + # Parse arguments + parse_arguments "$@" + set_variables + set_gpu_flag + set_x_display + + echo -e "${GREEN}\n-----------------------LAUNCHING CONTAINER-----------------------" + echo -e "${GREEN}IMAGE:${NC} ${IMAGE}" + echo -e "${GREEN}MAP PATH(mounted):${NC} ${MAP_PATH}:/autoware_map" + echo -e "${GREEN}WORKSPACE(mounted):${NC} ${WORKSPACE_PATH}:/workspace" + echo -e "${GREEN}LAUNCH CMD:${NC} ${LAUNCH_CMD}" + echo -e "${GREEN}-----------------------------------------------------------------${NC}" + + # Launch the container + set -x + docker run -it --rm --net=host ${GPU_FLAG} ${USER_ID} ${MOUNT_X} \ + -e XAUTHORITY=${XAUTHORITY} -e XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR -e NVIDIA_DRIVER_CAPABILITIES=all -v /etc/localtime:/etc/localtime:ro \ + ${WORKSPACE} ${MAP} ${IMAGE} \ + ${LAUNCH_CMD} +} + +# Execute the main script +main "$@" diff --git a/setup-dev-env.sh b/setup-dev-env.sh index 3eb24732940..fe4b9a8fa3b 100755 --- a/setup-dev-env.sh +++ b/setup-dev-env.sh @@ 
-5,6 +5,24 @@ set -e +# Function to print help message +print_help() { + echo "Usage: setup-dev-env.sh [OPTIONS]" + echo "Options:" + echo " --help Display this help message" + echo " -h Display this help message" + echo " -y Use non-interactive mode" + echo " -v Enable debug outputs" + echo " --no-nvidia Disable installation of the NVIDIA-related roles ('cuda' and 'tensorrt')" + echo " --no-cuda-drivers Disable installation of 'cuda-drivers' in the role 'cuda'" + echo " --runtime Disable installation dev package of role 'cuda' and 'tensorrt'" + echo " --data-dir Set data directory (default: $HOME/autoware_data)" + echo " --download-artifacts" + echo " Download artifacts" + echo " --module Specify the module (default: all)" + echo "" +} + SCRIPT_DIR=$(readlink -f "$(dirname "$0")") # Parse arguments @@ -13,6 +31,10 @@ option_data_dir="$HOME/autoware_data" while [ "$1" != "" ]; do case "$1" in + --help | -h) + print_help + exit 1 + ;; -y) # Use non-interactive mode. option_yes=true @@ -42,6 +64,10 @@ while [ "$1" != "" ]; do # Set download artifacts option option_download_artifacts=true ;; + --module) + option_module="$2" + shift + ;; *) args+=("$1") ;; @@ -94,11 +120,10 @@ fi # Check installation of dev package if [ "$option_runtime" = "true" ]; then - ansible_args+=("--extra-vars" "tensorrt_install_devel=false") - # ROS installation type, default "desktop" - ansible_args+=("--extra-vars" "ros2_installation_type=ros-base") + ansible_args+=("--extra-vars" "ros2_installation_type=ros-base") # ROS installation type, default "desktop" + ansible_args+=("--extra-vars" "install_devel=false") else - ansible_args+=("--extra-vars" "tensorrt_install_devel=true") + ansible_args+=("--extra-vars" "install_devel=true") fi # Check downloading artifacts @@ -107,8 +132,26 @@ if [ "$option_yes" = "true" ] || [ "$option_download_artifacts" = "true" ]; then ansible_args+=("--extra-vars" "prompt_download_artifacts=y") fi +# Check downloading artifacts +if [ "$target_playbook" = "autoware.dev_env.openadk" ]; then + if [ "$option_download_artifacts" = "true" ]; then + echo -e "\e[36mArtifacts will be downloaded to $option_data_dir\e[m" + ansible_args+=("--extra-vars" "prompt_download_artifacts=y") + else + ansible_args+=("--extra-vars" "prompt_download_artifacts=N") + fi +elif [ "$option_yes" = "true" ] || [ "$option_download_artifacts" = "true" ]; then + echo -e "\e[36mArtifacts will be downloaded to $option_data_dir\e[m" + ansible_args+=("--extra-vars" "prompt_download_artifacts=y") +fi + ansible_args+=("--extra-vars" "data_dir=$option_data_dir") +# Check module option +if [ "$option_module" != "" ]; then + ansible_args+=("--extra-vars" "module=$option_module") +fi + # Load env source "$SCRIPT_DIR/amd64.env" if [ "$(uname -m)" = "aarch64" ]; then From b1ffd1046a694a9fd3ad55add2938b891ffdec12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Mon, 26 Feb 2024 17:22:57 +0300 Subject: [PATCH 02/17] comment out unused color MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: M. 
Fatih Cırıt --- docker/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/run.sh b/docker/run.sh index a790892b83b..a6ff0e35b95 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -6,7 +6,7 @@ set -e # Define terminal colors RED='\033[0;31m' GREEN='\033[0;32m' -BLUE='\033[0;34m' +# BLUE='\033[0;34m' NC='\033[0m' # No Color SCRIPT_DIR=$(readlink -f "$(dirname "$0")") From 8c186299438e4bfbeaaaabf44b3ce16f86a71730 Mon Sep 17 00:00:00 2001 From: oguzkaganozt Date: Tue, 27 Feb 2024 15:03:53 +0300 Subject: [PATCH 03/17] Update Ansible role readme(s) Signed-off-by: oguzkaganozt --- ansible/roles/cuda/README.md | 27 ++++++++++++++++++++++++ ansible/roles/dev_tools/README.md | 34 ++++++++++++++++++++++++++++-- ansible/roles/kisak_mesa/README.md | 27 ++++++++++++++++++++++-- 3 files changed, 84 insertions(+), 4 deletions(-) diff --git a/ansible/roles/cuda/README.md b/ansible/roles/cuda/README.md index 9264489f701..e2969cf7d71 100644 --- a/ansible/roles/cuda/README.md +++ b/ansible/roles/cuda/README.md @@ -35,4 +35,31 @@ Perform the post installation actions: # Taken from: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions echo 'export PATH=/usr/local/cuda/bin${PATH:+:${PATH}}' >> ~/.bashrc echo 'export LD_LIBRARY_PATH=/usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}' >> ~/.bashrc + +# Register Vulkan, OpenGL, and OpenCL GPU vendors + +# Create Vulkan directory +sudo mkdir -p /etc/vulkan/icd.d +sudo chmod 0755 /etc/vulkan/icd.d + +# Create OpenGL directory +sudo mkdir -p /etc/glvnd/egl_vendor.d +sudo chmod 0755 /etc/glvnd/egl_vendor.d + +# Create OpenCL directory +sudo mkdir -p /etc/OpenCL/vendors +sudo chmod 0755 /etc/OpenCL/vendors + +# Download and set permissions for Vulkan GPU vendors JSON +sudo wget https://gitlab.com/nvidia/container-images/vulkan/raw/dc389b0445c788901fda1d85be96fd1cb9410164/nvidia_icd.json -O /etc/vulkan/icd.d/nvidia_icd.json +sudo chmod 0644 /etc/vulkan/icd.d/nvidia_icd.json + +# Download and set permissions for OpenGL GPU vendors JSON +sudo wget https://gitlab.com/nvidia/container-images/opengl/raw/5191cf205d3e4bb1150091f9464499b076104354/glvnd/runtime/10_nvidia.json -O /etc/glvnd/egl_vendor.d/10_nvidia.json +sudo chmod 0644 /etc/glvnd/egl_vendor.d/10_nvidia.json + +# Register and set permissions for OpenCL GPU vendors +sudo touch /etc/OpenCL/vendors/nvidia.icd +echo "libnvidia-opencl.so.1" | sudo tee /etc/OpenCL/vendors/nvidia.icd > /dev/null +sudo chmod 0644 /etc/OpenCL/vendors/nvidia.icd ``` diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md index 17fb5e1931e..b84163b17ba 100644 --- a/ansible/roles/dev_tools/README.md +++ b/ansible/roles/dev_tools/README.md @@ -1,9 +1,39 @@ # devel -This role installs development dependencies for Autoware. +This role installs optional development tools for Autoware. + +## Tools +- Git LFS +- pre-commit +- clang-format +- Go ## Inputs -None. +| Name | Required | Description | +| -------------------- | -------- | -------------------------------- | +| clang-version | true | The version of clang-format to install. 
## Manual Installation +```bash +#!/bin/bash + +# Update package lists +sudo apt-get update + +# Install Git LFS +sudo apt-get install -y git-lfs + +# Setup Git LFS +git lfs install + +# Install pre-commit using pip3 +pip3 install pre-commit + +# Install a specific version of clang-format using pip3 +# Replace X.Y with the actual version you want to install +pip3 install clang-format==${clang_version} + +# Install Go +sudo apt-get install -y golang +``` \ No newline at end of file diff --git a/ansible/roles/kisak_mesa/README.md b/ansible/roles/kisak_mesa/README.md index 937de6afa36..86b6c9f6ee0 100644 --- a/ansible/roles/kisak_mesa/README.md +++ b/ansible/roles/kisak_mesa/README.md @@ -1,3 +1,26 @@ -# Kisak Mesa Fix for Ubuntu 22.04 for Rviz2 +# Kisak Mesa Fix for Ubuntu 22.04 for Rviz2 (Not mandatory) - +If you are using Ubuntu 22.04 and Rviz2 (especially inside a container), you may encounter black-screen error on Rviz2: + +This role will install the Kisak Mesa fix for Ubuntu 22.04 for Rviz2. + +## Inputs +None + +## Manual Installation +```bash +#!/bin/bash + +# Update the package list and install software-properties-common +sudo apt-get update +sudo apt-get install -y software-properties-common + +# Add the Kisak Mesa PPA +sudo add-apt-repository -y ppa:kisak/kisak-mesa + +# Update the package list after adding the new repository +sudo apt-get update + +# Install Mesa libraries +sudo apt-get install -y libegl-mesa0 libegl1-mesa-dev libgbm-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri libglapi-mesa libglx-mesa0 +``` From 1718ce914385f63c1c433008d0795012d11c51f8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 27 Feb 2024 12:04:06 +0000 Subject: [PATCH 04/17] style(pre-commit): autofix --- ansible/roles/dev_tools/README.md | 10 ++++++---- ansible/roles/kisak_mesa/README.md | 2 ++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md index b84163b17ba..ff8eb98c8cc 100644 --- a/ansible/roles/dev_tools/README.md +++ b/ansible/roles/dev_tools/README.md @@ -3,6 +3,7 @@ This role installs optional development tools for Autoware. ## Tools + - Git LFS - pre-commit - clang-format @@ -10,11 +11,12 @@ This role installs optional development tools for Autoware. ## Inputs -| Name | Required | Description | -| -------------------- | -------- | -------------------------------- | -| clang-version | true | The version of clang-format to install. +| Name | Required | Description | +| ------------- | -------- | --------------------------------------- | +| clang-version | true | The version of clang-format to install. | ## Manual Installation + ```bash #!/bin/bash @@ -36,4 +38,4 @@ pip3 install clang-format==${clang_version} # Install Go sudo apt-get install -y golang -``` \ No newline at end of file +``` diff --git a/ansible/roles/kisak_mesa/README.md b/ansible/roles/kisak_mesa/README.md index 86b6c9f6ee0..6c707dfb465 100644 --- a/ansible/roles/kisak_mesa/README.md +++ b/ansible/roles/kisak_mesa/README.md @@ -5,9 +5,11 @@ If you are using Ubuntu 22.04 and Rviz2 (especially inside a container), you may This role will install the Kisak Mesa fix for Ubuntu 22.04 for Rviz2. 
## Inputs + None ## Manual Installation + ```bash #!/bin/bash From 041e00e10f0e68b6cf16119346748c1b407ead6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Tue, 27 Feb 2024 21:07:23 +0300 Subject: [PATCH 05/17] fix ubuntu version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: M. Fatih Cırıt --- ansible/roles/cuda/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/cuda/README.md b/ansible/roles/cuda/README.md index e2969cf7d71..6525a5a05aa 100644 --- a/ansible/roles/cuda/README.md +++ b/ansible/roles/cuda/README.md @@ -1,6 +1,6 @@ # cuda -This role installs [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit) following [this page](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=deb_network) and [this page](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions). +This role installs [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit) following [this page](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04&target_type=deb_network) and [this page](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions). ## Inputs From 6f85cdd2909e1347b8393d6e5f233e418c70346f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Tue, 27 Feb 2024 21:14:11 +0300 Subject: [PATCH 06/17] fix clang version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: M. Fatih Cırıt --- ansible/roles/dev_tools/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md index ff8eb98c8cc..1cce1e43a0e 100644 --- a/ansible/roles/dev_tools/README.md +++ b/ansible/roles/dev_tools/README.md @@ -34,7 +34,7 @@ pip3 install pre-commit # Install a specific version of clang-format using pip3 # Replace X.Y with the actual version you want to install -pip3 install clang-format==${clang_version} +pip3 install clang-format==${pre_commit_clang_format_version} # Install Go sudo apt-get install -y golang From 20d4a28d37f1c6aeb13f794351a05f666b51bf61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Tue, 27 Feb 2024 21:14:27 +0300 Subject: [PATCH 07/17] make apt install multiline MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: M. 
Fatih Cırıt --- ansible/roles/kisak_mesa/README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ansible/roles/kisak_mesa/README.md b/ansible/roles/kisak_mesa/README.md index 6c707dfb465..e92b276647e 100644 --- a/ansible/roles/kisak_mesa/README.md +++ b/ansible/roles/kisak_mesa/README.md @@ -24,5 +24,13 @@ sudo add-apt-repository -y ppa:kisak/kisak-mesa sudo apt-get update # Install Mesa libraries -sudo apt-get install -y libegl-mesa0 libegl1-mesa-dev libgbm-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri libglapi-mesa libglx-mesa0 +sudo apt-get install -y \ +libegl-mesa0 \ +libegl1-mesa-dev \ +libgbm-dev \ +libgbm1 \ +libgl1-mesa-dev \ +libgl1-mesa-dri \ +libglapi-mesa \ +libglx-mesa0 ``` From 3ce272faec6bfbaa456d4044cb31d46a6545c326 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?= Date: Tue, 27 Feb 2024 21:28:16 +0300 Subject: [PATCH 08/17] update tensorrt and cudnn MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: M. Fatih Cırıt --- ansible/roles/tensorrt/README.md | 39 ++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/ansible/roles/tensorrt/README.md b/ansible/roles/tensorrt/README.md index 0b938ad03a5..a7e22feedaf 100644 --- a/ansible/roles/tensorrt/README.md +++ b/ansible/roles/tensorrt/README.md @@ -11,17 +11,36 @@ This role installs TensorRT and cuDNN following [this page](https://docs.nvidia. ## Manual Installation -For Universe, the `cudnn_version` and `tensorrt_version` variables should be copied from -[amd64.env](../../../amd64.env) or [arm64.env](../../../arm64.env) depending on the architecture used. - ```bash wget -O /tmp/amd64.env https://raw.githubusercontent.com/autowarefoundation/autoware/main/amd64.env && source /tmp/amd64.env -# Taken from: https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing - -sudo apt-get install libcudnn8=${cudnn_version} libcudnn8-dev=${cudnn_version} -sudo apt-mark hold libcudnn8 libcudnn8-dev - -sudo apt-get install libnvinfer8=${tensorrt_version} libnvonnxparsers8=${tensorrt_version} libnvparsers8=${tensorrt_version} libnvinfer-plugin8=${tensorrt_version} libnvinfer-dev=${tensorrt_version} libnvonnxparsers-dev=${tensorrt_version} libnvparsers-dev=${tensorrt_version} libnvinfer-plugin-dev=${tensorrt_version} -sudo apt-mark hold libnvinfer8 libnvonnxparsers8 libnvparsers8 libnvinfer-plugin8 libnvinfer-dev libnvonnxparsers-dev libnvparsers-dev libnvinfer-plugin-dev +# Can also be found at: https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing + +sudo apt-get install -y \ +libcudnn8=${cudnn_version} \ +libnvinfer8=${tensorrt_version} \ +libnvinfer-plugin8=${tensorrt_version} \ +libnvparsers8=${tensorrt_version} \ +libnvonnxparsers8=${tensorrt_version} \ + +sudo apt-mark hold \ +libcudnn8 \ +libnvinfer8 \ +libnvinfer-plugin8 \ +libnvparsers8 \ +libnvonnxparsers8 + +sudo apt-get install -y \ +libcudnn8-dev=${cudnn_version} \ +libnvinfer-dev=${tensorrt_version} \ +libnvinfer-plugin-dev=${tensorrt_version} \ +libnvparsers-dev=${tensorrt_version} \ +libnvonnxparsers-dev=${tensorrt_version} + +sudo apt-mark hold \ +libcudnn8-dev \ +libnvinfer-dev \ +libnvinfer-plugin-dev \ +libnvparsers-dev \ +libnvonnxparsers-dev ``` From 26e72c3a80649547bd606f46eb4230fdb8ef04ab Mon Sep 17 00:00:00 2001 From: oguzkaganozt Date: Wed, 28 Feb 2024 14:20:21 +0300 Subject: [PATCH 09/17] Update Ansible role Readmes Signed-off-by: oguzkaganozt --- 
ansible/playbooks/docker.yaml | 2 +- ansible/playbooks/openadk.yaml | 2 ++ ansible/playbooks/universe.yaml | 1 + ansible/roles/build_tools/README.md | 19 ++++++++++++ .../defaults/main.yaml | 0 .../meta/main.yaml | 0 ansible/roles/build_tools/tasks/main.yaml | 6 ++++ ansible/roles/cuda/README.md | 2 ++ ansible/roles/dev_tools/README.md | 5 ++++ ansible/roles/dev_tools/tasks/main.yaml | 8 +++++ .../README.md | 29 +++++++++++-------- .../defaults/main.yaml | 0 .../nvidia_container_toolkit/meta/main.yaml | 0 .../tasks/main.yaml | 0 ansible/roles/pacmod/tasks/main.yaml | 8 ----- docker/autoware-openadk/Dockerfile | 1 - 16 files changed, 61 insertions(+), 22 deletions(-) create mode 100644 ansible/roles/build_tools/README.md rename ansible/roles/{nvidia_docker => build_tools}/defaults/main.yaml (100%) rename ansible/roles/{nvidia_docker => build_tools}/meta/main.yaml (100%) create mode 100644 ansible/roles/build_tools/tasks/main.yaml rename ansible/roles/{nvidia_docker => nvidia_container_toolkit}/README.md (61%) create mode 100644 ansible/roles/nvidia_container_toolkit/defaults/main.yaml create mode 100644 ansible/roles/nvidia_container_toolkit/meta/main.yaml rename ansible/roles/{nvidia_docker => nvidia_container_toolkit}/tasks/main.yaml (100%) diff --git a/ansible/playbooks/docker.yaml b/ansible/playbooks/docker.yaml index 0a6d5c42acc..5cc84043e27 100644 --- a/ansible/playbooks/docker.yaml +++ b/ansible/playbooks/docker.yaml @@ -23,4 +23,4 @@ - role: autoware.dev_env.cuda when: prompt_install_nvidia == 'y' - role: autoware.dev_env.docker_engine - - role: autoware.dev_env.nvidia_docker + - role: autoware.dev_env.nvidia_container_toolkit diff --git a/ansible/playbooks/openadk.yaml b/ansible/playbooks/openadk.yaml index 75f39038812..060cdc732e8 100644 --- a/ansible/playbooks/openadk.yaml +++ b/ansible/playbooks/openadk.yaml @@ -24,6 +24,8 @@ when: module == 'base' - role: autoware.dev_env.kisak_mesa when: module == 'base' + - role: autoware.dev_env.build_tools + when: module == 'all' and install_devel=='y' # Module specific dependencies - role: autoware.dev_env.geographiclib diff --git a/ansible/playbooks/universe.yaml b/ansible/playbooks/universe.yaml index fbc42d56577..ea9b9fbb64b 100644 --- a/ansible/playbooks/universe.yaml +++ b/ansible/playbooks/universe.yaml @@ -39,6 +39,7 @@ - role: autoware.dev_env.ros2_dev_tools - role: autoware.dev_env.rmw_implementation - role: autoware.dev_env.gdown + - role: autoware.dev_env.build_tools # Autoware module dependencies - role: autoware.dev_env.geographiclib diff --git a/ansible/roles/build_tools/README.md b/ansible/roles/build_tools/README.md new file mode 100644 index 00000000000..3db8fa73d2f --- /dev/null +++ b/ansible/roles/build_tools/README.md @@ -0,0 +1,19 @@ +# Build Tools + +This role installs build tools for building Autoware. 
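As a side note, here is a minimal sketch of how the ccache installed by this role is typically wired into a colcon build; the compiler wrapper paths mirror the ones set in docker/autoware-openadk/Dockerfile, while the cache location shown is only an example:

```bash
# Sketch: route compiler invocations through ccache before building Autoware.
export CCACHE_DIR="$HOME/.cache/ccache" # example cache location (the openadk Dockerfile uses /ccache)
export CC=/usr/lib/ccache/gcc
export CXX=/usr/lib/ccache/g++
colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release
ccache -s # print cache statistics to confirm hits on rebuilds
```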
+ +## Tools + +- ccache + +## Inputs + +## Manual Installation + +```bash +# Update package lists +sudo apt-get update + +# Install ccache +sudo apt-get install -y ccache +``` diff --git a/ansible/roles/nvidia_docker/defaults/main.yaml b/ansible/roles/build_tools/defaults/main.yaml similarity index 100% rename from ansible/roles/nvidia_docker/defaults/main.yaml rename to ansible/roles/build_tools/defaults/main.yaml diff --git a/ansible/roles/nvidia_docker/meta/main.yaml b/ansible/roles/build_tools/meta/main.yaml similarity index 100% rename from ansible/roles/nvidia_docker/meta/main.yaml rename to ansible/roles/build_tools/meta/main.yaml diff --git a/ansible/roles/build_tools/tasks/main.yaml b/ansible/roles/build_tools/tasks/main.yaml new file mode 100644 index 00000000000..e83056a6428 --- /dev/null +++ b/ansible/roles/build_tools/tasks/main.yaml @@ -0,0 +1,6 @@ +- name: Install ccache + become: true + ansible.builtin.apt: + name: ccache + state: latest + update_cache: true diff --git a/ansible/roles/cuda/README.md b/ansible/roles/cuda/README.md index 6525a5a05aa..57d638d1187 100644 --- a/ansible/roles/cuda/README.md +++ b/ansible/roles/cuda/README.md @@ -2,6 +2,8 @@ This role installs [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit) following [this page](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04&target_type=deb_network) and [this page](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions). +This role also registers Vulkan, OpenGL, and OpenCL GPU vendors for future use. + ## Inputs | Name | Required | Description | diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md index 1cce1e43a0e..3b31b06c212 100644 --- a/ansible/roles/dev_tools/README.md +++ b/ansible/roles/dev_tools/README.md @@ -8,12 +8,14 @@ This role installs optional development tools for Autoware. - pre-commit - clang-format - Go +- PlotJuggler ## Inputs | Name | Required | Description | | ------------- | -------- | --------------------------------------- | | clang-version | true | The version of clang-format to install. | +| ros-distro | true | The ROS distribution. | ## Manual Installation @@ -38,4 +40,7 @@ pip3 install clang-format==${pre_commit_clang_format_version} # Install Go sudo apt-get install -y golang + +# Install PlotJuggler +sudo apt-get install -y ros-${ROS_DISTRO}-plotjuggler-ros ``` diff --git a/ansible/roles/dev_tools/tasks/main.yaml b/ansible/roles/dev_tools/tasks/main.yaml index 62201d86863..2a11dd7ffc6 100644 --- a/ansible/roles/dev_tools/tasks/main.yaml +++ b/ansible/roles/dev_tools/tasks/main.yaml @@ -36,3 +36,11 @@ name: golang state: latest update_cache: true + +- name: Install plotjuggler + become: true + ansible.builtin.apt: + name: + - ros-{{ rosdistro }}-plotjuggler-ros + state: latest + update_cache: true diff --git a/ansible/roles/nvidia_docker/README.md b/ansible/roles/nvidia_container_toolkit/README.md similarity index 61% rename from ansible/roles/nvidia_docker/README.md rename to ansible/roles/nvidia_container_toolkit/README.md index 303ac8ad6eb..01dd1791e63 100644 --- a/ansible/roles/nvidia_docker/README.md +++ b/ansible/roles/nvidia_container_toolkit/README.md @@ -1,6 +1,6 @@ -# nvidia_docker +# nvidia_container_toolkit -This role installs [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) following the [installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). 
+This role installs [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) following the [installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html). ## Inputs @@ -13,26 +13,31 @@ Install Nvidia Container Toolkit: ```bash -# Taken from https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit +# Add NVIDIA container toolkit GPG key +sudo apt-key adv --fetch-keys https://nvidia.github.io/libnvidia-container/gpgkey +sudo gpg --no-default-keyring --keyring /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg --import /etc/apt/trusted.gpg -# Setup the package repository and the GPG key: -distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ - && curl -s -L https://nvidia.github.io/libnvidia-container/gpgkey | sudo apt-key add - \ - && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list +# Add NVIDIA container toolkit repository +echo "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(dpkg --print-architecture) /" | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list -# Install the nvidia-docker2 package (and dependencies) after updating the package listing: +# Update the package list sudo apt-get update -sudo apt-get install -y nvidia-docker2 -# Restart the Docker daemon to complete the installation after setting the default runtime: +# Install NVIDIA Container Toolkit +sudo apt-get install -y nvidia-container-toolkit + +# Add NVIDIA runtime support to docker engine +sudo nvidia-ctk runtime configure --runtime=docker + +# Restart docker daemon sudo systemctl restart docker # At this point, a working setup can be tested by running a base CUDA container: -sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi +sudo docker run --rm --gpus all nvcr.io/nvidia/cuda:12.3.1-runtime-ubuntu20.04 nvidia-smi # This should result in a console output shown below: # +-----------------------------------------------------------------------------+ -# | NVIDIA-SMI 450.51.06 Driver Version: 450.51.06 CUDA Version: 11.0 | +# | NVIDIA-SMI 545.23.08 Driver Version: 545.23.08 CUDA Version: 12.3.1 | # |-------------------------------+----------------------+----------------------+ # | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | # | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. 
| diff --git a/ansible/roles/nvidia_container_toolkit/defaults/main.yaml b/ansible/roles/nvidia_container_toolkit/defaults/main.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ansible/roles/nvidia_container_toolkit/meta/main.yaml b/ansible/roles/nvidia_container_toolkit/meta/main.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ansible/roles/nvidia_docker/tasks/main.yaml b/ansible/roles/nvidia_container_toolkit/tasks/main.yaml similarity index 100% rename from ansible/roles/nvidia_docker/tasks/main.yaml rename to ansible/roles/nvidia_container_toolkit/tasks/main.yaml diff --git a/ansible/roles/pacmod/tasks/main.yaml b/ansible/roles/pacmod/tasks/main.yaml index d3fc094034e..ed96c249ef4 100644 --- a/ansible/roles/pacmod/tasks/main.yaml +++ b/ansible/roles/pacmod/tasks/main.yaml @@ -1,11 +1,3 @@ -- name: Install plotjuggler - become: true - ansible.builtin.apt: - name: - - ros-{{ rosdistro }}-plotjuggler-ros - state: latest - update_cache: true - - name: Install apt-transport-https become: true ansible.builtin.apt: diff --git a/docker/autoware-openadk/Dockerfile b/docker/autoware-openadk/Dockerfile index 71f94fe373b..bd41668d6f5 100644 --- a/docker/autoware-openadk/Dockerfile +++ b/docker/autoware-openadk/Dockerfile @@ -14,7 +14,6 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install --no-ins cmake \ curl \ gosu \ - ccache \ gnupg \ vim \ unzip \ From 5ff919e3268e3c12f75ec56480d67ab7b882c749 Mon Sep 17 00:00:00 2001 From: oguzkaganozt Date: Fri, 1 Mar 2024 13:12:14 +0300 Subject: [PATCH 10/17] fix artifact naming Signed-off-by: oguzkaganozt --- .github/actions/docker-build-and-push/action.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/docker-build-and-push/action.yaml b/.github/actions/docker-build-and-push/action.yaml index a50e7abbd61..7b5c120e83a 100644 --- a/.github/actions/docker-build-and-push/action.yaml +++ b/.github/actions/docker-build-and-push/action.yaml @@ -144,7 +144,7 @@ runs: id: artifact-upload-step-prebuilt uses: actions/upload-artifact@v4 with: - name: prebuilt-image-${{ inputs.tag-suffix }} + name: prebuilt-image${{ inputs.tag-suffix }} path: /tmp/prebuilt.tar retention-days: 7 compression-level: 6 @@ -156,7 +156,7 @@ runs: id: artifact-upload-step-devel uses: actions/upload-artifact@v4 with: - name: devel-image-${{ inputs.tag-suffix }} + name: devel-image${{ inputs.tag-suffix }} path: /tmp/devel.tar retention-days: 7 compression-level: 6 @@ -168,7 +168,7 @@ runs: id: artifact-upload-step-runtime uses: actions/upload-artifact@v4 with: - name: runtime-image-${{ inputs.tag-suffix }} + name: runtime-image${{ inputs.tag-suffix }} path: /tmp/runtime.tar retention-days: 7 compression-level: 6 From c77dd04aebbda39b1744721318b22963c79e2ff4 Mon Sep 17 00:00:00 2001 From: Oguz Ozturk Date: Tue, 5 Mar 2024 14:41:55 +0300 Subject: [PATCH 11/17] fix typo Signed-off-by: Oguz Ozturk --- ansible/playbooks/openadk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/playbooks/openadk.yaml b/ansible/playbooks/openadk.yaml index 060cdc732e8..145cb7830f3 100644 --- a/ansible/playbooks/openadk.yaml +++ b/ansible/playbooks/openadk.yaml @@ -25,7 +25,7 @@ - role: autoware.dev_env.kisak_mesa when: module == 'base' - role: autoware.dev_env.build_tools - when: module == 'all' and install_devel=='y' + when: module == 'all' and install_devel=='true' # Module specific dependencies - role: autoware.dev_env.geographiclib From 83c88b9081b41ff5724db8ef5f6e9482a0582eb0 Mon 
Sep 17 00:00:00 2001 From: oguzkaganozt Date: Wed, 6 Mar 2024 16:43:30 +0300 Subject: [PATCH 12/17] Update readme Signed-off-by: oguzkaganozt --- .devcontainer/Dockerfile | 2 +- ansible/roles/tensorrt/README.md | 3 + docker/README.md | 100 +++++++++++++++++++++++++------ docker/run.sh | 20 ++++--- 4 files changed, 97 insertions(+), 28 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 045aa5c10cf..4392e159588 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/autowarefoundation/openadk:latest-devel +FROM ghcr.io/autowarefoundation/autoware-openadk:latest-devel ENV SHELL /bin/bash diff --git a/ansible/roles/tensorrt/README.md b/ansible/roles/tensorrt/README.md index a7e22feedaf..2b4bc48b108 100644 --- a/ansible/roles/tensorrt/README.md +++ b/ansible/roles/tensorrt/README.md @@ -11,6 +11,9 @@ This role installs TensorRT and cuDNN following [this page](https://docs.nvidia. ## Manual Installation +For Universe, the `cudnn_version` and `tensorrt_version` variables should be copied from +[amd64.env](../../../amd64.env) or [arm64.env](../../../arm64.env) depending on the architecture used. + ```bash wget -O /tmp/amd64.env https://raw.githubusercontent.com/autowarefoundation/autoware/main/amd64.env && source /tmp/amd64.env diff --git a/docker/README.md b/docker/README.md index 19be1aec827..0263dd83162 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,33 +1,39 @@ -# Open AD Kit: Containerized Workloads for the Autoware +# Open AD Kit: Containerized Workloads for Autoware -Open AD Kit offers two types of Docker image to let you get started with the Autoware quickly: `devel` and `runtime`. +Open AD Kit offers two types of Docker image to let you get started with Autoware quickly: `devel` and `runtime`. -1. The `devel` image contains the development environment and enables you to build and develop Autoware from source. Keep in mind that 'devel' image always include more recent changes than the 'runtime' image to provide the latest development environment. -2. The `runtime` image contains only runtime executables and enables you to try out Autoware quickly. 'runtime' image is more stable than 'devel' image and is recommended for production use. +1. The `devel` image enables you to develop Autoware without setting up the local development environment. +2. The `runtime` image contains only runtime executables and enables you to try out Autoware quickly. -**Note**: Before proceeding, confirm and agree with the [NVIDIA Deep Learning Container license](https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license). By pulling and using the Autoware Open AD Kit images, you accept the terms and conditions of the license. +!!! info + + Before proceeding, confirm and agree with the [NVIDIA Deep Learning Container license](https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license). By pulling and using the Autoware Open AD Kit images, you accept the terms and conditions of the license. 
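For orientation, a minimal sketch of pulling the two image flavors described above; the tag names follow docker/build.sh and docker/run.sh in this repository, but their availability on the registry is an assumption:

```bash
# Sketch: pull the development and runtime flavors of the Open AD Kit image.
# CUDA-enabled variants carry a -cuda suffix (see docker/build.sh and docker/run.sh).
docker pull ghcr.io/autowarefoundation/autoware-openadk:latest-devel
docker pull ghcr.io/autowarefoundation/autoware-openadk:latest-runtime
```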
## Prerequisites - Docker -- NVIDIA Container Toolkit (optional) -- NVIDIA CUDA 12 compatible GPU Driver (optional) +- NVIDIA Container Toolkit (preferred) +- NVIDIA CUDA 12 compatible GPU Driver (preferred) The [setup script](../setup-dev-env.sh) will install all required dependencies with the setup script: ```bash -./setup-dev-env.sh --docker +./setup-dev-env.sh -y docker ``` -To install without NVIDIA GPU support: +To install without **NVIDIA GPU** support: ```bash -./setup-dev-env.sh --docker --no-nvidia +./setup-dev-env.sh -y --no-nvidia docker ``` +!!! info + + GPU acceleration is required for some features such as object detection and traffic light detection/classification. For details of how to enable these features without a GPU, refer to the [Running Autoware without CUDA](../how-to-guides/others/running-autoware-without-cuda.md). + ## Usage -### Runtime +### Runtime Setup You can use `run.sh` to run the Autoware runtime container with the map data: @@ -35,28 +41,34 @@ ./docker/run.sh --map-path path_to_map_data ``` -**Note**: You can use `--no-nvidia` to run without NVIDIA GPU support, and `--headless` to run without display that means no RViz visualization. +!!! info + + You can use `--no-nvidia` to run without NVIDIA GPU support, and `--headless` to run without a display, which means no RViz visualization. -For more launch options you can edit the launch command `--launch-cmd`: +For more launch options, you can edit the launch command with the `--launch-cmd` option: ```bash ./docker/run.sh --map-path path_to_map_data --launch-cmd "ros2 launch autoware_launch autoware.launch.xml map_path:=/autoware_map vehicle_model:=sample_vehicle sensor_model:=sample_sensor_kit" ``` -### Using Development Container +#### Run Autoware simulator -```bash -./docker/run.sh --devel -``` +Inside the container, you can run the Autoware simulation by following these tutorials: + +[Planning Simulation](../../tutorials/ad-hoc-simulation/planning-simulation.md) -**Note**: By default workspace mounted on the container will be current directory, you can change the workspace path by `--workspace path_to_workspace`. For development environments without NVIDIA GPU support use `--no-nvidia`. + +[Rosbag Replay Simulation](../../tutorials/ad-hoc-simulation/rosbag-replay-simulation.md). + +### Development Setup + +You can use [VS Code Remote Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) to develop Autoware in the containerized environment with ease. Alternatively, you can use `run.sh` manually to run the Autoware development container with the workspace mounted. #### Using VS Code Remote Containers for Development Get the Visual Studio Code's [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension. And reopen the workspace in the container by selecting `Remote-Containers: Reopen in Container` from the Command Palette (`F1`). 
-By default devcontainer assumes NIVIDA GPU support, you can change this by deleting these lines within `.devcontainer/devcontainer.json`:
+By default devcontainer assumes NVIDIA GPU support, you can change this by deleting these lines within `.devcontainer/devcontainer.json`:
 
 ```json
   "hostRequirements": {
     "gpu": true
   },
 ```
@@ -68,6 +80,56 @@ By default devcontainer assumes NIVIDA GPU support, you can change this by delet
   "--gpus", "all"
 ```
 
+#### Using `run.sh` for Development
+
+```bash
+./docker/run.sh --devel --workspace path_to_workspace
+```
+
+!!! info
+
+    You should mount the workspace you are working on with `--workspace path_to_workspace`. For a development environment without NVIDIA GPU support, use `--no-nvidia`.
+
+#### Creating a new workspace
+
+If you don't have a workspace yet, you can create a new workspace with the following steps:
+
+1. Create the `src` directory and clone repositories into it.
+
+   ```bash
+   mkdir src
+   vcs import src < autoware.repos
+   ```
+
+2. Update dependent ROS packages.
+
+   The dependencies of Autoware may have changed since the Docker image was created.
+   In that case, you need to run the following commands to update them.
+
+   ```bash
+   sudo apt update
+   rosdep update
+   rosdep install -y --from-paths src --ignore-src --rosdistro $ROS_DISTRO
+   ```
+
+3. Build the workspace.
+
+   ```bash
+   colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release
+   ```
+
+   If there are any build issues, refer to [Troubleshooting](../../support/troubleshooting/index.md#build-issues).
+
+> **To Update the Workspace**
+>
+> ```bash
+> cd autoware
+> git pull
+> vcs import src < autoware.repos
+> vcs pull src
+> ```
+
+
 ## Building Docker images from scratch
 
 If you want to build these images locally for development purposes, run the following command:
diff --git a/docker/run.sh b/docker/run.sh
index a6ff0e35b95..3ea92854b3c 100755
--- a/docker/run.sh
+++ b/docker/run.sh
@@ -35,11 +35,11 @@ print_help() {
     echo -e "${RED}Usage:${NC} run.sh [OPTIONS] [LAUNCH_CMD](optional)"
     echo -e "Options:"
    echo -e "  ${GREEN}--help/-h${NC}      Display this help message"
-    echo -e "  ${GREEN}--map-path${NC}     Specify the path to the map files (mandatory if no custom launch command is provided)"
+    echo -e "  ${GREEN}--map-path${NC}     Specify the path to the map files to mount into /autoware_map (mandatory if no custom launch command is provided)"
     echo -e "  ${GREEN}--no-nvidia${NC}    Disable NVIDIA GPU support"
     echo -e "  ${GREEN}--devel${NC}        Use the latest development version of Autoware"
     echo -e "  ${GREEN}--headless${NC}     Run Autoware in headless mode (default: false)"
-    echo -e "  ${GREEN}--workspace${NC}    Specify the workspace path to mount into container"
+    echo -e "  ${GREEN}--workspace${NC}    Specify the workspace path to mount into /workspace"
     echo ""
 }
 
@@ -98,17 +98,21 @@ set_variables() {
     # Mount map path if provided
     MAP="-v ${MAP_PATH}:/autoware_map:ro"
 
-    # Set default launch command if not provided
-    if [ "$LAUNCH_CMD" == "" ]; then
-        LAUNCH_CMD=${DEFAULT_LAUNCH_CMD}
-    fi
-
-    # Set workspace path if provided with current user and group
+    # Set workspace path if provided and log in as the local user
     if [ "$WORKSPACE_PATH" != "" ]; then
         USER_ID="-e LOCAL_UID=$(id -u) -e LOCAL_GID=$(id -g) -e LOCAL_USER=$(id -un) -e LOCAL_GROUP=$(id -gn)"
         WORKSPACE="-v ${WORKSPACE_PATH}:/workspace"
     fi
 
+    # Set default launch command if not provided
+    if [ "$LAUNCH_CMD" == "" ]; then
+        if [ "$WORKSPACE_PATH" != "" ]; then
+            LAUNCH_CMD="/bin/bash"
+        else
+            LAUNCH_CMD=${DEFAULT_LAUNCH_CMD}
+        fi
+    fi
+
     # Set image based on option
     if [ "$option_devel" == "true" ]; then
         IMAGE="ghcr.io/autowarefoundation/autoware-openadk:latest-devel"

From 70d844e401026cf3d0215d0f79bac357967648a0 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 6 Mar 2024 13:43:45 +0000
Subject: [PATCH 13/17] style(pre-commit): autofix

---
 docker/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docker/README.md b/docker/README.md
index 0263dd83162..be79a809cbc 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -129,7 +129,6 @@ If you don't have a workspace yet, you can create a new workspace with following
 > vcs pull src
 > ```
 
-
 ## Building Docker images from scratch
 
 If you want to build these images locally for development purposes, run the following command:

From 66f255256e2b68a7499eb6357306c7126c9e533b Mon Sep 17 00:00:00 2001
From: oguzkaganozt
Date: Wed, 6 Mar 2024 17:08:28 +0300
Subject: [PATCH 14/17] Add two flavors for VS containers

Signed-off-by: oguzkaganozt
---
 .devcontainer/{ => base}/Dockerfile        |  0
 .devcontainer/base/devcontainer.json       | 24 +++++++++++++++
 .devcontainer/cuda/Dockerfile              | 14 +++++++++
 .devcontainer/{ => cuda}/devcontainer.json |  2 +-
 ansible/playbooks/openadk.yaml             |  2 +-
 docker/README.md                           | 36 ++++++++++++++++------
 6 files changed, 66 insertions(+), 12 deletions(-)
 rename .devcontainer/{ => base}/Dockerfile (100%)
 create mode 100644 .devcontainer/base/devcontainer.json
 create mode 100644 .devcontainer/cuda/Dockerfile
 rename .devcontainer/{ => cuda}/devcontainer.json (94%)

diff --git a/.devcontainer/Dockerfile b/.devcontainer/base/Dockerfile
similarity index 100%
rename from .devcontainer/Dockerfile
rename to .devcontainer/base/Dockerfile
diff --git a/.devcontainer/base/devcontainer.json b/.devcontainer/base/devcontainer.json
new file mode 100644
index 00000000000..bf5238b4c7a
--- /dev/null
+++ b/.devcontainer/base/devcontainer.json
@@ -0,0 +1,24 @@
+{
+  "name": "Autoware",
+  "build": {
+    "dockerfile": "Dockerfile"
+  },
+  "remoteUser": "autoware",
+  "hostRequirements": {
+    "gpu": true
+  },
+  "runArgs": [
+    "--cap-add=SYS_PTRACE",
+    "--security-opt",
+    "seccomp=unconfined",
+    "--net=host",
+    "--volume=/etc/localtime:/etc/localtime:ro"
+  ],
+  "customizations": {
+    "vscode": {
+      "settings.json": {
+        "terminal.integrated.profiles.linux": { "bash": { "path": "/bin/bash" } }
+      }
+    }
+  }
+}
diff --git a/.devcontainer/cuda/Dockerfile b/.devcontainer/cuda/Dockerfile
new file mode 100644
index 00000000000..e0c09f5de29
--- /dev/null
+++ b/.devcontainer/cuda/Dockerfile
@@ -0,0 +1,14 @@
+FROM ghcr.io/autowarefoundation/autoware-openadk:latest-devel-cuda
+
+ENV SHELL /bin/bash
+
+ARG USERNAME=autoware
+ARG USER_UID=1000
+ARG USER_GID=$USER_UID
+
+RUN groupadd --gid $USER_GID $USERNAME \
+  && useradd --uid $USER_UID --gid $USER_GID -m $USERNAME \
+  && apt-get update \
+  && apt-get install -y sudo \
+  && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
+  && chmod 0440 /etc/sudoers.d/$USERNAME
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/cuda/devcontainer.json
similarity index 94%
rename from .devcontainer/devcontainer.json
rename to .devcontainer/cuda/devcontainer.json
index 1fd21bcaba3..ccae5a61799 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/cuda/devcontainer.json
@@ -1,5 +1,5 @@
 {
-  "name": "Autoware",
+  "name": "Autoware-cuda",
   "build": {
     "dockerfile": "Dockerfile"
   },
diff --git a/ansible/playbooks/openadk.yaml b/ansible/playbooks/openadk.yaml
index 145cb7830f3..496b3d8a73b 100644
--- a/ansible/playbooks/openadk.yaml
+++ b/ansible/playbooks/openadk.yaml
@@ -5,7 +5,7 @@
     - name: Verify OS
       ansible.builtin.fail:
         msg: Only Ubuntu 22.04 is supported for this branch. Please refer to https://autowarefoundation.github.io/autoware-documentation/main/installation/autoware/source-installation/.
-      when: ansible_distribution == 'Ubuntu' and ansible_distribution_version != '22.04'
+      when: ansible_distribution != 'Ubuntu' or ansible_distribution_version != '22.04'
 
     - name: Print args
       ansible.builtin.debug:
diff --git a/docker/README.md b/docker/README.md
index be79a809cbc..9b90be1df14 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -33,7 +33,7 @@ To install without **NVIDIA GPU** support:
 
 ## Usage
 
-### Runtime Setup
+### Runtime
 
 You can use `run.sh` to run the Autoware runtime container with the map data:
 
@@ -59,7 +59,7 @@ Inside the container, you can run the Autoware simulation by following these tut
 
 [Rosbag Replay Simulation](../../tutorials/ad-hoc-simulation/rosbag-replay-simulation.md).
 
-### Development Setup
+### Development Environment
 
 You can use [VS Code Remote Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) to develop Autoware in the containerized environment with ease. Alternatively, you can use `run.sh` manually to run the Autoware development container with the workspace mounted.
 
@@ -68,18 +68,34 @@ You can use [VS Code Remote Containers](https://marketplace.visualstudio.com/ite
 
 Get the Visual Studio Code's [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension. And reopen the workspace in the container by selecting `Remote-Containers: Reopen in Container` from the Command Palette (`F1`).
 
-By default devcontainer assumes NVIDIA GPU support, you can change this by deleting these lines within `.devcontainer/devcontainer.json`:
+Two dev containers are available: `Autoware` and `Autoware-cuda`.
 
-```json
-  "hostRequirements": {
-    "gpu": true
-  },
-```
+If you want to use the CUDA-enabled dev container, you need to install the NVIDIA Container Toolkit before opening the workspace in the container:
+
+```bash
+# Add NVIDIA container toolkit GPG key
+sudo apt-key adv --fetch-keys https://nvidia.github.io/libnvidia-container/gpgkey
+sudo gpg --no-default-keyring --keyring /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg --import /etc/apt/trusted.gpg
+
+# Add NVIDIA container toolkit repository
+echo "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(dpkg --print-architecture) /" | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+
+# Update the package list
+sudo apt-get update
 
-```json
-  "--gpus", "all"
+# Install NVIDIA Container Toolkit
+sudo apt-get install -y nvidia-container-toolkit
+
+# Add NVIDIA runtime support to docker engine
+sudo nvidia-ctk runtime configure --runtime=docker
+
+# Restart docker daemon
+sudo systemctl restart docker
 ```
 
+Then, you can use the `Remote-Containers: Reopen in Container` command to open the workspace in the container.
+
+
 #### Using `run.sh` for Development
 
 ```bash

From 3df3bf920e320687eb7705651358a69dd7c5d61a Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 6 Mar 2024 14:16:42 +0000
Subject: [PATCH 15/17] style(pre-commit): autofix

---
 docker/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docker/README.md b/docker/README.md
index 9b90be1df14..17358afd8fb 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -95,7 +95,6 @@ sudo systemctl restart docker
 ```
 
 Then, you can use the `Remote-Containers: Reopen in Container` command to open the workspace in the container.
 
-
 #### Using `run.sh` for Development
 
 ```bash

From 09811ae95d7a89b94ae0fad90b4abd51ad6d7971 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?=
Date: Wed, 6 Mar 2024 19:00:23 +0300
Subject: [PATCH 16/17] fix comment for ansible dev_tools
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: M. Fatih Cırıt
---
 ansible/roles/dev_tools/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ansible/roles/dev_tools/README.md b/ansible/roles/dev_tools/README.md
index 3b31b06c212..145c70f020b 100644
--- a/ansible/roles/dev_tools/README.md
+++ b/ansible/roles/dev_tools/README.md
@@ -35,7 +35,6 @@ git lfs install
 pip3 install pre-commit
 
 # Install a specific version of clang-format using pip3
-# Replace X.Y with the actual version you want to install
 pip3 install clang-format==${pre_commit_clang_format_version}
 
 # Install Go

From fe9a840e74da3076dc6e5b184dc8f0a4606eec99 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Fatih=20C=C4=B1r=C4=B1t?=
Date: Wed, 6 Mar 2024 19:02:30 +0300
Subject: [PATCH 17/17] reorder prefix and suffix
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: M. Fatih Cırıt
---
 .github/actions/docker-build-and-push/action.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/actions/docker-build-and-push/action.yaml b/.github/actions/docker-build-and-push/action.yaml
index 7b5c120e83a..13526e9083e 100644
--- a/.github/actions/docker-build-and-push/action.yaml
+++ b/.github/actions/docker-build-and-push/action.yaml
@@ -8,10 +8,10 @@ inputs:
   build-args:
     description: ""
     required: false
-  tag-suffix:
     description: ""
     required: false
-  tag-prefix:
     description: ""
     required: false
+  tag-prefix:
    description: ""
    required: false
+  tag-suffix:
    description: ""
    required: false
   allow-push:
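For context, the reordered `tag-prefix` and `tag-suffix` inputs are consumed by callers of the `docker-build-and-push` composite action. The sketch below is a minimal, hypothetical caller workflow: the workflow name, job layout, checkout step, and all values are illustrative assumptions, and only the input names (`build-args`, `tag-prefix`, `tag-suffix`, `allow-push`) come from the action file touched by this patch.

```yaml
# Hypothetical caller of .github/actions/docker-build-and-push.
# All values are illustrative; only the input names are taken from the action above.
name: docker-build-and-push-example
on: workflow_dispatch

jobs:
  build-and-push:
    runs-on: ubuntu-22.04
    steps:
      # The repository must be checked out so the local composite action is available.
      - uses: actions/checkout@v4

      - name: Build and push
        uses: ./.github/actions/docker-build-and-push
        with:
          build-args: |
            ROS_DISTRO=humble
          tag-prefix: "1.0" # assumed to be prepended to the generated image tag
          tag-suffix: -cuda # assumed to be appended to the generated image tag
          allow-push: false
```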