diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 1fd55fb0b2..8f905969a6 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -60,7 +60,7 @@ This project relies on the following dependencies:
## Run PMM-Server in Docker
-1. Run `docker run -d -p 80:80 -p 443:443 --name pmm-server public.ecr.aws/e7j3v3n0/pmm-server:3-dev-latest`.
+1. Run `docker run -d -p 80:8080 -p 443:8443 --name pmm-server public.ecr.aws/e7j3v3n0/pmm-server:3-dev-latest`.
2. Open http://localhost/.
Please note that the use of port 80 is discouraged and should be avoided. For optimal security, use port 443 along with a valid SSL certificate.
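For example, here is a minimal sketch of an HTTPS-only run for local development, based on the command above (dropping the port-80 mapping is an illustrative choice, not part of the documented command; the image name and the container HTTPS port 8443 are taken from the mapping above):

```bash
# Sketch: publish only HTTPS (host 443 -> container 8443) and skip port 80 entirely.
# PMM Server typically ships with a self-signed certificate; replace it with a valid
# certificate for anything beyond local development.
docker run -d -p 443:8443 --name pmm-server public.ecr.aws/e7j3v3n0/pmm-server:3-dev-latest
```

With this variant, open https://localhost/ instead of the plain-HTTP URL.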
diff --git a/.github/ISSUE_TEMPLATE/doc-md-template.md b/.github/ISSUE_TEMPLATE/doc-md-template.md
index 7dd3802c62..50055dde93 100644
--- a/.github/ISSUE_TEMPLATE/doc-md-template.md
+++ b/.github/ISSUE_TEMPLATE/doc-md-template.md
@@ -63,4 +63,4 @@ Uses same type but different label text:
- Caution: Used to mean 'Continue with care'.
-- Important: A significant point that deserves emphasis.
\ No newline at end of file
+- Important: A significant point that deserves emphasis.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index caced98a72..7bd01d8d32 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -57,3 +57,70 @@ updates:
ignore:
- dependency-name: "*"
update-types: ["version-update:semver-patch"]
+
+# V3 branch
+ - package-ecosystem: "gomod"
+ directory: "/"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+ ignore:
+ - dependency-name: "*"
+ update-types: ["version-update:semver-patch"]
+
+ - package-ecosystem: "gomod"
+ directory: "/tools"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+ ignore:
+ - dependency-name: "*"
+ update-types: ["version-update:semver-patch"]
+
+ - package-ecosystem: "gomod"
+ directory: "/api-tests/tools"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+ ignore:
+ - dependency-name: "*"
+ update-types: ["version-update:semver-patch"]
+
+ - package-ecosystem: "docker"
+ directory: "/"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "docker"
+ directory: "/build/docker/client"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "docker"
+ directory: "/build/docker/rpmbuild"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "docker"
+ directory: "/build/docker/server"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+
+ - package-ecosystem: "npm"
+ directory: "/cli-tests"
+ target-branch: "v3"
+ schedule:
+ interval: "daily"
+ ignore:
+ - dependency-name: "*"
+ update-types: ["version-update:semver-patch"]
diff --git a/.github/workflows/admin.yml b/.github/workflows/admin.yml
index fe289c9ebb..f1d3a9f56a 100644
--- a/.github/workflows/admin.yml
+++ b/.github/workflows/admin.yml
@@ -40,13 +40,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go release
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -55,7 +55,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
@@ -71,7 +71,7 @@ jobs:
run: make test-cover
- name: Upload coverage results
- uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a # v5.0.2
+ uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 # v5.3.1
with:
file: cover.out
flags: admin
@@ -103,13 +103,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go release
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -118,7 +118,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
@@ -158,7 +158,7 @@ jobs:
- name: Attach the report on failure
if: failure()
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+ uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: "report-${{ matrix.go-version }}-${{ matrix.test-type }}"
path: ${{ github.workspace }}/cli-tests/playwright-report/
diff --git a/.github/workflows/agent.yml b/.github/workflows/agent.yml
index 6ea36bc739..f0248d68c4 100644
--- a/.github/workflows/agent.yml
+++ b/.github/workflows/agent.yml
@@ -77,13 +77,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go release
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -92,7 +92,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
@@ -113,9 +113,9 @@ jobs:
run: make test-cover
- name: Upload coverage results
- uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a # v5.0.2
+ uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 # v5.3.1
with:
- file: cover.out
+ files: cover.out
flags: agent
env_vars: MYSQL_IMAGE,MONGO_IMAGE,POSTGRES_IMAGE,PMM_SERVER_IMAGE
fail_ci_if_error: false
diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml
index b1900d1594..a62a5267e5 100644
--- a/.github/workflows/api-tests.yml
+++ b/.github/workflows/api-tests.yml
@@ -146,7 +146,7 @@ jobs:
- name: Upload the logs on failure
if: ${{ failure() }}
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+ uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: "logs.zip"
path: ${{ github.workspace }}/logs.zip
diff --git a/.github/workflows/api.yml b/.github/workflows/api.yml
index 840dc318bb..c25b657c55 100644
--- a/.github/workflows/api.yml
+++ b/.github/workflows/api.yml
@@ -29,12 +29,12 @@ jobs:
echo "VERSION=$VERSION" >> $GITHUB_ENV
echo "ID=$ID" >> $GITHUB_ENV
- - name: Sync API spec
- uses: readmeio/rdme@51a80867c45de15e2b41af0c4bd5bbc61b932804 # v8.6.6
+ - name: API
+ uses: readmeio/rdme@e770fc3b3af009753fb02f636b59498523dccb37 # v10.1.1
with:
rdme: openapi ./api/swagger/swagger.json --version=${{ env.VERSION }} --id=${{ env.ID }} --key=${{ secrets.README_TOKEN }}
- - name: Sync Markdown docs
- uses: readmeio/rdme@51a80867c45de15e2b41af0c4bd5bbc61b932804 # v8.6.6
- with:
- rdme: docs ./docs/api --version=${{ env.VERSION }} --key=${{ secrets.README_TOKEN }}
+ - name: Markdown docs
+ uses: readmeio/rdme@e770fc3b3af009753fb02f636b59498523dccb37 # v10.1.1
+ with:
+ rdme: docs docs/api --version=${{ env.VERSION }} --key=${{ secrets.README_TOKEN }}
diff --git a/.github/workflows/clean.yml b/.github/workflows/clean.yml
index cfbbe9cf7b..22558619bf 100644
--- a/.github/workflows/clean.yml
+++ b/.github/workflows/clean.yml
@@ -35,7 +35,7 @@ jobs:
env:
# to avoid error due to `go version` accepting -v flag with an argument since 1.15
GOFLAGS: ""
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version: ${{ matrix.go.version }}
@@ -45,7 +45,7 @@ jobs:
lfs: true
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ matrix.go.version }}-modules-${{ hashFiles('**/go.sum') }}
@@ -53,7 +53,7 @@ jobs:
${{ matrix.go.version }}-modules-
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ matrix.go.version }}-build-${{ github.ref }}-${{ hashFiles('**') }}
diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml
index 2104d7b2a3..cd46f83dee 100644
--- a/.github/workflows/dependabot.yml
+++ b/.github/workflows/dependabot.yml
@@ -8,12 +8,12 @@ permissions:
jobs:
dependabot:
name: Enable auto-merge
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
if: ${{ github.actor == 'dependabot[bot]' }}
steps:
- name: Dependabot metadata
id: metadata
- uses: dependabot/fetch-metadata@dbb049abf0d677abbd7f7eee0375145b417fdd34 # v2.2.0
+ uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/devcontainer.yml b/.github/workflows/devcontainer.yml
index b7ce2b2a8c..aaa72cce42 100644
--- a/.github/workflows/devcontainer.yml
+++ b/.github/workflows/devcontainer.yml
@@ -37,7 +37,7 @@ jobs:
ref: ${{ github.event.inputs.branch }}
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1
+ uses: docker/setup-buildx-action@f7ce87c1d6bead3e36075b2ce75da1f6cc28aaca # v3.9.0
- name: Login to ghcr.io registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
@@ -54,7 +54,7 @@ jobs:
password: ${{ secrets.DOCKERHUB_PASSWORD }}
- name: Build and push to registries
- uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0
+ uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0
with:
file: ./.devcontainer/Dockerfile
context: .
diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
index edadbb92bf..170464aae3 100644
--- a/.github/workflows/documentation.yml
+++ b/.github/workflows/documentation.yml
@@ -25,7 +25,7 @@ jobs:
Makefile.include
- name: Setup Python
- uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
+ uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0
with:
python-version: '3.x'
cache: 'pip'
@@ -55,6 +55,5 @@ jobs:
working-directory: documentation
run: |
mike deploy 3 -b publish -p
- # Note: enable the next line after v3 GA
- # mike set-default 3 -b publish -p
- mike retitle 3 "3.x (BETA)" -b publish -p
+ mike set-default 3 -b publish -p
+ mike retitle 3 "3.x (LATEST)" -b publish -p
diff --git a/.github/workflows/helm-tests.yml b/.github/workflows/helm-tests.yml
index 3d4880c617..73a3666b7f 100644
--- a/.github/workflows/helm-tests.yml
+++ b/.github/workflows/helm-tests.yml
@@ -57,4 +57,4 @@ jobs:
kubectl get pods
kubectl describe pod --selector=app.kubernetes.io/name=pmm || true
kubectl get events --sort-by=lastTimestamp
- kubectl logs --all-containers --timestamps --selector=app.kubernetes.io/name=pmm || true
\ No newline at end of file
+ kubectl logs --all-containers --timestamps --selector=app.kubernetes.io/name=pmm || true
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e3c2c610a2..a1dbefe690 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -23,14 +23,14 @@ jobs:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- - name: Set up Go
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ - name: Set up Go release
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -39,7 +39,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
@@ -102,12 +102,12 @@ jobs:
echo "$out" | bin/reviewdog -f=buf -reporter=github-pr-review -fail-level=error
- name: Run code linters
- uses: reviewdog/action-golangci-lint@7708105983c614f7a2725e2172908b7709d1c3e4 # v2.6.2
+ uses: reviewdog/action-golangci-lint@dd3fda91790ca90e75049e5c767509dc0ec7d99b # v2.7.0
with:
github_token: ${{ secrets.ROBOT_TOKEN || secrets.GITHUB_TOKEN }}
go_version_file: ${{ github.workspace }}/go.mod
reporter: github-pr-review
- fail_on_error: true
+ fail_level: error
cache: false
golangci_lint_flags: "-c=.golangci.yml"
golangci_lint_version: v1.62.0 # Version should match specified in Makefile
@@ -139,20 +139,20 @@ jobs:
git status
- typos_check:
+ spell-check:
+ name: Spell check
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- with:
- fetch-depth: 0
+
- name: Check spelling of md files
- uses: crate-ci/typos@9d890159570d5018df91fedfa40b4730cd4a81b1 # v1.28.4
+ uses: crate-ci/typos@11ca4583f2f3f74c7e7785c0ecb20fe2c99a4308 # v1.29.5
with:
files: "**/*.md ./documentation/**/*.md"
merge-gatekeeper:
- needs: [ check, typos_check ]
+ needs: [ check, spell-check ]
name: Merge Gatekeeper
if: ${{ always() }}
runs-on: ubuntu-22.04
diff --git a/.github/workflows/managed.yml b/.github/workflows/managed.yml
index c7a9c88760..dbda78d10f 100644
--- a/.github/workflows/managed.yml
+++ b/.github/workflows/managed.yml
@@ -83,9 +83,9 @@ jobs:
run: docker exec -i pmm-server make -C managed test-cover
- name: Upload coverage results
- uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a # v5.0.2
+ uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 # v5.3.1
with:
- file: managed/cover.out
+ files: managed/cover.out
flags: managed
env_vars: PMM_SERVER_IMAGE
fail_ci_if_error: false
diff --git a/.github/workflows/qan-api2.yml b/.github/workflows/qan-api2.yml
index 3c91ca20f7..25114f7fcf 100644
--- a/.github/workflows/qan-api2.yml
+++ b/.github/workflows/qan-api2.yml
@@ -41,13 +41,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go release
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -56,7 +56,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
diff --git a/.github/workflows/release-doc.yml b/.github/workflows/release-doc.yml
index 56c3cf48b5..0c8f5cc189 100644
--- a/.github/workflows/release-doc.yml
+++ b/.github/workflows/release-doc.yml
@@ -3,6 +3,8 @@ on:
push:
tags:
- v[0-9]+.[0-9]+.[0-9]+*
+ workflow_dispatch:
+
permissions:
contents: read
@@ -10,6 +12,7 @@ jobs:
release:
permissions:
contents: write # for softprops/action-gh-release to create GitHub release
+
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-22.04
steps:
@@ -18,9 +21,10 @@ jobs:
shell: bash
run: |
version="${release_tag/refs\/tags\/v/}"
- wget https://raw.githubusercontent.com/percona/pmm-doc/main/docs/release-notes/$version.md -O ${{ github.workspace }}-CHANGELOG.txt
+ wget https://raw.githubusercontent.com/percona/pmm/v3/documentation/docs/release-notes/$version.md -O ${{ github.workspace }}-CHANGELOG.txt
env:
release_tag: ${{ github.ref }}
+
- name: Convert mkdocs
shell: bash --noprofile --norc -ex {0}
run: |
@@ -29,8 +33,9 @@ jobs:
grep -rl '!!! alert alert-info' ${{ github.workspace }}-CHANGELOG.txt | xargs --no-run-if-empty sed -i 's/\!\!\! alert alert-info/\>/g'
grep -rl '!!! note alert alert-primary' ${{ github.workspace }}-CHANGELOG.txt | xargs --no-run-if-empty sed -i 's/\!\!\! note alert alert-primary "\(.*\)"/\> \:memo\: **\1**/g'
grep -rl '!!! note alert alert-primary' ${{ github.workspace }}-CHANGELOG.txt | xargs --no-run-if-empty sed -i 's/\!\!\! note alert alert-primary/\> \:memo\: **Note**/g'
+
- name: Create Release
- uses: softprops/action-gh-release@e7a8f85e1c67a31e6ed99a94b41bd0b71bbee6b8 # v2.0.9
+ uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1
with:
body_path: ${{ github.workspace }}-CHANGELOG.txt
draft: true
diff --git a/.github/workflows/sbom.yml b/.github/workflows/sbom.yml
index 13a3a0caff..c3d7cedc4a 100644
--- a/.github/workflows/sbom.yml
+++ b/.github/workflows/sbom.yml
@@ -13,13 +13,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Create SBOM for PMM
- uses: anchore/sbom-action@fc46e51fd3cb168ffb36c6d1915723c47db58abb # v0.17.7
+ uses: anchore/sbom-action@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0
with:
file: go.mod
artifact-name: pmm.spdx.json
- name: Publish SBOM for PMM
- uses: anchore/sbom-action/publish-sbom@fc46e51fd3cb168ffb36c6d1915723c47db58abb # v0.17.7
+ uses: anchore/sbom-action/publish-sbom@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0
with:
sbom-artifact-match: ".*\\.spdx\\.json$"
@@ -30,12 +30,12 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Create SBOM for vmproxy
- uses: anchore/sbom-action@fc46e51fd3cb168ffb36c6d1915723c47db58abb # v0.17.7
+ uses: anchore/sbom-action@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0
with:
path: ./vmproxy
artifact-name: vmproxy.spdx.json
- name: Publish SBOM for vmproxy
- uses: anchore/sbom-action/publish-sbom@fc46e51fd3cb168ffb36c6d1915723c47db58abb # v0.17.7
+ uses: anchore/sbom-action/publish-sbom@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0
with:
sbom-artifact-match: ".*\\.spdx\\.json$"
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index c66d00cdda..56ed73b400 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -35,7 +35,7 @@ jobs:
publish_results: true
- name: Upload results
- uses: actions/upload-artifact@97a0fba1372883ab732affbe8f94b823f91727db # v3.pre.node20
+ uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
with:
name: SARIF file
path: results.sarif
@@ -43,6 +43,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard (optional).
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3.27.4
+ uses: github/codeql-action/upload-sarif@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9
with:
sarif_file: results.sarif
diff --git a/.github/workflows/vmproxy.yml b/.github/workflows/vmproxy.yml
index 7d38a6140e..4635170cc9 100644
--- a/.github/workflows/vmproxy.yml
+++ b/.github/workflows/vmproxy.yml
@@ -41,13 +41,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Go release
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
+ uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
with:
go-version-file: ${{ github.workspace }}/go.mod
cache: false
- name: Enable Go build cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref }}-${{ hashFiles('**') }}
@@ -56,7 +56,7 @@ jobs:
${{ runner.os }}-go-build-
- name: Enable Go modules cache
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
+ uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-modules-${{ hashFiles('**/go.sum') }}
@@ -73,7 +73,7 @@ jobs:
run: make test-cover
- name: Upload coverage results
- uses: codecov/codecov-action@5c47607acb93fed5485fdbf7232e8a31425f672a # v5.0.2
+ uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 # v5.3.1
with:
file: cover.out
flags: vmproxy
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b5add1d313..1f4c751a6f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,14 +3,14 @@
We'd be glad to welcome you to the Percona community, which tries to keep open source open. [Percona Monitoring and Management (PMM)](https://www.percona.com/software/database-tools/percona-monitoring-and-management) is an open source database monitoring solution. It allows you to monitor your databases and various services (HAProxy, ProxySQL, etc.) as well as Nodes, Kubernetes clusters and containers. Please check our [Documentation](https://docs.percona.com/percona-monitoring-and-management/details/architecture.html) for the current architecture.
## Table of contents
-1. [Project repos structure](#Project-repos-structure)
-2. [API documentation](#API-Reference-Documentation)
-3. [Prerequisites](#Prerequisites)
-4. [Submitting a bug](#Submitting-a-Bug)
-5. [Setup your local development environment](#Setup-your-local-development-environment)
-6. [Tests](#Tests)
-7. [Feature Build](#Feature-Build)
-8. [Code Reviews](#Code-Reviews)
+1. [Project repos structure](#project-repos-structure)
+2. [API documentation](#api-reference-documentation)
+3. [Prerequisites](#prerequisites)
+4. [Submitting a bug](#submitting-a-bug)
+5. [Setup your local development environment](#setup-your-local-development-environment)
+6. [Tests](#tests)
+7. [Feature build](#feature-build)
+8. [Code Reviews](#code-reviews)
## Project repos structure
This project is built from several repositories:
@@ -190,7 +190,7 @@ Please see: [How to create a feature build](https://github.com/Percona-Lab/pmm-s
### The Rules
1. Create a Feature Build for every feature/improvement/bugfix you are working on.
-2. Create a draft Pull Request in https://percona-lab/pmm-submodules.
+2. Create a draft Pull Request in https://github.com/Percona-Lab/pmm-submodules.
3. Change the status of the Pull Request from Draft to Open ONLY if you are contributing code changes to pmm-submodules (very rare).
4. Provide a short explanation in the Description field of your feature build PR and add checkboxes for all related Pull Requests. If you need examples, check out [PRs](https://github.com/Percona-Lab/pmm-submodules/pulls) made by others.
5. After all related PRs in the feature build are merged, you should:
diff --git a/README.md b/README.md
index a2cad08312..d0160f58a4 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@
[](https://codecov.io/gh/percona/pmm)
[](https://goreportcard.com/report/github.com/percona/pmm)
[](https://scorecard.dev/viewer/?uri=github.com/percona/pmm)
+[](https://www.bestpractices.dev/projects/9702)
[](https://forums.percona.com/)

@@ -20,9 +21,9 @@ PMM helps users to:
* Improve Data Security
-See the [PMM Documentation](https://www.percona.com/doc/percona-monitoring-and-management/2.x/index.html) for more information.
+See the [PMM Documentation](https://docs.percona.com/percona-monitoring-and-management/3/index.html) for more information.
-## Use Cases
+## Use cases
* Monitor your database performance with customizable dashboards and real-time alerting.
* Spot critical performance issues faster, understand the root cause of incidents better and troubleshoot them more efficiently.
@@ -32,30 +33,31 @@ See the [PMM Documentation](https://www.percona.com/doc/percona-monitoring-and-m
## Architecture
-Please check our [Documentation](https://docs.percona.com/percona-monitoring-and-management/details/architecture.html) for the actual architecture.
+Check the [PMM documentation](../pmm/documentation/docs/index.md) for the current architecture.
-
+
-
+
-
+
## Installation
-There are numbers of installation methods, please check our [Setting Up](https://docs.percona.com/percona-monitoring-and-management/setting-up/index.html) documentation page.
+There are a number of installation methods; please check our [Setting Up](../pmm/documentation/docs/install-pmm/index.md) documentation page.
But in a nutshell:
-1. Download PMM server Docker image
+
+1. Download PMM server Docker image:
```bash
$ docker pull percona/pmm-server:3
```
-2. Create the data volume container
+2. Create the data volume container:
```bash
$ docker volume create pmm-data
```
-3. Run PMM server container
+3. Run PMM Server container:
```bash
$ docker run --detach --restart always \
--publish 443:8443 \
@@ -65,10 +67,18 @@ percona/pmm-server:3
```
4. Start a web browser and in the address bar enter the server name or IP address of the PMM server host.
-
+
Enter the username and password. The defaults are username: **admin** and password: **admin**
+## Need help?
+
+| **Commercial Support** | **Community Support** |
+|:--|:--|
+| **Enterprise-grade support** for mission-critical monitoring deployments with Percona Monitoring and Management. Get expert guidance for complex monitoring scenarios across hybrid environments—from cloud providers to bare metal infrastructures. | Connect with our engineers and community members to troubleshoot issues, share best practices, and discuss monitoring strategies. |
+| **[Get Percona Support](https://hubs.ly/Q02_Fs100)** | **[Visit our Forum](https://forums.percona.com/c/percona-monitoring-and-management-pmm)** |
+
+
## How to get involved
We encourage contributions and are always looking for new members that are as dedicated to serving the community as we are.
@@ -77,7 +87,7 @@ If you’re looking for information about how you can contribute, we have [contr
We're looking forward to your contributions and hope to hear from you soon on our [Forums](https://forums.percona.com).
-## Submitting Bug Reports
+## Submitting bug reports
If you find a bug in Percona Monitoring and Management or one of the related projects, you should submit a report to that project's [JIRA](https://jira.percona.com) issue tracker. Some related projects also have GitHub Issues enabled, so you could also submit the report there.
@@ -85,17 +95,18 @@ Your first step should be [to search](https://jira.percona.com/issues/?jql=proje
If there is no existing report, submit a report following these steps:
-1. [Sign in to Percona JIRA.](https://jira.percona.com/login.jsp) You will need to create an account if you do not have one.
-2. [Go to the Create Issue screen and select the relevant project.](https://jira.percona.com/secure/CreateIssueDetails!init.jspa?pid=11600&issuetype=1&priority=3)
-3. Fill in the fields of Summary, Description, Steps To Reproduce, and Affects Version to the best you can. If the bug corresponds to a crash, attach the stack trace from the logs.
+1. [Sign in to Percona JIRA](https://jira.percona.com). You will need to create an account if you do not have one.
+2. From the top navigation bar, anywhere in Jira, click **Create**.
+3. Select Percona Monitoring and Management (PMM) from the **Project** drop-down menu.
+4. Fill in the fields **Summary**, **Description**, **Steps To Reproduce**, and **Affects Version** to the best of your ability. If the bug corresponds to a crash, attach the stack trace from the logs.
-An excellent resource is [Elika Etemad's article on filing good bug reports.](http://fantasai.inkedblade.net/style/talks/filing-good-bugs/).
+An excellent resource is [Elika Etemad's article on filing good bug reports](http://fantasai.inkedblade.net/style/talks/filing-good-bugs/).
As a general rule of thumb, please try to create bug reports that are:
-- *Reproducible.* Include steps to reproduce the problem.
-- *Specific.* Include as much detail as possible: which version, what environment, etc.
-- *Unique.* Do not duplicate existing tickets.
+- *Reproducible* - Include steps to reproduce the problem.
+- *Specific* - Include as much detail as possible: which version, what environment, etc.
+- *Unique* - Do not duplicate existing tickets.
## Licensing
diff --git a/admin/commands/config.go b/admin/commands/config.go
index 061e317c26..8454aaa8ad 100644
--- a/admin/commands/config.go
+++ b/admin/commands/config.go
@@ -43,9 +43,9 @@ func (res *configResult) String() string {
//
//nolint:lll
type ConfigCommand struct {
- NodeAddress string `arg:"" default:"${nodeIp}" help:"Node address (autodetected, default: ${default})"`
+ NodeAddress string `arg:"" default:"${nodeIp}" help:"Node address (autodetected, default: ${nodeIp})"`
NodeType string `arg:"" enum:"generic,container" default:"${nodeTypeDefault}" help:"Node type. One of: [${enum}]. Default: ${default}"`
- NodeName string `arg:"" default:"${hostname}" help:"Node name (autodetected, default: ${default})"`
+ NodeName string `arg:"" default:"${hostname}" help:"Node name (autodetected, default: ${hostname})"`
NodeModel string `help:"Node model"`
Region string `help:"Node region"`
Az string `help:"Node availability zone"`
diff --git a/admin/commands/management/register.go b/admin/commands/management/register.go
index 3b6a6afc47..2f235815d6 100644
--- a/admin/commands/management/register.go
+++ b/admin/commands/management/register.go
@@ -49,11 +49,11 @@ func (res *registerResult) String() string {
//
//nolint:lll
type RegisterCommand struct {
- Address string `name:"node-address" arg:"" default:"${nodeIp}" help:"Node address (autodetected, default: ${default})"`
+ Address string `name:"node-address" arg:"" default:"${nodeIp}" help:"Node address (autodetected, default: ${nodeIp})"`
NodeType string `arg:"" enum:"generic,container" default:"generic" help:"Node type. One of: [${enum}]. Default: ${default}"`
- NodeName string `arg:"" default:"${hostname}" help:"Node name (autodetected, default: ${default})"`
- MachineID string `default:"${defaultMachineID}" help:"Node machine-id (autodetected, default: ${default})"`
- Distro string `default:"${distro}" help:"Node OS distribution (autodetected, default: ${default})"`
+ NodeName string `arg:"" default:"${hostname}" help:"Node name (autodetected, default: ${hostname})"`
+ MachineID string `default:"${defaultMachineID}" help:"Node machine-id (autodetected, default: ${defaultMachineID})"`
+ Distro string `default:"${distro}" help:"Node OS distribution (autodetected, default: ${distro})"`
ContainerID string `help:"Container ID"`
ContainerName string `help:"Container name"`
NodeModel string `help:"Node model"`
diff --git a/admin/commands/pmm/server/docker/mock_functions_test.go b/admin/commands/pmm/server/docker/mock_functions_test.go
index 0e9b42168a..271cd2e2be 100644
--- a/admin/commands/pmm/server/docker/mock_functions_test.go
+++ b/admin/commands/pmm/server/docker/mock_functions_test.go
@@ -206,7 +206,7 @@ func (_m *MockFunctions) FindServerContainers(ctx context.Context) ([]types.Cont
return r0, r1
}
-// GetDockerClient provides a mock function with given fields:
+// GetDockerClient provides a mock function with no fields
func (_m *MockFunctions) GetDockerClient() *client.Client {
ret := _m.Called()
@@ -262,7 +262,7 @@ func (_m *MockFunctions) InstallDocker(ctx context.Context) error {
return r0
}
-// IsDockerInstalled provides a mock function with given fields:
+// IsDockerInstalled provides a mock function with no fields
func (_m *MockFunctions) IsDockerInstalled() (bool, error) {
ret := _m.Called()
diff --git a/agent/agentlocal/mock_client_test.go b/agent/agentlocal/mock_client_test.go
index 3214180af4..a8f491fe3f 100644
--- a/agent/agentlocal/mock_client_test.go
+++ b/agent/agentlocal/mock_client_test.go
@@ -26,7 +26,7 @@ func (_m *mockClient) Describe(_a0 chan<- *prometheus.Desc) {
_m.Called(_a0)
}
-// GetConnectionUpTime provides a mock function with given fields:
+// GetConnectionUpTime provides a mock function with no fields
func (_m *mockClient) GetConnectionUpTime() float32 {
ret := _m.Called()
@@ -44,7 +44,7 @@ func (_m *mockClient) GetConnectionUpTime() float32 {
return r0
}
-// GetNetworkInformation provides a mock function with given fields:
+// GetNetworkInformation provides a mock function with no fields
func (_m *mockClient) GetNetworkInformation() (time.Duration, time.Duration, error) {
ret := _m.Called()
@@ -79,7 +79,7 @@ func (_m *mockClient) GetNetworkInformation() (time.Duration, time.Duration, err
return r0, r1, r2
}
-// GetServerConnectMetadata provides a mock function with given fields:
+// GetServerConnectMetadata provides a mock function with no fields
func (_m *mockClient) GetServerConnectMetadata() *agentv1.ServerConnectMetadata {
ret := _m.Called()
diff --git a/agent/agentlocal/mock_supervisor_test.go b/agent/agentlocal/mock_supervisor_test.go
index c587060ef7..421137f0e5 100644
--- a/agent/agentlocal/mock_supervisor_test.go
+++ b/agent/agentlocal/mock_supervisor_test.go
@@ -13,7 +13,7 @@ type mockSupervisor struct {
mock.Mock
}
-// AgentsList provides a mock function with given fields:
+// AgentsList provides a mock function with no fields
func (_m *mockSupervisor) AgentsList() []*agentlocalv1.AgentInfo {
ret := _m.Called()
@@ -33,7 +33,7 @@ func (_m *mockSupervisor) AgentsList() []*agentlocalv1.AgentInfo {
return r0
}
-// AgentsLogs provides a mock function with given fields:
+// AgentsLogs provides a mock function with no fields
func (_m *mockSupervisor) AgentsLogs() map[string][]string {
ret := _m.Called()
diff --git a/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go b/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go
index fe429aa1c4..b24a9dc087 100644
--- a/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go
+++ b/agent/agents/mongodb/internal/profiler/aggregator/aggregator.go
@@ -237,7 +237,7 @@ func (a *Aggregator) newInterval(ts time.Time) {
a.timeEnd = a.timeStart.Add(a.d)
}
-func (a *Aggregator) createResult(ctx context.Context) *report.Result {
+func (a *Aggregator) createResult(_ context.Context) *report.Result {
queries := a.mongostats.Queries()
queryStats := queries.CalcQueriesStats(int64(DefaultInterval))
var buckets []*agentv1.MetricsBucket
@@ -300,6 +300,10 @@ func (a *Aggregator) createResult(ctx context.Context) *report.Result {
bucket.Mongodb.MResponseLengthP99 = float32(v.ResponseLength.Pct99)
bucket.Mongodb.MResponseLengthSum = float32(v.ResponseLength.Total)
+ bucket.Mongodb.MFullScanCnt = float32(v.CollScanCount)
+ bucket.Mongodb.MFullScanSum = float32(v.CollScanSum) / 1000
+ bucket.Mongodb.PlanSummary = v.PlanSummary
+
buckets = append(buckets, bucket)
}
diff --git a/agent/agents/mongodb/internal/profiler/aggregator/aggregator_test.go b/agent/agents/mongodb/internal/profiler/aggregator/aggregator_test.go
index 7eb650dd34..ca08443b57 100644
--- a/agent/agents/mongodb/internal/profiler/aggregator/aggregator_test.go
+++ b/agent/agents/mongodb/internal/profiler/aggregator/aggregator_test.go
@@ -38,7 +38,7 @@ func TestAggregator(t *testing.T) {
t.Run("Add", func(t *testing.T) {
t.Run("error if aggregator is not running", func(t *testing.T) {
a := New(time.Now(), "test-agent", logrus.WithField("component", "test"), truncate.GetMongoDBDefaultMaxQueryLength())
- err := a.Add(nil, proto.SystemProfile{})
+ err := a.Add(context.TODO(), proto.SystemProfile{})
assert.EqualError(t, err, "aggregator is not running")
})
})
diff --git a/agent/client/mock_supervisor_test.go b/agent/client/mock_supervisor_test.go
index d1b0d1c079..27616f4a84 100644
--- a/agent/client/mock_supervisor_test.go
+++ b/agent/client/mock_supervisor_test.go
@@ -45,7 +45,7 @@ func (_m *mockSupervisor) AgentLogByID(_a0 string) ([]string, uint) {
return r0, r1
}
-// AgentsList provides a mock function with given fields:
+// AgentsList provides a mock function with no fields
func (_m *mockSupervisor) AgentsList() []*agentlocalv1.AgentInfo {
ret := _m.Called()
@@ -65,7 +65,7 @@ func (_m *mockSupervisor) AgentsList() []*agentlocalv1.AgentInfo {
return r0
}
-// Changes provides a mock function with given fields:
+// Changes provides a mock function with no fields
func (_m *mockSupervisor) Changes() <-chan *agentv1.StateChangedRequest {
ret := _m.Called()
@@ -85,7 +85,7 @@ func (_m *mockSupervisor) Changes() <-chan *agentv1.StateChangedRequest {
return r0
}
-// ClearChangesChannel provides a mock function with given fields:
+// ClearChangesChannel provides a mock function with no fields
func (_m *mockSupervisor) ClearChangesChannel() {
_m.Called()
}
@@ -100,7 +100,7 @@ func (_m *mockSupervisor) Describe(_a0 chan<- *prometheus.Desc) {
_m.Called(_a0)
}
-// QANRequests provides a mock function with given fields:
+// QANRequests provides a mock function with no fields
func (_m *mockSupervisor) QANRequests() <-chan *agentv1.QANCollectRequest {
ret := _m.Called()
@@ -120,7 +120,7 @@ func (_m *mockSupervisor) QANRequests() <-chan *agentv1.QANCollectRequest {
return r0
}
-// RestartAgents provides a mock function with given fields:
+// RestartAgents provides a mock function with no fields
func (_m *mockSupervisor) RestartAgents() {
_m.Called()
}
diff --git a/api/accesscontrol/v1beta1/accesscontrol.pb.go b/api/accesscontrol/v1beta1/accesscontrol.pb.go
index d7b9d135fa..880aeb7a07 100644
--- a/api/accesscontrol/v1beta1/accesscontrol.pb.go
+++ b/api/accesscontrol/v1beta1/accesscontrol.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: accesscontrol/v1beta1/accesscontrol.proto
@@ -9,6 +9,7 @@ package accesscontrolv1beta1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -25,13 +26,12 @@ const (
)
type CreateRoleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *CreateRoleRequest) Reset() {
@@ -86,11 +86,10 @@ func (x *CreateRoleRequest) GetDescription() string {
}
type CreateRoleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *CreateRoleResponse) Reset() {
@@ -131,14 +130,13 @@ func (x *CreateRoleResponse) GetRoleId() uint32 {
}
type UpdateRoleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ Title *string `protobuf:"bytes,2,opt,name=title,proto3,oneof" json:"title,omitempty"`
+ Filter *string `protobuf:"bytes,3,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
+ Description *string `protobuf:"bytes,4,opt,name=description,proto3,oneof" json:"description,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
- Title *string `protobuf:"bytes,2,opt,name=title,proto3,oneof" json:"title,omitempty"`
- Filter *string `protobuf:"bytes,3,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
- Description *string `protobuf:"bytes,4,opt,name=description,proto3,oneof" json:"description,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateRoleRequest) Reset() {
@@ -200,9 +198,9 @@ func (x *UpdateRoleRequest) GetDescription() string {
}
type UpdateRoleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateRoleResponse) Reset() {
@@ -236,13 +234,12 @@ func (*UpdateRoleResponse) Descriptor() ([]byte, []int) {
}
type DeleteRoleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
// Role ID to be used as a replacement for the role. Additional logic applies.
ReplacementRoleId uint32 `protobuf:"varint,2,opt,name=replacement_role_id,json=replacementRoleId,proto3" json:"replacement_role_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteRoleRequest) Reset() {
@@ -290,9 +287,9 @@ func (x *DeleteRoleRequest) GetReplacementRoleId() uint32 {
}
type DeleteRoleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteRoleResponse) Reset() {
@@ -326,11 +323,10 @@ func (*DeleteRoleResponse) Descriptor() ([]byte, []int) {
}
type GetRoleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetRoleRequest) Reset() {
@@ -371,14 +367,13 @@ func (x *GetRoleRequest) GetRoleId() uint32 {
}
type GetRoleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
- Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
- Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetRoleResponse) Reset() {
@@ -440,11 +435,10 @@ func (x *GetRoleResponse) GetDescription() string {
}
type SetDefaultRoleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *SetDefaultRoleRequest) Reset() {
@@ -485,9 +479,9 @@ func (x *SetDefaultRoleRequest) GetRoleId() uint32 {
}
type SetDefaultRoleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SetDefaultRoleResponse) Reset() {
@@ -521,12 +515,11 @@ func (*SetDefaultRoleResponse) Descriptor() ([]byte, []int) {
}
type AssignRolesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleIds []uint32 `protobuf:"varint,1,rep,packed,name=role_ids,json=roleIds,proto3" json:"role_ids,omitempty"`
+ UserId uint32 `protobuf:"varint,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleIds []uint32 `protobuf:"varint,1,rep,packed,name=role_ids,json=roleIds,proto3" json:"role_ids,omitempty"`
- UserId uint32 `protobuf:"varint,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *AssignRolesRequest) Reset() {
@@ -574,9 +567,9 @@ func (x *AssignRolesRequest) GetUserId() uint32 {
}
type AssignRolesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AssignRolesResponse) Reset() {
@@ -610,9 +603,9 @@ func (*AssignRolesResponse) Descriptor() ([]byte, []int) {
}
type ListRolesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListRolesRequest) Reset() {
@@ -646,11 +639,10 @@ func (*ListRolesRequest) Descriptor() ([]byte, []int) {
}
type ListRolesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Roles []*ListRolesResponse_RoleData `protobuf:"bytes,1,rep,name=roles,proto3" json:"roles,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Roles []*ListRolesResponse_RoleData `protobuf:"bytes,1,rep,name=roles,proto3" json:"roles,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListRolesResponse) Reset() {
@@ -691,14 +683,13 @@ func (x *ListRolesResponse) GetRoles() []*ListRolesResponse_RoleData {
}
type ListRolesResponse_RoleData struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RoleId uint32 `protobuf:"varint,1,opt,name=role_id,json=roleId,proto3" json:"role_id,omitempty"`
- Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
- Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"`
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListRolesResponse_RoleData) Reset() {
@@ -761,7 +752,7 @@ func (x *ListRolesResponse_RoleData) GetDescription() string {
var File_accesscontrol_v1beta1_accesscontrol_proto protoreflect.FileDescriptor
-var file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc = []byte{
+var file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc = string([]byte{
0x0a, 0x29, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f,
0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x63, 0x6f,
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x61, 0x63, 0x63,
@@ -941,16 +932,16 @@ var file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc = []byte{
0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x16, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x3a, 0x3a,
0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_accesscontrol_v1beta1_accesscontrol_proto_rawDescOnce sync.Once
- file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData = file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc
+ file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData []byte
)
func file_accesscontrol_v1beta1_accesscontrol_proto_rawDescGZIP() []byte {
file_accesscontrol_v1beta1_accesscontrol_proto_rawDescOnce.Do(func() {
- file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData = protoimpl.X.CompressGZIP(file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData)
+ file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc), len(file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc)))
})
return file_accesscontrol_v1beta1_accesscontrol_proto_rawDescData
}
@@ -1009,7 +1000,7 @@ func file_accesscontrol_v1beta1_accesscontrol_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc), len(file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc)),
NumEnums: 0,
NumMessages: 15,
NumExtensions: 0,
@@ -1020,7 +1011,6 @@ func file_accesscontrol_v1beta1_accesscontrol_proto_init() {
MessageInfos: file_accesscontrol_v1beta1_accesscontrol_proto_msgTypes,
}.Build()
File_accesscontrol_v1beta1_accesscontrol_proto = out.File
- file_accesscontrol_v1beta1_accesscontrol_proto_rawDesc = nil
file_accesscontrol_v1beta1_accesscontrol_proto_goTypes = nil
file_accesscontrol_v1beta1_accesscontrol_proto_depIdxs = nil
}
diff --git a/api/accesscontrol/v1beta1/accesscontrol.pb.gw.go b/api/accesscontrol/v1beta1/accesscontrol.pb.gw.go
index 8f10784f9d..f886c484c7 100644
--- a/api/accesscontrol/v1beta1/accesscontrol.pb.gw.go
+++ b/api/accesscontrol/v1beta1/accesscontrol.pb.gw.go
@@ -10,6 +10,7 @@ package accesscontrolv1beta1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,89 +29,74 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_AccessControlService_CreateRole_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.CreateRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_CreateRole_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.CreateRole(ctx, &protoReq)
return msg, metadata, err
}
func request_AccessControlService_UpdateRole_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UpdateRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
msg, err := client.UpdateRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_UpdateRole_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UpdateRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
msg, err := server.UpdateRole(ctx, &protoReq)
return msg, metadata, err
}
@@ -118,179 +104,151 @@ func local_request_AccessControlService_UpdateRole_0(ctx context.Context, marsha
var filter_AccessControlService_DeleteRole_0 = &utilities.DoubleArray{Encoding: map[string]int{"role_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_AccessControlService_DeleteRole_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteRoleRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AccessControlService_DeleteRole_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DeleteRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_DeleteRole_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteRoleRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AccessControlService_DeleteRole_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DeleteRole(ctx, &protoReq)
return msg, metadata, err
}
func request_AccessControlService_GetRole_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetRoleRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
msg, err := client.GetRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_GetRole_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetRoleRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetRoleRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["role_id"]
+ val, ok := pathParams["role_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "role_id")
}
-
protoReq.RoleId, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "role_id", err)
}
-
msg, err := server.GetRole(ctx, &protoReq)
return msg, metadata, err
}
func request_AccessControlService_ListRoles_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListRolesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListRolesRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListRoles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_ListRoles_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListRolesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListRolesRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListRoles(ctx, &protoReq)
return msg, metadata, err
}
func request_AccessControlService_AssignRoles_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AssignRolesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AssignRolesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AssignRoles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_AssignRoles_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AssignRolesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AssignRolesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AssignRoles(ctx, &protoReq)
return msg, metadata, err
}
func request_AccessControlService_SetDefaultRole_0(ctx context.Context, marshaler runtime.Marshaler, client AccessControlServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SetDefaultRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq SetDefaultRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.SetDefaultRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AccessControlService_SetDefaultRole_0(ctx context.Context, marshaler runtime.Marshaler, server AccessControlServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SetDefaultRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq SetDefaultRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.SetDefaultRole(ctx, &protoReq)
return msg, metadata, err
}
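Note: the same hunks also collapse the per-handler declarations into one grouped var block and drop the _ = err placeholder: err is declared once, the path parameter is read with a short val, ok := declaration, and every declared variable is now used. A trimmed illustration of the resulting shape; parseRoleID is an invented helper, not part of the generated file:

package main

import (
	"fmt"
	"strconv"
)

// parseRoleID shows the consolidated style: a single var block, every
// variable used, so no `_ = err` is needed to satisfy the compiler's
// unused-variable check.
func parseRoleID(pathParams map[string]string) (uint32, error) {
	var (
		id  uint64
		err error
	)
	val, ok := pathParams["role_id"]
	if !ok {
		return 0, fmt.Errorf("missing parameter %s", "role_id")
	}
	id, err = strconv.ParseUint(val, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("type mismatch, parameter: %s, error: %w", "role_id", err)
	}
	return uint32(id), nil
}

func main() {
	fmt.Println(parseRoleID(map[string]string{"role_id": "42"})) // 42 <nil>
}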
@@ -301,15 +259,13 @@ func local_request_AccessControlService_SetDefaultRole_0(ctx context.Context, ma
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAccessControlServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AccessControlServiceServer) error {
- mux.Handle("POST", pattern_AccessControlService_CreateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_CreateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/CreateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/CreateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -321,19 +277,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_CreateRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AccessControlService_UpdateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AccessControlService_UpdateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/UpdateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/UpdateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -345,19 +297,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_UpdateRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AccessControlService_DeleteRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AccessControlService_DeleteRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/DeleteRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/DeleteRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -369,19 +317,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_DeleteRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AccessControlService_GetRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AccessControlService_GetRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/GetRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/GetRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -393,19 +337,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_GetRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AccessControlService_ListRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AccessControlService_ListRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/ListRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/ListRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -417,19 +357,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_ListRoles_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AccessControlService_AssignRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_AssignRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/AssignRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:assign"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/AssignRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:assign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -441,19 +377,15 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_AssignRoles_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AccessControlService_SetDefaultRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_SetDefaultRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/SetDefaultRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:setDefault"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/SetDefaultRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:setDefault"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -465,7 +397,6 @@ func RegisterAccessControlServiceHandlerServer(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_SetDefaultRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
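Note: throughout the registration functions the HTTP method is now passed as a net/http constant (http.MethodPost, http.MethodGet, ...) instead of a string literal, so a misspelled method becomes a compile error rather than a silently unmatched route. A small stand-alone sketch of the difference; the route and handler are made up for illustration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/v1/accesscontrol/roles", func(w http.ResponseWriter, r *http.Request) {
		// http.MethodPost is just the constant "POST", but a typo in the
		// identifier fails to compile, while a typo in a literal would not.
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		fmt.Fprintln(w, "role created")
	})
	// In a real server this mux would be passed to http.ListenAndServe.
	log := http.ListenAndServe
	_ = log
}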
@@ -493,7 +424,6 @@ func RegisterAccessControlServiceHandlerFromEndpoint(ctx context.Context, mux *r
}
}()
}()
-
return RegisterAccessControlServiceHandler(ctx, mux, conn)
}
@@ -509,13 +439,11 @@ func RegisterAccessControlServiceHandler(ctx context.Context, mux *runtime.Serve
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AccessControlServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AccessControlServiceClient) error {
- mux.Handle("POST", pattern_AccessControlService_CreateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_CreateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/CreateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/CreateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -526,17 +454,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_CreateRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AccessControlService_UpdateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AccessControlService_UpdateRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/UpdateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/UpdateRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -547,17 +471,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_UpdateRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AccessControlService_DeleteRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AccessControlService_DeleteRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/DeleteRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/DeleteRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -568,17 +488,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_DeleteRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AccessControlService_GetRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AccessControlService_GetRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/GetRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/GetRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles/{role_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -589,17 +505,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_GetRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AccessControlService_ListRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AccessControlService_ListRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/ListRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/ListRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -610,17 +522,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_ListRoles_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AccessControlService_AssignRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_AssignRoles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/AssignRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:assign"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/AssignRoles", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:assign"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -631,17 +539,13 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_AssignRoles_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AccessControlService_SetDefaultRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AccessControlService_SetDefaultRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/SetDefaultRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:setDefault"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/accesscontrol.v1beta1.AccessControlService/SetDefaultRole", runtime.WithHTTPPathPattern("/v1/accesscontrol/roles:setDefault"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -652,41 +556,27 @@ func RegisterAccessControlServiceHandlerClient(ctx context.Context, mux *runtime
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AccessControlService_SetDefaultRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_AccessControlService_CreateRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, ""))
-
- pattern_AccessControlService_UpdateRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
-
- pattern_AccessControlService_DeleteRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
-
- pattern_AccessControlService_GetRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
-
- pattern_AccessControlService_ListRoles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, ""))
-
- pattern_AccessControlService_AssignRoles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, "assign"))
-
+ pattern_AccessControlService_CreateRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, ""))
+ pattern_AccessControlService_UpdateRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
+ pattern_AccessControlService_DeleteRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
+ pattern_AccessControlService_GetRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "accesscontrol", "roles", "role_id"}, ""))
+ pattern_AccessControlService_ListRoles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, ""))
+ pattern_AccessControlService_AssignRoles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, "assign"))
pattern_AccessControlService_SetDefaultRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "accesscontrol", "roles"}, "setDefault"))
)
var (
- forward_AccessControlService_CreateRole_0 = runtime.ForwardResponseMessage
-
- forward_AccessControlService_UpdateRole_0 = runtime.ForwardResponseMessage
-
- forward_AccessControlService_DeleteRole_0 = runtime.ForwardResponseMessage
-
- forward_AccessControlService_GetRole_0 = runtime.ForwardResponseMessage
-
- forward_AccessControlService_ListRoles_0 = runtime.ForwardResponseMessage
-
- forward_AccessControlService_AssignRoles_0 = runtime.ForwardResponseMessage
-
+ forward_AccessControlService_CreateRole_0 = runtime.ForwardResponseMessage
+ forward_AccessControlService_UpdateRole_0 = runtime.ForwardResponseMessage
+ forward_AccessControlService_DeleteRole_0 = runtime.ForwardResponseMessage
+ forward_AccessControlService_GetRole_0 = runtime.ForwardResponseMessage
+ forward_AccessControlService_ListRoles_0 = runtime.ForwardResponseMessage
+ forward_AccessControlService_AssignRoles_0 = runtime.ForwardResponseMessage
forward_AccessControlService_SetDefaultRole_0 = runtime.ForwardResponseMessage
)
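Note: for context, a minimal sketch of how a caller typically wires these regenerated handlers into an HTTP server via RegisterAccessControlServiceHandlerFromEndpoint. The gRPC endpoint, the listen address, and the accesscontrolv1beta1 import path are assumptions made for this example, not values taken from the patch:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated package shown in this diff.
	accesscontrolv1beta1 "github.com/percona/pmm/api/accesscontrol/v1beta1"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	gwmux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// "localhost:50051" and ":8080" are placeholders for this sketch.
	if err := accesscontrolv1beta1.RegisterAccessControlServiceHandlerFromEndpoint(ctx, gwmux, "localhost:50051", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", gwmux))
}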
diff --git a/api/accesscontrol/v1beta1/accesscontrol.pb.validate.go b/api/accesscontrol/v1beta1/accesscontrol.pb.validate.go
index 583b94b1da..a055badfff 100644
--- a/api/accesscontrol/v1beta1/accesscontrol.pb.validate.go
+++ b/api/accesscontrol/v1beta1/accesscontrol.pb.validate.go
@@ -86,7 +86,7 @@ type CreateRoleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateRoleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -190,7 +190,7 @@ type CreateRoleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateRoleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -324,7 +324,7 @@ type UpdateRoleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateRoleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -426,7 +426,7 @@ type UpdateRoleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateRoleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -541,7 +541,7 @@ type DeleteRoleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteRoleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -643,7 +643,7 @@ type DeleteRoleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteRoleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -756,7 +756,7 @@ type GetRoleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetRoleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -864,7 +864,7 @@ type GetRoleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetRoleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -975,7 +975,7 @@ type SetDefaultRoleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetDefaultRoleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1077,7 +1077,7 @@ type SetDefaultRoleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetDefaultRoleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1190,7 +1190,7 @@ type AssignRolesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AssignRolesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1292,7 +1292,7 @@ type AssignRolesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AssignRolesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1394,7 +1394,7 @@ type ListRolesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListRolesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1528,7 +1528,7 @@ type ListRolesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListRolesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1638,7 +1638,7 @@ type ListRolesResponse_RoleDataMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListRolesResponse_RoleDataMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
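Note: in accesscontrol.pb.validate.go every MultiError.Error() now preallocates its message slice with make([]string, 0, len(m)); the final length is known up front, so append only fills the slice and never has to grow and copy the backing array. The same idea in isolation; multiError below is a local stand-in for the generated types, and the "; " separator is only assumed to match them:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// multiError is a local stand-in for the generated *MultiError types.
type multiError []error

// Error concatenates the wrapped messages. The slice is preallocated to
// the known final length, so append never reallocates.
func (m multiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	m := multiError{errors.New("title is required"), errors.New("role_id must be positive")}
	fmt.Println(m.Error()) // title is required; role_id must be positive
}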
diff --git a/api/actions/v1/actions.pb.go b/api/actions/v1/actions.pb.go
index 74abcba4fb..1b88f29167 100644
--- a/api/actions/v1/actions.pb.go
+++ b/api/actions/v1/actions.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: actions/v1/actions.proto
@@ -9,6 +9,7 @@ package actionsv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -102,12 +103,11 @@ func (ActionType) EnumDescriptor() ([]byte, []int) {
}
type GetActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
- ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetActionRequest) Reset() {
@@ -148,10 +148,7 @@ func (x *GetActionRequest) GetActionId() string {
}
type GetActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where this Action is running / was run.
@@ -161,7 +158,9 @@ type GetActionResponse struct {
// True if Action is finished.
Done bool `protobuf:"varint,4,opt,name=done,proto3" json:"done,omitempty"`
// Error message if Action failed.
- Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetActionResponse) Reset() {
@@ -230,10 +229,7 @@ func (x *GetActionResponse) GetError() string {
}
type StartMySQLExplainActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -243,7 +239,9 @@ type StartMySQLExplainActionParams struct {
// Array of placeholder values
Placeholders []string `protobuf:"bytes,5,rep,name=placeholders,proto3" json:"placeholders,omitempty"`
// Database name. Required if it can't be deduced from the query ID.
- Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainActionParams) Reset() {
@@ -312,14 +310,13 @@ func (x *StartMySQLExplainActionParams) GetDatabase() string {
}
type StartMySQLExplainActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainActionResult) Reset() {
@@ -367,10 +364,7 @@ func (x *StartMySQLExplainActionResult) GetPmmAgentId() string {
}
type StartMySQLExplainJSONActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -380,7 +374,9 @@ type StartMySQLExplainJSONActionParams struct {
// Array of placeholder values
Placeholders []string `protobuf:"bytes,5,rep,name=placeholders,proto3" json:"placeholders,omitempty"`
// Database name. Required if it can't be deduced from the query ID.
- Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainJSONActionParams) Reset() {
@@ -449,14 +445,13 @@ func (x *StartMySQLExplainJSONActionParams) GetDatabase() string {
}
type StartMySQLExplainJSONActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainJSONActionResult) Reset() {
@@ -504,10 +499,7 @@ func (x *StartMySQLExplainJSONActionResult) GetPmmAgentId() string {
}
type StartMySQLExplainTraditionalJSONActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -517,7 +509,9 @@ type StartMySQLExplainTraditionalJSONActionParams struct {
// Array of placeholder values
Placeholders []string `protobuf:"bytes,5,rep,name=placeholders,proto3" json:"placeholders,omitempty"`
// Database name. Required if it can't be deduced from the query ID.
- Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainTraditionalJSONActionParams) Reset() {
@@ -586,14 +580,13 @@ func (x *StartMySQLExplainTraditionalJSONActionParams) GetDatabase() string {
}
type StartMySQLExplainTraditionalJSONActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLExplainTraditionalJSONActionResult) Reset() {
@@ -641,10 +634,7 @@ func (x *StartMySQLExplainTraditionalJSONActionResult) GetPmmAgentId() string {
}
type StartMySQLShowCreateTableActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -652,7 +642,9 @@ type StartMySQLShowCreateTableActionParams struct {
// Table name. Required. May additionally contain a database name.
TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
// Database name. Required if not given in the table_name field.
- Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowCreateTableActionParams) Reset() {
@@ -714,14 +706,13 @@ func (x *StartMySQLShowCreateTableActionParams) GetDatabase() string {
}
type StartMySQLShowCreateTableActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowCreateTableActionResult) Reset() {
@@ -769,10 +760,7 @@ func (x *StartMySQLShowCreateTableActionResult) GetPmmAgentId() string {
}
type StartMySQLShowTableStatusActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -780,7 +768,9 @@ type StartMySQLShowTableStatusActionParams struct {
// Table name. Required. May additionally contain a database name.
TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
// Database name. Required if not given in the table_name field.
- Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowTableStatusActionParams) Reset() {
@@ -842,14 +832,13 @@ func (x *StartMySQLShowTableStatusActionParams) GetDatabase() string {
}
type StartMySQLShowTableStatusActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowTableStatusActionResult) Reset() {
@@ -897,10 +886,7 @@ func (x *StartMySQLShowTableStatusActionResult) GetPmmAgentId() string {
}
type StartMySQLShowIndexActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -908,7 +894,9 @@ type StartMySQLShowIndexActionParams struct {
// Table name. Required. May additionally contain a database name.
TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
// Database name. Required if not given in the table_name field.
- Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowIndexActionParams) Reset() {
@@ -970,14 +958,13 @@ func (x *StartMySQLShowIndexActionParams) GetDatabase() string {
}
type StartMySQLShowIndexActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMySQLShowIndexActionResult) Reset() {
@@ -1025,10 +1012,7 @@ func (x *StartMySQLShowIndexActionResult) GetPmmAgentId() string {
}
type StartPostgreSQLShowCreateTableActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -1036,7 +1020,9 @@ type StartPostgreSQLShowCreateTableActionParams struct {
// Table name. Required. May additionally contain a database name.
TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
// Database name. Required if not given in the table_name field.
- Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPostgreSQLShowCreateTableActionParams) Reset() {
@@ -1098,14 +1084,13 @@ func (x *StartPostgreSQLShowCreateTableActionParams) GetDatabase() string {
}
type StartPostgreSQLShowCreateTableActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPostgreSQLShowCreateTableActionResult) Reset() {
@@ -1153,10 +1138,7 @@ func (x *StartPostgreSQLShowCreateTableActionResult) GetPmmAgentId() string {
}
type StartPostgreSQLShowIndexActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
@@ -1164,7 +1146,9 @@ type StartPostgreSQLShowIndexActionParams struct {
// Table name. Required. May additionally contain a database name.
TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
// Database name. Required if not given in the table_name field.
- Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPostgreSQLShowIndexActionParams) Reset() {
@@ -1226,14 +1210,13 @@ func (x *StartPostgreSQLShowIndexActionParams) GetDatabase() string {
}
type StartPostgreSQLShowIndexActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPostgreSQLShowIndexActionResult) Reset() {
@@ -1281,16 +1264,15 @@ func (x *StartPostgreSQLShowIndexActionResult) GetPmmAgentId() string {
}
type StartMongoDBExplainActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action. Required.
ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Query. Required.
- Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"`
+ Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMongoDBExplainActionParams) Reset() {
@@ -1345,14 +1327,13 @@ func (x *StartMongoDBExplainActionParams) GetQuery() string {
}
type StartMongoDBExplainActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartMongoDBExplainActionResult) Reset() {
@@ -1401,14 +1382,13 @@ func (x *StartMongoDBExplainActionResult) GetPmmAgentId() string {
// Message to prepare pt-pg-summary data
type StartPTPgSummaryActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action.
- ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTPgSummaryActionParams) Reset() {
@@ -1457,14 +1437,13 @@ func (x *StartPTPgSummaryActionParams) GetServiceId() string {
// Message to retrieve the prepared pt-pg-summary data
type StartPTPgSummaryActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where to this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTPgSummaryActionResult) Reset() {
@@ -1513,14 +1492,13 @@ func (x *StartPTPgSummaryActionResult) GetPmmAgentId() string {
// Message to prepare pt-mongodb-summary data
type StartPTMongoDBSummaryActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action.
- ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTMongoDBSummaryActionParams) Reset() {
@@ -1569,14 +1547,13 @@ func (x *StartPTMongoDBSummaryActionParams) GetServiceId() string {
// Message to retrieve the prepared pt-mongodb-summary data
type StartPTMongoDBSummaryActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTMongoDBSummaryActionResult) Reset() {
@@ -1625,14 +1602,13 @@ func (x *StartPTMongoDBSummaryActionResult) GetPmmAgentId() string {
// Message to prepare pt-mysql-summary data
type StartPTMySQLSummaryActionParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service ID for this Action.
- ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTMySQLSummaryActionParams) Reset() {
@@ -1681,14 +1657,13 @@ func (x *StartPTMySQLSummaryActionParams) GetServiceId() string {
// Message to retrieve the prepared pt-mysql-summary data
type StartPTMySQLSummaryActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTMySQLSummaryActionResult) Reset() {
@@ -1736,14 +1711,13 @@ func (x *StartPTMySQLSummaryActionResult) GetPmmAgentId() string {
}
type StartPTSummaryActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// pmm-agent ID where to run this Action.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Node ID for this Action.
- NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTSummaryActionRequest) Reset() {
@@ -1791,14 +1765,13 @@ func (x *StartPTSummaryActionRequest) GetNodeId() string {
}
type StartPTSummaryActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// pmm-agent ID where this Action was started.
- PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ PmmAgentId string `protobuf:"bytes,2,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartPTSummaryActionResponse) Reset() {
@@ -1846,12 +1819,11 @@ func (x *StartPTSummaryActionResponse) GetPmmAgentId() string {
}
type CancelActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Action ID. Required.
- ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CancelActionRequest) Reset() {
@@ -1892,9 +1864,9 @@ func (x *CancelActionRequest) GetActionId() string {
}
type CancelActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CancelActionResponse) Reset() {
@@ -1928,11 +1900,8 @@ func (*CancelActionResponse) Descriptor() ([]byte, []int) {
}
type StartServiceActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Action:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Action:
//
// *StartServiceActionRequest_MysqlExplain
// *StartServiceActionRequest_MysqlExplainJson
@@ -1946,7 +1915,9 @@ type StartServiceActionRequest struct {
// *StartServiceActionRequest_PtMongodbSummary
// *StartServiceActionRequest_PtMysqlSummary
// *StartServiceActionRequest_PtPostgresSummary
- Action isStartServiceActionRequest_Action `protobuf_oneof:"action"`
+ Action isStartServiceActionRequest_Action `protobuf_oneof:"action"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartServiceActionRequest) Reset() {
@@ -1979,93 +1950,117 @@ func (*StartServiceActionRequest) Descriptor() ([]byte, []int) {
return file_actions_v1_actions_proto_rawDescGZIP(), []int{30}
}
-func (m *StartServiceActionRequest) GetAction() isStartServiceActionRequest_Action {
- if m != nil {
- return m.Action
+func (x *StartServiceActionRequest) GetAction() isStartServiceActionRequest_Action {
+ if x != nil {
+ return x.Action
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlExplain() *StartMySQLExplainActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlExplain); ok {
- return x.MysqlExplain
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlExplain); ok {
+ return x.MysqlExplain
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlExplainJson() *StartMySQLExplainJSONActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlExplainJson); ok {
- return x.MysqlExplainJson
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlExplainJson); ok {
+ return x.MysqlExplainJson
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlExplainTraditionalJson() *StartMySQLExplainTraditionalJSONActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlExplainTraditionalJson); ok {
- return x.MysqlExplainTraditionalJson
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlExplainTraditionalJson); ok {
+ return x.MysqlExplainTraditionalJson
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlShowIndex() *StartMySQLShowIndexActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlShowIndex); ok {
- return x.MysqlShowIndex
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlShowIndex); ok {
+ return x.MysqlShowIndex
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlShowCreateTable() *StartMySQLShowCreateTableActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlShowCreateTable); ok {
- return x.MysqlShowCreateTable
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlShowCreateTable); ok {
+ return x.MysqlShowCreateTable
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMysqlShowTableStatus() *StartMySQLShowTableStatusActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MysqlShowTableStatus); ok {
- return x.MysqlShowTableStatus
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MysqlShowTableStatus); ok {
+ return x.MysqlShowTableStatus
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetPostgresShowCreateTable() *StartPostgreSQLShowCreateTableActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_PostgresShowCreateTable); ok {
- return x.PostgresShowCreateTable
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_PostgresShowCreateTable); ok {
+ return x.PostgresShowCreateTable
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetPostgresShowIndex() *StartPostgreSQLShowIndexActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_PostgresShowIndex); ok {
- return x.PostgresShowIndex
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_PostgresShowIndex); ok {
+ return x.PostgresShowIndex
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetMongodbExplain() *StartMongoDBExplainActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_MongodbExplain); ok {
- return x.MongodbExplain
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_MongodbExplain); ok {
+ return x.MongodbExplain
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetPtMongodbSummary() *StartPTMongoDBSummaryActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_PtMongodbSummary); ok {
- return x.PtMongodbSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_PtMongodbSummary); ok {
+ return x.PtMongodbSummary
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetPtMysqlSummary() *StartPTMySQLSummaryActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_PtMysqlSummary); ok {
- return x.PtMysqlSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_PtMysqlSummary); ok {
+ return x.PtMysqlSummary
+ }
}
return nil
}
func (x *StartServiceActionRequest) GetPtPostgresSummary() *StartPTPgSummaryActionParams {
- if x, ok := x.GetAction().(*StartServiceActionRequest_PtPostgresSummary); ok {
- return x.PtPostgresSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionRequest_PtPostgresSummary); ok {
+ return x.PtPostgresSummary
+ }
}
return nil
}
@@ -2147,11 +2142,8 @@ func (*StartServiceActionRequest_PtMysqlSummary) isStartServiceActionRequest_Act
func (*StartServiceActionRequest_PtPostgresSummary) isStartServiceActionRequest_Action() {}
type StartServiceActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Action:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Action:
//
// *StartServiceActionResponse_MysqlExplain
// *StartServiceActionResponse_MysqlExplainJson
@@ -2165,7 +2157,9 @@ type StartServiceActionResponse struct {
// *StartServiceActionResponse_PtMongodbSummary
// *StartServiceActionResponse_PtMysqlSummary
// *StartServiceActionResponse_PtPostgresSummary
- Action isStartServiceActionResponse_Action `protobuf_oneof:"action"`
+ Action isStartServiceActionResponse_Action `protobuf_oneof:"action"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartServiceActionResponse) Reset() {
@@ -2198,93 +2192,117 @@ func (*StartServiceActionResponse) Descriptor() ([]byte, []int) {
return file_actions_v1_actions_proto_rawDescGZIP(), []int{31}
}
-func (m *StartServiceActionResponse) GetAction() isStartServiceActionResponse_Action {
- if m != nil {
- return m.Action
+func (x *StartServiceActionResponse) GetAction() isStartServiceActionResponse_Action {
+ if x != nil {
+ return x.Action
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlExplain() *StartMySQLExplainActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlExplain); ok {
- return x.MysqlExplain
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlExplain); ok {
+ return x.MysqlExplain
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlExplainJson() *StartMySQLExplainJSONActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlExplainJson); ok {
- return x.MysqlExplainJson
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlExplainJson); ok {
+ return x.MysqlExplainJson
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlExplainTraditionalJson() *StartMySQLExplainTraditionalJSONActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlExplainTraditionalJson); ok {
- return x.MysqlExplainTraditionalJson
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlExplainTraditionalJson); ok {
+ return x.MysqlExplainTraditionalJson
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlShowIndex() *StartMySQLShowIndexActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlShowIndex); ok {
- return x.MysqlShowIndex
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlShowIndex); ok {
+ return x.MysqlShowIndex
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlShowCreateTable() *StartMySQLShowCreateTableActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlShowCreateTable); ok {
- return x.MysqlShowCreateTable
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlShowCreateTable); ok {
+ return x.MysqlShowCreateTable
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMysqlShowTableStatus() *StartMySQLShowTableStatusActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MysqlShowTableStatus); ok {
- return x.MysqlShowTableStatus
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MysqlShowTableStatus); ok {
+ return x.MysqlShowTableStatus
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetPostgresqlShowCreateTable() *StartPostgreSQLShowCreateTableActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_PostgresqlShowCreateTable); ok {
- return x.PostgresqlShowCreateTable
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_PostgresqlShowCreateTable); ok {
+ return x.PostgresqlShowCreateTable
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetPostgresqlShowIndex() *StartPostgreSQLShowIndexActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_PostgresqlShowIndex); ok {
- return x.PostgresqlShowIndex
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_PostgresqlShowIndex); ok {
+ return x.PostgresqlShowIndex
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetMongodbExplain() *StartMongoDBExplainActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_MongodbExplain); ok {
- return x.MongodbExplain
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_MongodbExplain); ok {
+ return x.MongodbExplain
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetPtMongodbSummary() *StartPTMongoDBSummaryActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_PtMongodbSummary); ok {
- return x.PtMongodbSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_PtMongodbSummary); ok {
+ return x.PtMongodbSummary
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetPtMysqlSummary() *StartPTMySQLSummaryActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_PtMysqlSummary); ok {
- return x.PtMysqlSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_PtMysqlSummary); ok {
+ return x.PtMysqlSummary
+ }
}
return nil
}
func (x *StartServiceActionResponse) GetPtPostgresSummary() *StartPTPgSummaryActionResult {
- if x, ok := x.GetAction().(*StartServiceActionResponse_PtPostgresSummary); ok {
- return x.PtPostgresSummary
+ if x != nil {
+ if x, ok := x.Action.(*StartServiceActionResponse_PtPostgresSummary); ok {
+ return x.PtPostgresSummary
+ }
}
return nil
}
@@ -2368,7 +2386,7 @@ func (*StartServiceActionResponse_PtPostgresSummary) isStartServiceActionRespons
var File_actions_v1_actions_proto protoreflect.FileDescriptor
-var file_actions_v1_actions_proto_rawDesc = []byte{
+var file_actions_v1_actions_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x61, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
@@ -2847,16 +2865,16 @@ var file_actions_v1_actions_proto_rawDesc = []byte{
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0b, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_actions_v1_actions_proto_rawDescOnce sync.Once
- file_actions_v1_actions_proto_rawDescData = file_actions_v1_actions_proto_rawDesc
+ file_actions_v1_actions_proto_rawDescData []byte
)
func file_actions_v1_actions_proto_rawDescGZIP() []byte {
file_actions_v1_actions_proto_rawDescOnce.Do(func() {
- file_actions_v1_actions_proto_rawDescData = protoimpl.X.CompressGZIP(file_actions_v1_actions_proto_rawDescData)
+ file_actions_v1_actions_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_actions_v1_actions_proto_rawDesc), len(file_actions_v1_actions_proto_rawDesc)))
})
return file_actions_v1_actions_proto_rawDescData
}
@@ -2978,7 +2996,7 @@ func file_actions_v1_actions_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_actions_v1_actions_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_actions_v1_actions_proto_rawDesc), len(file_actions_v1_actions_proto_rawDesc)),
NumEnums: 1,
NumMessages: 32,
NumExtensions: 0,
@@ -2990,7 +3008,6 @@ func file_actions_v1_actions_proto_init() {
MessageInfos: file_actions_v1_actions_proto_msgTypes,
}.Build()
File_actions_v1_actions_proto = out.File
- file_actions_v1_actions_proto_rawDesc = nil
file_actions_v1_actions_proto_goTypes = nil
file_actions_v1_actions_proto_depIdxs = nil
}
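Note: the regenerated file above keeps the raw descriptor as a string and converts it back to a byte slice without copying via unsafe.Slice and unsafe.StringData (Go 1.20+). A minimal, standalone sketch of that zero-copy pattern; the bytesOf helper and the sample string are illustrative only, not part of the generated file:

package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a zero-copy []byte view of s, the same pattern the
// regenerated code uses to feed the string-typed rawDesc into CompressGZIP
// and the TypeBuilder. The result must be treated as read-only, since it
// aliases the string's backing memory.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	raw := "\x0a\x18actions/v1/actions.proto" // illustrative prefix only
	b := bytesOf(raw)
	fmt.Println(len(b), len(raw)) // same length; no allocation or copy
}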
diff --git a/api/actions/v1/actions.pb.gw.go b/api/actions/v1/actions.pb.gw.go
index c84fd58bf9..254b6ff65c 100644
--- a/api/actions/v1/actions.pb.gw.go
+++ b/api/actions/v1/actions.pb.gw.go
@@ -10,6 +10,7 @@ package actionsv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,129 +29,116 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_ActionsService_GetAction_0(ctx context.Context, marshaler runtime.Marshaler, client ActionsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetActionRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetActionRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["action_id"]
+ val, ok := pathParams["action_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "action_id")
}
-
protoReq.ActionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "action_id", err)
}
-
msg, err := client.GetAction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ActionsService_GetAction_0(ctx context.Context, marshaler runtime.Marshaler, server ActionsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetActionRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetActionRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["action_id"]
+ val, ok := pathParams["action_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "action_id")
}
-
protoReq.ActionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "action_id", err)
}
-
msg, err := server.GetAction(ctx, &protoReq)
return msg, metadata, err
}
func request_ActionsService_StartServiceAction_0(ctx context.Context, marshaler runtime.Marshaler, client ActionsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartServiceActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartServiceActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartServiceAction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ActionsService_StartServiceAction_0(ctx context.Context, marshaler runtime.Marshaler, server ActionsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartServiceActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartServiceActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartServiceAction(ctx, &protoReq)
return msg, metadata, err
}
func request_ActionsService_StartPTSummaryAction_0(ctx context.Context, marshaler runtime.Marshaler, client ActionsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartPTSummaryActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartPTSummaryActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartPTSummaryAction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ActionsService_StartPTSummaryAction_0(ctx context.Context, marshaler runtime.Marshaler, server ActionsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartPTSummaryActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartPTSummaryActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartPTSummaryAction(ctx, &protoReq)
return msg, metadata, err
}
func request_ActionsService_CancelAction_0(ctx context.Context, marshaler runtime.Marshaler, client ActionsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CancelActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CancelActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.CancelAction(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ActionsService_CancelAction_0(ctx context.Context, marshaler runtime.Marshaler, server ActionsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CancelActionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CancelActionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.CancelAction(ctx, &protoReq)
return msg, metadata, err
}
@@ -161,15 +149,13 @@ func local_request_ActionsService_CancelAction_0(ctx context.Context, marshaler
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterActionsServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterActionsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ActionsServiceServer) error {
- mux.Handle("GET", pattern_ActionsService_GetAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ActionsService_GetAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/GetAction", runtime.WithHTTPPathPattern("/v1/actions/{action_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/GetAction", runtime.WithHTTPPathPattern("/v1/actions/{action_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -181,19 +167,15 @@ func RegisterActionsServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_GetAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_StartServiceAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_StartServiceAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/StartServiceAction", runtime.WithHTTPPathPattern("/v1/actions:startServiceAction"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/StartServiceAction", runtime.WithHTTPPathPattern("/v1/actions:startServiceAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -205,19 +187,15 @@ func RegisterActionsServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_StartServiceAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_StartPTSummaryAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_StartPTSummaryAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/StartPTSummaryAction", runtime.WithHTTPPathPattern("/v1/actions:startNodeAction"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/StartPTSummaryAction", runtime.WithHTTPPathPattern("/v1/actions:startNodeAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -229,19 +207,15 @@ func RegisterActionsServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_StartPTSummaryAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_CancelAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_CancelAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/CancelAction", runtime.WithHTTPPathPattern("/v1/actions:cancelAction"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/actions.v1.ActionsService/CancelAction", runtime.WithHTTPPathPattern("/v1/actions:cancelAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -253,7 +227,6 @@ func RegisterActionsServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_CancelAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -281,7 +254,6 @@ func RegisterActionsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime
}
}()
}()
-
return RegisterActionsServiceHandler(ctx, mux, conn)
}
@@ -297,13 +269,11 @@ func RegisterActionsServiceHandler(ctx context.Context, mux *runtime.ServeMux, c
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ActionsServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterActionsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ActionsServiceClient) error {
- mux.Handle("GET", pattern_ActionsService_GetAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ActionsService_GetAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/GetAction", runtime.WithHTTPPathPattern("/v1/actions/{action_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/GetAction", runtime.WithHTTPPathPattern("/v1/actions/{action_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -314,17 +284,13 @@ func RegisterActionsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_GetAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_StartServiceAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_StartServiceAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/StartServiceAction", runtime.WithHTTPPathPattern("/v1/actions:startServiceAction"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/StartServiceAction", runtime.WithHTTPPathPattern("/v1/actions:startServiceAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -335,17 +301,13 @@ func RegisterActionsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_StartServiceAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_StartPTSummaryAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_StartPTSummaryAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/StartPTSummaryAction", runtime.WithHTTPPathPattern("/v1/actions:startNodeAction"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/StartPTSummaryAction", runtime.WithHTTPPathPattern("/v1/actions:startNodeAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -356,17 +318,13 @@ func RegisterActionsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_StartPTSummaryAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ActionsService_CancelAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ActionsService_CancelAction_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/CancelAction", runtime.WithHTTPPathPattern("/v1/actions:cancelAction"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/actions.v1.ActionsService/CancelAction", runtime.WithHTTPPathPattern("/v1/actions:cancelAction"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -377,29 +335,21 @@ func RegisterActionsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ActionsService_CancelAction_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_ActionsService_GetAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "actions", "action_id"}, ""))
-
- pattern_ActionsService_StartServiceAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "actions"}, "startServiceAction"))
-
+ pattern_ActionsService_GetAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "actions", "action_id"}, ""))
+ pattern_ActionsService_StartServiceAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "actions"}, "startServiceAction"))
pattern_ActionsService_StartPTSummaryAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "actions"}, "startNodeAction"))
-
- pattern_ActionsService_CancelAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "actions"}, "cancelAction"))
+ pattern_ActionsService_CancelAction_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "actions"}, "cancelAction"))
)
var (
- forward_ActionsService_GetAction_0 = runtime.ForwardResponseMessage
-
- forward_ActionsService_StartServiceAction_0 = runtime.ForwardResponseMessage
-
+ forward_ActionsService_GetAction_0 = runtime.ForwardResponseMessage
+ forward_ActionsService_StartServiceAction_0 = runtime.ForwardResponseMessage
forward_ActionsService_StartPTSummaryAction_0 = runtime.ForwardResponseMessage
-
- forward_ActionsService_CancelAction_0 = runtime.ForwardResponseMessage
+ forward_ActionsService_CancelAction_0 = runtime.ForwardResponseMessage
)
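The decode paths above now test for end-of-input with errors.Is instead of a direct comparison. A small, self-contained sketch of why that matters once an error is wrapped; the wrapping message below is hypothetical:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A wrapped io.EOF no longer compares equal with ==, but errors.Is
	// still recognizes it, which is the behavior the regenerated
	// request_*/local_request_* decoders rely on.
	wrapped := fmt.Errorf("decode request body: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false
	fmt.Println(errors.Is(wrapped, io.EOF)) // true
}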
diff --git a/api/actions/v1/actions.pb.validate.go b/api/actions/v1/actions.pb.validate.go
index dfd6679b0f..3c8feb7b77 100644
--- a/api/actions/v1/actions.pb.validate.go
+++ b/api/actions/v1/actions.pb.validate.go
@@ -82,7 +82,7 @@ type GetActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -192,7 +192,7 @@ type GetActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -311,7 +311,7 @@ type StartMySQLExplainActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -418,7 +418,7 @@ type StartMySQLExplainActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -540,7 +540,7 @@ type StartMySQLExplainJSONActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainJSONActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -649,7 +649,7 @@ type StartMySQLExplainJSONActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainJSONActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -772,7 +772,7 @@ type StartMySQLExplainTraditionalJSONActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainTraditionalJSONActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -883,7 +883,7 @@ type StartMySQLExplainTraditionalJSONActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLExplainTraditionalJSONActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1015,7 +1015,7 @@ type StartMySQLShowCreateTableActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowCreateTableActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1124,7 +1124,7 @@ type StartMySQLShowCreateTableActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowCreateTableActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1255,7 +1255,7 @@ type StartMySQLShowTableStatusActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowTableStatusActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1364,7 +1364,7 @@ type StartMySQLShowTableStatusActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowTableStatusActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1493,7 +1493,7 @@ type StartMySQLShowIndexActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowIndexActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1600,7 +1600,7 @@ type StartMySQLShowIndexActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMySQLShowIndexActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1732,7 +1732,7 @@ type StartPostgreSQLShowCreateTableActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPostgreSQLShowCreateTableActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1842,7 +1842,7 @@ type StartPostgreSQLShowCreateTableActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPostgreSQLShowCreateTableActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1973,7 +1973,7 @@ type StartPostgreSQLShowIndexActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPostgreSQLShowIndexActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2082,7 +2082,7 @@ type StartPostgreSQLShowIndexActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPostgreSQLShowIndexActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2209,7 +2209,7 @@ type StartMongoDBExplainActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMongoDBExplainActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2316,7 +2316,7 @@ type StartMongoDBExplainActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartMongoDBExplainActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2423,7 +2423,7 @@ type StartPTPgSummaryActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTPgSummaryActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2530,7 +2530,7 @@ type StartPTPgSummaryActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTPgSummaryActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2639,7 +2639,7 @@ type StartPTMongoDBSummaryActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTMongoDBSummaryActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2748,7 +2748,7 @@ type StartPTMongoDBSummaryActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTMongoDBSummaryActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2855,7 +2855,7 @@ type StartPTMySQLSummaryActionParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTMySQLSummaryActionParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2962,7 +2962,7 @@ type StartPTMySQLSummaryActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTMySQLSummaryActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3069,7 +3069,7 @@ type StartPTSummaryActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTSummaryActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3176,7 +3176,7 @@ type StartPTSummaryActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartPTSummaryActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3290,7 +3290,7 @@ type CancelActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CancelActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3392,7 +3392,7 @@ type CancelActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CancelActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3991,7 +3991,7 @@ type StartServiceActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartServiceActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4590,7 +4590,7 @@ type StartServiceActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartServiceActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
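The MultiError.Error implementations above now preallocate the message slice to the known final length. A minimal sketch of the same idiom outside the generated code; the joinMessages helper name is hypothetical:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// joinMessages mirrors the regenerated MultiError.Error bodies: sizing the
// slice up front with make([]string, 0, len(errs)) avoids the repeated
// reallocation that appending to a nil slice would incur.
func joinMessages(errs []error) string {
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	errs := []error{errors.New("first check failed"), errors.New("second check failed")}
	fmt.Println(joinMessages(errs))
}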
diff --git a/api/advisors/v1/advisors.pb.go b/api/advisors/v1/advisors.pb.go
index 51703e2cbe..d2c0987bfc 100644
--- a/api/advisors/v1/advisors.pb.go
+++ b/api/advisors/v1/advisors.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: advisors/v1/advisors.proto
@@ -9,6 +9,7 @@ package advisorsv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -133,18 +134,17 @@ func (AdvisorCheckFamily) EnumDescriptor() ([]byte, []int) {
// AdvisorCheckResult represents the check result returned from pmm-managed after running the check.
type AdvisorCheckResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- Severity v1.Severity `protobuf:"varint,3,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
- Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Severity v1.Severity `protobuf:"varint,3,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// URL containing information on how to resolve an issue detected by an Advisor check.
ReadMoreUrl string `protobuf:"bytes,5,opt,name=read_more_url,json=readMoreUrl,proto3" json:"read_more_url,omitempty"`
// Name of the monitored service on which the check ran.
- ServiceName string `protobuf:"bytes,6,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ ServiceName string `protobuf:"bytes,6,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AdvisorCheckResult) Reset() {
@@ -221,12 +221,9 @@ func (x *AdvisorCheckResult) GetServiceName() string {
// CheckResultSummary is a summary of check results.
type CheckResultSummary struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
- ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Number of failed checks for this service with severity level "EMERGENCY".
EmergencyCount uint32 `protobuf:"varint,3,opt,name=emergency_count,json=emergencyCount,proto3" json:"emergency_count,omitempty"`
// Number of failed checks for this service with severity level "ALERT".
@@ -242,7 +239,9 @@ type CheckResultSummary struct {
// Number of failed checks for this service with severity level "INFO".
InfoCount uint32 `protobuf:"varint,9,opt,name=info_count,json=infoCount,proto3" json:"info_count,omitempty"`
// Number of failed checks for this service with severity level "DEBUG".
- DebugCount uint32 `protobuf:"varint,10,opt,name=debug_count,json=debugCount,proto3" json:"debug_count,omitempty"`
+ DebugCount uint32 `protobuf:"varint,10,opt,name=debug_count,json=debugCount,proto3" json:"debug_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckResultSummary) Reset() {
@@ -347,14 +346,11 @@ func (x *CheckResultSummary) GetDebugCount() uint32 {
// CheckResult represents the check results for a given service.
type CheckResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- Severity v1.Severity `protobuf:"varint,3,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
- Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Severity v1.Severity `protobuf:"varint,3,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// URL containing information on how to resolve an issue detected by an Advisor check.
ReadMoreUrl string `protobuf:"bytes,5,opt,name=read_more_url,json=readMoreUrl,proto3" json:"read_more_url,omitempty"`
// Name of the monitored service on which the check ran.
@@ -364,7 +360,9 @@ type CheckResult struct {
// Name of the check that failed
CheckName string `protobuf:"bytes,8,opt,name=check_name,json=checkName,proto3" json:"check_name,omitempty"`
// Silence status of the check result
- Silenced bool `protobuf:"varint,10,opt,name=silenced,proto3" json:"silenced,omitempty"`
+ Silenced bool `protobuf:"varint,10,opt,name=silenced,proto3" json:"silenced,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckResult) Reset() {
@@ -462,10 +460,7 @@ func (x *CheckResult) GetSilenced() bool {
// AdvisorCheck contains check name and status.
type AdvisorCheck struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID) that is used in expression.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// True if that check is enabled.
@@ -477,7 +472,9 @@ type AdvisorCheck struct {
// Check execution interval.
Interval AdvisorCheckInterval `protobuf:"varint,5,opt,name=interval,proto3,enum=advisors.v1.AdvisorCheckInterval" json:"interval,omitempty"`
// DB family.
- Family AdvisorCheckFamily `protobuf:"varint,6,opt,name=family,proto3,enum=advisors.v1.AdvisorCheckFamily" json:"family,omitempty"`
+ Family AdvisorCheckFamily `protobuf:"varint,6,opt,name=family,proto3,enum=advisors.v1.AdvisorCheckFamily" json:"family,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AdvisorCheck) Reset() {
@@ -553,10 +550,7 @@ func (x *AdvisorCheck) GetFamily() AdvisorCheckFamily {
}
type Advisor struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID) that is used in expression.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Long human-readable description.
@@ -568,7 +562,9 @@ type Advisor struct {
// Category.
Category string `protobuf:"bytes,5,opt,name=category,proto3" json:"category,omitempty"`
// Advisor checks.
- Checks []*AdvisorCheck `protobuf:"bytes,6,rep,name=checks,proto3" json:"checks,omitempty"`
+ Checks []*AdvisorCheck `protobuf:"bytes,6,rep,name=checks,proto3" json:"checks,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Advisor) Reset() {
@@ -645,15 +641,14 @@ func (x *Advisor) GetChecks() []*AdvisorCheck {
// ChangeAdvisorCheckParams specifies a single check parameters.
type ChangeAdvisorCheckParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The name of the check to change.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Enable *bool `protobuf:"varint,2,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// check execution interval.
- Interval AdvisorCheckInterval `protobuf:"varint,4,opt,name=interval,proto3,enum=advisors.v1.AdvisorCheckInterval" json:"interval,omitempty"`
+ Interval AdvisorCheckInterval `protobuf:"varint,4,opt,name=interval,proto3,enum=advisors.v1.AdvisorCheckInterval" json:"interval,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAdvisorCheckParams) Reset() {
@@ -708,12 +703,11 @@ func (x *ChangeAdvisorCheckParams) GetInterval() AdvisorCheckInterval {
}
type StartAdvisorChecksRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Names of the checks that should be started.
- Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"`
+ Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartAdvisorChecksRequest) Reset() {
@@ -754,9 +748,9 @@ func (x *StartAdvisorChecksRequest) GetNames() []string {
}
type StartAdvisorChecksResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartAdvisorChecksResponse) Reset() {
@@ -790,9 +784,9 @@ func (*StartAdvisorChecksResponse) Descriptor() ([]byte, []int) {
}
type ListAdvisorChecksRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAdvisorChecksRequest) Reset() {
@@ -826,11 +820,10 @@ func (*ListAdvisorChecksRequest) Descriptor() ([]byte, []int) {
}
type ListAdvisorChecksResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Checks []*AdvisorCheck `protobuf:"bytes,1,rep,name=checks,proto3" json:"checks,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Checks []*AdvisorCheck `protobuf:"bytes,1,rep,name=checks,proto3" json:"checks,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListAdvisorChecksResponse) Reset() {
@@ -871,9 +864,9 @@ func (x *ListAdvisorChecksResponse) GetChecks() []*AdvisorCheck {
}
type ListAdvisorsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAdvisorsRequest) Reset() {
@@ -907,11 +900,10 @@ func (*ListAdvisorsRequest) Descriptor() ([]byte, []int) {
}
type ListAdvisorsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Advisors []*Advisor `protobuf:"bytes,1,rep,name=advisors,proto3" json:"advisors,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Advisors []*Advisor `protobuf:"bytes,1,rep,name=advisors,proto3" json:"advisors,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListAdvisorsResponse) Reset() {
@@ -952,11 +944,10 @@ func (x *ListAdvisorsResponse) GetAdvisors() []*Advisor {
}
type ChangeAdvisorChecksRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Params []*ChangeAdvisorCheckParams `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Params []*ChangeAdvisorCheckParams `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAdvisorChecksRequest) Reset() {
@@ -997,9 +988,9 @@ func (x *ChangeAdvisorChecksRequest) GetParams() []*ChangeAdvisorCheckParams {
}
type ChangeAdvisorChecksResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAdvisorChecksResponse) Reset() {
@@ -1033,9 +1024,9 @@ func (*ChangeAdvisorChecksResponse) Descriptor() ([]byte, []int) {
}
type ListFailedServicesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListFailedServicesRequest) Reset() {
@@ -1069,11 +1060,10 @@ func (*ListFailedServicesRequest) Descriptor() ([]byte, []int) {
}
type ListFailedServicesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Result []*CheckResultSummary `protobuf:"bytes,1,rep,name=result,proto3" json:"result,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Result []*CheckResultSummary `protobuf:"bytes,1,rep,name=result,proto3" json:"result,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListFailedServicesResponse) Reset() {
@@ -1114,16 +1104,15 @@ func (x *ListFailedServicesResponse) GetResult() []*CheckResultSummary {
}
type GetFailedChecksRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Maximum number of results per page.
PageSize *int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3,oneof" json:"page_size,omitempty"`
// Index of the requested page, starts from 0.
PageIndex *int32 `protobuf:"varint,2,opt,name=page_index,json=pageIndex,proto3,oneof" json:"page_index,omitempty"`
// Service ID.
- ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetFailedChecksRequest) Reset() {
@@ -1178,16 +1167,15 @@ func (x *GetFailedChecksRequest) GetServiceId() string {
}
type GetFailedChecksResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Total number of results.
TotalItems int32 `protobuf:"varint,1,opt,name=total_items,json=totalItems,proto3" json:"total_items,omitempty"`
// Total number of pages.
TotalPages int32 `protobuf:"varint,2,opt,name=total_pages,json=totalPages,proto3" json:"total_pages,omitempty"`
// Check results
- Results []*CheckResult `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ Results []*CheckResult `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetFailedChecksResponse) Reset() {
@@ -1243,7 +1231,7 @@ func (x *GetFailedChecksResponse) GetResults() []*CheckResult {
var File_advisors_v1_advisors_proto protoreflect.FileDescriptor
-var file_advisors_v1_advisors_proto_rawDesc = []byte{
+var file_advisors_v1_advisors_proto_rawDesc = string([]byte{
0x0a, 0x1a, 0x61, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x64,
0x76, 0x69, 0x73, 0x6f, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x61, 0x64,
0x76, 0x69, 0x73, 0x6f, 0x72, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
@@ -1534,16 +1522,16 @@ var file_advisors_v1_advisors_proto_rawDesc = []byte{
0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea,
0x02, 0x0c, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_advisors_v1_advisors_proto_rawDescOnce sync.Once
- file_advisors_v1_advisors_proto_rawDescData = file_advisors_v1_advisors_proto_rawDesc
+ file_advisors_v1_advisors_proto_rawDescData []byte
)
func file_advisors_v1_advisors_proto_rawDescGZIP() []byte {
file_advisors_v1_advisors_proto_rawDescOnce.Do(func() {
- file_advisors_v1_advisors_proto_rawDescData = protoimpl.X.CompressGZIP(file_advisors_v1_advisors_proto_rawDescData)
+ file_advisors_v1_advisors_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_advisors_v1_advisors_proto_rawDesc), len(file_advisors_v1_advisors_proto_rawDesc)))
})
return file_advisors_v1_advisors_proto_rawDescData
}
@@ -1622,7 +1610,7 @@ func file_advisors_v1_advisors_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_advisors_v1_advisors_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_advisors_v1_advisors_proto_rawDesc), len(file_advisors_v1_advisors_proto_rawDesc)),
NumEnums: 2,
NumMessages: 20,
NumExtensions: 0,
@@ -1634,7 +1622,6 @@ func file_advisors_v1_advisors_proto_init() {
MessageInfos: file_advisors_v1_advisors_proto_msgTypes,
}.Build()
File_advisors_v1_advisors_proto = out.File
- file_advisors_v1_advisors_proto_rawDesc = nil
file_advisors_v1_advisors_proto_goTypes = nil
file_advisors_v1_advisors_proto_depIdxs = nil
}
diff --git a/api/advisors/v1/advisors.pb.gw.go b/api/advisors/v1/advisors.pb.gw.go
index bff3437706..b2372373f6 100644
--- a/api/advisors/v1/advisors.pb.gw.go
+++ b/api/advisors/v1/advisors.pb.gw.go
@@ -10,6 +10,7 @@ package advisorsv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,23 +29,26 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_AdvisorService_ListFailedServices_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListFailedServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListFailedServicesRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListFailedServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_ListFailedServices_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListFailedServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListFailedServicesRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListFailedServices(ctx, &protoReq)
return msg, metadata, err
}
@@ -52,111 +56,115 @@ func local_request_AdvisorService_ListFailedServices_0(ctx context.Context, mars
var filter_AdvisorService_GetFailedChecks_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_AdvisorService_GetFailedChecks_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetFailedChecksRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetFailedChecksRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AdvisorService_GetFailedChecks_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetFailedChecks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_GetFailedChecks_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetFailedChecksRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetFailedChecksRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AdvisorService_GetFailedChecks_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetFailedChecks(ctx, &protoReq)
return msg, metadata, err
}
func request_AdvisorService_StartAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartAdvisorChecks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_StartAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartAdvisorChecks(ctx, &protoReq)
return msg, metadata, err
}
func request_AdvisorService_ListAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListAdvisorChecks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_ListAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListAdvisorChecks(ctx, &protoReq)
return msg, metadata, err
}
func request_AdvisorService_ListAdvisors_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAdvisorsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAdvisorsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListAdvisors(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_ListAdvisors_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAdvisorsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAdvisorsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListAdvisors(ctx, &protoReq)
return msg, metadata, err
}
func request_AdvisorService_ChangeAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, client AdvisorServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ChangeAdvisorChecks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AdvisorService_ChangeAdvisorChecks_0(ctx context.Context, marshaler runtime.Marshaler, server AdvisorServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeAdvisorChecksRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeAdvisorChecksRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ChangeAdvisorChecks(ctx, &protoReq)
return msg, metadata, err
}
@@ -167,15 +175,13 @@ func local_request_AdvisorService_ChangeAdvisorChecks_0(ctx context.Context, mar
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAdvisorServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AdvisorServiceServer) error {
- mux.Handle("GET", pattern_AdvisorService_ListFailedServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListFailedServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListFailedServices", runtime.WithHTTPPathPattern("/v1/advisors/failedServices"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListFailedServices", runtime.WithHTTPPathPattern("/v1/advisors/failedServices"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -187,19 +193,15 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListFailedServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_GetFailedChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_GetFailedChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/GetFailedChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks/failed"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/GetFailedChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks/failed"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -211,19 +213,15 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_GetFailedChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AdvisorService_StartAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AdvisorService_StartAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/StartAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:start"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/StartAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -235,19 +233,15 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_StartAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_ListAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -259,19 +253,15 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_ListAdvisors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListAdvisors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisors", runtime.WithHTTPPathPattern("/v1/advisors"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisors", runtime.WithHTTPPathPattern("/v1/advisors"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -283,19 +273,15 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListAdvisors_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AdvisorService_ChangeAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AdvisorService_ChangeAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ChangeAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:batchChange"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/advisors.v1.AdvisorService/ChangeAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:batchChange"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -307,7 +293,6 @@ func RegisterAdvisorServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ChangeAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -335,7 +320,6 @@ func RegisterAdvisorServiceHandlerFromEndpoint(ctx context.Context, mux *runtime
}
}()
}()
-
return RegisterAdvisorServiceHandler(ctx, mux, conn)
}
@@ -351,13 +335,11 @@ func RegisterAdvisorServiceHandler(ctx context.Context, mux *runtime.ServeMux, c
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AdvisorServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AdvisorServiceClient) error {
- mux.Handle("GET", pattern_AdvisorService_ListFailedServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListFailedServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListFailedServices", runtime.WithHTTPPathPattern("/v1/advisors/failedServices"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListFailedServices", runtime.WithHTTPPathPattern("/v1/advisors/failedServices"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -368,17 +350,13 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListFailedServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_GetFailedChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_GetFailedChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/GetFailedChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks/failed"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/GetFailedChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks/failed"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -389,17 +367,13 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_GetFailedChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AdvisorService_StartAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AdvisorService_StartAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/StartAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:start"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/StartAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -410,17 +384,13 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_StartAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_ListAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -431,17 +401,13 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AdvisorService_ListAdvisors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AdvisorService_ListAdvisors_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisors", runtime.WithHTTPPathPattern("/v1/advisors"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ListAdvisors", runtime.WithHTTPPathPattern("/v1/advisors"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -452,17 +418,13 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ListAdvisors_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AdvisorService_ChangeAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AdvisorService_ChangeAdvisorChecks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ChangeAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:batchChange"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/advisors.v1.AdvisorService/ChangeAdvisorChecks", runtime.WithHTTPPathPattern("/v1/advisors/checks:batchChange"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -473,37 +435,25 @@ func RegisterAdvisorServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AdvisorService_ChangeAdvisorChecks_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_AdvisorService_ListFailedServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "failedServices"}, ""))
-
- pattern_AdvisorService_GetFailedChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "advisors", "checks", "failed"}, ""))
-
- pattern_AdvisorService_StartAdvisorChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "checks"}, "start"))
-
- pattern_AdvisorService_ListAdvisorChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "checks"}, ""))
-
- pattern_AdvisorService_ListAdvisors_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "advisors"}, ""))
-
+ pattern_AdvisorService_ListFailedServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "failedServices"}, ""))
+ pattern_AdvisorService_GetFailedChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "advisors", "checks", "failed"}, ""))
+ pattern_AdvisorService_StartAdvisorChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "checks"}, "start"))
+ pattern_AdvisorService_ListAdvisorChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "checks"}, ""))
+ pattern_AdvisorService_ListAdvisors_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "advisors"}, ""))
pattern_AdvisorService_ChangeAdvisorChecks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "advisors", "checks"}, "batchChange"))
)
var (
- forward_AdvisorService_ListFailedServices_0 = runtime.ForwardResponseMessage
-
- forward_AdvisorService_GetFailedChecks_0 = runtime.ForwardResponseMessage
-
- forward_AdvisorService_StartAdvisorChecks_0 = runtime.ForwardResponseMessage
-
- forward_AdvisorService_ListAdvisorChecks_0 = runtime.ForwardResponseMessage
-
- forward_AdvisorService_ListAdvisors_0 = runtime.ForwardResponseMessage
-
+ forward_AdvisorService_ListFailedServices_0 = runtime.ForwardResponseMessage
+ forward_AdvisorService_GetFailedChecks_0 = runtime.ForwardResponseMessage
+ forward_AdvisorService_StartAdvisorChecks_0 = runtime.ForwardResponseMessage
+ forward_AdvisorService_ListAdvisorChecks_0 = runtime.ForwardResponseMessage
+ forward_AdvisorService_ListAdvisors_0 = runtime.ForwardResponseMessage
forward_AdvisorService_ChangeAdvisorChecks_0 = runtime.ForwardResponseMessage
)
diff --git a/api/advisors/v1/advisors.pb.validate.go b/api/advisors/v1/advisors.pb.validate.go
index 9c9bdff982..ef8f850024 100644
--- a/api/advisors/v1/advisors.pb.validate.go
+++ b/api/advisors/v1/advisors.pb.validate.go
@@ -87,7 +87,7 @@ type AdvisorCheckResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AdvisorCheckResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -209,7 +209,7 @@ type CheckResultSummaryMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckResultSummaryMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -328,7 +328,7 @@ type CheckResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -439,7 +439,7 @@ type AdvisorCheckMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AdvisorCheckMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -581,7 +581,7 @@ type AdvisorMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AdvisorMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -689,7 +689,7 @@ type ChangeAdvisorCheckParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAdvisorCheckParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -791,7 +791,7 @@ type StartAdvisorChecksRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartAdvisorChecksRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -893,7 +893,7 @@ type StartAdvisorChecksResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartAdvisorChecksResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -995,7 +995,7 @@ type ListAdvisorChecksRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAdvisorChecksRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1131,7 +1131,7 @@ type ListAdvisorChecksResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAdvisorChecksResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1233,7 +1233,7 @@ type ListAdvisorsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAdvisorsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1369,7 +1369,7 @@ type ListAdvisorsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAdvisorsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1505,7 +1505,7 @@ type ChangeAdvisorChecksRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAdvisorChecksRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1607,7 +1607,7 @@ type ChangeAdvisorChecksResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAdvisorChecksResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1710,7 +1710,7 @@ type ListFailedServicesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListFailedServicesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1846,7 +1846,7 @@ type ListFailedServicesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListFailedServicesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1976,7 +1976,7 @@ type GetFailedChecksRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetFailedChecksRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2116,7 +2116,7 @@ type GetFailedChecksResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetFailedChecksResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/agent/pb/agent.pb.go b/api/agent/pb/agent.pb.go
index cafc2136d8..5f0d1fcf75 100644
--- a/api/agent/pb/agent.pb.go
+++ b/api/agent/pb/agent.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: agent/pb/agent.proto
@@ -8,6 +8,7 @@ package pb
import (
reflect "reflect"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,7 +25,7 @@ const (
var File_agent_pb_agent_proto protoreflect.FileDescriptor
-var file_agent_pb_agent_proto_rawDesc = []byte{
+var file_agent_pb_agent_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x62, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x1a, 0x14, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72,
@@ -41,7 +42,7 @@ var file_agent_pb_agent_proto_rawDesc = []byte{
0x6e, 0x74, 0xe2, 0x02, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var file_agent_pb_agent_proto_goTypes = []any{
(*v1.AgentMessage)(nil), // 0: agent.v1.AgentMessage
@@ -67,7 +68,7 @@ func file_agent_pb_agent_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_agent_pb_agent_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_agent_pb_agent_proto_rawDesc), len(file_agent_pb_agent_proto_rawDesc)),
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
@@ -77,7 +78,6 @@ func file_agent_pb_agent_proto_init() {
DependencyIndexes: file_agent_pb_agent_proto_depIdxs,
}.Build()
File_agent_pb_agent_proto = out.File
- file_agent_pb_agent_proto_rawDesc = nil
file_agent_pb_agent_proto_goTypes = nil
file_agent_pb_agent_proto_depIdxs = nil
}
diff --git a/api/agent/v1/agent.pb.go b/api/agent/v1/agent.pb.go
index dbfad1aec7..6bec94bc06 100644
--- a/api/agent/v1/agent.pb.go
+++ b/api/agent/v1/agent.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: agent/v1/agent.proto
@@ -9,6 +9,7 @@ package agentv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
status "google.golang.org/genproto/googleapis/rpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -135,16 +136,15 @@ func (StartActionRequest_RestartSystemServiceParams_SystemService) EnumDescripto
// TextFiles contains files which can be used to connect to DB (certificates, keys and etc).
type TextFiles struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// files contains files which can be used to connect to DB (certificates, keys and etc).
- Files map[string]string `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Files map[string]string `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// template_left_delim is used to replace placeholders in templates (DSN and etc.) with paths to files.
TemplateLeftDelim string `protobuf:"bytes,2,opt,name=template_left_delim,json=templateLeftDelim,proto3" json:"template_left_delim,omitempty"`
// template_right_delim is used to replace placeholders in templates (DSN and etc.) with paths to files.
TemplateRightDelim string `protobuf:"bytes,3,opt,name=template_right_delim,json=templateRightDelim,proto3" json:"template_right_delim,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *TextFiles) Reset() {
@@ -200,9 +200,9 @@ func (x *TextFiles) GetTemplateRightDelim() string {
// Ping is a AgentMessage/ServerMessage for checking connectivity, latency and clock drift.
type Ping struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Ping) Reset() {
@@ -237,11 +237,10 @@ func (*Ping) Descriptor() ([]byte, []int) {
// Pong is an AgentMessage/ServerMessage with current time for measuring clock drift.
type Pong struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ CurrentTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"`
unknownFields protoimpl.UnknownFields
-
- CurrentTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Pong) Reset() {
@@ -283,11 +282,10 @@ func (x *Pong) GetCurrentTime() *timestamppb.Timestamp {
// QANCollectRequest is an AgentMessage for sending QAN data for qan-api.
type QANCollectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MetricsBucket []*MetricsBucket `protobuf:"bytes,1,rep,name=metrics_bucket,json=metricsBucket,proto3" json:"metrics_bucket,omitempty"`
unknownFields protoimpl.UnknownFields
-
- MetricsBucket []*MetricsBucket `protobuf:"bytes,1,rep,name=metrics_bucket,json=metricsBucket,proto3" json:"metrics_bucket,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *QANCollectRequest) Reset() {
@@ -329,9 +327,9 @@ func (x *QANCollectRequest) GetMetricsBucket() []*MetricsBucket {
// QANCollectResponse is a ServerMessage for QAN data acceptance.
type QANCollectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANCollectResponse) Reset() {
@@ -366,15 +364,14 @@ func (*QANCollectResponse) Descriptor() ([]byte, []int) {
// StateChangedRequest is an AgentMessage describing actual agent status.
type StateChangedRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
- Status v1.AgentStatus `protobuf:"varint,2,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
- ListenPort uint32 `protobuf:"varint,3,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
- ProcessExecPath string `protobuf:"bytes,4,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
- Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ Status v1.AgentStatus `protobuf:"varint,2,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
+ ListenPort uint32 `protobuf:"varint,3,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
+ ProcessExecPath string `protobuf:"bytes,4,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StateChangedRequest) Reset() {
@@ -444,9 +441,9 @@ func (x *StateChangedRequest) GetVersion() string {
// StateChangedResponse is a ServerMessage for StateChangedRequest acceptance.
type StateChangedResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StateChangedResponse) Reset() {
@@ -481,12 +478,11 @@ func (*StateChangedResponse) Descriptor() ([]byte, []int) {
// SetStateRequest is a ServerMessage asking pmm-agent to run agents according to desired state.
type SetStateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentProcesses map[string]*SetStateRequest_AgentProcess `protobuf:"bytes,1,rep,name=agent_processes,json=agentProcesses,proto3" json:"agent_processes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- BuiltinAgents map[string]*SetStateRequest_BuiltinAgent `protobuf:"bytes,2,rep,name=builtin_agents,json=builtinAgents,proto3" json:"builtin_agents,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentProcesses map[string]*SetStateRequest_AgentProcess `protobuf:"bytes,1,rep,name=agent_processes,json=agentProcesses,proto3" json:"agent_processes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ BuiltinAgents map[string]*SetStateRequest_BuiltinAgent `protobuf:"bytes,2,rep,name=builtin_agents,json=builtinAgents,proto3" json:"builtin_agents,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SetStateRequest) Reset() {
@@ -535,9 +531,9 @@ func (x *SetStateRequest) GetBuiltinAgents() map[string]*SetStateRequest_Builtin
// SetStateResponse is an AgentMessage for SetStateRequest acceptance.
type SetStateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SetStateResponse) Reset() {
@@ -572,11 +568,8 @@ func (*SetStateResponse) Descriptor() ([]byte, []int) {
// QueryActionValue represents a single value used in query Actions.
type QueryActionValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Kind:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Kind:
//
// *QueryActionValue_Nil
// *QueryActionValue_Bool
@@ -588,7 +581,9 @@ type QueryActionValue struct {
// *QueryActionValue_Slice
// *QueryActionValue_Map
// *QueryActionValue_Binary
- Kind isQueryActionValue_Kind `protobuf_oneof:"kind"`
+ Kind isQueryActionValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QueryActionValue) Reset() {
@@ -621,79 +616,99 @@ func (*QueryActionValue) Descriptor() ([]byte, []int) {
return file_agent_v1_agent_proto_rawDescGZIP(), []int{9}
}
-func (m *QueryActionValue) GetKind() isQueryActionValue_Kind {
- if m != nil {
- return m.Kind
+func (x *QueryActionValue) GetKind() isQueryActionValue_Kind {
+ if x != nil {
+ return x.Kind
}
return nil
}
func (x *QueryActionValue) GetNil() bool {
- if x, ok := x.GetKind().(*QueryActionValue_Nil); ok {
- return x.Nil
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Nil); ok {
+ return x.Nil
+ }
}
return false
}
func (x *QueryActionValue) GetBool() bool {
- if x, ok := x.GetKind().(*QueryActionValue_Bool); ok {
- return x.Bool
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Bool); ok {
+ return x.Bool
+ }
}
return false
}
func (x *QueryActionValue) GetInt64() int64 {
- if x, ok := x.GetKind().(*QueryActionValue_Int64); ok {
- return x.Int64
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Int64); ok {
+ return x.Int64
+ }
}
return 0
}
func (x *QueryActionValue) GetUint64() uint64 {
- if x, ok := x.GetKind().(*QueryActionValue_Uint64); ok {
- return x.Uint64
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Uint64); ok {
+ return x.Uint64
+ }
}
return 0
}
func (x *QueryActionValue) GetDouble() float64 {
- if x, ok := x.GetKind().(*QueryActionValue_Double); ok {
- return x.Double
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Double); ok {
+ return x.Double
+ }
}
return 0
}
func (x *QueryActionValue) GetBytes() []byte {
- if x, ok := x.GetKind().(*QueryActionValue_Bytes); ok {
- return x.Bytes
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Bytes); ok {
+ return x.Bytes
+ }
}
return nil
}
func (x *QueryActionValue) GetTimestamp() *timestamppb.Timestamp {
- if x, ok := x.GetKind().(*QueryActionValue_Timestamp); ok {
- return x.Timestamp
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Timestamp); ok {
+ return x.Timestamp
+ }
}
return nil
}
func (x *QueryActionValue) GetSlice() *QueryActionSlice {
- if x, ok := x.GetKind().(*QueryActionValue_Slice); ok {
- return x.Slice
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Slice); ok {
+ return x.Slice
+ }
}
return nil
}
func (x *QueryActionValue) GetMap() *QueryActionMap {
- if x, ok := x.GetKind().(*QueryActionValue_Map); ok {
- return x.Map
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Map); ok {
+ return x.Map
+ }
}
return nil
}
func (x *QueryActionValue) GetBinary() *QueryActionBinary {
- if x, ok := x.GetKind().(*QueryActionValue_Binary); ok {
- return x.Binary
+ if x != nil {
+ if x, ok := x.Kind.(*QueryActionValue_Binary); ok {
+ return x.Binary
+ }
}
return nil
}
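All of the Kind getters above now nil-check the receiver and read Kind directly instead of going through GetKind(), so they are safe to call on a nil message. A minimal sketch of building and inspecting a QueryActionValue with these generated accessors; the import path and package alias are assumptions, not part of this change:

```go
package main

import (
	"fmt"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

func describe(v *agentv1.QueryActionValue) string {
	// GetKind (like every other getter) checks the receiver for nil,
	// so passing a nil *QueryActionValue here is safe.
	switch k := v.GetKind().(type) {
	case *agentv1.QueryActionValue_Nil:
		return "nil value"
	case *agentv1.QueryActionValue_Bool:
		return fmt.Sprintf("bool %v", k.Bool)
	case *agentv1.QueryActionValue_Int64:
		return fmt.Sprintf("int64 %d", k.Int64)
	case *agentv1.QueryActionValue_Double:
		return fmt.Sprintf("double %g", k.Double)
	default:
		return "unset or unhandled kind"
	}
}

func main() {
	v := &agentv1.QueryActionValue{
		Kind: &agentv1.QueryActionValue_Int64{Int64: 42},
	}
	fmt.Println(describe(v))   // int64 42
	fmt.Println(describe(nil)) // unset or unhandled kind (no panic)
}
```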
@@ -764,11 +779,10 @@ func (*QueryActionValue_Binary) isQueryActionValue_Kind() {}
// QueryActionSlice represents a slice of values used in query Actions.
type QueryActionSlice struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Slice []*QueryActionValue `protobuf:"bytes,1,rep,name=slice,proto3" json:"slice,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Slice []*QueryActionValue `protobuf:"bytes,1,rep,name=slice,proto3" json:"slice,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *QueryActionSlice) Reset() {
@@ -810,11 +824,10 @@ func (x *QueryActionSlice) GetSlice() []*QueryActionValue {
// QueryActionMap represents a map of values used in query Actions.
type QueryActionMap struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Map map[string]*QueryActionValue `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- Map map[string]*QueryActionValue `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *QueryActionMap) Reset() {
@@ -856,12 +869,11 @@ func (x *QueryActionMap) GetMap() map[string]*QueryActionValue {
// QueryActionBinary represents primitive.Binary used in query Actions.
type QueryActionBinary struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Subtype int32 `protobuf:"varint,1,opt,name=subtype,proto3" json:"subtype,omitempty"`
+ Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Subtype int32 `protobuf:"varint,1,opt,name=subtype,proto3" json:"subtype,omitempty"`
- Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *QueryActionBinary) Reset() {
@@ -910,16 +922,15 @@ func (x *QueryActionBinary) GetBytes() []byte {
// QueryActionResult represents query Action result.
type QueryActionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// SQL result column names.
Columns []string `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"`
// SQL result data rows.
Rows []*QueryActionSlice `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
// MongoDB documents.
- Docs []*QueryActionMap `protobuf:"bytes,3,rep,name=docs,proto3" json:"docs,omitempty"`
+ Docs []*QueryActionMap `protobuf:"bytes,3,rep,name=docs,proto3" json:"docs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
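QueryActionResult carries SQL output as Columns plus Rows of QueryActionSlice values, and MongoDB output as Docs of QueryActionMap values. A rough sketch of walking both shapes through the generated getters, again assuming the package location:

```go
package main

import (
	"fmt"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

// printResult walks the SQL part (Columns + Rows) and the MongoDB part (Docs)
// of a query Action result.
func printResult(r *agentv1.QueryActionResult) {
	fmt.Println("columns:", r.GetColumns())
	for i, row := range r.GetRows() {
		fmt.Printf("row %d has %d cells\n", i, len(row.GetSlice()))
	}
	for i, doc := range r.GetDocs() {
		for key, val := range doc.GetMap() {
			fmt.Printf("doc %d: %s kind=%T\n", i, key, val.GetKind())
		}
	}
}

func main() {
	printResult(&agentv1.QueryActionResult{Columns: []string{"id", "name"}})
}
```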
func (x *QueryActionResult) Reset() {
@@ -975,14 +986,11 @@ func (x *QueryActionResult) GetDocs() []*QueryActionMap {
// StartActionRequest is a ServerMessage asking pmm-agent to start action.
type StartActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// Timeout for the whole action. If zero or absent, pmm-agent will pick one itself.
Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
- // Types that are assignable to Params:
+ // Types that are valid to be assigned to Params:
//
// *StartActionRequest_MysqlExplainParams
// *StartActionRequest_MysqlShowCreateTableParams
@@ -1005,7 +1013,9 @@ type StartActionRequest struct {
// *StartActionRequest_MongodbQueryReplsetgetstatusParams
// *StartActionRequest_MongodbQueryGetdiagnosticdataParams
// *StartActionRequest_RestartSysServiceParams
- Params isStartActionRequest_Params `protobuf_oneof:"params"`
+ Params isStartActionRequest_Params `protobuf_oneof:"params"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest) Reset() {
@@ -1052,156 +1062,198 @@ func (x *StartActionRequest) GetTimeout() *durationpb.Duration {
return nil
}
-func (m *StartActionRequest) GetParams() isStartActionRequest_Params {
- if m != nil {
- return m.Params
+func (x *StartActionRequest) GetParams() isStartActionRequest_Params {
+ if x != nil {
+ return x.Params
}
return nil
}
func (x *StartActionRequest) GetMysqlExplainParams() *StartActionRequest_MySQLExplainParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlExplainParams); ok {
- return x.MysqlExplainParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlExplainParams); ok {
+ return x.MysqlExplainParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMysqlShowCreateTableParams() *StartActionRequest_MySQLShowCreateTableParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlShowCreateTableParams); ok {
- return x.MysqlShowCreateTableParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlShowCreateTableParams); ok {
+ return x.MysqlShowCreateTableParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMysqlShowTableStatusParams() *StartActionRequest_MySQLShowTableStatusParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlShowTableStatusParams); ok {
- return x.MysqlShowTableStatusParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlShowTableStatusParams); ok {
+ return x.MysqlShowTableStatusParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMysqlShowIndexParams() *StartActionRequest_MySQLShowIndexParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlShowIndexParams); ok {
- return x.MysqlShowIndexParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlShowIndexParams); ok {
+ return x.MysqlShowIndexParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPostgresqlShowCreateTableParams() *StartActionRequest_PostgreSQLShowCreateTableParams {
- if x, ok := x.GetParams().(*StartActionRequest_PostgresqlShowCreateTableParams); ok {
- return x.PostgresqlShowCreateTableParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PostgresqlShowCreateTableParams); ok {
+ return x.PostgresqlShowCreateTableParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPostgresqlShowIndexParams() *StartActionRequest_PostgreSQLShowIndexParams {
- if x, ok := x.GetParams().(*StartActionRequest_PostgresqlShowIndexParams); ok {
- return x.PostgresqlShowIndexParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PostgresqlShowIndexParams); ok {
+ return x.PostgresqlShowIndexParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbExplainParams() *StartActionRequest_MongoDBExplainParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbExplainParams); ok {
- return x.MongodbExplainParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbExplainParams); ok {
+ return x.MongodbExplainParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPtSummaryParams() *StartActionRequest_PTSummaryParams {
- if x, ok := x.GetParams().(*StartActionRequest_PtSummaryParams); ok {
- return x.PtSummaryParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PtSummaryParams); ok {
+ return x.PtSummaryParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPtPgSummaryParams() *StartActionRequest_PTPgSummaryParams {
- if x, ok := x.GetParams().(*StartActionRequest_PtPgSummaryParams); ok {
- return x.PtPgSummaryParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PtPgSummaryParams); ok {
+ return x.PtPgSummaryParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPtMongodbSummaryParams() *StartActionRequest_PTMongoDBSummaryParams {
- if x, ok := x.GetParams().(*StartActionRequest_PtMongodbSummaryParams); ok {
- return x.PtMongodbSummaryParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PtMongodbSummaryParams); ok {
+ return x.PtMongodbSummaryParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPtMysqlSummaryParams() *StartActionRequest_PTMySQLSummaryParams {
- if x, ok := x.GetParams().(*StartActionRequest_PtMysqlSummaryParams); ok {
- return x.PtMysqlSummaryParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PtMysqlSummaryParams); ok {
+ return x.PtMysqlSummaryParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMysqlQueryShowParams() *StartActionRequest_MySQLQueryShowParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlQueryShowParams); ok {
- return x.MysqlQueryShowParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlQueryShowParams); ok {
+ return x.MysqlQueryShowParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMysqlQuerySelectParams() *StartActionRequest_MySQLQuerySelectParams {
- if x, ok := x.GetParams().(*StartActionRequest_MysqlQuerySelectParams); ok {
- return x.MysqlQuerySelectParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MysqlQuerySelectParams); ok {
+ return x.MysqlQuerySelectParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPostgresqlQueryShowParams() *StartActionRequest_PostgreSQLQueryShowParams {
- if x, ok := x.GetParams().(*StartActionRequest_PostgresqlQueryShowParams); ok {
- return x.PostgresqlQueryShowParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PostgresqlQueryShowParams); ok {
+ return x.PostgresqlQueryShowParams
+ }
}
return nil
}
func (x *StartActionRequest) GetPostgresqlQuerySelectParams() *StartActionRequest_PostgreSQLQuerySelectParams {
- if x, ok := x.GetParams().(*StartActionRequest_PostgresqlQuerySelectParams); ok {
- return x.PostgresqlQuerySelectParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_PostgresqlQuerySelectParams); ok {
+ return x.PostgresqlQuerySelectParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbQueryGetparameterParams() *StartActionRequest_MongoDBQueryGetParameterParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbQueryGetparameterParams); ok {
- return x.MongodbQueryGetparameterParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbQueryGetparameterParams); ok {
+ return x.MongodbQueryGetparameterParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbQueryBuildinfoParams() *StartActionRequest_MongoDBQueryBuildInfoParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbQueryBuildinfoParams); ok {
- return x.MongodbQueryBuildinfoParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbQueryBuildinfoParams); ok {
+ return x.MongodbQueryBuildinfoParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbQueryGetcmdlineoptsParams() *StartActionRequest_MongoDBQueryGetCmdLineOptsParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbQueryGetcmdlineoptsParams); ok {
- return x.MongodbQueryGetcmdlineoptsParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbQueryGetcmdlineoptsParams); ok {
+ return x.MongodbQueryGetcmdlineoptsParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbQueryReplsetgetstatusParams() *StartActionRequest_MongoDBQueryReplSetGetStatusParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbQueryReplsetgetstatusParams); ok {
- return x.MongodbQueryReplsetgetstatusParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbQueryReplsetgetstatusParams); ok {
+ return x.MongodbQueryReplsetgetstatusParams
+ }
}
return nil
}
func (x *StartActionRequest) GetMongodbQueryGetdiagnosticdataParams() *StartActionRequest_MongoDBQueryGetDiagnosticDataParams {
- if x, ok := x.GetParams().(*StartActionRequest_MongodbQueryGetdiagnosticdataParams); ok {
- return x.MongodbQueryGetdiagnosticdataParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_MongodbQueryGetdiagnosticdataParams); ok {
+ return x.MongodbQueryGetdiagnosticdataParams
+ }
}
return nil
}
func (x *StartActionRequest) GetRestartSysServiceParams() *StartActionRequest_RestartSystemServiceParams {
- if x, ok := x.GetParams().(*StartActionRequest_RestartSysServiceParams); ok {
- return x.RestartSysServiceParams
+ if x != nil {
+ if x, ok := x.Params.(*StartActionRequest_RestartSysServiceParams); ok {
+ return x.RestartSysServiceParams
+ }
}
return nil
}
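Each params getter now guards against a nil receiver before asserting the Params oneof, so probing an absent request or a different variant returns the zero value instead of panicking. A small illustration under the same package assumption:

```go
package main

import (
	"fmt"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

func main() {
	// Nil receiver: every regenerated getter returns its zero value, no panic.
	var req *agentv1.StartActionRequest
	fmt.Println(req.GetActionId() == "")            // true
	fmt.Println(req.GetMysqlExplainParams() == nil) // true

	// Set one Params variant, then probe a different one: still nil, no panic.
	req = &agentv1.StartActionRequest{
		ActionId: "action-1",
		Params: &agentv1.StartActionRequest_PtSummaryParams{
			PtSummaryParams: &agentv1.StartActionRequest_PTSummaryParams{},
		},
	}
	fmt.Println(req.GetPtSummaryParams() != nil)    // true
	fmt.Println(req.GetMysqlExplainParams() == nil) // true: a different variant is set
}
```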
@@ -1338,9 +1390,9 @@ func (*StartActionRequest_RestartSysServiceParams) isStartActionRequest_Params()
// StartActionResponse is an AgentMessage for StartActionRequest acceptance.
type StartActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionResponse) Reset() {
@@ -1375,11 +1427,10 @@ func (*StartActionResponse) Descriptor() ([]byte, []int) {
// StopActionRequest is a ServerMessage asking pmm-agent to stop action.
type StopActionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StopActionRequest) Reset() {
@@ -1421,9 +1472,9 @@ func (x *StopActionRequest) GetActionId() string {
// StopActionResponse is an AgentMessage for StopActionRequest acceptance.
type StopActionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StopActionResponse) Reset() {
@@ -1458,10 +1509,7 @@ func (*StopActionResponse) Descriptor() ([]byte, []int) {
// ActionResultRequest is an AgentMessage containing action run status and new chunk of output.
type ActionResultRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique action ID.
ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"`
// New chunk of action output.
@@ -1469,7 +1517,9 @@ type ActionResultRequest struct {
// True if action is finished.
Done bool `protobuf:"varint,4,opt,name=done,proto3" json:"done,omitempty"`
// Error message if action failed.
- Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ActionResultRequest) Reset() {
@@ -1532,9 +1582,9 @@ func (x *ActionResultRequest) GetError() string {
// ActionResultResponse is an ServerMessage for ActionResultRequest acceptance.
type ActionResultResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ActionResultResponse) Reset() {
@@ -1569,17 +1619,16 @@ func (*ActionResultResponse) Descriptor() ([]byte, []int) {
// PBMSwitchPITRRequest is a ServerMessage asking pmm-agent to switch PITR pbm feature.
type PBMSwitchPITRRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the MongoDB service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
// Specifies if PITR should be enabled or disabled.
- Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ Enabled bool `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PBMSwitchPITRRequest) Reset() {
@@ -1635,12 +1684,11 @@ func (x *PBMSwitchPITRRequest) GetEnabled() bool {
// PBMSwitchPITRResponse is an AgentMessage for PBMSwitchPITRRequest success result.
type PBMSwitchPITRResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Error message.
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PBMSwitchPITRResponse) Reset() {
@@ -1682,12 +1730,11 @@ func (x *PBMSwitchPITRResponse) GetError() string {
// AgentLogsRequest is an ServerMessage asking logs by Agent ID.
type AgentLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
- Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *AgentLogsRequest) Reset() {
@@ -1736,12 +1783,11 @@ func (x *AgentLogsRequest) GetLimit() uint32 {
// AgentLogsResponse is an AgentMessage containing logs by Agent ID.
type AgentLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Logs []string `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
- AgentConfigLogLinesCount uint32 `protobuf:"varint,2,opt,name=agent_config_log_lines_count,json=agentConfigLogLinesCount,proto3" json:"agent_config_log_lines_count,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Logs []string `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ AgentConfigLogLinesCount uint32 `protobuf:"varint,2,opt,name=agent_config_log_lines_count,json=agentConfigLogLinesCount,proto3" json:"agent_config_log_lines_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AgentLogsResponse) Reset() {
@@ -1790,10 +1836,7 @@ func (x *AgentLogsResponse) GetAgentConfigLogLinesCount() uint32 {
// CheckConnectionRequest is a ServerMessage asking pmm-agent to check connection with Service.
type CheckConnectionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service type.
Type v1.ServiceType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.ServiceType" json:"type,omitempty"`
// DSN for the service. May contain connection (dial) timeout.
@@ -1804,6 +1847,8 @@ type CheckConnectionRequest struct {
TextFiles *TextFiles `protobuf:"bytes,4,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,5,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckConnectionRequest) Reset() {
@@ -1873,12 +1918,11 @@ func (x *CheckConnectionRequest) GetTlsSkipVerify() bool {
// CheckConnectionResponse is an AgentMessage containing the result of a connection check.
type CheckConnectionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Error message if connection check failed.
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckConnectionResponse) Reset() {
@@ -1920,10 +1964,7 @@ func (x *CheckConnectionResponse) GetError() string {
// ServiceInfoRequest is a ServerMessage that queries pmm-agent for database information.
type ServiceInfoRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service type.
Type v1.ServiceType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.ServiceType" json:"type,omitempty"`
// DSN for the service. May contain connection (dial) timeout.
@@ -1934,6 +1975,8 @@ type ServiceInfoRequest struct {
TextFiles *TextFiles `protobuf:"bytes,4,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,5,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServiceInfoRequest) Reset() {
@@ -2003,10 +2046,7 @@ func (x *ServiceInfoRequest) GetTlsSkipVerify() bool {
// ServiceInfoResponse is an AgentMessage containing information gathered from a service.
type ServiceInfoResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Error message if the request failed.
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// A number of MySQL tables, 0 if unknown.
@@ -2016,7 +2056,9 @@ type ServiceInfoResponse struct {
// A list of PostgreSQL databases.
DatabaseList []string `protobuf:"bytes,4,rep,name=database_list,json=databaseList,proto3" json:"database_list,omitempty"`
// A version of pg_stat_monitor, empty if unavailable.
- PgsmVersion *string `protobuf:"bytes,5,opt,name=pgsm_version,json=pgsmVersion,proto3,oneof" json:"pgsm_version,omitempty"`
+ PgsmVersion *string `protobuf:"bytes,5,opt,name=pgsm_version,json=pgsmVersion,proto3,oneof" json:"pgsm_version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServiceInfoResponse) Reset() {
@@ -2086,11 +2128,10 @@ func (x *ServiceInfoResponse) GetPgsmVersion() string {
// JobStatusRequest is a ServerMessage asking pmm-agent for job status.
type JobStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *JobStatusRequest) Reset() {
@@ -2132,11 +2173,10 @@ func (x *JobStatusRequest) GetJobId() string {
// JobStatusResponse is an AgentMessage containing job status.
type JobStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Alive bool `protobuf:"varint,1,opt,name=alive,proto3" json:"alive,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *JobStatusResponse) Reset() {
@@ -2178,15 +2218,14 @@ func (x *JobStatusResponse) GetAlive() bool {
// S3LocationConfig represents S3 bucket configuration.
type S3LocationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+ AccessKey string `protobuf:"bytes,2,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
+ SecretKey string `protobuf:"bytes,3,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
+ BucketName string `protobuf:"bytes,4,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
+ BucketRegion string `protobuf:"bytes,5,opt,name=bucket_region,json=bucketRegion,proto3" json:"bucket_region,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
- AccessKey string `protobuf:"bytes,2,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
- SecretKey string `protobuf:"bytes,3,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
- BucketName string `protobuf:"bytes,4,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
- BucketRegion string `protobuf:"bytes,5,opt,name=bucket_region,json=bucketRegion,proto3" json:"bucket_region,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *S3LocationConfig) Reset() {
@@ -2256,11 +2295,10 @@ func (x *S3LocationConfig) GetBucketRegion() string {
// FilesystemLocationConfig represents path for storing backup artifacts locally.
type FilesystemLocationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *FilesystemLocationConfig) Reset() {
@@ -2302,20 +2340,19 @@ func (x *FilesystemLocationConfig) GetPath() string {
// StartJobRequest is a ServerMessage asking pmm-agent to start job.
type StartJobRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Timeout for the job.
Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
- // Types that are assignable to Job:
+ // Types that are valid to be assigned to Job:
//
// *StartJobRequest_MysqlBackup
// *StartJobRequest_MysqlRestoreBackup
// *StartJobRequest_MongodbBackup
// *StartJobRequest_MongodbRestoreBackup
- Job isStartJobRequest_Job `protobuf_oneof:"job"`
+ Job isStartJobRequest_Job `protobuf_oneof:"job"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartJobRequest) Reset() {
@@ -2362,37 +2399,45 @@ func (x *StartJobRequest) GetTimeout() *durationpb.Duration {
return nil
}
-func (m *StartJobRequest) GetJob() isStartJobRequest_Job {
- if m != nil {
- return m.Job
+func (x *StartJobRequest) GetJob() isStartJobRequest_Job {
+ if x != nil {
+ return x.Job
}
return nil
}
func (x *StartJobRequest) GetMysqlBackup() *StartJobRequest_MySQLBackup {
- if x, ok := x.GetJob().(*StartJobRequest_MysqlBackup); ok {
- return x.MysqlBackup
+ if x != nil {
+ if x, ok := x.Job.(*StartJobRequest_MysqlBackup); ok {
+ return x.MysqlBackup
+ }
}
return nil
}
func (x *StartJobRequest) GetMysqlRestoreBackup() *StartJobRequest_MySQLRestoreBackup {
- if x, ok := x.GetJob().(*StartJobRequest_MysqlRestoreBackup); ok {
- return x.MysqlRestoreBackup
+ if x != nil {
+ if x, ok := x.Job.(*StartJobRequest_MysqlRestoreBackup); ok {
+ return x.MysqlRestoreBackup
+ }
}
return nil
}
func (x *StartJobRequest) GetMongodbBackup() *StartJobRequest_MongoDBBackup {
- if x, ok := x.GetJob().(*StartJobRequest_MongodbBackup); ok {
- return x.MongodbBackup
+ if x != nil {
+ if x, ok := x.Job.(*StartJobRequest_MongodbBackup); ok {
+ return x.MongodbBackup
+ }
}
return nil
}
func (x *StartJobRequest) GetMongodbRestoreBackup() *StartJobRequest_MongoDBRestoreBackup {
- if x, ok := x.GetJob().(*StartJobRequest_MongodbRestoreBackup); ok {
- return x.MongodbRestoreBackup
+ if x != nil {
+ if x, ok := x.Job.(*StartJobRequest_MongodbRestoreBackup); ok {
+ return x.MongodbRestoreBackup
+ }
}
return nil
}
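The Job oneof follows the same wrapper pattern, and Timeout is a well-known Duration. A hedged sketch of assembling a StartJobRequest; the job ID, timeout, and empty MongoDBBackup config are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

func main() {
	req := &agentv1.StartJobRequest{
		JobId:   "job-42",
		Timeout: durationpb.New(15 * time.Minute),
		// The nested backup config is left empty here; real callers fill in
		// the MongoDBBackup fields defined further down in this file.
		Job: &agentv1.StartJobRequest_MongodbBackup{
			MongodbBackup: &agentv1.StartJobRequest_MongoDBBackup{},
		},
	}

	fmt.Println(req.GetJobId(), req.GetTimeout().AsDuration())
	fmt.Println(req.GetMongodbBackup() != nil) // true
	fmt.Println(req.GetMysqlBackup() == nil)   // true: a different oneof variant
}
```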
@@ -2427,11 +2472,10 @@ func (*StartJobRequest_MongodbRestoreBackup) isStartJobRequest_Job() {}
// StartJobResponse is an AgentMessage for StartJobRequest acceptance.
type StartJobResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartJobResponse) Reset() {
@@ -2473,11 +2517,10 @@ func (x *StartJobResponse) GetError() string {
// StopJobRequest is a ServerMessage asking pmm-agent to stop job.
type StopJobRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StopJobRequest) Reset() {
@@ -2519,9 +2562,9 @@ func (x *StopJobRequest) GetJobId() string {
// StopJobResponse is an AgentMessage for StopJobRequest acceptance.
type StopJobResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StopJobResponse) Reset() {
@@ -2556,20 +2599,19 @@ func (*StopJobResponse) Descriptor() ([]byte, []int) {
// JobResult represents job result.
type JobResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Types that are assignable to Result:
+ // Types that are valid to be assigned to Result:
//
// *JobResult_Error_
// *JobResult_MysqlBackup
// *JobResult_MysqlRestoreBackup
// *JobResult_MongodbBackup
// *JobResult_MongodbRestoreBackup
- Result isJobResult_Result `protobuf_oneof:"result"`
+ Result isJobResult_Result `protobuf_oneof:"result"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult) Reset() {
@@ -2616,44 +2658,54 @@ func (x *JobResult) GetTimestamp() *timestamppb.Timestamp {
return nil
}
-func (m *JobResult) GetResult() isJobResult_Result {
- if m != nil {
- return m.Result
+func (x *JobResult) GetResult() isJobResult_Result {
+ if x != nil {
+ return x.Result
}
return nil
}
func (x *JobResult) GetError() *JobResult_Error {
- if x, ok := x.GetResult().(*JobResult_Error_); ok {
- return x.Error
+ if x != nil {
+ if x, ok := x.Result.(*JobResult_Error_); ok {
+ return x.Error
+ }
}
return nil
}
func (x *JobResult) GetMysqlBackup() *JobResult_MySQLBackup {
- if x, ok := x.GetResult().(*JobResult_MysqlBackup); ok {
- return x.MysqlBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobResult_MysqlBackup); ok {
+ return x.MysqlBackup
+ }
}
return nil
}
func (x *JobResult) GetMysqlRestoreBackup() *JobResult_MySQLRestoreBackup {
- if x, ok := x.GetResult().(*JobResult_MysqlRestoreBackup); ok {
- return x.MysqlRestoreBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobResult_MysqlRestoreBackup); ok {
+ return x.MysqlRestoreBackup
+ }
}
return nil
}
func (x *JobResult) GetMongodbBackup() *JobResult_MongoDBBackup {
- if x, ok := x.GetResult().(*JobResult_MongodbBackup); ok {
- return x.MongodbBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobResult_MongodbBackup); ok {
+ return x.MongodbBackup
+ }
}
return nil
}
func (x *JobResult) GetMongodbRestoreBackup() *JobResult_MongoDBRestoreBackup {
- if x, ok := x.GetResult().(*JobResult_MongodbRestoreBackup); ok {
- return x.MongodbRestoreBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobResult_MongodbRestoreBackup); ok {
+ return x.MongodbRestoreBackup
+ }
}
return nil
}
@@ -2694,18 +2746,17 @@ func (*JobResult_MongodbRestoreBackup) isJobResult_Result() {}
// JobProgress represents job progress messages like percentage of completion, status updates, etc.
type JobProgress struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Types that are assignable to Result:
+ // Types that are valid to be assigned to Result:
//
// *JobProgress_MysqlBackup
// *JobProgress_MysqlRestoreBackup
// *JobProgress_Logs_
- Result isJobProgress_Result `protobuf_oneof:"result"`
+ Result isJobProgress_Result `protobuf_oneof:"result"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobProgress) Reset() {
@@ -2752,30 +2803,36 @@ func (x *JobProgress) GetTimestamp() *timestamppb.Timestamp {
return nil
}
-func (m *JobProgress) GetResult() isJobProgress_Result {
- if m != nil {
- return m.Result
+func (x *JobProgress) GetResult() isJobProgress_Result {
+ if x != nil {
+ return x.Result
}
return nil
}
func (x *JobProgress) GetMysqlBackup() *JobProgress_MySQLBackup {
- if x, ok := x.GetResult().(*JobProgress_MysqlBackup); ok {
- return x.MysqlBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobProgress_MysqlBackup); ok {
+ return x.MysqlBackup
+ }
}
return nil
}
func (x *JobProgress) GetMysqlRestoreBackup() *JobProgress_MySQLRestoreBackup {
- if x, ok := x.GetResult().(*JobProgress_MysqlRestoreBackup); ok {
- return x.MysqlRestoreBackup
+ if x != nil {
+ if x, ok := x.Result.(*JobProgress_MysqlRestoreBackup); ok {
+ return x.MysqlRestoreBackup
+ }
}
return nil
}
func (x *JobProgress) GetLogs() *JobProgress_Logs {
- if x, ok := x.GetResult().(*JobProgress_Logs_); ok {
- return x.Logs
+ if x != nil {
+ if x, ok := x.Result.(*JobProgress_Logs_); ok {
+ return x.Logs
+ }
}
return nil
}
@@ -2804,11 +2861,10 @@ func (*JobProgress_Logs_) isJobProgress_Result() {}
// GetVersionsRequest represents a request for version retrieving of different software.
type GetVersionsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Softwares []*GetVersionsRequest_Software `protobuf:"bytes,1,rep,name=softwares,proto3" json:"softwares,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Softwares []*GetVersionsRequest_Software `protobuf:"bytes,1,rep,name=softwares,proto3" json:"softwares,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest) Reset() {
@@ -2850,11 +2906,10 @@ func (x *GetVersionsRequest) GetSoftwares() []*GetVersionsRequest_Software {
// GetVersionsResponse represents a response containing versions of software in the same order as in the request.
type GetVersionsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Versions []*GetVersionsResponse_Version `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Versions []*GetVersionsResponse_Version `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsResponse) Reset() {
@@ -2895,18 +2950,15 @@ func (x *GetVersionsResponse) GetVersions() []*GetVersionsResponse_Version {
}
type AgentMessage struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
// The responder sets the status field in two situations:
// 1. When it received a request with the payload field not set.
// That means that responded is older than the requester, and doesn't know about newer payload types.
// Status code UNIMPLEMENTED (12) is reserved for that case.
// 2. When the payload is set, but the request can't be performed due to some error.
Status *status.Status `protobuf:"bytes,2047,opt,name=status,proto3" json:"status,omitempty"`
- // Types that are assignable to Payload:
+ // Types that are valid to be assigned to Payload:
//
// *AgentMessage_Ping
// *AgentMessage_StateChanged
@@ -2926,7 +2978,9 @@ type AgentMessage struct {
// *AgentMessage_PbmSwitchPitr
// *AgentMessage_AgentLogs
// *AgentMessage_ServiceInfo
- Payload isAgentMessage_Payload `protobuf_oneof:"payload"`
+ Payload isAgentMessage_Payload `protobuf_oneof:"payload"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AgentMessage) Reset() {
@@ -2973,135 +3027,171 @@ func (x *AgentMessage) GetStatus() *status.Status {
return nil
}
-func (m *AgentMessage) GetPayload() isAgentMessage_Payload {
- if m != nil {
- return m.Payload
+func (x *AgentMessage) GetPayload() isAgentMessage_Payload {
+ if x != nil {
+ return x.Payload
}
return nil
}
func (x *AgentMessage) GetPing() *Ping {
- if x, ok := x.GetPayload().(*AgentMessage_Ping); ok {
- return x.Ping
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_Ping); ok {
+ return x.Ping
+ }
}
return nil
}
func (x *AgentMessage) GetStateChanged() *StateChangedRequest {
- if x, ok := x.GetPayload().(*AgentMessage_StateChanged); ok {
- return x.StateChanged
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_StateChanged); ok {
+ return x.StateChanged
+ }
}
return nil
}
func (x *AgentMessage) GetQanCollect() *QANCollectRequest {
- if x, ok := x.GetPayload().(*AgentMessage_QanCollect); ok {
- return x.QanCollect
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_QanCollect); ok {
+ return x.QanCollect
+ }
}
return nil
}
func (x *AgentMessage) GetActionResult() *ActionResultRequest {
- if x, ok := x.GetPayload().(*AgentMessage_ActionResult); ok {
- return x.ActionResult
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_ActionResult); ok {
+ return x.ActionResult
+ }
}
return nil
}
func (x *AgentMessage) GetJobResult() *JobResult {
- if x, ok := x.GetPayload().(*AgentMessage_JobResult); ok {
- return x.JobResult
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_JobResult); ok {
+ return x.JobResult
+ }
}
return nil
}
func (x *AgentMessage) GetJobProgress() *JobProgress {
- if x, ok := x.GetPayload().(*AgentMessage_JobProgress); ok {
- return x.JobProgress
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_JobProgress); ok {
+ return x.JobProgress
+ }
}
return nil
}
func (x *AgentMessage) GetPong() *Pong {
- if x, ok := x.GetPayload().(*AgentMessage_Pong); ok {
- return x.Pong
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_Pong); ok {
+ return x.Pong
+ }
}
return nil
}
func (x *AgentMessage) GetSetState() *SetStateResponse {
- if x, ok := x.GetPayload().(*AgentMessage_SetState); ok {
- return x.SetState
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_SetState); ok {
+ return x.SetState
+ }
}
return nil
}
func (x *AgentMessage) GetStartAction() *StartActionResponse {
- if x, ok := x.GetPayload().(*AgentMessage_StartAction); ok {
- return x.StartAction
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_StartAction); ok {
+ return x.StartAction
+ }
}
return nil
}
func (x *AgentMessage) GetStopAction() *StopActionResponse {
- if x, ok := x.GetPayload().(*AgentMessage_StopAction); ok {
- return x.StopAction
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_StopAction); ok {
+ return x.StopAction
+ }
}
return nil
}
func (x *AgentMessage) GetCheckConnection() *CheckConnectionResponse {
- if x, ok := x.GetPayload().(*AgentMessage_CheckConnection); ok {
- return x.CheckConnection
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_CheckConnection); ok {
+ return x.CheckConnection
+ }
}
return nil
}
func (x *AgentMessage) GetStartJob() *StartJobResponse {
- if x, ok := x.GetPayload().(*AgentMessage_StartJob); ok {
- return x.StartJob
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_StartJob); ok {
+ return x.StartJob
+ }
}
return nil
}
func (x *AgentMessage) GetStopJob() *StopJobResponse {
- if x, ok := x.GetPayload().(*AgentMessage_StopJob); ok {
- return x.StopJob
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_StopJob); ok {
+ return x.StopJob
+ }
}
return nil
}
func (x *AgentMessage) GetJobStatus() *JobStatusResponse {
- if x, ok := x.GetPayload().(*AgentMessage_JobStatus); ok {
- return x.JobStatus
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_JobStatus); ok {
+ return x.JobStatus
+ }
}
return nil
}
func (x *AgentMessage) GetGetVersions() *GetVersionsResponse {
- if x, ok := x.GetPayload().(*AgentMessage_GetVersions); ok {
- return x.GetVersions
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_GetVersions); ok {
+ return x.GetVersions
+ }
}
return nil
}
func (x *AgentMessage) GetPbmSwitchPitr() *PBMSwitchPITRResponse {
- if x, ok := x.GetPayload().(*AgentMessage_PbmSwitchPitr); ok {
- return x.PbmSwitchPitr
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_PbmSwitchPitr); ok {
+ return x.PbmSwitchPitr
+ }
}
return nil
}
func (x *AgentMessage) GetAgentLogs() *AgentLogsResponse {
- if x, ok := x.GetPayload().(*AgentMessage_AgentLogs); ok {
- return x.AgentLogs
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_AgentLogs); ok {
+ return x.AgentLogs
+ }
}
return nil
}
func (x *AgentMessage) GetServiceInfo() *ServiceInfoResponse {
- if x, ok := x.GetPayload().(*AgentMessage_ServiceInfo); ok {
- return x.ServiceInfo
+ if x != nil {
+ if x, ok := x.Payload.(*AgentMessage_ServiceInfo); ok {
+ return x.ServiceInfo
+ }
}
return nil
}
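AgentMessage is the envelope the agent sends over the stream: an Id, an optional Status, and exactly one Payload variant. A sketch of a dispatcher that switches on GetPayload(); this is illustrative only, not the actual pmm-managed routing:

```go
package main

import (
	"fmt"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

// handle is an illustrative dispatcher over the AgentMessage payload oneof.
func handle(msg *agentv1.AgentMessage) {
	switch p := msg.GetPayload().(type) {
	case *agentv1.AgentMessage_Ping:
		fmt.Println("ping received, id =", msg.GetId())
	case *agentv1.AgentMessage_ActionResult:
		fmt.Println("action result:", p.ActionResult.GetActionId(), "done =", p.ActionResult.GetDone())
	case *agentv1.AgentMessage_JobProgress:
		fmt.Println("job progress for", p.JobProgress.GetJobId())
	case nil:
		// Either the message is nil or the payload is unset; the sender may
		// also have filled Status (e.g. UNIMPLEMENTED for unknown payloads).
		fmt.Println("no payload; status =", msg.GetStatus())
	default:
		fmt.Printf("unhandled payload %T\n", p)
	}
}

func main() {
	handle(&agentv1.AgentMessage{Id: 1, Payload: &agentv1.AgentMessage_Ping{Ping: &agentv1.Ping{}}})
}
```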
@@ -3221,18 +3311,15 @@ func (*AgentMessage_AgentLogs) isAgentMessage_Payload() {}
func (*AgentMessage_ServiceInfo) isAgentMessage_Payload() {}
type ServerMessage struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
// The responder sets the status field in two situations:
// 1. When it received a request with the payload field not set.
// That means that responded is older than the requester, and doesn't know about newer payload types.
// Status code UNIMPLEMENTED (12) is reserved for that case.
// 2. When the payload is set, but the request can't be performed due to some error.
Status *status.Status `protobuf:"bytes,2047,opt,name=status,proto3" json:"status,omitempty"`
- // Types that are assignable to Payload:
+ // Types that are valid to be assigned to Payload:
//
// *ServerMessage_Pong
// *ServerMessage_StateChanged
@@ -3250,7 +3337,9 @@ type ServerMessage struct {
// *ServerMessage_PbmSwitchPitr
// *ServerMessage_AgentLogs
// *ServerMessage_ServiceInfo
- Payload isServerMessage_Payload `protobuf_oneof:"payload"`
+ Payload isServerMessage_Payload `protobuf_oneof:"payload"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerMessage) Reset() {
@@ -3297,121 +3386,153 @@ func (x *ServerMessage) GetStatus() *status.Status {
return nil
}
-func (m *ServerMessage) GetPayload() isServerMessage_Payload {
- if m != nil {
- return m.Payload
+func (x *ServerMessage) GetPayload() isServerMessage_Payload {
+ if x != nil {
+ return x.Payload
}
return nil
}
func (x *ServerMessage) GetPong() *Pong {
- if x, ok := x.GetPayload().(*ServerMessage_Pong); ok {
- return x.Pong
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_Pong); ok {
+ return x.Pong
+ }
}
return nil
}
func (x *ServerMessage) GetStateChanged() *StateChangedResponse {
- if x, ok := x.GetPayload().(*ServerMessage_StateChanged); ok {
- return x.StateChanged
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_StateChanged); ok {
+ return x.StateChanged
+ }
}
return nil
}
func (x *ServerMessage) GetQanCollect() *QANCollectResponse {
- if x, ok := x.GetPayload().(*ServerMessage_QanCollect); ok {
- return x.QanCollect
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_QanCollect); ok {
+ return x.QanCollect
+ }
}
return nil
}
func (x *ServerMessage) GetActionResult() *ActionResultResponse {
- if x, ok := x.GetPayload().(*ServerMessage_ActionResult); ok {
- return x.ActionResult
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_ActionResult); ok {
+ return x.ActionResult
+ }
}
return nil
}
func (x *ServerMessage) GetPing() *Ping {
- if x, ok := x.GetPayload().(*ServerMessage_Ping); ok {
- return x.Ping
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_Ping); ok {
+ return x.Ping
+ }
}
return nil
}
func (x *ServerMessage) GetSetState() *SetStateRequest {
- if x, ok := x.GetPayload().(*ServerMessage_SetState); ok {
- return x.SetState
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_SetState); ok {
+ return x.SetState
+ }
}
return nil
}
func (x *ServerMessage) GetStartAction() *StartActionRequest {
- if x, ok := x.GetPayload().(*ServerMessage_StartAction); ok {
- return x.StartAction
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_StartAction); ok {
+ return x.StartAction
+ }
}
return nil
}
func (x *ServerMessage) GetStopAction() *StopActionRequest {
- if x, ok := x.GetPayload().(*ServerMessage_StopAction); ok {
- return x.StopAction
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_StopAction); ok {
+ return x.StopAction
+ }
}
return nil
}
func (x *ServerMessage) GetCheckConnection() *CheckConnectionRequest {
- if x, ok := x.GetPayload().(*ServerMessage_CheckConnection); ok {
- return x.CheckConnection
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_CheckConnection); ok {
+ return x.CheckConnection
+ }
}
return nil
}
func (x *ServerMessage) GetStartJob() *StartJobRequest {
- if x, ok := x.GetPayload().(*ServerMessage_StartJob); ok {
- return x.StartJob
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_StartJob); ok {
+ return x.StartJob
+ }
}
return nil
}
func (x *ServerMessage) GetStopJob() *StopJobRequest {
- if x, ok := x.GetPayload().(*ServerMessage_StopJob); ok {
- return x.StopJob
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_StopJob); ok {
+ return x.StopJob
+ }
}
return nil
}
func (x *ServerMessage) GetJobStatus() *JobStatusRequest {
- if x, ok := x.GetPayload().(*ServerMessage_JobStatus); ok {
- return x.JobStatus
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_JobStatus); ok {
+ return x.JobStatus
+ }
}
return nil
}
func (x *ServerMessage) GetGetVersions() *GetVersionsRequest {
- if x, ok := x.GetPayload().(*ServerMessage_GetVersions); ok {
- return x.GetVersions
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_GetVersions); ok {
+ return x.GetVersions
+ }
}
return nil
}
func (x *ServerMessage) GetPbmSwitchPitr() *PBMSwitchPITRRequest {
- if x, ok := x.GetPayload().(*ServerMessage_PbmSwitchPitr); ok {
- return x.PbmSwitchPitr
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_PbmSwitchPitr); ok {
+ return x.PbmSwitchPitr
+ }
}
return nil
}
func (x *ServerMessage) GetAgentLogs() *AgentLogsRequest {
- if x, ok := x.GetPayload().(*ServerMessage_AgentLogs); ok {
- return x.AgentLogs
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_AgentLogs); ok {
+ return x.AgentLogs
+ }
}
return nil
}
func (x *ServerMessage) GetServiceInfo() *ServiceInfoRequest {
- if x, ok := x.GetPayload().(*ServerMessage_ServiceInfo); ok {
- return x.ServiceInfo
+ if x != nil {
+ if x, ok := x.Payload.(*ServerMessage_ServiceInfo); ok {
+ return x.ServiceInfo
+ }
}
return nil
}
@@ -3520,17 +3641,16 @@ func (*ServerMessage_ServiceInfo) isServerMessage_Payload() {}
// AgentProcess describes desired configuration of a single agent process started by pmm-agent.
type SetStateRequest_AgentProcess struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Type v1.AgentType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.AgentType" json:"type,omitempty"`
- TemplateLeftDelim string `protobuf:"bytes,2,opt,name=template_left_delim,json=templateLeftDelim,proto3" json:"template_left_delim,omitempty"`
- TemplateRightDelim string `protobuf:"bytes,3,opt,name=template_right_delim,json=templateRightDelim,proto3" json:"template_right_delim,omitempty"`
- Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args,omitempty"`
- Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"`
- TextFiles map[string]string `protobuf:"bytes,6,rep,name=text_files,json=textFiles,proto3" json:"text_files,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- RedactWords []string `protobuf:"bytes,7,rep,name=redact_words,json=redactWords,proto3" json:"redact_words,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type v1.AgentType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.AgentType" json:"type,omitempty"`
+ TemplateLeftDelim string `protobuf:"bytes,2,opt,name=template_left_delim,json=templateLeftDelim,proto3" json:"template_left_delim,omitempty"`
+ TemplateRightDelim string `protobuf:"bytes,3,opt,name=template_right_delim,json=templateRightDelim,proto3" json:"template_right_delim,omitempty"`
+ Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args,omitempty"`
+ Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"`
+ TextFiles map[string]string `protobuf:"bytes,6,rep,name=text_files,json=textFiles,proto3" json:"text_files,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ RedactWords []string `protobuf:"bytes,7,rep,name=redact_words,json=redactWords,proto3" json:"redact_words,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SetStateRequest_AgentProcess) Reset() {
@@ -3614,12 +3734,9 @@ func (x *SetStateRequest_AgentProcess) GetRedactWords() []string {
// BuiltinAgent describes desired configuration of a single built-in agent for pmm-agent.
type SetStateRequest_BuiltinAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Type v1.AgentType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.AgentType" json:"type,omitempty"`
- Dsn string `protobuf:"bytes,2,opt,name=dsn,proto3" json:"dsn,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type v1.AgentType `protobuf:"varint,1,opt,name=type,proto3,enum=inventory.v1.AgentType" json:"type,omitempty"`
+ Dsn string `protobuf:"bytes,2,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Limit query length in QAN (default: server-defined; -1: no limit).
MaxQueryLength int32 `protobuf:"varint,3,opt,name=max_query_length,json=maxQueryLength,proto3" json:"max_query_length,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
@@ -3634,6 +3751,8 @@ type SetStateRequest_BuiltinAgent struct {
Tls bool `protobuf:"varint,8,opt,name=tls,proto3" json:"tls,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,9,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
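SetStateRequest ties the two nested types together: AgentProcesses and BuiltinAgents are maps keyed by agent ID. A sketch of constructing one; the IDs, DSN, and exporter flags are invented for illustration, and Type is left at its zero value to avoid guessing enum names:

```go
package main

import (
	"fmt"

	agentv1 "github.com/percona/pmm/api/agent/v1" // assumed location of agent.pb.go
)

func main() {
	// Keys are agent IDs; all concrete values below are made up for illustration.
	req := &agentv1.SetStateRequest{
		AgentProcesses: map[string]*agentv1.SetStateRequest_AgentProcess{
			"/agent_id/exporter-1": {
				Args:        []string{"--web.listen-address=:42000"},
				Env:         []string{"EXAMPLE_ENV=1"},
				TextFiles:   map[string]string{"ca.pem": "-----BEGIN CERTIFICATE-----..."},
				RedactWords: []string{"secret"},
			},
		},
		BuiltinAgents: map[string]*agentv1.SetStateRequest_BuiltinAgent{
			"/agent_id/qan-1": {
				Dsn:            "postgres://user:secret@127.0.0.1:5432/postgres",
				MaxQueryLength: 2048,
				TlsSkipVerify:  true,
			},
		},
	}

	fmt.Println(len(req.GetAgentProcesses()), len(req.GetBuiltinAgents()))
}
```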
func (x *SetStateRequest_BuiltinAgent) Reset() {
@@ -3731,10 +3850,7 @@ func (x *SetStateRequest_BuiltinAgent) GetTlsSkipVerify() bool {
// MySQLExplainParams describes MySQL EXPLAIN action parameters.
type StartActionRequest_MySQLExplainParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
@@ -3745,6 +3861,8 @@ type StartActionRequest_MySQLExplainParams struct {
TlsFiles *TextFiles `protobuf:"bytes,6,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,7,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLExplainParams) Reset() {
@@ -3828,10 +3946,7 @@ func (x *StartActionRequest_MySQLExplainParams) GetTlsSkipVerify() bool {
// MySQLShowCreateTableParams describes MySQL SHOW CREATE TABLE action parameters.
type StartActionRequest_MySQLShowCreateTableParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
@@ -3839,6 +3954,8 @@ type StartActionRequest_MySQLShowCreateTableParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLShowCreateTableParams) Reset() {
@@ -3901,10 +4018,7 @@ func (x *StartActionRequest_MySQLShowCreateTableParams) GetTlsSkipVerify() bool
// MySQLShowTableStatusParams describes MySQL SHOW TABLE STATUS action parameters.
type StartActionRequest_MySQLShowTableStatusParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
@@ -3912,6 +4026,8 @@ type StartActionRequest_MySQLShowTableStatusParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate wont be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLShowTableStatusParams) Reset() {
@@ -3974,10 +4090,7 @@ func (x *StartActionRequest_MySQLShowTableStatusParams) GetTlsSkipVerify() bool
// MySQLShowIndexParams describes MySQL SHOW INDEX action parameters.
type StartActionRequest_MySQLShowIndexParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
@@ -3985,6 +4098,8 @@ type StartActionRequest_MySQLShowIndexParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLShowIndexParams) Reset() {
@@ -4047,10 +4162,7 @@ func (x *StartActionRequest_MySQLShowIndexParams) GetTlsSkipVerify() bool {
// PostgreSQLShowCreateTableParams describes PostgreSQL SHOW CREATE TABLE action parameters.
type StartActionRequest_PostgreSQLShowCreateTableParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
@@ -4058,6 +4170,8 @@ type StartActionRequest_PostgreSQLShowCreateTableParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PostgreSQLShowCreateTableParams) Reset() {
@@ -4120,10 +4234,7 @@ func (x *StartActionRequest_PostgreSQLShowCreateTableParams) GetTlsSkipVerify()
// PostgreSQLShowIndexParams describes PostgreSQL SHOW INDEX action parameters.
type StartActionRequest_PostgreSQLShowIndexParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Table string `protobuf:"bytes,2,opt,name=table,proto3" json:"table,omitempty"`
@@ -4131,6 +4242,8 @@ type StartActionRequest_PostgreSQLShowIndexParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PostgreSQLShowIndexParams) Reset() {
@@ -4193,16 +4306,15 @@ func (x *StartActionRequest_PostgreSQLShowIndexParams) GetTlsSkipVerify() bool {
// MongoDBExplainParams describes MongoDB EXPLAIN action parameters.
type StartActionRequest_MongoDBExplainParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,3,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,3,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBExplainParams) Reset() {
@@ -4258,9 +4370,9 @@ func (x *StartActionRequest_MongoDBExplainParams) GetTextFiles() *TextFiles {
// PTSummaryParams describes parameters for PT summary.
type StartActionRequest_PTSummaryParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PTSummaryParams) Reset() {
@@ -4295,14 +4407,13 @@ func (*StartActionRequest_PTSummaryParams) Descriptor() ([]byte, []int) {
// PTPgSummaryParams describes parameters for PT PG summary.
type StartActionRequest_PTPgSummaryParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+ Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
+ Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
- Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
- Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
- Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PTPgSummaryParams) Reset() {
@@ -4365,14 +4476,13 @@ func (x *StartActionRequest_PTPgSummaryParams) GetPassword() string {
// PTMongoDBSummaryParams describes parameters for PT MongoDB summary.
type StartActionRequest_PTMongoDBSummaryParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+ Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
+ Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
- Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
- Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
- Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PTMongoDBSummaryParams) Reset() {
@@ -4435,15 +4545,14 @@ func (x *StartActionRequest_PTMongoDBSummaryParams) GetPassword() string {
// PTMySQLSummaryParams describes parameters for PT MySQL summary.
type StartActionRequest_PTMySQLSummaryParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+ Socket string `protobuf:"bytes,3,opt,name=socket,proto3" json:"socket,omitempty"`
+ Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"`
+ Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
- Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
- Socket string `protobuf:"bytes,3,opt,name=socket,proto3" json:"socket,omitempty"`
- Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"`
- Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PTMySQLSummaryParams) Reset() {
@@ -4513,10 +4622,7 @@ func (x *StartActionRequest_PTMySQLSummaryParams) GetPassword() string {
// MySQLQueryShowParams describes MySQL SHOW query action parameters.
type StartActionRequest_MySQLQueryShowParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Query suffix (without leading SHOW).
@@ -4525,6 +4631,8 @@ type StartActionRequest_MySQLQueryShowParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLQueryShowParams) Reset() {
@@ -4587,10 +4695,7 @@ func (x *StartActionRequest_MySQLQueryShowParams) GetTlsSkipVerify() bool {
// MySQLQuerySelectParams describes MySQL SELECT query action parameters.
type StartActionRequest_MySQLQuerySelectParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Query suffix (without leading SELECT).
@@ -4599,6 +4704,8 @@ type StartActionRequest_MySQLQuerySelectParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MySQLQuerySelectParams) Reset() {
@@ -4661,16 +4768,15 @@ func (x *StartActionRequest_MySQLQuerySelectParams) GetTlsSkipVerify() bool {
// PostgreSQLQueryShowParams describes PostgreSQL SHOW query action parameters.
type StartActionRequest_PostgreSQLQueryShowParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
TlsFiles *TextFiles `protobuf:"bytes,2,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,3,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PostgreSQLQueryShowParams) Reset() {
@@ -4726,10 +4832,7 @@ func (x *StartActionRequest_PostgreSQLQueryShowParams) GetTlsSkipVerify() bool {
// PostgreSQLQuerySelectParams describes PostgreSQL SELECT query action parameters.
type StartActionRequest_PostgreSQLQuerySelectParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Query suffix (without leading SELECT).
@@ -4738,6 +4841,8 @@ type StartActionRequest_PostgreSQLQuerySelectParams struct {
TlsFiles *TextFiles `protobuf:"bytes,3,opt,name=tls_files,json=tlsFiles,proto3" json:"tls_files,omitempty"`
// TLS certificate won't be verified.
TlsSkipVerify bool `protobuf:"varint,4,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_PostgreSQLQuerySelectParams) Reset() {
@@ -4800,15 +4905,14 @@ func (x *StartActionRequest_PostgreSQLQuerySelectParams) GetTlsSkipVerify() bool
// MongoDBQueryGetParameterParams describes MongoDB getParameter query action parameters.
type StartActionRequest_MongoDBQueryGetParameterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBQueryGetParameterParams) Reset() {
@@ -4857,15 +4961,14 @@ func (x *StartActionRequest_MongoDBQueryGetParameterParams) GetTextFiles() *Text
// MongoDBQueryBuildInfoParams describes MongoDB buildInfo query action parameters.
type StartActionRequest_MongoDBQueryBuildInfoParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBQueryBuildInfoParams) Reset() {
@@ -4914,15 +5017,14 @@ func (x *StartActionRequest_MongoDBQueryBuildInfoParams) GetTextFiles() *TextFil
// MongoDBQueryGetCmdLineOptsParams describes MongoDB getCmdLineOpts query action parameters.
type StartActionRequest_MongoDBQueryGetCmdLineOptsParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBQueryGetCmdLineOptsParams) Reset() {
@@ -4971,15 +5073,14 @@ func (x *StartActionRequest_MongoDBQueryGetCmdLineOptsParams) GetTextFiles() *Te
// MongoDBQueryReplSetGetStatusParams describes MongoDB replSetGetStatus query action parameters.
type StartActionRequest_MongoDBQueryReplSetGetStatusParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBQueryReplSetGetStatusParams) Reset() {
@@ -5028,15 +5129,14 @@ func (x *StartActionRequest_MongoDBQueryReplSetGetStatusParams) GetTextFiles() *
// MongoDBQueryGetDiagnosticDataParams describes MongoDB getDiagnosticData query action parameters.
type StartActionRequest_MongoDBQueryGetDiagnosticDataParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
// Contains files and their contents which can be used in DSN.
- TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ TextFiles *TextFiles `protobuf:"bytes,2,opt,name=text_files,json=textFiles,proto3" json:"text_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_MongoDBQueryGetDiagnosticDataParams) Reset() {
@@ -5085,11 +5185,10 @@ func (x *StartActionRequest_MongoDBQueryGetDiagnosticDataParams) GetTextFiles()
// RestartSystemServiceParams describes an action request to restart a systemctl service on a node.
type StartActionRequest_RestartSystemServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
SystemService StartActionRequest_RestartSystemServiceParams_SystemService `protobuf:"varint,1,opt,name=system_service,json=systemService,proto3,enum=agent.v1.StartActionRequest_RestartSystemServiceParams_SystemService" json:"system_service,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartActionRequest_RestartSystemServiceParams) Reset() {
@@ -5131,12 +5230,11 @@ func (x *StartActionRequest_RestartSystemServiceParams) GetSystemService() Start
// Stats contains various Service statistics.
type CheckConnectionResponse_Stats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A number of tables, 0 if unknown.
- TableCount int32 `protobuf:"varint,1,opt,name=table_count,json=tableCount,proto3" json:"table_count,omitempty"`
+ TableCount int32 `protobuf:"varint,1,opt,name=table_count,json=tableCount,proto3" json:"table_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckConnectionResponse_Stats) Reset() {
@@ -5178,10 +5276,7 @@ func (x *CheckConnectionResponse_Stats) GetTableCount() int32 {
// MySQLBackup is a job for backing up a MySQL service.
type StartJobRequest_MySQLBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Database user.
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
// Database password.
@@ -5198,10 +5293,12 @@ type StartJobRequest_MySQLBackup struct {
Folder string `protobuf:"bytes,7,opt,name=folder,proto3" json:"folder,omitempty"`
// Backup target location.
//
- // Types that are assignable to LocationConfig:
+ // Types that are valid to be assigned to LocationConfig:
//
// *StartJobRequest_MySQLBackup_S3Config
LocationConfig isStartJobRequest_MySQLBackup_LocationConfig `protobuf_oneof:"location_config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
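The oneof comment above now reads "Types that are valid to be assigned to LocationConfig". A short sketch of assigning and reading that oneof, using the same agentv1 alias as the earlier sketch (values are placeholders):

// Select the S3 location for the backup job via the oneof wrapper type.
req := &agentv1.StartJobRequest_MySQLBackup{
	User:   "backup",
	Folder: "nightly",
	LocationConfig: &agentv1.StartJobRequest_MySQLBackup_S3Config{
		S3Config: &agentv1.S3LocationConfig{}, // S3 fields omitted for brevity
	},
}
_ = req.GetS3Config() // non-nil only when the S3 variant is set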
func (x *StartJobRequest_MySQLBackup) Reset() {
@@ -5283,16 +5380,18 @@ func (x *StartJobRequest_MySQLBackup) GetFolder() string {
return ""
}
-func (m *StartJobRequest_MySQLBackup) GetLocationConfig() isStartJobRequest_MySQLBackup_LocationConfig {
- if m != nil {
- return m.LocationConfig
+func (x *StartJobRequest_MySQLBackup) GetLocationConfig() isStartJobRequest_MySQLBackup_LocationConfig {
+ if x != nil {
+ return x.LocationConfig
}
return nil
}
func (x *StartJobRequest_MySQLBackup) GetS3Config() *S3LocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MySQLBackup_S3Config); ok {
- return x.S3Config
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MySQLBackup_S3Config); ok {
+ return x.S3Config
+ }
}
return nil
}
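The rewritten getter above checks the receiver and reads LocationConfig directly instead of going through GetLocationConfig; caller-visible behaviour is unchanged and it remains safe on a nil receiver, e.g. (same agentv1 alias as above):

var b *agentv1.StartJobRequest_MySQLBackup
_ = b.GetS3Config() // returns nil, no panic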
@@ -5309,10 +5408,7 @@ func (*StartJobRequest_MySQLBackup_S3Config) isStartJobRequest_MySQLBackup_Locat
// MySQLRestoreBackup is a job for restoring a MySQL service from a backup.
type StartJobRequest_MySQLRestoreBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service identifier where the backup should be restored.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Backup name.
@@ -5321,10 +5417,12 @@ type StartJobRequest_MySQLRestoreBackup struct {
Folder string `protobuf:"bytes,3,opt,name=folder,proto3" json:"folder,omitempty"`
// Where backup is stored.
//
- // Types that are assignable to LocationConfig:
+ // Types that are valid to be assigned to LocationConfig:
//
// *StartJobRequest_MySQLRestoreBackup_S3Config
LocationConfig isStartJobRequest_MySQLRestoreBackup_LocationConfig `protobuf_oneof:"location_config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartJobRequest_MySQLRestoreBackup) Reset() {
@@ -5378,16 +5476,18 @@ func (x *StartJobRequest_MySQLRestoreBackup) GetFolder() string {
return ""
}
-func (m *StartJobRequest_MySQLRestoreBackup) GetLocationConfig() isStartJobRequest_MySQLRestoreBackup_LocationConfig {
- if m != nil {
- return m.LocationConfig
+func (x *StartJobRequest_MySQLRestoreBackup) GetLocationConfig() isStartJobRequest_MySQLRestoreBackup_LocationConfig {
+ if x != nil {
+ return x.LocationConfig
}
return nil
}
func (x *StartJobRequest_MySQLRestoreBackup) GetS3Config() *S3LocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MySQLRestoreBackup_S3Config); ok {
- return x.S3Config
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MySQLRestoreBackup_S3Config); ok {
+ return x.S3Config
+ }
}
return nil
}
@@ -5405,10 +5505,7 @@ func (*StartJobRequest_MySQLRestoreBackup_S3Config) isStartJobRequest_MySQLResto
// MongoDBBackup is a job for backing up a MongoDB service.
type StartJobRequest_MongoDBBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the MongoDB service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
@@ -5424,11 +5521,13 @@ type StartJobRequest_MongoDBBackup struct {
DataModel v11.DataModel `protobuf:"varint,6,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"`
// Backup target location.
//
- // Types that are assignable to LocationConfig:
+ // Types that are valid to be assigned to LocationConfig:
//
// *StartJobRequest_MongoDBBackup_S3Config
// *StartJobRequest_MongoDBBackup_FilesystemConfig
LocationConfig isStartJobRequest_MongoDBBackup_LocationConfig `protobuf_oneof:"location_config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartJobRequest_MongoDBBackup) Reset() {
@@ -5503,23 +5602,27 @@ func (x *StartJobRequest_MongoDBBackup) GetDataModel() v11.DataModel {
return v11.DataModel(0)
}
-func (m *StartJobRequest_MongoDBBackup) GetLocationConfig() isStartJobRequest_MongoDBBackup_LocationConfig {
- if m != nil {
- return m.LocationConfig
+func (x *StartJobRequest_MongoDBBackup) GetLocationConfig() isStartJobRequest_MongoDBBackup_LocationConfig {
+ if x != nil {
+ return x.LocationConfig
}
return nil
}
func (x *StartJobRequest_MongoDBBackup) GetS3Config() *S3LocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MongoDBBackup_S3Config); ok {
- return x.S3Config
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MongoDBBackup_S3Config); ok {
+ return x.S3Config
+ }
}
return nil
}
func (x *StartJobRequest_MongoDBBackup) GetFilesystemConfig() *FilesystemLocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MongoDBBackup_FilesystemConfig); ok {
- return x.FilesystemConfig
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MongoDBBackup_FilesystemConfig); ok {
+ return x.FilesystemConfig
+ }
}
return nil
}
@@ -5543,10 +5646,7 @@ func (*StartJobRequest_MongoDBBackup_FilesystemConfig) isStartJobRequest_MongoDB
// MongoDBRestoreBackup is a job for restoring a MongoDB service from a backup.
type StartJobRequest_MongoDBRestoreBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// DSN for the MongoDB service. May contain connection (dial) timeout.
// May contain placeholders for file paths in DSN.
Dsn string `protobuf:"bytes,1,opt,name=dsn,proto3" json:"dsn,omitempty"`
@@ -5562,11 +5662,13 @@ type StartJobRequest_MongoDBRestoreBackup struct {
PitrTimestamp *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=pitr_timestamp,json=pitrTimestamp,proto3" json:"pitr_timestamp,omitempty"`
// Where backup is stored.
//
- // Types that are assignable to LocationConfig:
+ // Types that are valid to be assigned to LocationConfig:
//
// *StartJobRequest_MongoDBRestoreBackup_S3Config
// *StartJobRequest_MongoDBRestoreBackup_FilesystemConfig
LocationConfig isStartJobRequest_MongoDBRestoreBackup_LocationConfig `protobuf_oneof:"location_config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartJobRequest_MongoDBRestoreBackup) Reset() {
@@ -5641,23 +5743,27 @@ func (x *StartJobRequest_MongoDBRestoreBackup) GetPitrTimestamp() *timestamppb.T
return nil
}
-func (m *StartJobRequest_MongoDBRestoreBackup) GetLocationConfig() isStartJobRequest_MongoDBRestoreBackup_LocationConfig {
- if m != nil {
- return m.LocationConfig
+func (x *StartJobRequest_MongoDBRestoreBackup) GetLocationConfig() isStartJobRequest_MongoDBRestoreBackup_LocationConfig {
+ if x != nil {
+ return x.LocationConfig
}
return nil
}
func (x *StartJobRequest_MongoDBRestoreBackup) GetS3Config() *S3LocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MongoDBRestoreBackup_S3Config); ok {
- return x.S3Config
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MongoDBRestoreBackup_S3Config); ok {
+ return x.S3Config
+ }
}
return nil
}
func (x *StartJobRequest_MongoDBRestoreBackup) GetFilesystemConfig() *FilesystemLocationConfig {
- if x, ok := x.GetLocationConfig().(*StartJobRequest_MongoDBRestoreBackup_FilesystemConfig); ok {
- return x.FilesystemConfig
+ if x != nil {
+ if x, ok := x.LocationConfig.(*StartJobRequest_MongoDBRestoreBackup_FilesystemConfig); ok {
+ return x.FilesystemConfig
+ }
}
return nil
}
@@ -5682,11 +5788,10 @@ func (*StartJobRequest_MongoDBRestoreBackup_FilesystemConfig) isStartJobRequest_
// Error contains job error message.
type JobResult_Error struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult_Error) Reset() {
@@ -5728,13 +5833,12 @@ func (x *JobResult_Error) GetMessage() string {
// MongoDBBackup contains result for MongoDB backup job.
type JobResult_MongoDBBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- IsShardedCluster bool `protobuf:"varint,1,opt,name=is_sharded_cluster,json=isShardedCluster,proto3" json:"is_sharded_cluster,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ IsShardedCluster bool `protobuf:"varint,1,opt,name=is_sharded_cluster,json=isShardedCluster,proto3" json:"is_sharded_cluster,omitempty"`
// Contains additional data describing artifact.
- Metadata *v11.Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ Metadata *v11.Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult_MongoDBBackup) Reset() {
@@ -5783,12 +5887,11 @@ func (x *JobResult_MongoDBBackup) GetMetadata() *v11.Metadata {
// MySQLBackup contains result for MySQL backup job.
type JobResult_MySQLBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Contains additional data describing artifact.
- Metadata *v11.Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ Metadata *v11.Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult_MySQLBackup) Reset() {
@@ -5830,9 +5933,9 @@ func (x *JobResult_MySQLBackup) GetMetadata() *v11.Metadata {
// MySQLRestoreBackup contains result for MySQL restore backup job.
type JobResult_MySQLRestoreBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult_MySQLRestoreBackup) Reset() {
@@ -5867,9 +5970,9 @@ func (*JobResult_MySQLRestoreBackup) Descriptor() ([]byte, []int) {
// MongoDBRestoreBackup contains result for MongoDB restore backup job.
type JobResult_MongoDBRestoreBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobResult_MongoDBRestoreBackup) Reset() {
@@ -5904,9 +6007,9 @@ func (*JobResult_MongoDBRestoreBackup) Descriptor() ([]byte, []int) {
// MySQLBackup contains backup job status update.
type JobProgress_MySQLBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobProgress_MySQLBackup) Reset() {
@@ -5941,9 +6044,9 @@ func (*JobProgress_MySQLBackup) Descriptor() ([]byte, []int) {
// MySQLRestoreBackup contains restore backup job status update.
type JobProgress_MySQLRestoreBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *JobProgress_MySQLRestoreBackup) Reset() {
@@ -5978,13 +6081,12 @@ func (*JobProgress_MySQLRestoreBackup) Descriptor() ([]byte, []int) {
// Logs contains generic logs from job.
type JobProgress_Logs struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
+ Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
- Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *JobProgress_Logs) Reset() {
@@ -6040,9 +6142,9 @@ func (x *JobProgress_Logs) GetDone() bool {
// MySQLd is used for mysqld binary version retrieving using `mysqld --version`.
type GetVersionsRequest_MySQLd struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_MySQLd) Reset() {
@@ -6077,9 +6179,9 @@ func (*GetVersionsRequest_MySQLd) Descriptor() ([]byte, []int) {
// Xtrabackup is used for xtrabackup binary version retrieving.
type GetVersionsRequest_Xtrabackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_Xtrabackup) Reset() {
@@ -6114,9 +6216,9 @@ func (*GetVersionsRequest_Xtrabackup) Descriptor() ([]byte, []int) {
// Xbcloud is used for xbcloud binary version retrieving.
type GetVersionsRequest_Xbcloud struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_Xbcloud) Reset() {
@@ -6151,9 +6253,9 @@ func (*GetVersionsRequest_Xbcloud) Descriptor() ([]byte, []int) {
// Qpress is used for qpress binary version retrieving.
type GetVersionsRequest_Qpress struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_Qpress) Reset() {
@@ -6188,9 +6290,9 @@ func (*GetVersionsRequest_Qpress) Descriptor() ([]byte, []int) {
// MongoDB is used for mongod binary version retrieving.
type GetVersionsRequest_MongoDB struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_MongoDB) Reset() {
@@ -6225,9 +6327,9 @@ func (*GetVersionsRequest_MongoDB) Descriptor() ([]byte, []int) {
// PBM is used for pbm (Percona Backup for MongoDB) binary version retrieving.
type GetVersionsRequest_PBM struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_PBM) Reset() {
@@ -6262,11 +6364,8 @@ func (*GetVersionsRequest_PBM) Descriptor() ([]byte, []int) {
// Software is used to select the software for which to retrieve the version.
type GetVersionsRequest_Software struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Software:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Software:
//
// *GetVersionsRequest_Software_Mysqld
// *GetVersionsRequest_Software_Xtrabackup
@@ -6274,7 +6373,9 @@ type GetVersionsRequest_Software struct {
// *GetVersionsRequest_Software_Qpress
// *GetVersionsRequest_Software_Mongod
// *GetVersionsRequest_Software_Pbm
- Software isGetVersionsRequest_Software_Software `protobuf_oneof:"software"`
+ Software isGetVersionsRequest_Software_Software `protobuf_oneof:"software"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsRequest_Software) Reset() {
@@ -6307,51 +6408,63 @@ func (*GetVersionsRequest_Software) Descriptor() ([]byte, []int) {
return file_agent_v1_agent_proto_rawDescGZIP(), []int{38, 6}
}
-func (m *GetVersionsRequest_Software) GetSoftware() isGetVersionsRequest_Software_Software {
- if m != nil {
- return m.Software
+func (x *GetVersionsRequest_Software) GetSoftware() isGetVersionsRequest_Software_Software {
+ if x != nil {
+ return x.Software
}
return nil
}
func (x *GetVersionsRequest_Software) GetMysqld() *GetVersionsRequest_MySQLd {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Mysqld); ok {
- return x.Mysqld
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Mysqld); ok {
+ return x.Mysqld
+ }
}
return nil
}
func (x *GetVersionsRequest_Software) GetXtrabackup() *GetVersionsRequest_Xtrabackup {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Xtrabackup); ok {
- return x.Xtrabackup
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Xtrabackup); ok {
+ return x.Xtrabackup
+ }
}
return nil
}
func (x *GetVersionsRequest_Software) GetXbcloud() *GetVersionsRequest_Xbcloud {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Xbcloud); ok {
- return x.Xbcloud
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Xbcloud); ok {
+ return x.Xbcloud
+ }
}
return nil
}
func (x *GetVersionsRequest_Software) GetQpress() *GetVersionsRequest_Qpress {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Qpress); ok {
- return x.Qpress
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Qpress); ok {
+ return x.Qpress
+ }
}
return nil
}
func (x *GetVersionsRequest_Software) GetMongod() *GetVersionsRequest_MongoDB {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Mongod); ok {
- return x.Mongod
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Mongod); ok {
+ return x.Mongod
+ }
}
return nil
}
func (x *GetVersionsRequest_Software) GetPbm() *GetVersionsRequest_PBM {
- if x, ok := x.GetSoftware().(*GetVersionsRequest_Software_Pbm); ok {
- return x.Pbm
+ if x != nil {
+ if x, ok := x.Software.(*GetVersionsRequest_Software_Pbm); ok {
+ return x.Pbm
+ }
}
return nil
}
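The Software oneof getters above follow the same nil-checked pattern. A small sketch of selecting one software kind and reading it back with a type switch (same agentv1 alias as above):

sw := &agentv1.GetVersionsRequest_Software{
	Software: &agentv1.GetVersionsRequest_Software_Mysqld{
		Mysqld: &agentv1.GetVersionsRequest_MySQLd{},
	},
}
switch s := sw.GetSoftware().(type) {
case *agentv1.GetVersionsRequest_Software_Mysqld:
	_ = s.Mysqld // request the mysqld version
case *agentv1.GetVersionsRequest_Software_Pbm:
	_ = s.Pbm // request the pbm version
default:
	// no software selected
}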
@@ -6399,12 +6512,11 @@ func (*GetVersionsRequest_Software_Pbm) isGetVersionsRequest_Software_Software()
// Version contains the version field of the requested software and
// the error field which is set in case of version retrieving error.
type GetVersionsResponse_Version struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
- Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetVersionsResponse_Version) Reset() {
@@ -6453,7 +6565,7 @@ func (x *GetVersionsResponse_Version) GetError() string {
var File_agent_v1_agent_proto protoreflect.FileDescriptor
-var file_agent_v1_agent_proto_rawDesc = []byte{
+var file_agent_v1_agent_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31,
0x1a, 0x18, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
@@ -7499,16 +7611,16 @@ var file_agent_v1_agent_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09,
0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
-}
+})
var (
file_agent_v1_agent_proto_rawDescOnce sync.Once
- file_agent_v1_agent_proto_rawDescData = file_agent_v1_agent_proto_rawDesc
+ file_agent_v1_agent_proto_rawDescData []byte
)
func file_agent_v1_agent_proto_rawDescGZIP() []byte {
file_agent_v1_agent_proto_rawDescOnce.Do(func() {
- file_agent_v1_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_v1_agent_proto_rawDescData)
+ file_agent_v1_agent_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agent_v1_agent_proto_rawDesc), len(file_agent_v1_agent_proto_rawDesc)))
})
return file_agent_v1_agent_proto_rawDescData
}
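rawDesc is now kept as a string, and the generated code converts it to a byte view without copying via unsafe.Slice(unsafe.StringData(...), len(...)). A self-contained sketch of that conversion pattern (illustrative only, requires Go 1.20+; the result must be treated as read-only):

package main

import (
	"fmt"
	"unsafe"
)

// bytesView mirrors the conversion used above: a []byte view over the
// string's backing data, produced without copying.
func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	fmt.Println(len(bytesView("raw descriptor bytes"))) // 20
}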
@@ -7897,7 +8009,7 @@ func file_agent_v1_agent_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_agent_v1_agent_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_agent_v1_agent_proto_rawDesc), len(file_agent_v1_agent_proto_rawDesc)),
NumEnums: 2,
NumMessages: 91,
NumExtensions: 0,
@@ -7909,7 +8021,6 @@ func file_agent_v1_agent_proto_init() {
MessageInfos: file_agent_v1_agent_proto_msgTypes,
}.Build()
File_agent_v1_agent_proto = out.File
- file_agent_v1_agent_proto_rawDesc = nil
file_agent_v1_agent_proto_goTypes = nil
file_agent_v1_agent_proto_depIdxs = nil
}
diff --git a/api/agent/v1/agent.pb.validate.go b/api/agent/v1/agent.pb.validate.go
index a6fb4bda41..53247ab091 100644
--- a/api/agent/v1/agent.pb.validate.go
+++ b/api/agent/v1/agent.pb.validate.go
@@ -83,7 +83,7 @@ type TextFilesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m TextFilesMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
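The validate file replaces "var msgs []string" with a capacity-sized make in every MultiError.Error(); preallocating with len(m) avoids re-growing the slice while appending. A standalone sketch of the same pattern (not the generated code itself; the separator is illustrative):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// joinErrors preallocates msgs to len(errs), mirroring the change above,
// so append never has to reallocate the backing array.
func joinErrors(errs []error) string {
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	fmt.Println(joinErrors([]error{errors.New("first"), errors.New("second")}))
}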
@@ -181,7 +181,7 @@ type PingMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PingMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -308,7 +308,7 @@ type PongMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PongMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -442,7 +442,7 @@ type QANCollectRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANCollectRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -544,7 +544,7 @@ type QANCollectResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANCollectResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -656,7 +656,7 @@ type StateChangedRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StateChangedRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -758,7 +758,7 @@ type StateChangedResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StateChangedResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -952,7 +952,7 @@ type SetStateRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetStateRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1052,7 +1052,7 @@ type SetStateResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetStateResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1393,7 +1393,7 @@ type QueryActionValueMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryActionValueMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1527,7 +1527,7 @@ type QueryActionSliceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryActionSliceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1673,7 +1673,7 @@ type QueryActionMapMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryActionMapMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1777,7 +1777,7 @@ type QueryActionBinaryMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryActionBinaryMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1947,7 +1947,7 @@ type QueryActionResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryActionResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2946,7 +2946,7 @@ type StartActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3048,7 +3048,7 @@ type StartActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3152,7 +3152,7 @@ type StopActionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StopActionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3254,7 +3254,7 @@ type StopActionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StopActionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3364,7 +3364,7 @@ type ActionResultRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ActionResultRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3466,7 +3466,7 @@ type ActionResultResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ActionResultResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3601,7 +3601,7 @@ type PBMSwitchPITRRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PBMSwitchPITRRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3705,7 +3705,7 @@ type PBMSwitchPITRResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PBMSwitchPITRResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3811,7 +3811,7 @@ type AgentLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AgentLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3913,7 +3913,7 @@ type AgentLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AgentLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4079,7 +4079,7 @@ type CheckConnectionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckConnectionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4183,7 +4183,7 @@ type CheckConnectionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckConnectionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4349,7 +4349,7 @@ type ServiceInfoRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServiceInfoRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4461,7 +4461,7 @@ type ServiceInfoResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServiceInfoResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4565,7 +4565,7 @@ type JobStatusRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobStatusRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4667,7 +4667,7 @@ type JobStatusResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobStatusResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4779,7 +4779,7 @@ type S3LocationConfigMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m S3LocationConfigMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4881,7 +4881,7 @@ type FilesystemLocationConfigMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FilesystemLocationConfigMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5183,7 +5183,7 @@ type StartJobRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5285,7 +5285,7 @@ type StartJobResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5387,7 +5387,7 @@ type StopJobRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StopJobRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5487,7 +5487,7 @@ type StopJobResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StopJobResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5827,7 +5827,7 @@ type JobResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6085,7 +6085,7 @@ type JobProgressMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobProgressMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6219,7 +6219,7 @@ type GetVersionsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6355,7 +6355,7 @@ type GetVersionsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7230,7 +7230,7 @@ type AgentMessageMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AgentMessageMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8022,7 +8022,7 @@ type ServerMessageMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServerMessageMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8130,7 +8130,7 @@ type SetStateRequest_AgentProcessMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetStateRequest_AgentProcessMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8278,7 +8278,7 @@ type SetStateRequest_BuiltinAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SetStateRequest_BuiltinAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8422,7 +8422,7 @@ type StartActionRequest_MySQLExplainParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLExplainParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8563,7 +8563,7 @@ type StartActionRequest_MySQLShowCreateTableParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLShowCreateTableParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8707,7 +8707,7 @@ type StartActionRequest_MySQLShowTableStatusParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLShowTableStatusParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8851,7 +8851,7 @@ type StartActionRequest_MySQLShowIndexParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLShowIndexParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8993,7 +8993,7 @@ type StartActionRequest_PostgreSQLShowCreateTableParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PostgreSQLShowCreateTableParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9141,7 +9141,7 @@ type StartActionRequest_PostgreSQLShowIndexParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PostgreSQLShowIndexParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9281,7 +9281,7 @@ type StartActionRequest_MongoDBExplainParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBExplainParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9386,7 +9386,7 @@ type StartActionRequest_PTSummaryParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PTSummaryParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9499,7 +9499,7 @@ type StartActionRequest_PTPgSummaryParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PTPgSummaryParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9613,7 +9613,7 @@ type StartActionRequest_PTMongoDBSummaryParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PTMongoDBSummaryParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9729,7 +9729,7 @@ type StartActionRequest_PTMySQLSummaryParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PTMySQLSummaryParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9870,7 +9870,7 @@ type StartActionRequest_MySQLQueryShowParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLQueryShowParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10011,7 +10011,7 @@ type StartActionRequest_MySQLQuerySelectParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MySQLQuerySelectParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10150,7 +10150,7 @@ type StartActionRequest_PostgreSQLQueryShowParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PostgreSQLQueryShowParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10292,7 +10292,7 @@ type StartActionRequest_PostgreSQLQuerySelectParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_PostgreSQLQuerySelectParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10432,7 +10432,7 @@ type StartActionRequest_MongoDBQueryGetParameterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBQueryGetParameterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10576,7 +10576,7 @@ type StartActionRequest_MongoDBQueryBuildInfoParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBQueryBuildInfoParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10717,7 +10717,7 @@ type StartActionRequest_MongoDBQueryGetCmdLineOptsParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBQueryGetCmdLineOptsParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10862,7 +10862,7 @@ type StartActionRequest_MongoDBQueryReplSetGetStatusParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBQueryReplSetGetStatusParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11011,7 +11011,7 @@ type StartActionRequest_MongoDBQueryGetDiagnosticDataParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_MongoDBQueryGetDiagnosticDataParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11130,7 +11130,7 @@ type StartActionRequest_RestartSystemServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartActionRequest_RestartSystemServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11238,7 +11238,7 @@ type CheckConnectionResponse_StatsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckConnectionResponse_StatsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11401,7 +11401,7 @@ type StartJobRequest_MySQLBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobRequest_MySQLBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11558,7 +11558,7 @@ type StartJobRequest_MySQLRestoreBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobRequest_MySQLRestoreBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11787,7 +11787,7 @@ type StartJobRequest_MongoDBBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobRequest_MongoDBBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12072,7 +12072,7 @@ type StartJobRequest_MongoDBRestoreBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartJobRequest_MongoDBRestoreBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12177,7 +12177,7 @@ type JobResult_ErrorMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResult_ErrorMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12308,7 +12308,7 @@ type JobResult_MongoDBBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResult_MongoDBBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12439,7 +12439,7 @@ type JobResult_MySQLBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResult_MySQLBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12541,7 +12541,7 @@ type JobResult_MySQLRestoreBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResult_MySQLRestoreBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12644,7 +12644,7 @@ type JobResult_MongoDBRestoreBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobResult_MongoDBRestoreBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12747,7 +12747,7 @@ type JobProgress_MySQLBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobProgress_MySQLBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12849,7 +12849,7 @@ type JobProgress_MySQLRestoreBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobProgress_MySQLRestoreBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -12958,7 +12958,7 @@ type JobProgress_LogsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m JobProgress_LogsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13058,7 +13058,7 @@ type GetVersionsRequest_MySQLdMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_MySQLdMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13160,7 +13160,7 @@ type GetVersionsRequest_XtrabackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_XtrabackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13263,7 +13263,7 @@ type GetVersionsRequest_XbcloudMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_XbcloudMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13365,7 +13365,7 @@ type GetVersionsRequest_QpressMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_QpressMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13467,7 +13467,7 @@ type GetVersionsRequest_MongoDBMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_MongoDBMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13569,7 +13569,7 @@ type GetVersionsRequest_PBMMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_PBMMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -13922,7 +13922,7 @@ type GetVersionsRequest_SoftwareMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsRequest_SoftwareMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -14029,7 +14029,7 @@ type GetVersionsResponse_VersionMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetVersionsResponse_VersionMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
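The MultiError hunks above all apply one mechanical change: the msgs slice is preallocated with capacity len(m) instead of growing from nil, since exactly one message is appended per wrapped error. A minimal, self-contained sketch of that pattern (ExampleMultiError is a hypothetical stand-in, not a type from this API):

// Sketch of the slice-preallocation pattern shown in the hunks above.
// ExampleMultiError is a hypothetical stand-in for the generated *MultiError types.
package main

import (
	"errors"
	"fmt"
	"strings"
)

type ExampleMultiError []error

// Error returns a concatenation of all the error messages it wraps.
// Preallocating with capacity len(m) avoids repeated slice growth while appending.
func (m ExampleMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	m := ExampleMultiError{errors.New("first"), errors.New("second")}
	fmt.Println(m.Error()) // prints: first; second
}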
diff --git a/api/agent/v1/collector.pb.go b/api/agent/v1/collector.pb.go
index 48b83564be..ef89017ff4 100644
--- a/api/agent/v1/collector.pb.go
+++ b/api/agent/v1/collector.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: agent/v1/collector.proto
@@ -9,6 +9,7 @@ package agentv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -82,14 +83,13 @@ func (ExampleType) EnumDescriptor() ([]byte, []int) {
// MetricsBucket is aggregated message created by pmm-agent.
// Contains information about one query selected in defined way from query class in specific period of time.
type MetricsBucket struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Common *MetricsBucket_Common `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ Mysql *MetricsBucket_MySQL `protobuf:"bytes,2,opt,name=mysql,proto3" json:"mysql,omitempty"`
+ Mongodb *MetricsBucket_MongoDB `protobuf:"bytes,3,opt,name=mongodb,proto3" json:"mongodb,omitempty"`
+ Postgresql *MetricsBucket_PostgreSQL `protobuf:"bytes,4,opt,name=postgresql,proto3" json:"postgresql,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Common *MetricsBucket_Common `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
- Mysql *MetricsBucket_MySQL `protobuf:"bytes,2,opt,name=mysql,proto3" json:"mysql,omitempty"`
- Mongodb *MetricsBucket_MongoDB `protobuf:"bytes,3,opt,name=mongodb,proto3" json:"mongodb,omitempty"`
- Postgresql *MetricsBucket_PostgreSQL `protobuf:"bytes,4,opt,name=postgresql,proto3" json:"postgresql,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket) Reset() {
@@ -152,12 +152,11 @@ func (x *MetricsBucket) GetPostgresql() *MetricsBucket_PostgreSQL {
// HistogramItem represents one item in histogram.
type HistogramItem struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Range string `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
+ Frequency uint32 `protobuf:"varint,2,opt,name=frequency,proto3" json:"frequency,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Range string `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
- Frequency uint32 `protobuf:"varint,2,opt,name=frequency,proto3" json:"frequency,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *HistogramItem) Reset() {
@@ -206,10 +205,7 @@ func (x *HistogramItem) GetFrequency() uint32 {
// Common contains common fields for all DBs.
type MetricsBucket_Common struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// md5 of digest_text/fingerprint.
Queryid string `protobuf:"bytes,1,opt,name=queryid,proto3" json:"queryid,omitempty"`
// contains fingerprint prepared by sql parser, which can be different than fingerprint.
@@ -217,7 +213,7 @@ type MetricsBucket_Common struct {
	// amount of variables in query.
PlaceholdersCount uint32 `protobuf:"varint,26,opt,name=placeholders_count,json=placeholdersCount,proto3" json:"placeholders_count,omitempty"`
// List of keys and values of comments.
- Comments map[string]string `protobuf:"bytes,27,rep,name=comments,proto3" json:"comments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Comments map[string]string `protobuf:"bytes,27,rep,name=comments,proto3" json:"comments,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// digest_text - query signature. Query without values.
Fingerprint string `protobuf:"bytes,2,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"`
// Dimension Group.
@@ -252,7 +248,7 @@ type MetricsBucket_Common struct {
// How many queries was with error in bucket.
NumQueriesWithErrors float32 `protobuf:"fixed32,16,opt,name=num_queries_with_errors,json=numQueriesWithErrors,proto3" json:"num_queries_with_errors,omitempty"`
// List of errors: {code: count}.
- Errors map[uint64]uint64 `protobuf:"bytes,17,rep,name=errors,proto3" json:"errors,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ Errors map[uint64]uint64 `protobuf:"bytes,17,rep,name=errors,proto3" json:"errors,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
	// Amount of queries in this bucket.
NumQueries float32 `protobuf:"fixed32,18,opt,name=num_queries,json=numQueries,proto3" json:"num_queries,omitempty"`
// How many times query_time was found.
@@ -265,6 +261,8 @@ type MetricsBucket_Common struct {
MQueryTimeMax float32 `protobuf:"fixed32,22,opt,name=m_query_time_max,json=mQueryTimeMax,proto3" json:"m_query_time_max,omitempty"`
// 99 percentile of value of query_time in bucket.
MQueryTimeP99 float32 `protobuf:"fixed32,23,opt,name=m_query_time_p99,json=mQueryTimeP99,proto3" json:"m_query_time_p99,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket_Common) Reset() {
@@ -481,11 +479,8 @@ func (x *MetricsBucket_Common) GetMQueryTimeP99() float32 {
// MySQL contains metrics for MySQL.
type MetricsBucket_MySQL struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- MLockTimeCnt float32 `protobuf:"fixed32,1,opt,name=m_lock_time_cnt,json=mLockTimeCnt,proto3" json:"m_lock_time_cnt,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MLockTimeCnt float32 `protobuf:"fixed32,1,opt,name=m_lock_time_cnt,json=mLockTimeCnt,proto3" json:"m_lock_time_cnt,omitempty"`
// The time to acquire locks in seconds.
MLockTimeSum float32 `protobuf:"fixed32,2,opt,name=m_lock_time_sum,json=mLockTimeSum,proto3" json:"m_lock_time_sum,omitempty"`
MLockTimeMin float32 `protobuf:"fixed32,3,opt,name=m_lock_time_min,json=mLockTimeMin,proto3" json:"m_lock_time_min,omitempty"`
@@ -635,6 +630,8 @@ type MetricsBucket_MySQL struct {
MNoGoodIndexUsedCnt float32 `protobuf:"fixed32,114,opt,name=m_no_good_index_used_cnt,json=mNoGoodIndexUsedCnt,proto3" json:"m_no_good_index_used_cnt,omitempty"`
// The number of queries without good index.
MNoGoodIndexUsedSum float32 `protobuf:"fixed32,115,opt,name=m_no_good_index_used_sum,json=mNoGoodIndexUsedSum,proto3" json:"m_no_good_index_used_sum,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket_MySQL) Reset() {
@@ -1474,11 +1471,8 @@ func (x *MetricsBucket_MySQL) GetMNoGoodIndexUsedSum() float32 {
// MongoDB contains metrics for Mongo DB.
type MetricsBucket_MongoDB struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- MDocsReturnedCnt float32 `protobuf:"fixed32,1,opt,name=m_docs_returned_cnt,json=mDocsReturnedCnt,proto3" json:"m_docs_returned_cnt,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MDocsReturnedCnt float32 `protobuf:"fixed32,1,opt,name=m_docs_returned_cnt,json=mDocsReturnedCnt,proto3" json:"m_docs_returned_cnt,omitempty"`
// The number of returned documents.
MDocsReturnedSum float32 `protobuf:"fixed32,2,opt,name=m_docs_returned_sum,json=mDocsReturnedSum,proto3" json:"m_docs_returned_sum,omitempty"`
MDocsReturnedMin float32 `protobuf:"fixed32,3,opt,name=m_docs_returned_min,json=mDocsReturnedMin,proto3" json:"m_docs_returned_min,omitempty"`
@@ -1496,6 +1490,13 @@ type MetricsBucket_MongoDB struct {
MDocsScannedMin float32 `protobuf:"fixed32,13,opt,name=m_docs_scanned_min,json=mDocsScannedMin,proto3" json:"m_docs_scanned_min,omitempty"`
MDocsScannedMax float32 `protobuf:"fixed32,14,opt,name=m_docs_scanned_max,json=mDocsScannedMax,proto3" json:"m_docs_scanned_max,omitempty"`
MDocsScannedP99 float32 `protobuf:"fixed32,15,opt,name=m_docs_scanned_p99,json=mDocsScannedP99,proto3" json:"m_docs_scanned_p99,omitempty"`
+ // The query performed a full collection scan (COLLSCAN).
+ MFullScanCnt float32 `protobuf:"fixed32,16,opt,name=m_full_scan_cnt,json=mFullScanCnt,proto3" json:"m_full_scan_cnt,omitempty"`
+ MFullScanSum float32 `protobuf:"fixed32,17,opt,name=m_full_scan_sum,json=mFullScanSum,proto3" json:"m_full_scan_sum,omitempty"`
+ // Plan summary type (COLLSCAN, IXSCAN, etc).
+ PlanSummary string `protobuf:"bytes,18,opt,name=plan_summary,json=planSummary,proto3" json:"plan_summary,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket_MongoDB) Reset() {
@@ -1633,13 +1634,31 @@ func (x *MetricsBucket_MongoDB) GetMDocsScannedP99() float32 {
return 0
}
+func (x *MetricsBucket_MongoDB) GetMFullScanCnt() float32 {
+ if x != nil {
+ return x.MFullScanCnt
+ }
+ return 0
+}
+
+func (x *MetricsBucket_MongoDB) GetMFullScanSum() float32 {
+ if x != nil {
+ return x.MFullScanSum
+ }
+ return 0
+}
+
+func (x *MetricsBucket_MongoDB) GetPlanSummary() string {
+ if x != nil {
+ return x.PlanSummary
+ }
+ return ""
+}
+
// PostgreSQL contains metrics for PostgreSQL.
type MetricsBucket_PostgreSQL struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- MRowsCnt float32 `protobuf:"fixed32,1,opt,name=m_rows_cnt,json=mRowsCnt,proto3" json:"m_rows_cnt,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MRowsCnt float32 `protobuf:"fixed32,1,opt,name=m_rows_cnt,json=mRowsCnt,proto3" json:"m_rows_cnt,omitempty"`
// The number of rows sent to the client.
MRowsSum float32 `protobuf:"fixed32,2,opt,name=m_rows_sum,json=mRowsSum,proto3" json:"m_rows_sum,omitempty"`
MSharedBlksHitCnt float32 `protobuf:"fixed32,3,opt,name=m_shared_blks_hit_cnt,json=mSharedBlksHitCnt,proto3" json:"m_shared_blks_hit_cnt,omitempty"`
@@ -1723,6 +1742,8 @@ type MetricsBucket_PostgreSQL struct {
Planid string `protobuf:"bytes,46,opt,name=planid,proto3" json:"planid,omitempty"`
QueryPlan string `protobuf:"bytes,48,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
HistogramItems []*HistogramItem `protobuf:"bytes,49,rep,name=histogram_items,json=histogramItems,proto3" json:"histogram_items,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket_PostgreSQL) Reset() {
@@ -2128,12 +2149,12 @@ func (x *MetricsBucket_PostgreSQL) GetHistogramItems() []*HistogramItem {
var File_agent_v1_collector_proto protoreflect.FileDescriptor
-var file_agent_v1_collector_proto_rawDesc = []byte{
+var file_agent_v1_collector_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65,
0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x61, 0x67, 0x65, 0x6e,
0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f,
0x76, 0x31, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
- 0xaa, 0x4f, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x9b, 0x50, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65,
0x74, 0x12, 0x36, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
@@ -2565,7 +2586,7 @@ var file_agent_v1_collector_proto_rawDesc = []byte{
0x6e, 0x6f, 0x5f, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x75, 0x73,
0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x73, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x4e,
0x6f, 0x47, 0x6f, 0x6f, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, 0x75,
- 0x6d, 0x1a, 0xd4, 0x05, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x12, 0x2d, 0x0a,
+ 0x6d, 0x1a, 0xc5, 0x06, 0x0a, 0x07, 0x4d, 0x6f, 0x6e, 0x67, 0x6f, 0x44, 0x42, 0x12, 0x2d, 0x0a,
0x13, 0x6d, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64,
0x5f, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x44, 0x6f, 0x63,
0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13,
@@ -2610,197 +2631,205 @@ var file_agent_v1_collector_proto_rawDesc = []byte{
0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x4d, 0x61, 0x78, 0x12, 0x2b, 0x0a, 0x12, 0x6d,
0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x39,
0x39, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x44, 0x6f, 0x63, 0x73, 0x53, 0x63,
- 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x39, 0x39, 0x1a, 0xd1, 0x13, 0x0a, 0x0a, 0x50, 0x6f, 0x73,
- 0x74, 0x67, 0x72, 0x65, 0x53, 0x51, 0x4c, 0x12, 0x1c, 0x0a, 0x0a, 0x6d, 0x5f, 0x72, 0x6f, 0x77,
- 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x52, 0x6f,
- 0x77, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x0a, 0x6d, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x52, 0x6f, 0x77, 0x73,
- 0x53, 0x75, 0x6d, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
- 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x48,
- 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65,
- 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b,
- 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x5f, 0x73, 0x68, 0x61,
- 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e,
- 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
- 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x16, 0x6d,
- 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61,
- 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53, 0x68,
- 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12,
- 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73,
- 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x44,
- 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x73,
- 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69,
- 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53,
- 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64,
- 0x53, 0x75, 0x6d, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
- 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74,
- 0x18, 0x09, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42,
- 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x38, 0x0a,
- 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77,
- 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x02,
- 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2e, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
- 0x73, 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18,
- 0x0c, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
- 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e, 0x74,
- 0x18, 0x0d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c,
- 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73,
- 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x36, 0x0a, 0x18, 0x6d,
- 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74,
- 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64,
- 0x43, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
- 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18,
- 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
- 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x36, 0x0a, 0x18, 0x6d,
- 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74,
- 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e,
- 0x43, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
- 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18,
- 0x12, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
- 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2e, 0x0a, 0x14, 0x6d,
- 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
- 0x63, 0x6e, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65, 0x6d, 0x70,
- 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x14, 0x6d,
- 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65, 0x6d, 0x70,
- 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x17, 0x6d,
- 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74,
- 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54,
- 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6e,
- 0x74, 0x12, 0x34, 0x0a, 0x17, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x16, 0x20, 0x01,
- 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68, 0x61,
- 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68,
- 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43,
- 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62,
- 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d,
- 0x18, 0x18, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42,
- 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3b, 0x0a,
- 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x19, 0x20, 0x01,
- 0x28, 0x02, 0x52, 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1b, 0x6d, 0x5f,
- 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x32, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74,
- 0x12, 0x37, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f,
- 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x33, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65,
- 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x34, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x43, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
- 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73,
- 0x75, 0x6d, 0x18, 0x35, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12,
- 0x2c, 0x0a, 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43,
- 0x70, 0x75, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2c, 0x0a,
- 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43, 0x70, 0x75,
- 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x12, 0x6d,
- 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e,
- 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73,
- 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x12, 0x6d, 0x5f, 0x63, 0x70, 0x75,
- 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1e, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73, 0x54, 0x69, 0x6d, 0x65,
- 0x53, 0x75, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6d, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x2b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29,
- 0x0a, 0x11, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c, 0x61, 0x6e,
- 0x73, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x70,
- 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x20,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x73, 0x43, 0x61, 0x6c, 0x6c,
- 0x73, 0x43, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72, 0x65,
- 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x21, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x0e, 0x6d, 0x57, 0x61, 0x6c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x53, 0x75, 0x6d, 0x12,
- 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73,
- 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x22, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x57, 0x61, 0x6c,
- 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0d, 0x6d, 0x5f,
- 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x23, 0x20, 0x01, 0x28,
- 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69, 0x53, 0x75, 0x6d, 0x12, 0x21, 0x0a,
- 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x24,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69, 0x43, 0x6e, 0x74,
- 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
- 0x73, 0x75, 0x6d, 0x18, 0x25, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42,
- 0x79, 0x74, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x02,
- 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x25,
- 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75,
- 0x6d, 0x18, 0x27, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69,
- 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x28, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c,
- 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0f,
- 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x18,
- 0x29, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65,
- 0x4d, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50,
- 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x61, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f,
- 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0a, 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74,
- 0x6f, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x70, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2d, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e,
- 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x18, 0x2e, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x68, 0x69,
- 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x31, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x48,
- 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0e, 0x68, 0x69,
- 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x43, 0x0a, 0x0d,
- 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a,
- 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61,
- 0x6e, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
- 0x79, 0x2a, 0x95, 0x01, 0x0a, 0x0b, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x17, 0x0a, 0x13, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x41, 0x4d,
- 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4c, 0x4f, 0x57, 0x45, 0x53, 0x54,
- 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x46, 0x41, 0x53, 0x54, 0x45, 0x53, 0x54, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17,
- 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x49, 0x54,
- 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x42, 0x8c, 0x01, 0x0a, 0x0c, 0x63, 0x6f,
- 0x6d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f, 0x6c, 0x6c,
- 0x65, 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61,
- 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x76,
- 0x31, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x58, 0x58, 0xaa,
- 0x02, 0x08, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x41, 0x67, 0x65,
- 0x6e, 0x74, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x14, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31,
- 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09, 0x41,
- 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+ 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x39, 0x39, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x66, 0x75,
+ 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x0c, 0x6d, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x63, 0x61, 0x6e, 0x43, 0x6e, 0x74, 0x12,
+ 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x73,
+ 0x75, 0x6d, 0x18, 0x11, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x46, 0x75, 0x6c, 0x6c, 0x53,
+ 0x63, 0x61, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x73,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c,
+ 0x61, 0x6e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0xd1, 0x13, 0x0a, 0x0a, 0x50, 0x6f,
+ 0x73, 0x74, 0x67, 0x72, 0x65, 0x53, 0x51, 0x4c, 0x12, 0x1c, 0x0a, 0x0a, 0x6d, 0x5f, 0x72, 0x6f,
+ 0x77, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x52,
+ 0x6f, 0x77, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x0a, 0x6d, 0x5f, 0x72, 0x6f, 0x77, 0x73,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x6d, 0x52, 0x6f, 0x77,
+ 0x73, 0x53, 0x75, 0x6d, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73,
+ 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72,
+ 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c,
+ 0x6b, 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x5f, 0x73, 0x68,
+ 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63,
+ 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65,
+ 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x16,
+ 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65,
+ 0x61, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d,
+ 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b,
+ 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73,
+ 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f,
+ 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74,
+ 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d,
+ 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65,
+ 0x64, 0x53, 0x75, 0x6d, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e,
+ 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x38,
+ 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2e, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c,
+ 0x6b, 0x73, 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0x0c, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c,
+ 0x6b, 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e,
+ 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42,
+ 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x15, 0x6d, 0x5f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x36, 0x0a, 0x18,
+ 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72,
+ 0x74, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14,
+ 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65,
+ 0x64, 0x43, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
+ 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c,
+ 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x36, 0x0a, 0x18,
+ 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69,
+ 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14,
+ 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65,
+ 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
+ 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0x12, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c,
+ 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2e, 0x0a, 0x14,
+ 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65, 0x6d,
+ 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x14,
+ 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65, 0x6d,
+ 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x17,
+ 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d,
+ 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43,
+ 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x17, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b,
+ 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x16, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68,
+ 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65,
+ 0x43, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
+ 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75,
+ 0x6d, 0x18, 0x18, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3b,
+ 0x0a, 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77,
+ 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x19, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1b, 0x6d,
+ 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x02,
+ 0x52, 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x32, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e,
+ 0x74, 0x12, 0x37, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b,
+ 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x33,
+ 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52,
+ 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x34, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15,
+ 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x35, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d,
+ 0x12, 0x2c, 0x0a, 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d,
+ 0x43, 0x70, 0x75, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2c,
+ 0x0a, 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43, 0x70,
+ 0x75, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x12,
+ 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
+ 0x6e, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79,
+ 0x73, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x12, 0x6d, 0x5f, 0x63, 0x70,
+ 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1e,
+ 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x75, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6d, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x2b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c, 0x61,
+ 0x6e, 0x73, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x5f,
+ 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18,
+ 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x73, 0x43, 0x61, 0x6c,
+ 0x6c, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x21, 0x20, 0x01, 0x28, 0x02,
+ 0x52, 0x0e, 0x6d, 0x57, 0x61, 0x6c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x53, 0x75, 0x6d,
+ 0x12, 0x29, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x22, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x57, 0x61,
+ 0x6c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0d, 0x6d,
+ 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x23, 0x20, 0x01,
+ 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69, 0x53, 0x75, 0x6d, 0x12, 0x21,
+ 0x0a, 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x63, 0x6e, 0x74, 0x18,
+ 0x24, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69, 0x43, 0x6e,
+ 0x74, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x25, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c,
+ 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61,
+ 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6e, 0x74, 0x12,
+ 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73,
+ 0x75, 0x6d, 0x18, 0x27, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54,
+ 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x28, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x25, 0x0a,
+ 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6e,
+ 0x18, 0x29, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d,
+ 0x65, 0x4d, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d,
+ 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x61, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x74,
+ 0x6f, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x6f, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2d, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x18, 0x2e,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x30, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x68,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x31,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e,
+ 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0e, 0x68,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x43, 0x0a,
+ 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14,
+ 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
+ 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e,
+ 0x63, 0x79, 0x2a, 0x95, 0x01, 0x0a, 0x0b, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x17, 0x0a, 0x13, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45,
+ 0x5f, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x41,
+ 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4c, 0x4f, 0x57, 0x45, 0x53,
+ 0x54, 0x10, 0x02, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x53, 0x54, 0x45, 0x53, 0x54, 0x10, 0x03, 0x12, 0x1b, 0x0a,
+ 0x17, 0x45, 0x58, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, 0x49,
+ 0x54, 0x48, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x42, 0x8c, 0x01, 0x0a, 0x0c, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f, 0x6e,
+ 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f,
+ 0x76, 0x31, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x58, 0x58,
+ 0xaa, 0x02, 0x08, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x41, 0x67,
+ 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x14, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x5c, 0x56,
+ 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09,
+ 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+})
var (
file_agent_v1_collector_proto_rawDescOnce sync.Once
- file_agent_v1_collector_proto_rawDescData = file_agent_v1_collector_proto_rawDesc
+ file_agent_v1_collector_proto_rawDescData []byte
)
func file_agent_v1_collector_proto_rawDescGZIP() []byte {
file_agent_v1_collector_proto_rawDescOnce.Do(func() {
- file_agent_v1_collector_proto_rawDescData = protoimpl.X.CompressGZIP(file_agent_v1_collector_proto_rawDescData)
+ file_agent_v1_collector_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agent_v1_collector_proto_rawDesc), len(file_agent_v1_collector_proto_rawDesc)))
})
return file_agent_v1_collector_proto_rawDescData
}
@@ -2848,7 +2877,7 @@ func file_agent_v1_collector_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_agent_v1_collector_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_agent_v1_collector_proto_rawDesc), len(file_agent_v1_collector_proto_rawDesc)),
NumEnums: 1,
NumMessages: 8,
NumExtensions: 0,
@@ -2860,7 +2889,6 @@ func file_agent_v1_collector_proto_init() {
MessageInfos: file_agent_v1_collector_proto_msgTypes,
}.Build()
File_agent_v1_collector_proto = out.File
- file_agent_v1_collector_proto_rawDesc = nil
file_agent_v1_collector_proto_goTypes = nil
file_agent_v1_collector_proto_depIdxs = nil
}
diff --git a/api/agent/v1/collector.pb.validate.go b/api/agent/v1/collector.pb.validate.go
index c9e6de7d1b..4165f48a41 100644
--- a/api/agent/v1/collector.pb.validate.go
+++ b/api/agent/v1/collector.pb.validate.go
@@ -191,7 +191,7 @@ type MetricsBucketMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucketMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -295,7 +295,7 @@ type HistogramItemMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HistogramItemMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -445,7 +445,7 @@ type MetricsBucket_CommonMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucket_CommonMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -777,7 +777,7 @@ type MetricsBucket_MySQLMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucket_MySQLMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -895,6 +895,12 @@ func (m *MetricsBucket_MongoDB) validate(all bool) error {
// no validation rules for MDocsScannedP99
+ // no validation rules for MFullScanCnt
+
+ // no validation rules for MFullScanSum
+
+ // no validation rules for PlanSummary
+
if len(errors) > 0 {
return MetricsBucket_MongoDBMultiError(errors)
}
@@ -909,7 +915,7 @@ type MetricsBucket_MongoDBMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucket_MongoDBMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1149,7 +1155,7 @@ type MetricsBucket_PostgreSQLMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucket_PostgreSQLMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/agent/v1/collector.proto b/api/agent/v1/collector.proto
index 7c613f75f3..54c6b9bafe 100644
--- a/api/agent/v1/collector.proto
+++ b/api/agent/v1/collector.proto
@@ -261,6 +261,11 @@ message MetricsBucket {
float m_docs_scanned_min = 13;
float m_docs_scanned_max = 14;
float m_docs_scanned_p99 = 15;
+ // The query performed a full collection scan (COLLSCAN).
+ float m_full_scan_cnt = 16;
+ float m_full_scan_sum = 17;
+ // Plan summary type (COLLSCAN, IXSCAN, etc.).
+ string plan_summary = 18;
}
// PostgreSQL contains metrics for PostgreSQL.
message PostgreSQL {
diff --git a/api/agentlocal/v1/agentlocal.pb.go b/api/agentlocal/v1/agentlocal.pb.go
index c8bc022a48..326720dbcc 100644
--- a/api/agentlocal/v1/agentlocal.pb.go
+++ b/api/agentlocal/v1/agentlocal.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: agentlocal/v1/agentlocal.proto
@@ -9,6 +9,7 @@ package agentlocalv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -27,10 +28,7 @@ const (
// ServerInfo contains information about the PMM Server.
type ServerInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// PMM Server URL in a form https://HOST:PORT/.
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
// PMM Server's TLS certificate validation should be skipped if true.
@@ -42,7 +40,9 @@ type ServerInfo struct {
// Ping time from pmm-agent to pmm-managed (if agent is connected).
Latency *durationpb.Duration `protobuf:"bytes,5,opt,name=latency,proto3" json:"latency,omitempty"`
// Clock drift from PMM Server (if agent is connected).
- ClockDrift *durationpb.Duration `protobuf:"bytes,6,opt,name=clock_drift,json=clockDrift,proto3" json:"clock_drift,omitempty"`
+ ClockDrift *durationpb.Duration `protobuf:"bytes,6,opt,name=clock_drift,json=clockDrift,proto3" json:"clock_drift,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerInfo) Reset() {
@@ -119,17 +119,16 @@ func (x *ServerInfo) GetClockDrift() *durationpb.Duration {
// AgentInfo contains information about Agent managed by pmm-agent.
type AgentInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
- AgentType v1.AgentType `protobuf:"varint,2,opt,name=agent_type,json=agentType,proto3,enum=inventory.v1.AgentType" json:"agent_type,omitempty"`
- Status v1.AgentStatus `protobuf:"varint,3,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ AgentType v1.AgentType `protobuf:"varint,2,opt,name=agent_type,json=agentType,proto3,enum=inventory.v1.AgentType" json:"agent_type,omitempty"`
+ Status v1.AgentStatus `protobuf:"varint,3,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// The current listen port of this Agent (exporter or vmagent).
// Zero for other Agent types, or if unknown or not yet supported.
ListenPort uint32 `protobuf:"varint,4,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
ProcessExecPath string `protobuf:"bytes,5,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AgentInfo) Reset() {
@@ -198,12 +197,11 @@ func (x *AgentInfo) GetProcessExecPath() string {
}
type StatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Returns network info (latency and clock_drift) if true.
GetNetworkInfo bool `protobuf:"varint,1,opt,name=get_network_info,json=getNetworkInfo,proto3" json:"get_network_info,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StatusRequest) Reset() {
@@ -244,21 +242,20 @@ func (x *StatusRequest) GetGetNetworkInfo() bool {
}
type StatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
- RunsOnNodeId string `protobuf:"bytes,2,opt,name=runs_on_node_id,json=runsOnNodeId,proto3" json:"runs_on_node_id,omitempty"` // TODO: rename to node_id
- NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
- ServerInfo *ServerInfo `protobuf:"bytes,4,opt,name=server_info,json=serverInfo,proto3" json:"server_info,omitempty"`
- AgentsInfo []*AgentInfo `protobuf:"bytes,5,rep,name=agents_info,json=agentsInfo,proto3" json:"agents_info,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ RunsOnNodeId string `protobuf:"bytes,2,opt,name=runs_on_node_id,json=runsOnNodeId,proto3" json:"runs_on_node_id,omitempty"` // TODO: rename to node_id
+ NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ ServerInfo *ServerInfo `protobuf:"bytes,4,opt,name=server_info,json=serverInfo,proto3" json:"server_info,omitempty"`
+ AgentsInfo []*AgentInfo `protobuf:"bytes,5,rep,name=agents_info,json=agentsInfo,proto3" json:"agents_info,omitempty"`
// Config file path if pmm-agent was started with one.
ConfigFilepath string `protobuf:"bytes,6,opt,name=config_filepath,json=configFilepath,proto3" json:"config_filepath,omitempty"`
// PMM Agent version.
AgentVersion string `protobuf:"bytes,7,opt,name=agent_version,json=agentVersion,proto3" json:"agent_version,omitempty"`
// Shows connection uptime in percentage between agent and server
ConnectionUptime float32 `protobuf:"fixed32,8,opt,name=connection_uptime,json=connectionUptime,proto3" json:"connection_uptime,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StatusResponse) Reset() {
@@ -348,9 +345,9 @@ func (x *StatusResponse) GetConnectionUptime() float32 {
}
type ReloadRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ReloadRequest) Reset() {
@@ -385,9 +382,9 @@ func (*ReloadRequest) Descriptor() ([]byte, []int) {
// ReloadRequest may not be received by the client due to pmm-agent restart.
type ReloadResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ReloadResponse) Reset() {
@@ -422,7 +419,7 @@ func (*ReloadResponse) Descriptor() ([]byte, []int) {
var File_agentlocal_v1_agentlocal_proto protoreflect.FileDescriptor
-var file_agentlocal_v1_agentlocal_proto_rawDesc = []byte{
+var file_agentlocal_v1_agentlocal_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x31, 0x2f,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x2e, 0x76, 0x31, 0x1a,
@@ -517,16 +514,16 @@ var file_agentlocal_v1_agentlocal_proto_rawDesc = []byte{
0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x6c, 0x6f, 0x63, 0x61,
0x6c, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_agentlocal_v1_agentlocal_proto_rawDescOnce sync.Once
- file_agentlocal_v1_agentlocal_proto_rawDescData = file_agentlocal_v1_agentlocal_proto_rawDesc
+ file_agentlocal_v1_agentlocal_proto_rawDescData []byte
)
func file_agentlocal_v1_agentlocal_proto_rawDescGZIP() []byte {
file_agentlocal_v1_agentlocal_proto_rawDescOnce.Do(func() {
- file_agentlocal_v1_agentlocal_proto_rawDescData = protoimpl.X.CompressGZIP(file_agentlocal_v1_agentlocal_proto_rawDescData)
+ file_agentlocal_v1_agentlocal_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agentlocal_v1_agentlocal_proto_rawDesc), len(file_agentlocal_v1_agentlocal_proto_rawDesc)))
})
return file_agentlocal_v1_agentlocal_proto_rawDescData
}
@@ -573,7 +570,7 @@ func file_agentlocal_v1_agentlocal_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_agentlocal_v1_agentlocal_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_agentlocal_v1_agentlocal_proto_rawDesc), len(file_agentlocal_v1_agentlocal_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
@@ -584,7 +581,6 @@ func file_agentlocal_v1_agentlocal_proto_init() {
MessageInfos: file_agentlocal_v1_agentlocal_proto_msgTypes,
}.Build()
File_agentlocal_v1_agentlocal_proto = out.File
- file_agentlocal_v1_agentlocal_proto_rawDesc = nil
file_agentlocal_v1_agentlocal_proto_goTypes = nil
file_agentlocal_v1_agentlocal_proto_depIdxs = nil
}
diff --git a/api/agentlocal/v1/agentlocal.pb.gw.go b/api/agentlocal/v1/agentlocal.pb.gw.go
index f8795a8ff4..bba84b5937 100644
--- a/api/agentlocal/v1/agentlocal.pb.gw.go
+++ b/api/agentlocal/v1/agentlocal.pb.gw.go
@@ -10,6 +10,7 @@ package agentlocalv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,31 +29,32 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_AgentLocalService_Status_0(ctx context.Context, marshaler runtime.Marshaler, client AgentLocalServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentLocalService_Status_0(ctx context.Context, marshaler runtime.Marshaler, server AgentLocalServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Status(ctx, &protoReq)
return msg, metadata, err
}
@@ -60,55 +62,55 @@ func local_request_AgentLocalService_Status_0(ctx context.Context, marshaler run
var filter_AgentLocalService_Status_1 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_AgentLocalService_Status_1(ctx context.Context, marshaler runtime.Marshaler, client AgentLocalServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StatusRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq StatusRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentLocalService_Status_1); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentLocalService_Status_1(ctx context.Context, marshaler runtime.Marshaler, server AgentLocalServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StatusRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq StatusRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentLocalService_Status_1); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Status(ctx, &protoReq)
return msg, metadata, err
}
func request_AgentLocalService_Reload_0(ctx context.Context, marshaler runtime.Marshaler, client AgentLocalServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ReloadRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ReloadRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Reload(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentLocalService_Reload_0(ctx context.Context, marshaler runtime.Marshaler, server AgentLocalServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ReloadRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ReloadRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Reload(ctx, &protoReq)
return msg, metadata, err
}
@@ -119,15 +121,13 @@ func local_request_AgentLocalService_Reload_0(ctx context.Context, marshaler run
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAgentLocalServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAgentLocalServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AgentLocalServiceServer) error {
- mux.Handle("POST", pattern_AgentLocalService_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentLocalService_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -139,19 +139,15 @@ func RegisterAgentLocalServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentLocalService_Status_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentLocalService_Status_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -163,19 +159,15 @@ func RegisterAgentLocalServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Status_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AgentLocalService_Reload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentLocalService_Reload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Reload", runtime.WithHTTPPathPattern("/local/Reload"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Reload", runtime.WithHTTPPathPattern("/local/Reload"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -187,7 +179,6 @@ func RegisterAgentLocalServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Reload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -215,7 +206,6 @@ func RegisterAgentLocalServiceHandlerFromEndpoint(ctx context.Context, mux *runt
}
}()
}()
-
return RegisterAgentLocalServiceHandler(ctx, mux, conn)
}
@@ -231,13 +221,11 @@ func RegisterAgentLocalServiceHandler(ctx context.Context, mux *runtime.ServeMux
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AgentLocalServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAgentLocalServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AgentLocalServiceClient) error {
- mux.Handle("POST", pattern_AgentLocalService_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentLocalService_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -248,17 +236,13 @@ func RegisterAgentLocalServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentLocalService_Status_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentLocalService_Status_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Status", runtime.WithHTTPPathPattern("/local/Status"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -269,17 +253,13 @@ func RegisterAgentLocalServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Status_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AgentLocalService_Reload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentLocalService_Reload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Reload", runtime.WithHTTPPathPattern("/local/Reload"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/agentlocal.v1.AgentLocalService/Reload", runtime.WithHTTPPathPattern("/local/Reload"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -290,25 +270,19 @@ func RegisterAgentLocalServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentLocalService_Reload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
pattern_AgentLocalService_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"local", "Status"}, ""))
-
pattern_AgentLocalService_Status_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"local", "Status"}, ""))
-
pattern_AgentLocalService_Reload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"local", "Reload"}, ""))
)
var (
forward_AgentLocalService_Status_0 = runtime.ForwardResponseMessage
-
forward_AgentLocalService_Status_1 = runtime.ForwardResponseMessage
-
forward_AgentLocalService_Reload_0 = runtime.ForwardResponseMessage
)
diff --git a/api/agentlocal/v1/agentlocal.pb.validate.go b/api/agentlocal/v1/agentlocal.pb.validate.go
index 5c3bcf6a3d..118cb9f916 100644
--- a/api/agentlocal/v1/agentlocal.pb.validate.go
+++ b/api/agentlocal/v1/agentlocal.pb.validate.go
@@ -140,7 +140,7 @@ type ServerInfoMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServerInfoMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -249,7 +249,7 @@ type AgentInfoMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AgentInfoMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -351,7 +351,7 @@ type StatusRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StatusRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -526,7 +526,7 @@ type StatusResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StatusResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -626,7 +626,7 @@ type ReloadRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ReloadRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -726,7 +726,7 @@ type ReloadResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ReloadResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/alerting/v1/alerting.pb.go b/api/alerting/v1/alerting.pb.go
index 5d599a55c2..4247a7a22b 100644
--- a/api/alerting/v1/alerting.pb.go
+++ b/api/alerting/v1/alerting.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: alerting/v1/alerting.proto
@@ -9,6 +9,7 @@ package alertingv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -139,11 +140,10 @@ func (FilterType) EnumDescriptor() ([]byte, []int) {
// BoolParamDefinition represents boolean parameter's default value.
type BoolParamDefinition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Default *bool `protobuf:"varint,1,opt,name=default,proto3,oneof" json:"default,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Default *bool `protobuf:"varint,1,opt,name=default,proto3,oneof" json:"default,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *BoolParamDefinition) Reset() {
@@ -185,16 +185,15 @@ func (x *BoolParamDefinition) GetDefault() bool {
// FloatParamDefinition represents float parameter's default value and valid range.
type FloatParamDefinition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Default value.
Default *float64 `protobuf:"fixed64,1,opt,name=default,proto3,oneof" json:"default,omitempty"`
// Minimum valid value (inclusive).
Min *float64 `protobuf:"fixed64,2,opt,name=min,proto3,oneof" json:"min,omitempty"`
// Maximum valid value (inclusive).
- Max *float64 `protobuf:"fixed64,3,opt,name=max,proto3,oneof" json:"max,omitempty"`
+ Max *float64 `protobuf:"fixed64,3,opt,name=max,proto3,oneof" json:"max,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FloatParamDefinition) Reset() {
@@ -250,12 +249,11 @@ func (x *FloatParamDefinition) GetMax() float64 {
// StringParamDefinition represents string parameter's default value.
type StringParamDefinition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Default value.
- Default *string `protobuf:"bytes,1,opt,name=default,proto3,oneof" json:"default,omitempty"`
+ Default *string `protobuf:"bytes,1,opt,name=default,proto3,oneof" json:"default,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StringParamDefinition) Reset() {
@@ -297,10 +295,7 @@ func (x *StringParamDefinition) GetDefault() string {
// ParamDefinition represents a single query parameter.
type ParamDefinition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID) that is used in expression.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Short human-readable parameter summary.
@@ -311,12 +306,14 @@ type ParamDefinition struct {
Type ParamType `protobuf:"varint,4,opt,name=type,proto3,enum=alerting.v1.ParamType" json:"type,omitempty"`
// Parameter value.
//
- // Types that are assignable to Value:
+ // Types that are valid to be assigned to Value:
//
// *ParamDefinition_Bool
// *ParamDefinition_Float
// *ParamDefinition_String_
- Value isParamDefinition_Value `protobuf_oneof:"value"`
+ Value isParamDefinition_Value `protobuf_oneof:"value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ParamDefinition) Reset() {
@@ -377,30 +374,36 @@ func (x *ParamDefinition) GetType() ParamType {
return ParamType_PARAM_TYPE_UNSPECIFIED
}
-func (m *ParamDefinition) GetValue() isParamDefinition_Value {
- if m != nil {
- return m.Value
+func (x *ParamDefinition) GetValue() isParamDefinition_Value {
+ if x != nil {
+ return x.Value
}
return nil
}
func (x *ParamDefinition) GetBool() *BoolParamDefinition {
- if x, ok := x.GetValue().(*ParamDefinition_Bool); ok {
- return x.Bool
+ if x != nil {
+ if x, ok := x.Value.(*ParamDefinition_Bool); ok {
+ return x.Bool
+ }
}
return nil
}
func (x *ParamDefinition) GetFloat() *FloatParamDefinition {
- if x, ok := x.GetValue().(*ParamDefinition_Float); ok {
- return x.Float
+ if x != nil {
+ if x, ok := x.Value.(*ParamDefinition_Float); ok {
+ return x.Float
+ }
}
return nil
}
func (x *ParamDefinition) GetString_() *StringParamDefinition {
- if x, ok := x.GetValue().(*ParamDefinition_String_); ok {
- return x.String_
+ if x != nil {
+ if x, ok := x.Value.(*ParamDefinition_String_); ok {
+ return x.String_
+ }
}
return nil
}
@@ -432,10 +435,7 @@ func (*ParamDefinition_String_) isParamDefinition_Value() {}
// Template represents Alert Template that is used to create Alert Rule.
type Template struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID).
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Short human-readable summary.
@@ -449,15 +449,17 @@ type Template struct {
// Severity.
Severity v1.Severity `protobuf:"varint,6,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
// Labels.
- Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Annotations.
- Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Template source. Only templates created via API can be updated or deleted via API.
Source TemplateSource `protobuf:"varint,9,opt,name=source,proto3,enum=alerting.v1.TemplateSource" json:"source,omitempty"`
// Template creation time. Empty for built-in and SaaS templates.
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
// YAML template file content. Empty for built-in and SaaS templates.
- Yaml string `protobuf:"bytes,11,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ Yaml string `protobuf:"bytes,11,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Template) Reset() {
@@ -568,16 +570,15 @@ func (x *Template) GetYaml() string {
}
type ListTemplatesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Maximum number of results per page.
PageSize *int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3,oneof" json:"page_size,omitempty"`
// Index of the requested page, starts from 0.
PageIndex *int32 `protobuf:"varint,2,opt,name=page_index,json=pageIndex,proto3,oneof" json:"page_index,omitempty"`
// If true, template files will be re-read from disk.
- Reload bool `protobuf:"varint,3,opt,name=reload,proto3" json:"reload,omitempty"`
+ Reload bool `protobuf:"varint,3,opt,name=reload,proto3" json:"reload,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListTemplatesRequest) Reset() {
@@ -632,16 +633,15 @@ func (x *ListTemplatesRequest) GetReload() bool {
}
type ListTemplatesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Total number of results.
TotalItems int32 `protobuf:"varint,1,opt,name=total_items,json=totalItems,proto3" json:"total_items,omitempty"`
// Total number of pages.
TotalPages int32 `protobuf:"varint,2,opt,name=total_pages,json=totalPages,proto3" json:"total_pages,omitempty"`
// Alerting templates.
- Templates []*Template `protobuf:"bytes,3,rep,name=templates,proto3" json:"templates,omitempty"`
+ Templates []*Template `protobuf:"bytes,3,rep,name=templates,proto3" json:"templates,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListTemplatesResponse) Reset() {
@@ -696,12 +696,11 @@ func (x *ListTemplatesResponse) GetTemplates() []*Template {
}
type CreateTemplateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// YAML template file content.
- Yaml string `protobuf:"bytes,1,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ Yaml string `protobuf:"bytes,1,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CreateTemplateRequest) Reset() {
@@ -742,9 +741,9 @@ func (x *CreateTemplateRequest) GetYaml() string {
}
type CreateTemplateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CreateTemplateResponse) Reset() {
@@ -778,14 +777,13 @@ func (*CreateTemplateResponse) Descriptor() ([]byte, []int) {
}
type UpdateTemplateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID).
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// YAML template file content.
- Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateTemplateRequest) Reset() {
@@ -833,9 +831,9 @@ func (x *UpdateTemplateRequest) GetYaml() string {
}
type UpdateTemplateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateTemplateResponse) Reset() {
@@ -869,11 +867,10 @@ func (*UpdateTemplateResponse) Descriptor() ([]byte, []int) {
}
type DeleteTemplateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteTemplateRequest) Reset() {
@@ -914,9 +911,9 @@ func (x *DeleteTemplateRequest) GetName() string {
}
type DeleteTemplateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteTemplateResponse) Reset() {
@@ -951,13 +948,12 @@ func (*DeleteTemplateResponse) Descriptor() ([]byte, []int) {
// Filter represents a single filter condition.
type Filter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type FilterType `protobuf:"varint,1,opt,name=type,proto3,enum=alerting.v1.FilterType" json:"type,omitempty"`
+ Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"`
+ Regexp string `protobuf:"bytes,3,opt,name=regexp,proto3" json:"regexp,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Type FilterType `protobuf:"varint,1,opt,name=type,proto3,enum=alerting.v1.FilterType" json:"type,omitempty"`
- Label string `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"`
- Regexp string `protobuf:"bytes,3,opt,name=regexp,proto3" json:"regexp,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Filter) Reset() {
@@ -1013,22 +1009,21 @@ func (x *Filter) GetRegexp() string {
// ParamValue represents a single rule parameter value.
type ParamValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable name (ID) that is used in expression.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Parameter type.
Type ParamType `protobuf:"varint,2,opt,name=type,proto3,enum=alerting.v1.ParamType" json:"type,omitempty"`
// Parameter value.
//
- // Types that are assignable to Value:
+ // Types that are valid to be assigned to Value:
//
// *ParamValue_Bool
// *ParamValue_Float
// *ParamValue_String_
- Value isParamValue_Value `protobuf_oneof:"value"`
+ Value isParamValue_Value `protobuf_oneof:"value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ParamValue) Reset() {
@@ -1075,30 +1070,36 @@ func (x *ParamValue) GetType() ParamType {
return ParamType_PARAM_TYPE_UNSPECIFIED
}
-func (m *ParamValue) GetValue() isParamValue_Value {
- if m != nil {
- return m.Value
+func (x *ParamValue) GetValue() isParamValue_Value {
+ if x != nil {
+ return x.Value
}
return nil
}
func (x *ParamValue) GetBool() bool {
- if x, ok := x.GetValue().(*ParamValue_Bool); ok {
- return x.Bool
+ if x != nil {
+ if x, ok := x.Value.(*ParamValue_Bool); ok {
+ return x.Bool
+ }
}
return false
}
func (x *ParamValue) GetFloat() float64 {
- if x, ok := x.GetValue().(*ParamValue_Float); ok {
- return x.Float
+ if x != nil {
+ if x, ok := x.Value.(*ParamValue_Float); ok {
+ return x.Float
+ }
}
return 0
}
func (x *ParamValue) GetString_() string {
- if x, ok := x.GetValue().(*ParamValue_String_); ok {
- return x.String_
+ if x != nil {
+ if x, ok := x.Value.(*ParamValue_String_); ok {
+ return x.String_
+ }
}
return ""
}
@@ -1129,10 +1130,7 @@ func (*ParamValue_Float) isParamValue_Value() {}
func (*ParamValue_String_) isParamValue_Value() {}
type CreateRuleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Template name.
TemplateName string `protobuf:"bytes,1,opt,name=template_name,json=templateName,proto3" json:"template_name,omitempty"`
// Rule name.
@@ -1148,11 +1146,13 @@ type CreateRuleRequest struct {
// Rule severity. Should be set.
Severity v1.Severity `protobuf:"varint,7,opt,name=severity,proto3,enum=management.v1.Severity" json:"severity,omitempty"`
// All custom labels to add or remove (with empty values) to default labels from template.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Filters.
Filters []*Filter `protobuf:"bytes,9,rep,name=filters,proto3" json:"filters,omitempty"`
// Evaluation Interval
- Interval *durationpb.Duration `protobuf:"bytes,10,opt,name=interval,proto3" json:"interval,omitempty"`
+ Interval *durationpb.Duration `protobuf:"bytes,10,opt,name=interval,proto3" json:"interval,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CreateRuleRequest) Reset() {
@@ -1256,9 +1256,9 @@ func (x *CreateRuleRequest) GetInterval() *durationpb.Duration {
}
type CreateRuleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CreateRuleResponse) Reset() {
@@ -1293,7 +1293,7 @@ func (*CreateRuleResponse) Descriptor() ([]byte, []int) {
var File_alerting_v1_alerting_proto protoreflect.FileDescriptor
-var file_alerting_v1_alerting_proto_rawDesc = []byte{
+var file_alerting_v1_alerting_proto_rawDesc = string([]byte{
0x0a, 0x1a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x6c,
0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x61, 0x6c,
0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x18, 0x61, 0x6c, 0x65, 0x72, 0x74,
@@ -1540,16 +1540,16 @@ var file_alerting_v1_alerting_proto_rawDesc = []byte{
0x17, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0c, 0x41, 0x6c, 0x65, 0x72, 0x74,
0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_alerting_v1_alerting_proto_rawDescOnce sync.Once
- file_alerting_v1_alerting_proto_rawDescData = file_alerting_v1_alerting_proto_rawDesc
+ file_alerting_v1_alerting_proto_rawDescData []byte
)
func file_alerting_v1_alerting_proto_rawDescGZIP() []byte {
file_alerting_v1_alerting_proto_rawDescOnce.Do(func() {
- file_alerting_v1_alerting_proto_rawDescData = protoimpl.X.CompressGZIP(file_alerting_v1_alerting_proto_rawDescData)
+ file_alerting_v1_alerting_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_alerting_v1_alerting_proto_rawDesc), len(file_alerting_v1_alerting_proto_rawDesc)))
})
return file_alerting_v1_alerting_proto_rawDescData
}
@@ -1651,7 +1651,7 @@ func file_alerting_v1_alerting_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_alerting_v1_alerting_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_alerting_v1_alerting_proto_rawDesc), len(file_alerting_v1_alerting_proto_rawDesc)),
NumEnums: 2,
NumMessages: 20,
NumExtensions: 0,
@@ -1663,7 +1663,6 @@ func file_alerting_v1_alerting_proto_init() {
MessageInfos: file_alerting_v1_alerting_proto_msgTypes,
}.Build()
File_alerting_v1_alerting_proto = out.File
- file_alerting_v1_alerting_proto_rawDesc = nil
file_alerting_v1_alerting_proto_goTypes = nil
file_alerting_v1_alerting_proto_depIdxs = nil
}
diff --git a/api/alerting/v1/alerting.pb.gw.go b/api/alerting/v1/alerting.pb.gw.go
index 8edb3ca048..ae793fc4ef 100644
--- a/api/alerting/v1/alerting.pb.gw.go
+++ b/api/alerting/v1/alerting.pb.gw.go
@@ -10,6 +10,7 @@ package alertingv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,6 +29,7 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
@@ -36,187 +38,157 @@ var (
var filter_AlertingService_ListTemplates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_AlertingService_ListTemplates_0(ctx context.Context, marshaler runtime.Marshaler, client AlertingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListTemplatesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListTemplatesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AlertingService_ListTemplates_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListTemplates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AlertingService_ListTemplates_0(ctx context.Context, marshaler runtime.Marshaler, server AlertingServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListTemplatesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListTemplatesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AlertingService_ListTemplates_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListTemplates(ctx, &protoReq)
return msg, metadata, err
}
func request_AlertingService_CreateTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client AlertingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateTemplateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateTemplateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.CreateTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AlertingService_CreateTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server AlertingServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateTemplateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateTemplateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.CreateTemplate(ctx, &protoReq)
return msg, metadata, err
}
func request_AlertingService_UpdateTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client AlertingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateTemplateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UpdateTemplateRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["name"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
-
protoReq.Name, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
-
msg, err := client.UpdateTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AlertingService_UpdateTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server AlertingServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateTemplateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UpdateTemplateRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["name"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
-
protoReq.Name, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
-
msg, err := server.UpdateTemplate(ctx, &protoReq)
return msg, metadata, err
}
func request_AlertingService_DeleteTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client AlertingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteTemplateRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteTemplateRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["name"]
+ val, ok := pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
-
protoReq.Name, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
-
msg, err := client.DeleteTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AlertingService_DeleteTemplate_0(ctx context.Context, marshaler runtime.Marshaler, server AlertingServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteTemplateRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteTemplateRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["name"]
+ val, ok := pathParams["name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name")
}
-
protoReq.Name, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err)
}
-
msg, err := server.DeleteTemplate(ctx, &protoReq)
return msg, metadata, err
}
func request_AlertingService_CreateRule_0(ctx context.Context, marshaler runtime.Marshaler, client AlertingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateRuleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateRuleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.CreateRule(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AlertingService_CreateRule_0(ctx context.Context, marshaler runtime.Marshaler, server AlertingServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CreateRuleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq CreateRuleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.CreateRule(ctx, &protoReq)
return msg, metadata, err
}
@@ -227,15 +199,13 @@ func local_request_AlertingService_CreateRule_0(ctx context.Context, marshaler r
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAlertingServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AlertingServiceServer) error {
- mux.Handle("GET", pattern_AlertingService_ListTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AlertingService_ListTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/ListTemplates", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/ListTemplates", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -247,19 +217,15 @@ func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_ListTemplates_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AlertingService_CreateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AlertingService_CreateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -271,19 +237,15 @@ func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_CreateTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AlertingService_UpdateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AlertingService_UpdateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/UpdateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/UpdateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -295,19 +257,15 @@ func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_UpdateTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AlertingService_DeleteTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AlertingService_DeleteTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/DeleteTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/DeleteTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -319,19 +277,15 @@ func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_DeleteTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AlertingService_CreateRule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AlertingService_CreateRule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateRule", runtime.WithHTTPPathPattern("/v1/alerting/rules"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateRule", runtime.WithHTTPPathPattern("/v1/alerting/rules"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -343,7 +297,6 @@ func RegisterAlertingServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_CreateRule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -371,7 +324,6 @@ func RegisterAlertingServiceHandlerFromEndpoint(ctx context.Context, mux *runtim
}
}()
}()
-
return RegisterAlertingServiceHandler(ctx, mux, conn)
}
@@ -387,13 +339,11 @@ func RegisterAlertingServiceHandler(ctx context.Context, mux *runtime.ServeMux,
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AlertingServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AlertingServiceClient) error {
- mux.Handle("GET", pattern_AlertingService_ListTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AlertingService_ListTemplates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/ListTemplates", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/ListTemplates", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -404,17 +354,13 @@ func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_ListTemplates_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AlertingService_CreateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AlertingService_CreateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -425,17 +371,13 @@ func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_CreateTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AlertingService_UpdateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AlertingService_UpdateTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/UpdateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/UpdateTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -446,17 +388,13 @@ func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_UpdateTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AlertingService_DeleteTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AlertingService_DeleteTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/DeleteTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/DeleteTemplate", runtime.WithHTTPPathPattern("/v1/alerting/templates/{name}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -467,17 +405,13 @@ func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_DeleteTemplate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AlertingService_CreateRule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AlertingService_CreateRule_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateRule", runtime.WithHTTPPathPattern("/v1/alerting/rules"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/alerting.v1.AlertingService/CreateRule", runtime.WithHTTPPathPattern("/v1/alerting/rules"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -488,33 +422,23 @@ func RegisterAlertingServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AlertingService_CreateRule_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_AlertingService_ListTemplates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "alerting", "templates"}, ""))
-
+ pattern_AlertingService_ListTemplates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "alerting", "templates"}, ""))
pattern_AlertingService_CreateTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "alerting", "templates"}, ""))
-
pattern_AlertingService_UpdateTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "alerting", "templates", "name"}, ""))
-
pattern_AlertingService_DeleteTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "alerting", "templates", "name"}, ""))
-
- pattern_AlertingService_CreateRule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "alerting", "rules"}, ""))
+ pattern_AlertingService_CreateRule_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "alerting", "rules"}, ""))
)
var (
- forward_AlertingService_ListTemplates_0 = runtime.ForwardResponseMessage
-
+ forward_AlertingService_ListTemplates_0 = runtime.ForwardResponseMessage
forward_AlertingService_CreateTemplate_0 = runtime.ForwardResponseMessage
-
forward_AlertingService_UpdateTemplate_0 = runtime.ForwardResponseMessage
-
forward_AlertingService_DeleteTemplate_0 = runtime.ForwardResponseMessage
-
- forward_AlertingService_CreateRule_0 = runtime.ForwardResponseMessage
+ forward_AlertingService_CreateRule_0 = runtime.ForwardResponseMessage
)
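
The regenerated gateway handlers above switch from comparing the decode error directly against `io.EOF` to `!errors.Is(err, io.EOF)`, and from bare string verbs to the `http.Method*` constants. A minimal, self-contained sketch (not part of the generated code) of why the `errors.Is` form is the safer check once a sentinel error may arrive wrapped:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A decoder or middleware may wrap the sentinel before returning it.
	wrapped := fmt.Errorf("decode request body: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: direct comparison misses the wrapped sentinel
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is walks the Unwrap chain
}
```

Using `http.MethodGet`, `http.MethodPost`, and friends instead of literal strings yields the same values while giving compile-time protection against typos.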
diff --git a/api/alerting/v1/alerting.pb.validate.go b/api/alerting/v1/alerting.pb.validate.go
index 8c49e99a95..f6a7dbcf1f 100644
--- a/api/alerting/v1/alerting.pb.validate.go
+++ b/api/alerting/v1/alerting.pb.validate.go
@@ -79,7 +79,7 @@ type BoolParamDefinitionMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m BoolParamDefinitionMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -193,7 +193,7 @@ type FloatParamDefinitionMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FloatParamDefinitionMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -299,7 +299,7 @@ type StringParamDefinitionMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StringParamDefinitionMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -555,7 +555,7 @@ type ParamDefinitionMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ParamDefinitionMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -762,7 +762,7 @@ type TemplateMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m TemplateMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -890,7 +890,7 @@ type ListTemplatesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListTemplatesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1030,7 +1030,7 @@ type ListTemplatesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListTemplatesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1143,7 +1143,7 @@ type CreateTemplateRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateTemplateRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1245,7 +1245,7 @@ type CreateTemplateResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateTemplateResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1369,7 +1369,7 @@ type UpdateTemplateRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateTemplateRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1471,7 +1471,7 @@ type UpdateTemplateResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateTemplateResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1584,7 +1584,7 @@ type DeleteTemplateRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteTemplateRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1686,7 +1686,7 @@ type DeleteTemplateResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteTemplateResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1792,7 +1792,7 @@ type FilterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FilterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1945,7 +1945,7 @@ type ParamValueMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ParamValueMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2183,7 +2183,7 @@ type CreateRuleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateRuleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2285,7 +2285,7 @@ type CreateRuleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CreateRuleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
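
The validator diffs above consistently replace `var msgs []string` with a slice pre-sized to the number of wrapped errors. A short sketch of the same allocation pattern outside the generated file (the `joinErrors` helper is illustrative only, not part of the API):

```go
package main

import (
	"errors"
	"fmt"
)

// joinErrors mirrors the generated MultiError.Error() loop: pre-sizing the
// slice to len(errs) avoids the repeated reallocations a nil slice would
// incur while appending one message per wrapped error.
func joinErrors(errs []error) []string {
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return msgs
}

func main() {
	fmt.Println(joinErrors([]error{errors.New("first"), errors.New("second")}))
}
```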
diff --git a/api/alerting/v1/params.pb.go b/api/alerting/v1/params.pb.go
index 80c9f6c43d..c972130724 100644
--- a/api/alerting/v1/params.pb.go
+++ b/api/alerting/v1/params.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: alerting/v1/params.proto
@@ -9,6 +9,7 @@ package alertingv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -129,7 +130,7 @@ func (ParamType) EnumDescriptor() ([]byte, []int) {
var File_alerting_v1_params_proto protoreflect.FileDescriptor
-var file_alerting_v1_params_proto_rawDesc = []byte{
+var file_alerting_v1_params_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61,
0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x61, 0x6c, 0x65, 0x72,
0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2a, 0x5a, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d,
@@ -156,16 +157,16 @@ var file_alerting_v1_params_proto_rawDesc = []byte{
0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x0c, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_alerting_v1_params_proto_rawDescOnce sync.Once
- file_alerting_v1_params_proto_rawDescData = file_alerting_v1_params_proto_rawDesc
+ file_alerting_v1_params_proto_rawDescData []byte
)
func file_alerting_v1_params_proto_rawDescGZIP() []byte {
file_alerting_v1_params_proto_rawDescOnce.Do(func() {
- file_alerting_v1_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_alerting_v1_params_proto_rawDescData)
+ file_alerting_v1_params_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_alerting_v1_params_proto_rawDesc), len(file_alerting_v1_params_proto_rawDesc)))
})
return file_alerting_v1_params_proto_rawDescData
}
@@ -195,7 +196,7 @@ func file_alerting_v1_params_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_alerting_v1_params_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_alerting_v1_params_proto_rawDesc), len(file_alerting_v1_params_proto_rawDesc)),
NumEnums: 2,
NumMessages: 0,
NumExtensions: 0,
@@ -206,7 +207,6 @@ func file_alerting_v1_params_proto_init() {
EnumInfos: file_alerting_v1_params_proto_enumTypes,
}.Build()
File_alerting_v1_params_proto = out.File
- file_alerting_v1_params_proto_rawDesc = nil
file_alerting_v1_params_proto_goTypes = nil
file_alerting_v1_params_proto_depIdxs = nil
}
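
In the regenerated `*.pb.go` files, protoc-gen-go v1.36.x keeps the raw file descriptor as a `string` and converts it to `[]byte` on demand via `unsafe.Slice(unsafe.StringData(...), len(...))`. A hedged sketch of that zero-copy conversion (the `bytesView` helper is illustrative, not part of the generated API); it requires Go 1.20+ and the result must be treated as read-only, since strings are immutable:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesView returns a []byte aliasing the string's backing array without
// copying. Mutating the returned slice would violate string immutability,
// so callers may only read from it.
func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := bytesView("alerting/v1/params.proto")
	fmt.Println(len(b), string(b))
}
```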
diff --git a/api/backup/v1/artifacts.pb.go b/api/backup/v1/artifacts.pb.go
index 0d5056a189..daf2fbf888 100644
--- a/api/backup/v1/artifacts.pb.go
+++ b/api/backup/v1/artifacts.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/artifacts.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -93,10 +94,7 @@ func (BackupStatus) EnumDescriptor() ([]byte, []int) {
// Artifact represents single backup artifact.
type Artifact struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable artifact ID.
ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
// Artifact name
@@ -124,7 +122,9 @@ type Artifact struct {
// Folder to store artifact on a storage.
Folder string `protobuf:"bytes,13,opt,name=folder,proto3" json:"folder,omitempty"`
// List of artifact metadata.
- MetadataList []*Metadata `protobuf:"bytes,14,rep,name=metadata_list,json=metadataList,proto3" json:"metadata_list,omitempty"`
+ MetadataList []*Metadata `protobuf:"bytes,14,rep,name=metadata_list,json=metadataList,proto3" json:"metadata_list,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Artifact) Reset() {
@@ -256,9 +256,9 @@ func (x *Artifact) GetMetadataList() []*Metadata {
}
type ListArtifactsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListArtifactsRequest) Reset() {
@@ -292,11 +292,10 @@ func (*ListArtifactsRequest) Descriptor() ([]byte, []int) {
}
type ListArtifactsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Artifacts []*Artifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Artifacts []*Artifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListArtifactsResponse) Reset() {
@@ -337,14 +336,13 @@ func (x *ListArtifactsResponse) GetArtifacts() []*Artifact {
}
type DeleteArtifactRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable artifact ID.
ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
// Removes all the backup files associated with artifact if flag is set.
- RemoveFiles bool `protobuf:"varint,2,opt,name=remove_files,json=removeFiles,proto3" json:"remove_files,omitempty"`
+ RemoveFiles bool `protobuf:"varint,2,opt,name=remove_files,json=removeFiles,proto3" json:"remove_files,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteArtifactRequest) Reset() {
@@ -392,9 +390,9 @@ func (x *DeleteArtifactRequest) GetRemoveFiles() bool {
}
type DeleteArtifactResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteArtifactResponse) Reset() {
@@ -428,14 +426,13 @@ func (*DeleteArtifactResponse) Descriptor() ([]byte, []int) {
}
type PitrTimerange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// start_timestamp is the time of the first event in the PITR chunk.
StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
// end_timestamp is the time of the last event in the PITR chunk.
- EndTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"`
+ EndTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_timestamp,json=endTimestamp,proto3" json:"end_timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PitrTimerange) Reset() {
@@ -483,12 +480,11 @@ func (x *PitrTimerange) GetEndTimestamp() *timestamppb.Timestamp {
}
type ListPitrTimerangesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Artifact ID represents artifact whose location has PITR timeranges to be retrieved.
- ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListPitrTimerangesRequest) Reset() {
@@ -529,11 +525,10 @@ func (x *ListPitrTimerangesRequest) GetArtifactId() string {
}
type ListPitrTimerangesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Timeranges []*PitrTimerange `protobuf:"bytes,1,rep,name=timeranges,proto3" json:"timeranges,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Timeranges []*PitrTimerange `protobuf:"bytes,1,rep,name=timeranges,proto3" json:"timeranges,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListPitrTimerangesResponse) Reset() {
@@ -575,7 +570,7 @@ func (x *ListPitrTimerangesResponse) GetTimeranges() []*PitrTimerange {
var File_backup_v1_artifacts_proto protoreflect.FileDescriptor
-var file_backup_v1_artifacts_proto_rawDesc = []byte{
+var file_backup_v1_artifacts_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x72, 0x74, 0x69,
0x66, 0x61, 0x63, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63,
0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76,
@@ -680,16 +675,16 @@ var file_backup_v1_artifacts_proto_rawDesc = []byte{
0x31, 0xe2, 0x02, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50,
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b,
0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_artifacts_proto_rawDescOnce sync.Once
- file_backup_v1_artifacts_proto_rawDescData = file_backup_v1_artifacts_proto_rawDesc
+ file_backup_v1_artifacts_proto_rawDescData []byte
)
func file_backup_v1_artifacts_proto_rawDescGZIP() []byte {
file_backup_v1_artifacts_proto_rawDescOnce.Do(func() {
- file_backup_v1_artifacts_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_artifacts_proto_rawDescData)
+ file_backup_v1_artifacts_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_artifacts_proto_rawDesc), len(file_backup_v1_artifacts_proto_rawDesc)))
})
return file_backup_v1_artifacts_proto_rawDescData
}
@@ -741,7 +736,7 @@ func file_backup_v1_artifacts_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_artifacts_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_artifacts_proto_rawDesc), len(file_backup_v1_artifacts_proto_rawDesc)),
NumEnums: 1,
NumMessages: 8,
NumExtensions: 0,
@@ -753,7 +748,6 @@ func file_backup_v1_artifacts_proto_init() {
MessageInfos: file_backup_v1_artifacts_proto_msgTypes,
}.Build()
File_backup_v1_artifacts_proto = out.File
- file_backup_v1_artifacts_proto_rawDesc = nil
file_backup_v1_artifacts_proto_goTypes = nil
file_backup_v1_artifacts_proto_depIdxs = nil
}
diff --git a/api/backup/v1/artifacts.pb.validate.go b/api/backup/v1/artifacts.pb.validate.go
index 0384aec4cd..4b5be98043 100644
--- a/api/backup/v1/artifacts.pb.validate.go
+++ b/api/backup/v1/artifacts.pb.validate.go
@@ -157,7 +157,7 @@ type ArtifactMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ArtifactMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -257,7 +257,7 @@ type ListArtifactsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListArtifactsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -393,7 +393,7 @@ type ListArtifactsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListArtifactsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -508,7 +508,7 @@ type DeleteArtifactRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteArtifactRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -610,7 +610,7 @@ type DeleteArtifactResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteArtifactResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -770,7 +770,7 @@ type PitrTimerangeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PitrTimerangeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -881,7 +881,7 @@ type ListPitrTimerangesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListPitrTimerangesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1017,7 +1017,7 @@ type ListPitrTimerangesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListPitrTimerangesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/backup/v1/backup.pb.go b/api/backup/v1/backup.pb.go
index ffe4df804c..58ae06b4a0 100644
--- a/api/backup/v1/backup.pb.go
+++ b/api/backup/v1/backup.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/backup.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -29,10 +30,7 @@ const (
)
type StartBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Machine-readable location ID.
@@ -48,7 +46,9 @@ type StartBackupRequest struct {
// DataModel represents the data model used for the backup.
DataModel DataModel `protobuf:"varint,7,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"`
// Folder on storage for artifact.
- Folder string `protobuf:"bytes,8,opt,name=folder,proto3" json:"folder,omitempty"`
+ Folder string `protobuf:"bytes,8,opt,name=folder,proto3" json:"folder,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartBackupRequest) Reset() {
@@ -138,12 +138,11 @@ func (x *StartBackupRequest) GetFolder() string {
}
type StartBackupResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique identifier.
- ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartBackupResponse) Reset() {
@@ -184,12 +183,11 @@ func (x *StartBackupResponse) GetArtifactId() string {
}
type ListArtifactCompatibleServicesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Artifact id used to determine restore compatibility.
- ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListArtifactCompatibleServicesRequest) Reset() {
@@ -230,12 +228,11 @@ func (x *ListArtifactCompatibleServicesRequest) GetArtifactId() string {
}
type ListArtifactCompatibleServicesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Mysql []*v1.MySQLService `protobuf:"bytes,1,rep,name=mysql,proto3" json:"mysql,omitempty"`
+ Mongodb []*v1.MongoDBService `protobuf:"bytes,2,rep,name=mongodb,proto3" json:"mongodb,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Mysql []*v1.MySQLService `protobuf:"bytes,1,rep,name=mysql,proto3" json:"mysql,omitempty"`
- Mongodb []*v1.MongoDBService `protobuf:"bytes,2,rep,name=mongodb,proto3" json:"mongodb,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListArtifactCompatibleServicesResponse) Reset() {
@@ -284,10 +281,7 @@ func (x *ListArtifactCompatibleServicesResponse) GetMongodb() []*v1.MongoDBServi
// ScheduledBackup represents scheduled task for backup.
type ScheduledBackup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable ID.
ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
// Machine-readable service ID.
@@ -325,7 +319,9 @@ type ScheduledBackup struct {
// Next run.
NextRun *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=next_run,json=nextRun,proto3" json:"next_run,omitempty"`
// How many artifacts keep. 0 - unlimited.
- Retention uint32 `protobuf:"varint,19,opt,name=retention,proto3" json:"retention,omitempty"`
+ Retention uint32 `protobuf:"varint,19,opt,name=retention,proto3" json:"retention,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ScheduledBackup) Reset() {
@@ -492,10 +488,7 @@ func (x *ScheduledBackup) GetRetention() uint32 {
}
type ScheduleBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service identifier where backup should be performed.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Machine-readable location ID.
@@ -521,7 +514,9 @@ type ScheduleBackupRequest struct {
// Backup data model (physical or logical).
DataModel DataModel `protobuf:"varint,12,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"`
// How many artifacts keep. 0 - unlimited.
- Retention uint32 `protobuf:"varint,13,opt,name=retention,proto3" json:"retention,omitempty"`
+ Retention uint32 `protobuf:"varint,13,opt,name=retention,proto3" json:"retention,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ScheduleBackupRequest) Reset() {
@@ -646,11 +641,10 @@ func (x *ScheduleBackupRequest) GetRetention() uint32 {
}
type ScheduleBackupResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ScheduleBackupResponse) Reset() {
@@ -691,9 +685,9 @@ func (x *ScheduleBackupResponse) GetScheduledBackupId() string {
}
type ListScheduledBackupsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListScheduledBackupsRequest) Reset() {
@@ -727,11 +721,10 @@ func (*ListScheduledBackupsRequest) Descriptor() ([]byte, []int) {
}
type ListScheduledBackupsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ScheduledBackups []*ScheduledBackup `protobuf:"bytes,1,rep,name=scheduled_backups,json=scheduledBackups,proto3" json:"scheduled_backups,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ScheduledBackups []*ScheduledBackup `protobuf:"bytes,1,rep,name=scheduled_backups,json=scheduledBackups,proto3" json:"scheduled_backups,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListScheduledBackupsResponse) Reset() {
@@ -772,12 +765,9 @@ func (x *ListScheduledBackupsResponse) GetScheduledBackups() []*ScheduledBackup
}
type ChangeScheduledBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
- Enabled *bool `protobuf:"varint,2,opt,name=enabled,proto3,oneof" json:"enabled,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
+ Enabled *bool `protobuf:"varint,2,opt,name=enabled,proto3,oneof" json:"enabled,omitempty"`
// How often backup should be run in cron format.
CronExpression *string `protobuf:"bytes,3,opt,name=cron_expression,json=cronExpression,proto3,oneof" json:"cron_expression,omitempty"`
// First backup wouldn't happen before this time.
@@ -791,7 +781,9 @@ type ChangeScheduledBackupRequest struct {
// Delay between each retry. Should have a suffix in JSON: 1s, 1m, 1h.
RetryInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=retry_interval,json=retryInterval,proto3" json:"retry_interval,omitempty"`
// How many artifacts keep. 0 - unlimited.
- Retention *uint32 `protobuf:"varint,9,opt,name=retention,proto3,oneof" json:"retention,omitempty"`
+ Retention *uint32 `protobuf:"varint,9,opt,name=retention,proto3,oneof" json:"retention,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeScheduledBackupRequest) Reset() {
@@ -888,9 +880,9 @@ func (x *ChangeScheduledBackupRequest) GetRetention() uint32 {
}
type ChangeScheduledBackupResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeScheduledBackupResponse) Reset() {
@@ -924,11 +916,10 @@ func (*ChangeScheduledBackupResponse) Descriptor() ([]byte, []int) {
}
type RemoveScheduledBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveScheduledBackupRequest) Reset() {
@@ -969,9 +960,9 @@ func (x *RemoveScheduledBackupRequest) GetScheduledBackupId() string {
}
type RemoveScheduledBackupResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveScheduledBackupResponse) Reset() {
@@ -1005,13 +996,12 @@ func (*RemoveScheduledBackupResponse) Descriptor() ([]byte, []int) {
}
type GetLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
+ Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
- Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
- Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetLogsRequest) Reset() {
@@ -1066,12 +1056,11 @@ func (x *GetLogsRequest) GetLimit() uint32 {
}
type GetLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
- End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetLogsResponse) Reset() {
@@ -1120,7 +1109,7 @@ func (x *GetLogsResponse) GetEnd() bool {
var File_backup_v1_backup_proto protoreflect.FileDescriptor
-var file_backup_v1_backup_proto_rawDesc = []byte{
+var file_backup_v1_backup_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b,
0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
0x2e, 0x76, 0x31, 0x1a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x61,
@@ -1483,16 +1472,16 @@ var file_backup_v1_backup_proto_rawDesc = []byte{
0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_backup_proto_rawDescOnce sync.Once
- file_backup_v1_backup_proto_rawDescData = file_backup_v1_backup_proto_rawDesc
+ file_backup_v1_backup_proto_rawDescData []byte
)
func file_backup_v1_backup_proto_rawDescGZIP() []byte {
file_backup_v1_backup_proto_rawDescOnce.Do(func() {
- file_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_backup_proto_rawDescData)
+ file_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_backup_proto_rawDesc), len(file_backup_v1_backup_proto_rawDesc)))
})
return file_backup_v1_backup_proto_rawDescData
}
@@ -1589,7 +1578,7 @@ func file_backup_v1_backup_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_backup_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_backup_proto_rawDesc), len(file_backup_v1_backup_proto_rawDesc)),
NumEnums: 0,
NumMessages: 15,
NumExtensions: 0,
@@ -1600,7 +1589,6 @@ func file_backup_v1_backup_proto_init() {
MessageInfos: file_backup_v1_backup_proto_msgTypes,
}.Build()
File_backup_v1_backup_proto = out.File
- file_backup_v1_backup_proto_rawDesc = nil
file_backup_v1_backup_proto_goTypes = nil
file_backup_v1_backup_proto_depIdxs = nil
}
diff --git a/api/backup/v1/backup.pb.gw.go b/api/backup/v1/backup.pb.gw.go
index 65d84b9c88..8bb43b8ad6 100644
--- a/api/backup/v1/backup.pb.gw.go
+++ b/api/backup/v1/backup.pb.gw.go
@@ -10,6 +10,7 @@ package backupv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,195 +29,170 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_BackupService_StartBackup_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_StartBackup_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartBackup(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ListArtifactCompatibleServices_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListArtifactCompatibleServicesRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ListArtifactCompatibleServicesRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
msg, err := client.ListArtifactCompatibleServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ListArtifactCompatibleServices_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListArtifactCompatibleServicesRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ListArtifactCompatibleServicesRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
msg, err := server.ListArtifactCompatibleServices(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ScheduleBackup_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ScheduleBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ScheduleBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ScheduleBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ScheduleBackup_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ScheduleBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ScheduleBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ScheduleBackup(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ListScheduledBackups_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListScheduledBackupsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListScheduledBackupsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListScheduledBackups(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ListScheduledBackups_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListScheduledBackupsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListScheduledBackupsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListScheduledBackups(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ChangeScheduledBackup_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeScheduledBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeScheduledBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ChangeScheduledBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ChangeScheduledBackup_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeScheduledBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeScheduledBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ChangeScheduledBackup(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_RemoveScheduledBackup_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveScheduledBackupRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveScheduledBackupRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["scheduled_backup_id"]
+ val, ok := pathParams["scheduled_backup_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "scheduled_backup_id")
}
-
protoReq.ScheduledBackupId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "scheduled_backup_id", err)
}
-
msg, err := client.RemoveScheduledBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_RemoveScheduledBackup_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveScheduledBackupRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveScheduledBackupRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["scheduled_backup_id"]
+ val, ok := pathParams["scheduled_backup_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "scheduled_backup_id")
}
-
protoReq.ScheduledBackupId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "scheduled_backup_id", err)
}
-
msg, err := server.RemoveScheduledBackup(ctx, &protoReq)
return msg, metadata, err
}
@@ -224,81 +200,67 @@ func local_request_BackupService_RemoveScheduledBackup_0(ctx context.Context, ma
var filter_BackupService_GetLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"artifact_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_BackupService_GetLogs_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_BackupService_GetLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetLogs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_GetLogs_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_BackupService_GetLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetLogs(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ListArtifacts_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListArtifactsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListArtifactsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListArtifacts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ListArtifacts_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListArtifactsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListArtifactsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListArtifacts(ctx, &protoReq)
return msg, metadata, err
}
@@ -306,115 +268,85 @@ func local_request_BackupService_ListArtifacts_0(ctx context.Context, marshaler
var filter_BackupService_DeleteArtifact_0 = &utilities.DoubleArray{Encoding: map[string]int{"artifact_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_BackupService_DeleteArtifact_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteArtifactRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteArtifactRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_BackupService_DeleteArtifact_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DeleteArtifact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_DeleteArtifact_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteArtifactRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq DeleteArtifactRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_BackupService_DeleteArtifact_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DeleteArtifact(ctx, &protoReq)
return msg, metadata, err
}
func request_BackupService_ListPitrTimeranges_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListPitrTimerangesRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ListPitrTimerangesRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
msg, err := client.ListPitrTimeranges(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_BackupService_ListPitrTimeranges_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListPitrTimerangesRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ListPitrTimerangesRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["artifact_id"]
+ val, ok := pathParams["artifact_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_id")
}
-
protoReq.ArtifactId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_id", err)
}
-
msg, err := server.ListPitrTimeranges(ctx, &protoReq)
return msg, metadata, err
}
@@ -425,15 +357,13 @@ func local_request_BackupService_ListPitrTimeranges_0(ctx context.Context, marsh
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterBackupServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server BackupServiceServer) error {
- mux.Handle("POST", pattern_BackupService_StartBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_BackupService_StartBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/StartBackup", runtime.WithHTTPPathPattern("/v1/backups:start"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/StartBackup", runtime.WithHTTPPathPattern("/v1/backups:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -445,19 +375,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_StartBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListArtifactCompatibleServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListArtifactCompatibleServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifactCompatibleServices", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/compatible-services"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifactCompatibleServices", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/compatible-services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -469,19 +395,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListArtifactCompatibleServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_BackupService_ScheduleBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_BackupService_ScheduleBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ScheduleBackup", runtime.WithHTTPPathPattern("/v1/backups:schedule"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ScheduleBackup", runtime.WithHTTPPathPattern("/v1/backups:schedule"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -493,19 +415,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ScheduleBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListScheduledBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListScheduledBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListScheduledBackups", runtime.WithHTTPPathPattern("/v1/backups/scheduled"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListScheduledBackups", runtime.WithHTTPPathPattern("/v1/backups/scheduled"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -517,19 +435,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListScheduledBackups_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_BackupService_ChangeScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_BackupService_ChangeScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ChangeScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups:changeScheduled"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ChangeScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups:changeScheduled"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -541,19 +455,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ChangeScheduledBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_BackupService_RemoveScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_BackupService_RemoveScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/RemoveScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups/{scheduled_backup_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/RemoveScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups/{scheduled_backup_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -565,19 +475,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_RemoveScheduledBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/logs"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -589,19 +495,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_GetLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListArtifacts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListArtifacts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifacts", runtime.WithHTTPPathPattern("/v1/backups/artifacts"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifacts", runtime.WithHTTPPathPattern("/v1/backups/artifacts"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -613,19 +515,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListArtifacts_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_BackupService_DeleteArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_BackupService_DeleteArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/DeleteArtifact", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/DeleteArtifact", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -637,19 +535,15 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_DeleteArtifact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListPitrTimeranges_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListPitrTimeranges_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListPitrTimeranges", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}/pitr-timeranges"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListPitrTimeranges", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}/pitr-timeranges"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -661,7 +555,6 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListPitrTimeranges_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -689,7 +582,6 @@ func RegisterBackupServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.
}
}()
}()
-
return RegisterBackupServiceHandler(ctx, mux, conn)
}
@@ -705,13 +597,11 @@ func RegisterBackupServiceHandler(ctx context.Context, mux *runtime.ServeMux, co
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "BackupServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client BackupServiceClient) error {
- mux.Handle("POST", pattern_BackupService_StartBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_BackupService_StartBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/StartBackup", runtime.WithHTTPPathPattern("/v1/backups:start"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/StartBackup", runtime.WithHTTPPathPattern("/v1/backups:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -722,17 +612,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_StartBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListArtifactCompatibleServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListArtifactCompatibleServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifactCompatibleServices", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/compatible-services"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifactCompatibleServices", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/compatible-services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -743,17 +629,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListArtifactCompatibleServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_BackupService_ScheduleBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_BackupService_ScheduleBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ScheduleBackup", runtime.WithHTTPPathPattern("/v1/backups:schedule"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ScheduleBackup", runtime.WithHTTPPathPattern("/v1/backups:schedule"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -764,17 +646,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ScheduleBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListScheduledBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListScheduledBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListScheduledBackups", runtime.WithHTTPPathPattern("/v1/backups/scheduled"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListScheduledBackups", runtime.WithHTTPPathPattern("/v1/backups/scheduled"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -785,17 +663,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListScheduledBackups_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_BackupService_ChangeScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_BackupService_ChangeScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ChangeScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups:changeScheduled"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ChangeScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups:changeScheduled"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -806,17 +680,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ChangeScheduledBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_BackupService_RemoveScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_BackupService_RemoveScheduledBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/RemoveScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups/{scheduled_backup_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/RemoveScheduledBackup", runtime.WithHTTPPathPattern("/v1/backups/{scheduled_backup_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -827,17 +697,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_RemoveScheduledBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/logs"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/{artifact_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -848,17 +714,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_GetLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListArtifacts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListArtifacts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifacts", runtime.WithHTTPPathPattern("/v1/backups/artifacts"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListArtifacts", runtime.WithHTTPPathPattern("/v1/backups/artifacts"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -869,17 +731,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListArtifacts_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_BackupService_DeleteArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_BackupService_DeleteArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/DeleteArtifact", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/DeleteArtifact", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -890,17 +748,13 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_DeleteArtifact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_BackupService_ListPitrTimeranges_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_BackupService_ListPitrTimeranges_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListPitrTimeranges", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}/pitr-timeranges"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListPitrTimeranges", runtime.WithHTTPPathPattern("/v1/backups/artifacts/{artifact_id}/pitr-timeranges"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -911,53 +765,33 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_BackupService_ListPitrTimeranges_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_BackupService_StartBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "start"))
-
+ pattern_BackupService_StartBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "start"))
pattern_BackupService_ListArtifactCompatibleServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "backups", "artifact_id", "compatible-services"}, ""))
-
- pattern_BackupService_ScheduleBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "schedule"))
-
- pattern_BackupService_ListScheduledBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "scheduled"}, ""))
-
- pattern_BackupService_ChangeScheduledBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "changeScheduled"))
-
- pattern_BackupService_RemoveScheduledBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "backups", "scheduled_backup_id"}, ""))
-
- pattern_BackupService_GetLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "backups", "artifact_id", "logs"}, ""))
-
- pattern_BackupService_ListArtifacts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "artifacts"}, ""))
-
- pattern_BackupService_DeleteArtifact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "artifacts", "artifact_id"}, ""))
-
- pattern_BackupService_ListPitrTimeranges_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "artifacts", "artifact_id", "pitr-timeranges"}, ""))
+ pattern_BackupService_ScheduleBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "schedule"))
+ pattern_BackupService_ListScheduledBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "scheduled"}, ""))
+ pattern_BackupService_ChangeScheduledBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "backups"}, "changeScheduled"))
+ pattern_BackupService_RemoveScheduledBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "backups", "scheduled_backup_id"}, ""))
+ pattern_BackupService_GetLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "backups", "artifact_id", "logs"}, ""))
+ pattern_BackupService_ListArtifacts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "artifacts"}, ""))
+ pattern_BackupService_DeleteArtifact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "artifacts", "artifact_id"}, ""))
+ pattern_BackupService_ListPitrTimeranges_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "artifacts", "artifact_id", "pitr-timeranges"}, ""))
)
var (
- forward_BackupService_StartBackup_0 = runtime.ForwardResponseMessage
-
+ forward_BackupService_StartBackup_0 = runtime.ForwardResponseMessage
forward_BackupService_ListArtifactCompatibleServices_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_ScheduleBackup_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_ListScheduledBackups_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_ChangeScheduledBackup_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_RemoveScheduledBackup_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_GetLogs_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_ListArtifacts_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_DeleteArtifact_0 = runtime.ForwardResponseMessage
-
- forward_BackupService_ListPitrTimeranges_0 = runtime.ForwardResponseMessage
+ forward_BackupService_ScheduleBackup_0 = runtime.ForwardResponseMessage
+ forward_BackupService_ListScheduledBackups_0 = runtime.ForwardResponseMessage
+ forward_BackupService_ChangeScheduledBackup_0 = runtime.ForwardResponseMessage
+ forward_BackupService_RemoveScheduledBackup_0 = runtime.ForwardResponseMessage
+ forward_BackupService_GetLogs_0 = runtime.ForwardResponseMessage
+ forward_BackupService_ListArtifacts_0 = runtime.ForwardResponseMessage
+ forward_BackupService_DeleteArtifact_0 = runtime.ForwardResponseMessage
+ forward_BackupService_ListPitrTimeranges_0 = runtime.ForwardResponseMessage
)
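
The backup.pb.gw.go changes above are mechanical: request decoders now treat io.EOF via errors.Is, and mux registrations use the net/http method constants instead of string literals. A small, self-contained sketch of both (decodeBody and the JSON payload are illustrative stand-ins, not code from this patch):

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
)

// decodeBody tolerates an empty request body. errors.Is also matches io.EOF
// when it arrives wrapped by another error, which a plain err != io.EOF
// comparison would miss.
func decodeBody(r io.Reader, dst any) error {
	if err := json.NewDecoder(r).Decode(dst); err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	return nil
}

func main() {
	var payload map[string]any
	fmt.Println(decodeBody(bytes.NewReader(nil), &payload)) // empty body -> <nil>
	fmt.Println(http.MethodPost, http.MethodDelete)         // "POST" "DELETE"
}
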
diff --git a/api/backup/v1/backup.pb.validate.go b/api/backup/v1/backup.pb.validate.go
index 1a2b4a6109..743bd45f68 100644
--- a/api/backup/v1/backup.pb.validate.go
+++ b/api/backup/v1/backup.pb.validate.go
@@ -132,7 +132,7 @@ type StartBackupRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartBackupRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -236,7 +236,7 @@ type StartBackupResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartBackupResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -351,7 +351,7 @@ type ListArtifactCompatibleServicesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListArtifactCompatibleServicesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -525,7 +525,7 @@ type ListArtifactCompatibleServicesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListArtifactCompatibleServicesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -774,7 +774,7 @@ type ScheduledBackupMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ScheduledBackupMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -981,7 +981,7 @@ type ScheduleBackupRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ScheduleBackupRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1085,7 +1085,7 @@ type ScheduleBackupResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ScheduleBackupResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1187,7 +1187,7 @@ type ListScheduledBackupsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListScheduledBackupsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1324,7 +1324,7 @@ type ListScheduledBackupsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListScheduledBackupsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1520,7 +1520,7 @@ type ChangeScheduledBackupRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeScheduledBackupRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1623,7 +1623,7 @@ type ChangeScheduledBackupResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeScheduledBackupResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1737,7 +1737,7 @@ type RemoveScheduledBackupRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveScheduledBackupRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1840,7 +1840,7 @@ type RemoveScheduledBackupResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveScheduledBackupResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1958,7 +1958,7 @@ type GetLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2094,7 +2094,7 @@ type GetLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
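
The backup.pb.validate.go hunks all apply one change: the MultiError message slice is pre-sized with make([]string, 0, len(m)) so appending error texts does not trigger repeated slice growth. A hedged sketch of the same idea on a simplified type (multiError below is illustrative, not the generated type):

package main

import (
	"errors"
	"fmt"
	"strings"
)

type multiError []error

// Error joins the wrapped error messages; the capacity hint means a single
// allocation instead of append-driven regrowth.
func (m multiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	m := multiError{errors.New("first"), errors.New("second")}
	fmt.Println(m.Error()) // first; second
}
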
diff --git a/api/backup/v1/common.pb.go b/api/backup/v1/common.pb.go
index 4e0990d687..ce96f637b3 100644
--- a/api/backup/v1/common.pb.go
+++ b/api/backup/v1/common.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/common.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -128,12 +129,11 @@ func (BackupMode) EnumDescriptor() ([]byte, []int) {
// File represents file or folder on a storage.
type File struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *File) Reset() {
@@ -182,12 +182,11 @@ func (x *File) GetIsDirectory() bool {
// PbmMetadata contains additional data for pbm cli tools.
type PbmMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Name of backup in backup tool representation.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PbmMetadata) Reset() {
@@ -229,20 +228,19 @@ func (x *PbmMetadata) GetName() string {
// Metadata contains extra artifact data like files it consists of, tool specific data, etc.
type Metadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// List of files backup consists of.
FileList []*File `protobuf:"bytes,1,rep,name=file_list,json=fileList,proto3" json:"file_list,omitempty"`
// Exact time DB can be restored to.
RestoreTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=restore_to,json=restoreTo,proto3" json:"restore_to,omitempty"`
// Extra data for backup tools.
//
- // Types that are assignable to BackupToolMetadata:
+ // Types that are valid to be assigned to BackupToolMetadata:
//
// *Metadata_PbmMetadata
BackupToolMetadata isMetadata_BackupToolMetadata `protobuf_oneof:"backup_tool_metadata"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Metadata) Reset() {
@@ -289,16 +287,18 @@ func (x *Metadata) GetRestoreTo() *timestamppb.Timestamp {
return nil
}
-func (m *Metadata) GetBackupToolMetadata() isMetadata_BackupToolMetadata {
- if m != nil {
- return m.BackupToolMetadata
+func (x *Metadata) GetBackupToolMetadata() isMetadata_BackupToolMetadata {
+ if x != nil {
+ return x.BackupToolMetadata
}
return nil
}
func (x *Metadata) GetPbmMetadata() *PbmMetadata {
- if x, ok := x.GetBackupToolMetadata().(*Metadata_PbmMetadata); ok {
- return x.PbmMetadata
+ if x != nil {
+ if x, ok := x.BackupToolMetadata.(*Metadata_PbmMetadata); ok {
+ return x.PbmMetadata
+ }
}
return nil
}
@@ -315,12 +315,11 @@ func (*Metadata_PbmMetadata) isMetadata_BackupToolMetadata() {}
// LogChunk represent one chunk of logs.
type LogChunk struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
+ Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
- Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *LogChunk) Reset() {
@@ -369,7 +368,7 @@ func (x *LogChunk) GetData() string {
var File_backup_v1_common_proto protoreflect.FileDescriptor
-var file_backup_v1_common_proto_rawDesc = []byte{
+var file_backup_v1_common_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
@@ -422,16 +421,16 @@ var file_backup_v1_common_proto_rawDesc = []byte{
0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a,
0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_common_proto_rawDescOnce sync.Once
- file_backup_v1_common_proto_rawDescData = file_backup_v1_common_proto_rawDesc
+ file_backup_v1_common_proto_rawDescData []byte
)
func file_backup_v1_common_proto_rawDescGZIP() []byte {
file_backup_v1_common_proto_rawDescOnce.Do(func() {
- file_backup_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_common_proto_rawDescData)
+ file_backup_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_common_proto_rawDesc), len(file_backup_v1_common_proto_rawDesc)))
})
return file_backup_v1_common_proto_rawDescData
}
@@ -473,7 +472,7 @@ func file_backup_v1_common_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_common_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_common_proto_rawDesc), len(file_backup_v1_common_proto_rawDesc)),
NumEnums: 2,
NumMessages: 4,
NumExtensions: 0,
@@ -485,7 +484,6 @@ func file_backup_v1_common_proto_init() {
MessageInfos: file_backup_v1_common_proto_msgTypes,
}.Build()
File_backup_v1_common_proto = out.File
- file_backup_v1_common_proto_rawDesc = nil
file_backup_v1_common_proto_goTypes = nil
file_backup_v1_common_proto_depIdxs = nil
}
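
Note on the regenerated `*.pb.go` files above: protoc-gen-go v1.36 stores the raw file descriptor as a `string` instead of a `[]byte` and converts it back without copying via `unsafe.Slice(unsafe.StringData(...))`; string-backed data is immutable, which is presumably why the generator no longer needs the `rawDesc = nil` reset in init. A minimal, self-contained sketch of that zero-copy conversion (the names here are illustrative, not the generated ones):

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesView returns a []byte that aliases the string's backing array
// without copying (Go 1.20+). The caller must treat it as read-only,
// because writing through it would mutate an immutable string.
func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	rawDesc := string([]byte{0x0a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70})
	b := bytesView(rawDesc)
	fmt.Println(len(b), b[0]) // 8 10
}
```
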
diff --git a/api/backup/v1/common.pb.validate.go b/api/backup/v1/common.pb.validate.go
index 5e60e5f51b..20835ce8c7 100644
--- a/api/backup/v1/common.pb.validate.go
+++ b/api/backup/v1/common.pb.validate.go
@@ -82,7 +82,7 @@ type FileMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FileMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -183,7 +183,7 @@ type PbmMetadataMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PbmMetadataMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -391,7 +391,7 @@ type MetadataMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetadataMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -494,7 +494,7 @@ type LogChunkMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LogChunkMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
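
The regenerated `*.pb.validate.go` files now preallocate the message slice with `make([]string, 0, len(m))` instead of `var msgs []string`: the final length is known up front (one entry per wrapped error), so the slice is allocated once rather than grown repeatedly by `append`. A small sketch of the same idiom (the helper name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// joinErrors mirrors the generated MultiError.Error() shape: capacity is
// reserved in a single allocation because the number of messages is known.
func joinErrors(errs []error) string {
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	fmt.Println(joinErrors([]error{errors.New("bad name"), errors.New("bad path")}))
	// bad name; bad path
}
```
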
diff --git a/api/backup/v1/errors.pb.go b/api/backup/v1/errors.pb.go
index d2ba5a9692..d26dfb6331 100644
--- a/api/backup/v1/errors.pb.go
+++ b/api/backup/v1/errors.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/errors.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -88,11 +89,10 @@ func (ErrorCode) EnumDescriptor() ([]byte, []int) {
// Error represents error message returned in the details field in the response.
type Error struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Code ErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=backup.v1.ErrorCode" json:"code,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Code ErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=backup.v1.ErrorCode" json:"code,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Error) Reset() {
@@ -134,7 +134,7 @@ func (x *Error) GetCode() ErrorCode {
var File_backup_v1_errors_proto protoreflect.FileDescriptor
-var file_backup_v1_errors_proto_rawDesc = []byte{
+var file_backup_v1_errors_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x72, 0x72, 0x6f,
0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
0x2e, 0x76, 0x31, 0x22, 0x31, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x04,
@@ -166,16 +166,16 @@ var file_backup_v1_errors_proto_rawDesc = []byte{
0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_errors_proto_rawDescOnce sync.Once
- file_backup_v1_errors_proto_rawDescData = file_backup_v1_errors_proto_rawDesc
+ file_backup_v1_errors_proto_rawDescData []byte
)
func file_backup_v1_errors_proto_rawDescGZIP() []byte {
file_backup_v1_errors_proto_rawDescOnce.Do(func() {
- file_backup_v1_errors_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_errors_proto_rawDescData)
+ file_backup_v1_errors_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_errors_proto_rawDesc), len(file_backup_v1_errors_proto_rawDesc)))
})
return file_backup_v1_errors_proto_rawDescData
}
@@ -207,7 +207,7 @@ func file_backup_v1_errors_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_errors_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_errors_proto_rawDesc), len(file_backup_v1_errors_proto_rawDesc)),
NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
@@ -219,7 +219,6 @@ func file_backup_v1_errors_proto_init() {
MessageInfos: file_backup_v1_errors_proto_msgTypes,
}.Build()
File_backup_v1_errors_proto = out.File
- file_backup_v1_errors_proto_rawDesc = nil
file_backup_v1_errors_proto_goTypes = nil
file_backup_v1_errors_proto_depIdxs = nil
}
diff --git a/api/backup/v1/errors.pb.validate.go b/api/backup/v1/errors.pb.validate.go
index c60c1054c9..9aebb9d764 100644
--- a/api/backup/v1/errors.pb.validate.go
+++ b/api/backup/v1/errors.pb.validate.go
@@ -71,7 +71,7 @@ type ErrorMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ErrorMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/backup/v1/locations.pb.go b/api/backup/v1/locations.pb.go
index 66230b030e..08fa136b19 100644
--- a/api/backup/v1/locations.pb.go
+++ b/api/backup/v1/locations.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/locations.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -26,11 +27,10 @@ const (
// FilesystemLocationConfig represents file system location config.
type FilesystemLocationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *FilesystemLocationConfig) Reset() {
@@ -72,14 +72,13 @@ func (x *FilesystemLocationConfig) GetPath() string {
// S3LocationConfig represents S3 bucket configuration.
type S3LocationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+ AccessKey string `protobuf:"bytes,2,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
+ SecretKey string `protobuf:"bytes,3,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
+ BucketName string `protobuf:"bytes,4,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
- AccessKey string `protobuf:"bytes,2,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
- SecretKey string `protobuf:"bytes,3,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
- BucketName string `protobuf:"bytes,4,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *S3LocationConfig) Reset() {
@@ -142,21 +141,20 @@ func (x *S3LocationConfig) GetBucketName() string {
// Location represents single Backup Location.
type Location struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable ID.
LocationId string `protobuf:"bytes,1,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// Location name
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// Short description
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
- // Types that are assignable to Config:
+ // Types that are valid to be assigned to Config:
//
// *Location_FilesystemConfig
// *Location_S3Config
- Config isLocation_Config `protobuf_oneof:"config"`
+ Config isLocation_Config `protobuf_oneof:"config"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Location) Reset() {
@@ -210,23 +208,27 @@ func (x *Location) GetDescription() string {
return ""
}
-func (m *Location) GetConfig() isLocation_Config {
- if m != nil {
- return m.Config
+func (x *Location) GetConfig() isLocation_Config {
+ if x != nil {
+ return x.Config
}
return nil
}
func (x *Location) GetFilesystemConfig() *FilesystemLocationConfig {
- if x, ok := x.GetConfig().(*Location_FilesystemConfig); ok {
- return x.FilesystemConfig
+ if x != nil {
+ if x, ok := x.Config.(*Location_FilesystemConfig); ok {
+ return x.FilesystemConfig
+ }
}
return nil
}
func (x *Location) GetS3Config() *S3LocationConfig {
- if x, ok := x.GetConfig().(*Location_S3Config); ok {
- return x.S3Config
+ if x != nil {
+ if x, ok := x.Config.(*Location_S3Config); ok {
+ return x.S3Config
+ }
}
return nil
}
@@ -248,9 +250,9 @@ func (*Location_FilesystemConfig) isLocation_Config() {}
func (*Location_S3Config) isLocation_Config() {}
type ListLocationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListLocationsRequest) Reset() {
@@ -284,11 +286,10 @@ func (*ListLocationsRequest) Descriptor() ([]byte, []int) {
}
type ListLocationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListLocationsResponse) Reset() {
@@ -329,17 +330,16 @@ func (x *ListLocationsResponse) GetLocations() []*Location {
}
type AddLocationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Location name
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// Filesystem location configuration. Exactly one config should be set.
FilesystemConfig *FilesystemLocationConfig `protobuf:"bytes,3,opt,name=filesystem_config,json=filesystemConfig,proto3" json:"filesystem_config,omitempty"`
// S3 Bucket configuration. Exactly one config should be set.
- S3Config *S3LocationConfig `protobuf:"bytes,4,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ S3Config *S3LocationConfig `protobuf:"bytes,4,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddLocationRequest) Reset() {
@@ -401,12 +401,11 @@ func (x *AddLocationRequest) GetS3Config() *S3LocationConfig {
}
type AddLocationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable ID.
- LocationId string `protobuf:"bytes,1,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
+ LocationId string `protobuf:"bytes,1,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddLocationResponse) Reset() {
@@ -447,10 +446,7 @@ func (x *AddLocationResponse) GetLocationId() string {
}
type ChangeLocationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable ID.
LocationId string `protobuf:"bytes,1,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// Location name
@@ -459,7 +455,9 @@ type ChangeLocationRequest struct {
// Filesystem location configuration. Exactly one config should be set.
FilesystemConfig *FilesystemLocationConfig `protobuf:"bytes,4,opt,name=filesystem_config,json=filesystemConfig,proto3" json:"filesystem_config,omitempty"`
// S3 Bucket configuration. Exactly one config should be set.
- S3Config *S3LocationConfig `protobuf:"bytes,5,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ S3Config *S3LocationConfig `protobuf:"bytes,5,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeLocationRequest) Reset() {
@@ -528,9 +526,9 @@ func (x *ChangeLocationRequest) GetS3Config() *S3LocationConfig {
}
type ChangeLocationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeLocationResponse) Reset() {
@@ -564,14 +562,13 @@ func (*ChangeLocationResponse) Descriptor() ([]byte, []int) {
}
type RemoveLocationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable ID.
LocationId string `protobuf:"bytes,1,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
// Force mode
- Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveLocationRequest) Reset() {
@@ -619,9 +616,9 @@ func (x *RemoveLocationRequest) GetForce() bool {
}
type RemoveLocationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveLocationResponse) Reset() {
@@ -655,14 +652,13 @@ func (*RemoveLocationResponse) Descriptor() ([]byte, []int) {
}
type TestLocationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Filesystem location configuration. Exactly one config should be set.
FilesystemConfig *FilesystemLocationConfig `protobuf:"bytes,1,opt,name=filesystem_config,json=filesystemConfig,proto3" json:"filesystem_config,omitempty"`
// S3 Bucket configuration. Exactly one config should be set.
- S3Config *S3LocationConfig `protobuf:"bytes,2,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ S3Config *S3LocationConfig `protobuf:"bytes,2,opt,name=s3_config,json=s3Config,proto3" json:"s3_config,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *TestLocationConfigRequest) Reset() {
@@ -710,9 +706,9 @@ func (x *TestLocationConfigRequest) GetS3Config() *S3LocationConfig {
}
type TestLocationConfigResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *TestLocationConfigResponse) Reset() {
@@ -747,7 +743,7 @@ func (*TestLocationConfigResponse) Descriptor() ([]byte, []int) {
var File_backup_v1_locations_proto protoreflect.FileDescriptor
-var file_backup_v1_locations_proto_rawDesc = []byte{
+var file_backup_v1_locations_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63,
0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
@@ -919,16 +915,16 @@ var file_backup_v1_locations_proto_rawDesc = []byte{
0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_locations_proto_rawDescOnce sync.Once
- file_backup_v1_locations_proto_rawDescData = file_backup_v1_locations_proto_rawDesc
+ file_backup_v1_locations_proto_rawDescData []byte
)
func file_backup_v1_locations_proto_rawDescGZIP() []byte {
file_backup_v1_locations_proto_rawDescOnce.Do(func() {
- file_backup_v1_locations_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_locations_proto_rawDescData)
+ file_backup_v1_locations_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_locations_proto_rawDesc), len(file_backup_v1_locations_proto_rawDesc)))
})
return file_backup_v1_locations_proto_rawDescData
}
@@ -992,7 +988,7 @@ func file_backup_v1_locations_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_locations_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_locations_proto_rawDesc), len(file_backup_v1_locations_proto_rawDesc)),
NumEnums: 0,
NumMessages: 13,
NumExtensions: 0,
@@ -1003,7 +999,6 @@ func file_backup_v1_locations_proto_init() {
MessageInfos: file_backup_v1_locations_proto_msgTypes,
}.Build()
File_backup_v1_locations_proto = out.File
- file_backup_v1_locations_proto_rawDesc = nil
file_backup_v1_locations_proto_goTypes = nil
file_backup_v1_locations_proto_depIdxs = nil
}
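
In the regenerated messages the oneof getters (for example `Location.GetFilesystemConfig` above) nil-check the receiver and type-assert the oneof field directly instead of going through the interface getter; the nil-safety is unchanged. A self-contained sketch of that getter shape with stand-in types (`Doc`, `Doc_Text`, and `GetText` are hypothetical, not the generated names):

```go
package main

import "fmt"

type isDoc_Body interface{ isDocBody() }

// Doc plays the role of a message with a oneof field.
type Doc struct {
	Body isDoc_Body
}

// Doc_Text plays the role of one oneof wrapper type.
type Doc_Text struct{ Text string }

func (*Doc_Text) isDocBody() {}

// GetText follows the v1.36 getter shape: guard the receiver, then
// type-assert the stored oneof wrapper; a nil receiver or a different
// wrapper both fall through to the zero value.
func (x *Doc) GetText() string {
	if x != nil {
		if x, ok := x.Body.(*Doc_Text); ok {
			return x.Text
		}
	}
	return ""
}

func main() {
	var missing *Doc
	fmt.Printf("%q\n", missing.GetText())                       // ""
	fmt.Printf("%q\n", (&Doc{Body: &Doc_Text{"hi"}}).GetText()) // "hi"
}
```
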
diff --git a/api/backup/v1/locations.pb.gw.go b/api/backup/v1/locations.pb.gw.go
index 63fe8d8483..89731b2c73 100644
--- a/api/backup/v1/locations.pb.gw.go
+++ b/api/backup/v1/locations.pb.gw.go
@@ -10,6 +10,7 @@ package backupv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,105 +29,92 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_LocationsService_ListLocations_0(ctx context.Context, marshaler runtime.Marshaler, client LocationsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListLocationsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListLocationsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListLocations(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_LocationsService_ListLocations_0(ctx context.Context, marshaler runtime.Marshaler, server LocationsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListLocationsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListLocationsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListLocations(ctx, &protoReq)
return msg, metadata, err
}
func request_LocationsService_AddLocation_0(ctx context.Context, marshaler runtime.Marshaler, client LocationsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddLocationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddLocationRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddLocation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_LocationsService_AddLocation_0(ctx context.Context, marshaler runtime.Marshaler, server LocationsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddLocationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddLocationRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddLocation(ctx, &protoReq)
return msg, metadata, err
}
func request_LocationsService_ChangeLocation_0(ctx context.Context, marshaler runtime.Marshaler, client LocationsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeLocationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeLocationRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["location_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["location_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "location_id")
}
-
protoReq.LocationId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "location_id", err)
}
-
msg, err := client.ChangeLocation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_LocationsService_ChangeLocation_0(ctx context.Context, marshaler runtime.Marshaler, server LocationsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeLocationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeLocationRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["location_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["location_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "location_id")
}
-
protoReq.LocationId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "location_id", err)
}
-
msg, err := server.ChangeLocation(ctx, &protoReq)
return msg, metadata, err
}
@@ -134,89 +122,73 @@ func local_request_LocationsService_ChangeLocation_0(ctx context.Context, marsha
var filter_LocationsService_RemoveLocation_0 = &utilities.DoubleArray{Encoding: map[string]int{"location_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_LocationsService_RemoveLocation_0(ctx context.Context, marshaler runtime.Marshaler, client LocationsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveLocationRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveLocationRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["location_id"]
+ val, ok := pathParams["location_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "location_id")
}
-
protoReq.LocationId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "location_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_LocationsService_RemoveLocation_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RemoveLocation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_LocationsService_RemoveLocation_0(ctx context.Context, marshaler runtime.Marshaler, server LocationsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveLocationRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveLocationRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["location_id"]
+ val, ok := pathParams["location_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "location_id")
}
-
protoReq.LocationId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "location_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_LocationsService_RemoveLocation_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RemoveLocation(ctx, &protoReq)
return msg, metadata, err
}
func request_LocationsService_TestLocationConfig_0(ctx context.Context, marshaler runtime.Marshaler, client LocationsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq TestLocationConfigRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq TestLocationConfigRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.TestLocationConfig(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_LocationsService_TestLocationConfig_0(ctx context.Context, marshaler runtime.Marshaler, server LocationsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq TestLocationConfigRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq TestLocationConfigRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.TestLocationConfig(ctx, &protoReq)
return msg, metadata, err
}
@@ -227,15 +199,13 @@ func local_request_LocationsService_TestLocationConfig_0(ctx context.Context, ma
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLocationsServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LocationsServiceServer) error {
- mux.Handle("GET", pattern_LocationsService_ListLocations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_LocationsService_ListLocations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/ListLocations", runtime.WithHTTPPathPattern("/v1/backups/locations"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/ListLocations", runtime.WithHTTPPathPattern("/v1/backups/locations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -247,19 +217,15 @@ func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_ListLocations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_LocationsService_AddLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_LocationsService_AddLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/AddLocation", runtime.WithHTTPPathPattern("/v1/backups/locations"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/AddLocation", runtime.WithHTTPPathPattern("/v1/backups/locations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -271,19 +237,15 @@ func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_AddLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_LocationsService_ChangeLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_LocationsService_ChangeLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/ChangeLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/ChangeLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -295,19 +257,15 @@ func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_ChangeLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_LocationsService_RemoveLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_LocationsService_RemoveLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/RemoveLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/RemoveLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -319,19 +277,15 @@ func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_RemoveLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_LocationsService_TestLocationConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_LocationsService_TestLocationConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/TestLocationConfig", runtime.WithHTTPPathPattern("/v1/backups/locations:testConfig"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.LocationsService/TestLocationConfig", runtime.WithHTTPPathPattern("/v1/backups/locations:testConfig"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -343,7 +297,6 @@ func RegisterLocationsServiceHandlerServer(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_TestLocationConfig_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -371,7 +324,6 @@ func RegisterLocationsServiceHandlerFromEndpoint(ctx context.Context, mux *runti
}
}()
}()
-
return RegisterLocationsServiceHandler(ctx, mux, conn)
}
@@ -387,13 +339,11 @@ func RegisterLocationsServiceHandler(ctx context.Context, mux *runtime.ServeMux,
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LocationsServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LocationsServiceClient) error {
- mux.Handle("GET", pattern_LocationsService_ListLocations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_LocationsService_ListLocations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/ListLocations", runtime.WithHTTPPathPattern("/v1/backups/locations"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/ListLocations", runtime.WithHTTPPathPattern("/v1/backups/locations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -404,17 +354,13 @@ func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_ListLocations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_LocationsService_AddLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_LocationsService_AddLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/AddLocation", runtime.WithHTTPPathPattern("/v1/backups/locations"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/AddLocation", runtime.WithHTTPPathPattern("/v1/backups/locations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -425,17 +371,13 @@ func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_AddLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_LocationsService_ChangeLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_LocationsService_ChangeLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/ChangeLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/ChangeLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -446,17 +388,13 @@ func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_ChangeLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_LocationsService_RemoveLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_LocationsService_RemoveLocation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/RemoveLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/RemoveLocation", runtime.WithHTTPPathPattern("/v1/backups/locations/{location_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -467,17 +405,13 @@ func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_RemoveLocation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_LocationsService_TestLocationConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_LocationsService_TestLocationConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/TestLocationConfig", runtime.WithHTTPPathPattern("/v1/backups/locations:testConfig"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.LocationsService/TestLocationConfig", runtime.WithHTTPPathPattern("/v1/backups/locations:testConfig"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -488,33 +422,23 @@ func RegisterLocationsServiceHandlerClient(ctx context.Context, mux *runtime.Ser
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_LocationsService_TestLocationConfig_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_LocationsService_ListLocations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "locations"}, ""))
-
- pattern_LocationsService_AddLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "locations"}, ""))
-
- pattern_LocationsService_ChangeLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "locations", "location_id"}, ""))
-
- pattern_LocationsService_RemoveLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "locations", "location_id"}, ""))
-
+ pattern_LocationsService_ListLocations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "locations"}, ""))
+ pattern_LocationsService_AddLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "locations"}, ""))
+ pattern_LocationsService_ChangeLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "locations", "location_id"}, ""))
+ pattern_LocationsService_RemoveLocation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "locations", "location_id"}, ""))
pattern_LocationsService_TestLocationConfig_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "locations"}, "testConfig"))
)
var (
- forward_LocationsService_ListLocations_0 = runtime.ForwardResponseMessage
-
- forward_LocationsService_AddLocation_0 = runtime.ForwardResponseMessage
-
- forward_LocationsService_ChangeLocation_0 = runtime.ForwardResponseMessage
-
- forward_LocationsService_RemoveLocation_0 = runtime.ForwardResponseMessage
-
+ forward_LocationsService_ListLocations_0 = runtime.ForwardResponseMessage
+ forward_LocationsService_AddLocation_0 = runtime.ForwardResponseMessage
+ forward_LocationsService_ChangeLocation_0 = runtime.ForwardResponseMessage
+ forward_LocationsService_RemoveLocation_0 = runtime.ForwardResponseMessage
forward_LocationsService_TestLocationConfig_0 = runtime.ForwardResponseMessage
)
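
The regenerated gateway handlers above now use `!errors.Is(err, io.EOF)` instead of `err != io.EOF` when deciding whether an empty request body is acceptable, and register routes with the `http.MethodGet`/`http.MethodPost`/`http.MethodPut`/`http.MethodDelete` constants rather than string literals. The `errors.Is` difference only shows up once the sentinel gets wrapped; a minimal illustration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A decoder or middleware may wrap the sentinel before returning it.
	wrapped := fmt.Errorf("decode request body: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: direct comparison misses the wrap
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is walks the wrap chain
}
```
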
diff --git a/api/backup/v1/locations.pb.validate.go b/api/backup/v1/locations.pb.validate.go
index 3b3d32f5ef..04d465b6e6 100644
--- a/api/backup/v1/locations.pb.validate.go
+++ b/api/backup/v1/locations.pb.validate.go
@@ -82,7 +82,7 @@ type FilesystemLocationConfigMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FilesystemLocationConfigMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -228,7 +228,7 @@ type S3LocationConfigMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m S3LocationConfigMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -420,7 +420,7 @@ type LocationMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LocationMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -520,7 +520,7 @@ type ListLocationsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListLocationsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -656,7 +656,7 @@ type ListLocationsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListLocationsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -829,7 +829,7 @@ type AddLocationRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddLocationRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -933,7 +933,7 @@ type AddLocationResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddLocationResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1108,7 +1108,7 @@ type ChangeLocationRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeLocationRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1210,7 +1210,7 @@ type ChangeLocationResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeLocationResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1316,7 +1316,7 @@ type RemoveLocationRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveLocationRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1418,7 +1418,7 @@ type RemoveLocationResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveLocationResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1578,7 +1578,7 @@ type TestLocationConfigRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m TestLocationConfigRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1680,7 +1680,7 @@ type TestLocationConfigResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m TestLocationConfigResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/backup/v1/restores.pb.go b/api/backup/v1/restores.pb.go
index 245ef2061d..fd65bb5ba3 100644
--- a/api/backup/v1/restores.pb.go
+++ b/api/backup/v1/restores.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: backup/v1/restores.proto
@@ -9,6 +9,7 @@ package backupv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -80,10 +81,7 @@ func (RestoreStatus) EnumDescriptor() ([]byte, []int) {
// RestoreHistoryItem represents single backup restore item.
type RestoreHistoryItem struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Machine-readable restore id.
RestoreId string `protobuf:"bytes,1,opt,name=restore_id,json=restoreId,proto3" json:"restore_id,omitempty"`
// ID of the artifact used for restore.
@@ -110,6 +108,8 @@ type RestoreHistoryItem struct {
FinishedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"`
// PITR timestamp is filled for PITR restores, empty otherwise.
PitrTimestamp *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=pitr_timestamp,json=pitrTimestamp,proto3" json:"pitr_timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RestoreHistoryItem) Reset() {
@@ -234,9 +234,9 @@ func (x *RestoreHistoryItem) GetPitrTimestamp() *timestamppb.Timestamp {
}
type ListRestoresRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListRestoresRequest) Reset() {
@@ -270,11 +270,10 @@ func (*ListRestoresRequest) Descriptor() ([]byte, []int) {
}
type ListRestoresResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Items []*RestoreHistoryItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Items []*RestoreHistoryItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListRestoresResponse) Reset() {
@@ -315,13 +314,12 @@ func (x *ListRestoresResponse) GetItems() []*RestoreHistoryItem {
}
type RestoreServiceGetLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RestoreId string `protobuf:"bytes,1,opt,name=restore_id,json=restoreId,proto3" json:"restore_id,omitempty"`
+ Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RestoreId string `protobuf:"bytes,1,opt,name=restore_id,json=restoreId,proto3" json:"restore_id,omitempty"`
- Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
- Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *RestoreServiceGetLogsRequest) Reset() {
@@ -376,12 +374,11 @@ func (x *RestoreServiceGetLogsRequest) GetLimit() uint32 {
}
type RestoreServiceGetLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
- End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *RestoreServiceGetLogsResponse) Reset() {
@@ -429,16 +426,15 @@ func (x *RestoreServiceGetLogsResponse) GetEnd() bool {
}
type RestoreBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Service identifier where backup should be restored.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Artifact id to restore.
ArtifactId string `protobuf:"bytes,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"`
// Timestamp of PITR to restore to
PitrTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=pitr_timestamp,json=pitrTimestamp,proto3" json:"pitr_timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RestoreBackupRequest) Reset() {
@@ -493,12 +489,11 @@ func (x *RestoreBackupRequest) GetPitrTimestamp() *timestamppb.Timestamp {
}
type RestoreBackupResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique restore identifier.
- RestoreId string `protobuf:"bytes,1,opt,name=restore_id,json=restoreId,proto3" json:"restore_id,omitempty"`
+ RestoreId string `protobuf:"bytes,1,opt,name=restore_id,json=restoreId,proto3" json:"restore_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RestoreBackupResponse) Reset() {
@@ -540,7 +535,7 @@ func (x *RestoreBackupResponse) GetRestoreId() string {
var File_backup_v1_restores_proto protoreflect.FileDescriptor
-var file_backup_v1_restores_proto_rawDesc = []byte{
+var file_backup_v1_restores_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x74,
0x6f, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x61, 0x63, 0x6b,
0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31,
@@ -706,16 +701,16 @@ var file_backup_v1_restores_proto_rawDesc = []byte{
0x56, 0x31, 0xe2, 0x02, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5c, 0x56, 0x31, 0x5c, 0x47,
0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x42, 0x61, 0x63,
0x6b, 0x75, 0x70, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_backup_v1_restores_proto_rawDescOnce sync.Once
- file_backup_v1_restores_proto_rawDescData = file_backup_v1_restores_proto_rawDesc
+ file_backup_v1_restores_proto_rawDescData []byte
)
func file_backup_v1_restores_proto_rawDescGZIP() []byte {
file_backup_v1_restores_proto_rawDescOnce.Do(func() {
- file_backup_v1_restores_proto_rawDescData = protoimpl.X.CompressGZIP(file_backup_v1_restores_proto_rawDescData)
+ file_backup_v1_restores_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_backup_v1_restores_proto_rawDesc), len(file_backup_v1_restores_proto_rawDesc)))
})
return file_backup_v1_restores_proto_rawDescData
}
@@ -770,7 +765,7 @@ func file_backup_v1_restores_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_backup_v1_restores_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_restores_proto_rawDesc), len(file_backup_v1_restores_proto_rawDesc)),
NumEnums: 1,
NumMessages: 7,
NumExtensions: 0,
@@ -782,7 +777,6 @@ func file_backup_v1_restores_proto_init() {
MessageInfos: file_backup_v1_restores_proto_msgTypes,
}.Build()
File_backup_v1_restores_proto = out.File
- file_backup_v1_restores_proto_rawDesc = nil
file_backup_v1_restores_proto_goTypes = nil
file_backup_v1_restores_proto_depIdxs = nil
}
diff --git a/api/backup/v1/restores.pb.gw.go b/api/backup/v1/restores.pb.gw.go
index a23df5f9d3..d77af04410 100644
--- a/api/backup/v1/restores.pb.gw.go
+++ b/api/backup/v1/restores.pb.gw.go
@@ -10,6 +10,7 @@ package backupv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,23 +29,26 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_RestoreService_ListRestores_0(ctx context.Context, marshaler runtime.Marshaler, client RestoreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListRestoresRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListRestoresRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListRestores(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_RestoreService_ListRestores_0(ctx context.Context, marshaler runtime.Marshaler, server RestoreServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListRestoresRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListRestoresRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListRestores(ctx, &protoReq)
return msg, metadata, err
}
@@ -52,89 +56,73 @@ func local_request_RestoreService_ListRestores_0(ctx context.Context, marshaler
var filter_RestoreService_GetLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"restore_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_RestoreService_GetLogs_0(ctx context.Context, marshaler runtime.Marshaler, client RestoreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RestoreServiceGetLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RestoreServiceGetLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["restore_id"]
+ val, ok := pathParams["restore_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "restore_id")
}
-
protoReq.RestoreId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "restore_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_RestoreService_GetLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetLogs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_RestoreService_GetLogs_0(ctx context.Context, marshaler runtime.Marshaler, server RestoreServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RestoreServiceGetLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RestoreServiceGetLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["restore_id"]
+ val, ok := pathParams["restore_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "restore_id")
}
-
protoReq.RestoreId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "restore_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_RestoreService_GetLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetLogs(ctx, &protoReq)
return msg, metadata, err
}
func request_RestoreService_RestoreBackup_0(ctx context.Context, marshaler runtime.Marshaler, client RestoreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RestoreBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq RestoreBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RestoreBackup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_RestoreService_RestoreBackup_0(ctx context.Context, marshaler runtime.Marshaler, server RestoreServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RestoreBackupRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq RestoreBackupRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RestoreBackup(ctx, &protoReq)
return msg, metadata, err
}
@@ -145,15 +133,13 @@ func local_request_RestoreService_RestoreBackup_0(ctx context.Context, marshaler
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterRestoreServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterRestoreServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server RestoreServiceServer) error {
- mux.Handle("GET", pattern_RestoreService_ListRestores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_RestoreService_ListRestores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/ListRestores", runtime.WithHTTPPathPattern("/v1/backups/restores"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/ListRestores", runtime.WithHTTPPathPattern("/v1/backups/restores"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -165,19 +151,15 @@ func RegisterRestoreServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_ListRestores_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_RestoreService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_RestoreService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/restores/{restore_id}/logs"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/restores/{restore_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -189,19 +171,15 @@ func RegisterRestoreServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_GetLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_RestoreService_RestoreBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_RestoreService_RestoreBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/RestoreBackup", runtime.WithHTTPPathPattern("/v1/backups/restores:start"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.RestoreService/RestoreBackup", runtime.WithHTTPPathPattern("/v1/backups/restores:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -213,7 +191,6 @@ func RegisterRestoreServiceHandlerServer(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_RestoreBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -241,7 +218,6 @@ func RegisterRestoreServiceHandlerFromEndpoint(ctx context.Context, mux *runtime
}
}()
}()
-
return RegisterRestoreServiceHandler(ctx, mux, conn)
}
@@ -257,13 +233,11 @@ func RegisterRestoreServiceHandler(ctx context.Context, mux *runtime.ServeMux, c
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "RestoreServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterRestoreServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RestoreServiceClient) error {
- mux.Handle("GET", pattern_RestoreService_ListRestores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_RestoreService_ListRestores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/ListRestores", runtime.WithHTTPPathPattern("/v1/backups/restores"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/ListRestores", runtime.WithHTTPPathPattern("/v1/backups/restores"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -274,17 +248,13 @@ func RegisterRestoreServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_ListRestores_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_RestoreService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_RestoreService_GetLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/restores/{restore_id}/logs"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/GetLogs", runtime.WithHTTPPathPattern("/v1/backups/restores/{restore_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -295,17 +265,13 @@ func RegisterRestoreServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_GetLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_RestoreService_RestoreBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_RestoreService_RestoreBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/RestoreBackup", runtime.WithHTTPPathPattern("/v1/backups/restores:start"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.RestoreService/RestoreBackup", runtime.WithHTTPPathPattern("/v1/backups/restores:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -316,25 +282,19 @@ func RegisterRestoreServiceHandlerClient(ctx context.Context, mux *runtime.Serve
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_RestoreService_RestoreBackup_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_RestoreService_ListRestores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "restores"}, ""))
-
- pattern_RestoreService_GetLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "restores", "restore_id", "logs"}, ""))
-
+ pattern_RestoreService_ListRestores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "restores"}, ""))
+ pattern_RestoreService_GetLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "restores", "restore_id", "logs"}, ""))
pattern_RestoreService_RestoreBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "restores"}, "start"))
)
var (
- forward_RestoreService_ListRestores_0 = runtime.ForwardResponseMessage
-
- forward_RestoreService_GetLogs_0 = runtime.ForwardResponseMessage
-
+ forward_RestoreService_ListRestores_0 = runtime.ForwardResponseMessage
+ forward_RestoreService_GetLogs_0 = runtime.ForwardResponseMessage
forward_RestoreService_RestoreBackup_0 = runtime.ForwardResponseMessage
)
diff --git a/api/backup/v1/restores.pb.validate.go b/api/backup/v1/restores.pb.validate.go
index 212d73baf5..0f530c1fc9 100644
--- a/api/backup/v1/restores.pb.validate.go
+++ b/api/backup/v1/restores.pb.validate.go
@@ -178,7 +178,7 @@ type RestoreHistoryItemMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RestoreHistoryItemMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -280,7 +280,7 @@ type ListRestoresRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListRestoresRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -416,7 +416,7 @@ type ListRestoresResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListRestoresResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -533,7 +533,7 @@ type RestoreServiceGetLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RestoreServiceGetLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -672,7 +672,7 @@ type RestoreServiceGetLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RestoreServiceGetLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -826,7 +826,7 @@ type RestoreBackupRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RestoreBackupRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -930,7 +930,7 @@ type RestoreBackupResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RestoreBackupResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/common/common.pb.go b/api/common/common.pb.go
index e40e872fef..4d18963dd8 100644
--- a/api/common/common.pb.go
+++ b/api/common/common.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: common/common.proto
@@ -9,6 +9,7 @@ package common
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -23,11 +24,10 @@ const (
// A wrapper for a string array. This type allows to distinguish between an empty array and a null value.
type StringArray struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StringArray) Reset() {
@@ -69,11 +69,10 @@ func (x *StringArray) GetValues() []string {
// A wrapper for map[string]string. This type allows to distinguish between an empty map and a null value.
type StringMap struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values map[string]string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- Values map[string]string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *StringMap) Reset() {
@@ -115,7 +114,7 @@ func (x *StringMap) GetValues() map[string]string {
var File_common_common_proto protoreflect.FileDescriptor
-var file_common_common_proto_rawDesc = []byte{
+var file_common_common_proto_rawDesc = string([]byte{
0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x25, 0x0a,
0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x16, 0x0a, 0x06,
@@ -137,16 +136,16 @@ var file_common_common_proto_rawDesc = []byte{
0x6d, 0x6d, 0x6f, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
-}
+})
var (
file_common_common_proto_rawDescOnce sync.Once
- file_common_common_proto_rawDescData = file_common_common_proto_rawDesc
+ file_common_common_proto_rawDescData []byte
)
func file_common_common_proto_rawDescGZIP() []byte {
file_common_common_proto_rawDescOnce.Do(func() {
- file_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_common_common_proto_rawDescData)
+ file_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_common_common_proto_rawDesc), len(file_common_common_proto_rawDesc)))
})
return file_common_common_proto_rawDescData
}
@@ -178,7 +177,7 @@ func file_common_common_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_common_common_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_common_common_proto_rawDesc), len(file_common_common_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -189,7 +188,6 @@ func file_common_common_proto_init() {
MessageInfos: file_common_common_proto_msgTypes,
}.Build()
File_common_common_proto = out.File
- file_common_common_proto_rawDesc = nil
file_common_common_proto_goTypes = nil
file_common_common_proto_depIdxs = nil
}
diff --git a/api/common/common.pb.validate.go b/api/common/common.pb.validate.go
index 5c76854986..9caff2dff6 100644
--- a/api/common/common.pb.validate.go
+++ b/api/common/common.pb.validate.go
@@ -70,7 +70,7 @@ type StringArrayMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StringArrayMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -171,7 +171,7 @@ type StringMapMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StringMapMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/common/metrics_resolutions.pb.go b/api/common/metrics_resolutions.pb.go
index b2f0e094c4..e6658f0728 100644
--- a/api/common/metrics_resolutions.pb.go
+++ b/api/common/metrics_resolutions.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: common/metrics_resolutions.proto
@@ -9,6 +9,7 @@ package common
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,16 +25,15 @@ const (
// MetricsResolutions represents Prometheus exporters metrics resolutions.
type MetricsResolutions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// High resolution. In JSON should be represented as a string with number of seconds with `s` suffix.
Hr *durationpb.Duration `protobuf:"bytes,1,opt,name=hr,proto3" json:"hr,omitempty"`
// Medium resolution. In JSON should be represented as a string with number of seconds with `s` suffix.
Mr *durationpb.Duration `protobuf:"bytes,2,opt,name=mr,proto3" json:"mr,omitempty"`
// Low resolution. In JSON should be represented as a string with number of seconds with `s` suffix.
- Lr *durationpb.Duration `protobuf:"bytes,3,opt,name=lr,proto3" json:"lr,omitempty"`
+ Lr *durationpb.Duration `protobuf:"bytes,3,opt,name=lr,proto3" json:"lr,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsResolutions) Reset() {
@@ -89,7 +89,7 @@ func (x *MetricsResolutions) GetLr() *durationpb.Duration {
var File_common_metrics_resolutions_proto protoreflect.FileDescriptor
-var file_common_metrics_resolutions_proto_rawDesc = []byte{
+var file_common_metrics_resolutions_proto_rawDesc = string([]byte{
0x0a, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67,
@@ -113,16 +113,16 @@ var file_common_metrics_resolutions_proto_rawDesc = []byte{
0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xe2, 0x02, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x06, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_common_metrics_resolutions_proto_rawDescOnce sync.Once
- file_common_metrics_resolutions_proto_rawDescData = file_common_metrics_resolutions_proto_rawDesc
+ file_common_metrics_resolutions_proto_rawDescData []byte
)
func file_common_metrics_resolutions_proto_rawDescGZIP() []byte {
file_common_metrics_resolutions_proto_rawDescOnce.Do(func() {
- file_common_metrics_resolutions_proto_rawDescData = protoimpl.X.CompressGZIP(file_common_metrics_resolutions_proto_rawDescData)
+ file_common_metrics_resolutions_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_common_metrics_resolutions_proto_rawDesc), len(file_common_metrics_resolutions_proto_rawDesc)))
})
return file_common_metrics_resolutions_proto_rawDescData
}
@@ -155,7 +155,7 @@ func file_common_metrics_resolutions_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_common_metrics_resolutions_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_common_metrics_resolutions_proto_rawDesc), len(file_common_metrics_resolutions_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -166,7 +166,6 @@ func file_common_metrics_resolutions_proto_init() {
MessageInfos: file_common_metrics_resolutions_proto_msgTypes,
}.Build()
File_common_metrics_resolutions_proto = out.File
- file_common_metrics_resolutions_proto_rawDesc = nil
file_common_metrics_resolutions_proto_goTypes = nil
file_common_metrics_resolutions_proto_depIdxs = nil
}
diff --git a/api/common/metrics_resolutions.pb.validate.go b/api/common/metrics_resolutions.pb.validate.go
index 3f17783cdc..0668c224e6 100644
--- a/api/common/metrics_resolutions.pb.validate.go
+++ b/api/common/metrics_resolutions.pb.validate.go
@@ -158,7 +158,7 @@ type MetricsResolutionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsResolutionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/dump/v1beta1/dump.pb.go b/api/dump/v1beta1/dump.pb.go
index 36fe115123..d261cd7639 100644
--- a/api/dump/v1beta1/dump.pb.go
+++ b/api/dump/v1beta1/dump.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: dump/v1beta1/dump.proto
@@ -9,6 +9,7 @@ package dumpv1beta1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -78,16 +79,15 @@ func (DumpStatus) EnumDescriptor() ([]byte, []int) {
}
type Dump struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
+ Status DumpStatus `protobuf:"varint,2,opt,name=status,proto3,enum=dump.v1beta1.DumpStatus" json:"status,omitempty"`
+ ServiceNames []string `protobuf:"bytes,3,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
unknownFields protoimpl.UnknownFields
-
- DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
- Status DumpStatus `protobuf:"varint,2,opt,name=status,proto3,enum=dump.v1beta1.DumpStatus" json:"status,omitempty"`
- ServiceNames []string `protobuf:"bytes,3,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
- StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- EndTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Dump) Reset() {
@@ -163,15 +163,14 @@ func (x *Dump) GetCreatedAt() *timestamppb.Timestamp {
}
type StartDumpRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceNames []string `protobuf:"bytes,1,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ ExportQan bool `protobuf:"varint,4,opt,name=export_qan,json=exportQan,proto3" json:"export_qan,omitempty"`
+ IgnoreLoad bool `protobuf:"varint,5,opt,name=ignore_load,json=ignoreLoad,proto3" json:"ignore_load,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ServiceNames []string `protobuf:"bytes,1,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- ExportQan bool `protobuf:"varint,4,opt,name=export_qan,json=exportQan,proto3" json:"export_qan,omitempty"`
- IgnoreLoad bool `protobuf:"varint,5,opt,name=ignore_load,json=ignoreLoad,proto3" json:"ignore_load,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartDumpRequest) Reset() {
@@ -240,11 +239,10 @@ func (x *StartDumpRequest) GetIgnoreLoad() bool {
}
type StartDumpResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartDumpResponse) Reset() {
@@ -285,9 +283,9 @@ func (x *StartDumpResponse) GetDumpId() string {
}
type ListDumpsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListDumpsRequest) Reset() {
@@ -321,11 +319,10 @@ func (*ListDumpsRequest) Descriptor() ([]byte, []int) {
}
type ListDumpsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Dumps []*Dump `protobuf:"bytes,1,rep,name=dumps,proto3" json:"dumps,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Dumps []*Dump `protobuf:"bytes,1,rep,name=dumps,proto3" json:"dumps,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListDumpsResponse) Reset() {
@@ -366,11 +363,10 @@ func (x *ListDumpsResponse) GetDumps() []*Dump {
}
type DeleteDumpRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DumpIds []string `protobuf:"bytes,1,rep,name=dump_ids,json=dumpIds,proto3" json:"dump_ids,omitempty"`
unknownFields protoimpl.UnknownFields
-
- DumpIds []string `protobuf:"bytes,1,rep,name=dump_ids,json=dumpIds,proto3" json:"dump_ids,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteDumpRequest) Reset() {
@@ -411,9 +407,9 @@ func (x *DeleteDumpRequest) GetDumpIds() []string {
}
type DeleteDumpResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DeleteDumpResponse) Reset() {
@@ -447,13 +443,12 @@ func (*DeleteDumpResponse) Descriptor() ([]byte, []int) {
}
type GetDumpLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
+ Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
unknownFields protoimpl.UnknownFields
-
- DumpId string `protobuf:"bytes,1,opt,name=dump_id,json=dumpId,proto3" json:"dump_id,omitempty"`
- Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
- Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetDumpLogsRequest) Reset() {
@@ -508,12 +503,11 @@ func (x *GetDumpLogsRequest) GetLimit() uint32 {
}
type GetDumpLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Logs []*LogChunk `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
- End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetDumpLogsResponse) Reset() {
@@ -562,12 +556,11 @@ func (x *GetDumpLogsResponse) GetEnd() bool {
// LogChunk represent one chunk of logs.
type LogChunk struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
+ Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ChunkId uint32 `protobuf:"varint,1,opt,name=chunk_id,json=chunkId,proto3" json:"chunk_id,omitempty"`
- Data string `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *LogChunk) Reset() {
@@ -615,14 +608,13 @@ func (x *LogChunk) GetData() string {
}
type SFTPParameters struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"`
+ Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
+ Directory string `protobuf:"bytes,4,opt,name=directory,proto3" json:"directory,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
- User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"`
- Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"`
- Directory string `protobuf:"bytes,4,opt,name=directory,proto3" json:"directory,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *SFTPParameters) Reset() {
@@ -684,13 +676,12 @@ func (x *SFTPParameters) GetDirectory() string {
}
type UploadDumpRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- DumpIds []string `protobuf:"bytes,1,rep,name=dump_ids,json=dumpIds,proto3" json:"dump_ids,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ DumpIds []string `protobuf:"bytes,1,rep,name=dump_ids,json=dumpIds,proto3" json:"dump_ids,omitempty"`
// SFTP upload parameters.
SftpParameters *SFTPParameters `protobuf:"bytes,2,opt,name=sftp_parameters,json=sftpParameters,proto3" json:"sftp_parameters,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UploadDumpRequest) Reset() {
@@ -738,9 +729,9 @@ func (x *UploadDumpRequest) GetSftpParameters() *SFTPParameters {
}
type UploadDumpResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UploadDumpResponse) Reset() {
@@ -775,7 +766,7 @@ func (*UploadDumpResponse) Descriptor() ([]byte, []int) {
var File_dump_v1beta1_dump_proto protoreflect.FileDescriptor
-var file_dump_v1beta1_dump_proto_rawDesc = []byte{
+var file_dump_v1beta1_dump_proto_rawDesc = string([]byte{
0x0a, 0x17, 0x64, 0x75, 0x6d, 0x70, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x64,
0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x64, 0x75, 0x6d, 0x70, 0x2e,
0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
@@ -937,16 +928,16 @@ var file_dump_v1beta1_dump_proto_rawDesc = []byte{
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x44, 0x75, 0x6d, 0x70,
0x3a, 0x3a, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
-}
+})
var (
file_dump_v1beta1_dump_proto_rawDescOnce sync.Once
- file_dump_v1beta1_dump_proto_rawDescData = file_dump_v1beta1_dump_proto_rawDesc
+ file_dump_v1beta1_dump_proto_rawDescData []byte
)
func file_dump_v1beta1_dump_proto_rawDescGZIP() []byte {
file_dump_v1beta1_dump_proto_rawDescOnce.Do(func() {
- file_dump_v1beta1_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_dump_v1beta1_dump_proto_rawDescData)
+ file_dump_v1beta1_dump_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_dump_v1beta1_dump_proto_rawDesc), len(file_dump_v1beta1_dump_proto_rawDesc)))
})
return file_dump_v1beta1_dump_proto_rawDescData
}
@@ -1009,7 +1000,7 @@ func file_dump_v1beta1_dump_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_dump_v1beta1_dump_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_dump_v1beta1_dump_proto_rawDesc), len(file_dump_v1beta1_dump_proto_rawDesc)),
NumEnums: 1,
NumMessages: 13,
NumExtensions: 0,
@@ -1021,7 +1012,6 @@ func file_dump_v1beta1_dump_proto_init() {
MessageInfos: file_dump_v1beta1_dump_proto_msgTypes,
}.Build()
File_dump_v1beta1_dump_proto = out.File
- file_dump_v1beta1_dump_proto_rawDesc = nil
file_dump_v1beta1_dump_proto_goTypes = nil
file_dump_v1beta1_dump_proto_depIdxs = nil
}
diff --git a/api/dump/v1beta1/dump.pb.gw.go b/api/dump/v1beta1/dump.pb.gw.go
index 3ffa7d594e..bbf8fe02c7 100644
--- a/api/dump/v1beta1/dump.pb.gw.go
+++ b/api/dump/v1beta1/dump.pb.gw.go
@@ -10,6 +10,7 @@ package dumpv1beta1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,71 +29,74 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_DumpService_StartDump_0(ctx context.Context, marshaler runtime.Marshaler, client DumpServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartDump(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DumpService_StartDump_0(ctx context.Context, marshaler runtime.Marshaler, server DumpServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartDump(ctx, &protoReq)
return msg, metadata, err
}
func request_DumpService_ListDumps_0(ctx context.Context, marshaler runtime.Marshaler, client DumpServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListDumpsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListDumpsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListDumps(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DumpService_ListDumps_0(ctx context.Context, marshaler runtime.Marshaler, server DumpServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListDumpsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListDumpsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListDumps(ctx, &protoReq)
return msg, metadata, err
}
func request_DumpService_DeleteDump_0(ctx context.Context, marshaler runtime.Marshaler, client DumpServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DeleteDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DeleteDump(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DumpService_DeleteDump_0(ctx context.Context, marshaler runtime.Marshaler, server DumpServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DeleteDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DeleteDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DeleteDump(ctx, &protoReq)
return msg, metadata, err
}
@@ -100,89 +104,73 @@ func local_request_DumpService_DeleteDump_0(ctx context.Context, marshaler runti
var filter_DumpService_GetDumpLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"dump_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_DumpService_GetDumpLogs_0(ctx context.Context, marshaler runtime.Marshaler, client DumpServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetDumpLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetDumpLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["dump_id"]
+ val, ok := pathParams["dump_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "dump_id")
}
-
protoReq.DumpId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "dump_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_DumpService_GetDumpLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetDumpLogs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DumpService_GetDumpLogs_0(ctx context.Context, marshaler runtime.Marshaler, server DumpServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetDumpLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetDumpLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["dump_id"]
+ val, ok := pathParams["dump_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "dump_id")
}
-
protoReq.DumpId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "dump_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_DumpService_GetDumpLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetDumpLogs(ctx, &protoReq)
return msg, metadata, err
}
func request_DumpService_UploadDump_0(ctx context.Context, marshaler runtime.Marshaler, client DumpServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UploadDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UploadDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UploadDump(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_DumpService_UploadDump_0(ctx context.Context, marshaler runtime.Marshaler, server DumpServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UploadDumpRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UploadDumpRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UploadDump(ctx, &protoReq)
return msg, metadata, err
}
@@ -193,15 +181,13 @@ func local_request_DumpService_UploadDump_0(ctx context.Context, marshaler runti
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDumpServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DumpServiceServer) error {
- mux.Handle("POST", pattern_DumpService_StartDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_StartDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/StartDump", runtime.WithHTTPPathPattern("/v1/dumps:start"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/StartDump", runtime.WithHTTPPathPattern("/v1/dumps:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -213,19 +199,15 @@ func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_StartDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_DumpService_ListDumps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_DumpService_ListDumps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/ListDumps", runtime.WithHTTPPathPattern("/v1/dumps"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/ListDumps", runtime.WithHTTPPathPattern("/v1/dumps"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -237,19 +219,15 @@ func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_ListDumps_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_DumpService_DeleteDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_DeleteDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/DeleteDump", runtime.WithHTTPPathPattern("/v1/dumps:batchDelete"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/DeleteDump", runtime.WithHTTPPathPattern("/v1/dumps:batchDelete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -261,19 +239,15 @@ func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_DeleteDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_DumpService_GetDumpLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_DumpService_GetDumpLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/GetDumpLogs", runtime.WithHTTPPathPattern("/v1/dumps/{dump_id}/logs"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/GetDumpLogs", runtime.WithHTTPPathPattern("/v1/dumps/{dump_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -285,19 +259,15 @@ func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_GetDumpLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_DumpService_UploadDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_UploadDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/UploadDump", runtime.WithHTTPPathPattern("/v1/dumps:upload"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/dump.v1beta1.DumpService/UploadDump", runtime.WithHTTPPathPattern("/v1/dumps:upload"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -309,7 +279,6 @@ func RegisterDumpServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_UploadDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -337,7 +306,6 @@ func RegisterDumpServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se
}
}()
}()
-
return RegisterDumpServiceHandler(ctx, mux, conn)
}
@@ -353,13 +321,11 @@ func RegisterDumpServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "DumpServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DumpServiceClient) error {
- mux.Handle("POST", pattern_DumpService_StartDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_StartDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/StartDump", runtime.WithHTTPPathPattern("/v1/dumps:start"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/StartDump", runtime.WithHTTPPathPattern("/v1/dumps:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -370,17 +336,13 @@ func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_StartDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_DumpService_ListDumps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_DumpService_ListDumps_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/ListDumps", runtime.WithHTTPPathPattern("/v1/dumps"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/ListDumps", runtime.WithHTTPPathPattern("/v1/dumps"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -391,17 +353,13 @@ func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_ListDumps_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_DumpService_DeleteDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_DeleteDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/DeleteDump", runtime.WithHTTPPathPattern("/v1/dumps:batchDelete"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/DeleteDump", runtime.WithHTTPPathPattern("/v1/dumps:batchDelete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -412,17 +370,13 @@ func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_DeleteDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_DumpService_GetDumpLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_DumpService_GetDumpLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/GetDumpLogs", runtime.WithHTTPPathPattern("/v1/dumps/{dump_id}/logs"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/GetDumpLogs", runtime.WithHTTPPathPattern("/v1/dumps/{dump_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -433,17 +387,13 @@ func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_GetDumpLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_DumpService_UploadDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_DumpService_UploadDump_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/UploadDump", runtime.WithHTTPPathPattern("/v1/dumps:upload"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/dump.v1beta1.DumpService/UploadDump", runtime.WithHTTPPathPattern("/v1/dumps:upload"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -454,33 +404,23 @@ func RegisterDumpServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_DumpService_UploadDump_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_DumpService_StartDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "start"))
-
- pattern_DumpService_ListDumps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, ""))
-
- pattern_DumpService_DeleteDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "batchDelete"))
-
+ pattern_DumpService_StartDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "start"))
+ pattern_DumpService_ListDumps_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, ""))
+ pattern_DumpService_DeleteDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "batchDelete"))
pattern_DumpService_GetDumpLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"v1", "dumps", "dump_id", "logs"}, ""))
-
- pattern_DumpService_UploadDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "upload"))
+ pattern_DumpService_UploadDump_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "dumps"}, "upload"))
)
var (
- forward_DumpService_StartDump_0 = runtime.ForwardResponseMessage
-
- forward_DumpService_ListDumps_0 = runtime.ForwardResponseMessage
-
- forward_DumpService_DeleteDump_0 = runtime.ForwardResponseMessage
-
+ forward_DumpService_StartDump_0 = runtime.ForwardResponseMessage
+ forward_DumpService_ListDumps_0 = runtime.ForwardResponseMessage
+ forward_DumpService_DeleteDump_0 = runtime.ForwardResponseMessage
forward_DumpService_GetDumpLogs_0 = runtime.ForwardResponseMessage
-
- forward_DumpService_UploadDump_0 = runtime.ForwardResponseMessage
+ forward_DumpService_UploadDump_0 = runtime.ForwardResponseMessage
)
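
The regenerated gateway handlers above make two mechanical changes: the string method literals passed to mux.Handle are replaced by the net/http constants (http.MethodGet, http.MethodPost), and the separate `var err error` / `var annotatedContext context.Context` declarations are collapsed into a single short variable declaration. A minimal sketch, outside the generated file, showing that the constants are just the familiar method strings:

```go
// Not part of the generated code: a small check that the net/http method
// constants used by the regenerated handlers are the plain method strings.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	fmt.Println(http.MethodGet == "GET")   // true
	fmt.Println(http.MethodPost == "POST") // true
}
```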
diff --git a/api/dump/v1beta1/dump.pb.validate.go b/api/dump/v1beta1/dump.pb.validate.go
index 6c13b79ca9..6ff806abc2 100644
--- a/api/dump/v1beta1/dump.pb.validate.go
+++ b/api/dump/v1beta1/dump.pb.validate.go
@@ -160,7 +160,7 @@ type DumpMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DumpMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -322,7 +322,7 @@ type StartDumpRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartDumpRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -424,7 +424,7 @@ type StartDumpResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartDumpResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -526,7 +526,7 @@ type ListDumpsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListDumpsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -660,7 +660,7 @@ type ListDumpsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListDumpsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -794,7 +794,7 @@ type DeleteDumpRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteDumpRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -896,7 +896,7 @@ type DeleteDumpResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DeleteDumpResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1013,7 +1013,7 @@ type GetDumpLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetDumpLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1151,7 +1151,7 @@ type GetDumpLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetDumpLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1256,7 +1256,7 @@ type LogChunkMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LogChunkMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1391,7 +1391,7 @@ type SFTPParametersMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SFTPParametersMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1563,7 +1563,7 @@ type UploadDumpRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UploadDumpRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1665,7 +1665,7 @@ type UploadDumpResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UploadDumpResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
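
The only functional change in dump.pb.validate.go is that each MultiError's Error method now preallocates the message slice with capacity len(m), so the appends in the loop reuse a single backing array instead of growing it repeatedly. A minimal sketch of the same pattern, using an illustrative joinErrors helper that is not part of the generated API:

```go
// Sketch of the preallocation pattern used by the regenerated Error methods.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func joinErrors(errs []error) string {
	msgs := make([]string, 0, len(errs)) // capacity known up front, length zero
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	fmt.Println(joinErrors([]error{errors.New("first"), errors.New("second")}))
}
```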
diff --git a/api/inventory/v1/agent_status.pb.go b/api/inventory/v1/agent_status.pb.go
index b9c28ee4be..de377fbb89 100644
--- a/api/inventory/v1/agent_status.pb.go
+++ b/api/inventory/v1/agent_status.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: inventory/v1/agent_status.proto
@@ -9,6 +9,7 @@ package inventoryv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -95,7 +96,7 @@ func (AgentStatus) EnumDescriptor() ([]byte, []int) {
var File_inventory_v1_agent_status_proto protoreflect.FileDescriptor
-var file_inventory_v1_agent_status_proto_rawDesc = []byte{
+var file_inventory_v1_agent_status_proto_rawDesc = string([]byte{
0x0a, 0x1f, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2a,
@@ -126,16 +127,16 @@ var file_inventory_v1_agent_status_proto_rawDesc = []byte{
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x49,
0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_inventory_v1_agent_status_proto_rawDescOnce sync.Once
- file_inventory_v1_agent_status_proto_rawDescData = file_inventory_v1_agent_status_proto_rawDesc
+ file_inventory_v1_agent_status_proto_rawDescData []byte
)
func file_inventory_v1_agent_status_proto_rawDescGZIP() []byte {
file_inventory_v1_agent_status_proto_rawDescOnce.Do(func() {
- file_inventory_v1_agent_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_inventory_v1_agent_status_proto_rawDescData)
+ file_inventory_v1_agent_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_inventory_v1_agent_status_proto_rawDesc), len(file_inventory_v1_agent_status_proto_rawDesc)))
})
return file_inventory_v1_agent_status_proto_rawDescData
}
@@ -164,7 +165,7 @@ func file_inventory_v1_agent_status_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_inventory_v1_agent_status_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_inventory_v1_agent_status_proto_rawDesc), len(file_inventory_v1_agent_status_proto_rawDesc)),
NumEnums: 1,
NumMessages: 0,
NumExtensions: 0,
@@ -175,7 +176,6 @@ func file_inventory_v1_agent_status_proto_init() {
EnumInfos: file_inventory_v1_agent_status_proto_enumTypes,
}.Build()
File_inventory_v1_agent_status_proto = out.File
- file_inventory_v1_agent_status_proto_rawDesc = nil
file_inventory_v1_agent_status_proto_goTypes = nil
file_inventory_v1_agent_status_proto_depIdxs = nil
}
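
With protoc-gen-go v1.36.x the raw file descriptor is emitted as a string rather than a []byte, and the call sites that need bytes convert it back with unsafe.Slice(unsafe.StringData(...), len(...)), a Go 1.20+ idiom that views the string's bytes without copying; since a string variable cannot be set to nil, the post-registration `rawDesc = nil` line is dropped as well. A minimal sketch of that conversion (the resulting slice aliases the string and must never be written to):

```go
// Sketch of the zero-copy string-to-[]byte view used by the regenerated
// descriptor code. Requires Go 1.20+; never mutate the returned slice.
package main

import (
	"fmt"
	"unsafe"
)

func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := bytesView("raw descriptor bytes")
	fmt.Println(len(b), string(b[:3])) // 20 raw
}
```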
diff --git a/api/inventory/v1/agents.pb.go b/api/inventory/v1/agents.pb.go
index b7ec1a509a..eb4ea200bb 100644
--- a/api/inventory/v1/agents.pb.go
+++ b/api/inventory/v1/agents.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: inventory/v1/agents.proto
@@ -9,6 +9,7 @@ package inventoryv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -120,20 +121,19 @@ func (AgentType) EnumDescriptor() ([]byte, []int) {
// PMMAgent runs on Generic or Container Node.
type PMMAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Node identifier where this instance runs.
RunsOnNodeId string `protobuf:"bytes,2,opt,name=runs_on_node_id,json=runsOnNodeId,proto3" json:"runs_on_node_id,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,3,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,3,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if Agent is running and connected to pmm-managed.
Connected bool `protobuf:"varint,10,opt,name=connected,proto3" json:"connected,omitempty"`
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,11,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PMMAgent) Reset() {
@@ -205,10 +205,7 @@ func (x *PMMAgent) GetProcessExecPath() string {
// It scrapes other exporter Agents that are configured with push_metrics_enabled
// and uses Prometheus remote write protocol to push metrics to PMM Server.
type VMAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -218,7 +215,9 @@ type VMAgent struct {
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,11,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Listen port for scraping metrics.
- ListenPort uint32 `protobuf:"varint,12,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
+ ListenPort uint32 `protobuf:"varint,12,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *VMAgent) Reset() {
@@ -288,10 +287,7 @@ func (x *VMAgent) GetListenPort() uint32 {
// NodeExporter runs on Generic or Container Node and exposes its metrics.
type NodeExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -299,7 +295,7 @@ type NodeExporter struct {
// Desired Agent status: enabled (false) or disabled (true).
Disabled bool `protobuf:"varint,3,opt,name=disabled,proto3" json:"disabled,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,4,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,4,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if exporter uses push metrics mode.
PushMetricsEnabled bool `protobuf:"varint,5,opt,name=push_metrics_enabled,json=pushMetricsEnabled,proto3" json:"push_metrics_enabled,omitempty"`
// List of disabled collector names.
@@ -316,6 +312,8 @@ type NodeExporter struct {
ExposeExporter bool `protobuf:"varint,14,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,15,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *NodeExporter) Reset() {
@@ -434,10 +432,7 @@ func (x *NodeExporter) GetMetricsResolutions() *common.MetricsResolutions {
// MySQLdExporter runs on Generic or Container Node and exposes MySQL Service metrics.
type MySQLdExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -463,7 +458,7 @@ type MySQLdExporter struct {
// Negative value means tablestats group collectors are always disabled.
TablestatsGroupTableLimit int32 `protobuf:"varint,11,opt,name=tablestats_group_table_limit,json=tablestatsGroupTableLimit,proto3" json:"tablestats_group_table_limit,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if exporter uses push metrics mode.
PushMetricsEnabled bool `protobuf:"varint,13,opt,name=push_metrics_enabled,json=pushMetricsEnabled,proto3" json:"push_metrics_enabled,omitempty"`
// List of disabled collector names.
@@ -484,6 +479,8 @@ type MySQLdExporter struct {
ExposeExporter bool `protobuf:"varint,25,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,26,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MySQLdExporter) Reset() {
@@ -672,10 +669,7 @@ func (x *MySQLdExporter) GetMetricsResolutions() *common.MetricsResolutions {
// MongoDBExporter runs on Generic or Container Node and exposes MongoDB Service metrics.
type MongoDBExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -691,7 +685,7 @@ type MongoDBExporter struct {
// Skip TLS certificate and hostname validation.
TlsSkipVerify bool `protobuf:"varint,7,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if exporter uses push metrics mode.
PushMetricsEnabled bool `protobuf:"varint,9,opt,name=push_metrics_enabled,json=pushMetricsEnabled,proto3" json:"push_metrics_enabled,omitempty"`
// List of disabled collector names.
@@ -715,6 +709,8 @@ type MongoDBExporter struct {
ExposeExporter bool `protobuf:"varint,27,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,28,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MongoDBExporter) Reset() {
@@ -882,10 +878,7 @@ func (x *MongoDBExporter) GetMetricsResolutions() *common.MetricsResolutions {
// PostgresExporter runs on Generic or Container Node and exposes PostgreSQL Service metrics.
type PostgresExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -901,7 +894,7 @@ type PostgresExporter struct {
// Skip TLS certificate and hostname validation. Uses sslmode=required instead of verify-full.
TlsSkipVerify bool `protobuf:"varint,7,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if exporter uses push metrics mode.
PushMetricsEnabled bool `protobuf:"varint,9,opt,name=push_metrics_enabled,json=pushMetricsEnabled,proto3" json:"push_metrics_enabled,omitempty"`
// List of disabled collector names.
@@ -922,6 +915,8 @@ type PostgresExporter struct {
MaxExporterConnections int32 `protobuf:"varint,26,opt,name=max_exporter_connections,json=maxExporterConnections,proto3" json:"max_exporter_connections,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,27,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PostgresExporter) Reset() {
@@ -1082,10 +1077,7 @@ func (x *PostgresExporter) GetMetricsResolutions() *common.MetricsResolutions {
// ProxySQLExporter runs on Generic or Container Node and exposes ProxySQL Service metrics.
type ProxySQLExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1101,7 +1093,7 @@ type ProxySQLExporter struct {
// Skip TLS certificate and hostname validation.
TlsSkipVerify bool `protobuf:"varint,7,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// True if exporter uses push metrics mode.
PushMetricsEnabled bool `protobuf:"varint,9,opt,name=push_metrics_enabled,json=pushMetricsEnabled,proto3" json:"push_metrics_enabled,omitempty"`
// List of disabled collector names.
@@ -1118,6 +1110,8 @@ type ProxySQLExporter struct {
ExposeExporter bool `protobuf:"varint,24,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,25,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ProxySQLExporter) Reset() {
@@ -1264,10 +1258,7 @@ func (x *ProxySQLExporter) GetMetricsResolutions() *common.MetricsResolutions {
// QANMySQLPerfSchemaAgent runs within pmm-agent and sends MySQL Query Analytics data to the PMM Server.
type QANMySQLPerfSchemaAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1295,13 +1286,15 @@ type QANMySQLPerfSchemaAgent struct {
// True if query examples are disabled.
QueryExamplesDisabled bool `protobuf:"varint,13,opt,name=query_examples_disabled,json=queryExamplesDisabled,proto3" json:"query_examples_disabled,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status.
Status AgentStatus `protobuf:"varint,20,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,21,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANMySQLPerfSchemaAgent) Reset() {
@@ -1455,10 +1448,7 @@ func (x *QANMySQLPerfSchemaAgent) GetLogLevel() LogLevel {
// QANMySQLSlowlogAgent runs within pmm-agent and sends MySQL Query Analytics data to the PMM Server.
type QANMySQLSlowlogAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1488,13 +1478,15 @@ type QANMySQLSlowlogAgent struct {
// Slowlog file is rotated at this size if > 0.
MaxSlowlogFileSize int64 `protobuf:"varint,14,opt,name=max_slowlog_file_size,json=maxSlowlogFileSize,proto3" json:"max_slowlog_file_size,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,15,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,15,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status.
Status AgentStatus `protobuf:"varint,20,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// mod tidy
ProcessExecPath string `protobuf:"bytes,21,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANMySQLSlowlogAgent) Reset() {
@@ -1655,10 +1647,7 @@ func (x *QANMySQLSlowlogAgent) GetLogLevel() LogLevel {
// QANMongoDBProfilerAgent runs within pmm-agent and sends MongoDB Query Analytics data to the PMM Server.
type QANMongoDBProfilerAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1676,13 +1665,15 @@ type QANMongoDBProfilerAgent struct {
// Limit query length in QAN (default: server-defined; -1: no limit).
MaxQueryLength int32 `protobuf:"varint,9,opt,name=max_query_length,json=maxQueryLength,proto3" json:"max_query_length,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status.
Status AgentStatus `protobuf:"varint,20,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,21,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANMongoDBProfilerAgent) Reset() {
@@ -1801,10 +1792,7 @@ func (x *QANMongoDBProfilerAgent) GetLogLevel() LogLevel {
// QANPostgreSQLPgStatementsAgent runs within pmm-agent and sends PostgreSQL Query Analytics data to the PMM Server.
type QANPostgreSQLPgStatementsAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1824,13 +1812,15 @@ type QANPostgreSQLPgStatementsAgent struct {
// Skip TLS certificate and hostname validation.
TlsSkipVerify bool `protobuf:"varint,9,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status.
Status AgentStatus `protobuf:"varint,20,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,21,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANPostgreSQLPgStatementsAgent) Reset() {
@@ -1956,10 +1946,7 @@ func (x *QANPostgreSQLPgStatementsAgent) GetLogLevel() LogLevel {
// QANPostgreSQLPgStatMonitorAgent runs within pmm-agent and sends PostgreSQL Query Analytics data to the PMM Server.
type QANPostgreSQLPgStatMonitorAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -1981,13 +1968,15 @@ type QANPostgreSQLPgStatMonitorAgent struct {
// True if query examples are disabled.
QueryExamplesDisabled bool `protobuf:"varint,10,opt,name=query_examples_disabled,json=queryExamplesDisabled,proto3" json:"query_examples_disabled,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status.
Status AgentStatus `protobuf:"varint,20,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Path to exec process.
ProcessExecPath string `protobuf:"bytes,21,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QANPostgreSQLPgStatMonitorAgent) Reset() {
@@ -2120,10 +2109,7 @@ func (x *QANPostgreSQLPgStatMonitorAgent) GetLogLevel() LogLevel {
// RDSExporter runs on Generic or Container Node and exposes RemoteRDS Node metrics.
type RDSExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -2135,7 +2121,7 @@ type RDSExporter struct {
// AWS Access Key.
AwsAccessKey string `protobuf:"bytes,5,opt,name=aws_access_key,json=awsAccessKey,proto3" json:"aws_access_key,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status (the same for several configurations).
Status AgentStatus `protobuf:"varint,10,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Listen port for scraping metrics (the same for several configurations).
@@ -2154,6 +2140,8 @@ type RDSExporter struct {
AutoDiscoveryLimit int32 `protobuf:"varint,25,opt,name=auto_discovery_limit,json=autoDiscoveryLimit,proto3" json:"auto_discovery_limit,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,26,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RDSExporter) Reset() {
@@ -2293,10 +2281,7 @@ func (x *RDSExporter) GetMetricsResolutions() *common.MetricsResolutions {
// ExternalExporter runs on any Node type, including Remote Node.
type ExternalExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Node identifier where this instance runs.
@@ -2312,7 +2297,7 @@ type ExternalExporter struct {
// Path under which metrics are exposed, used to generate URI.
MetricsPath string `protobuf:"bytes,7,opt,name=metrics_path,json=metricsPath,proto3" json:"metrics_path,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Listen port for scraping metrics.
ListenPort uint32 `protobuf:"varint,9,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
// True if exporter uses push metrics mode.
@@ -2321,6 +2306,8 @@ type ExternalExporter struct {
ProcessExecPath string `protobuf:"bytes,11,opt,name=process_exec_path,json=processExecPath,proto3" json:"process_exec_path,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,12,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExternalExporter) Reset() {
@@ -2439,10 +2426,7 @@ func (x *ExternalExporter) GetMetricsResolutions() *common.MetricsResolutions {
// AzureDatabaseExporter runs on Generic or Container Node and exposes RemoteAzure Node metrics.
type AzureDatabaseExporter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// The pmm-agent identifier which runs this instance.
@@ -2456,7 +2440,7 @@ type AzureDatabaseExporter struct {
// Azure database resource type (mysql, maria, postgres)
AzureDatabaseResourceType string `protobuf:"bytes,6,opt,name=azure_database_resource_type,json=azureDatabaseResourceType,proto3" json:"azure_database_resource_type,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Actual Agent status (the same for several configurations).
Status AgentStatus `protobuf:"varint,10,opt,name=status,proto3,enum=inventory.v1.AgentStatus" json:"status,omitempty"`
// Listen port for scraping metrics (the same for several configurations).
@@ -2469,6 +2453,8 @@ type AzureDatabaseExporter struct {
LogLevel LogLevel `protobuf:"varint,14,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,15,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AzureDatabaseExporter) Reset() {
@@ -2594,10 +2580,7 @@ func (x *AzureDatabaseExporter) GetMetricsResolutions() *common.MetricsResolutio
// ChangeCommonAgentParams contains parameters that can be changed for all Agents.
type ChangeCommonAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -2606,6 +2589,8 @@ type ChangeCommonAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeCommonAgentParams) Reset() {
@@ -2667,10 +2652,7 @@ func (x *ChangeCommonAgentParams) GetMetricsResolutions() *common.MetricsResolut
}
type ListAgentsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Return only Agents started by this pmm-agent.
// Exactly one of these parameters should be present: pmm_agent_id, node_id, service_id.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
@@ -2681,7 +2663,9 @@ type ListAgentsRequest struct {
// Exactly one of these parameters should be present: pmm_agent_id, node_id, service_id.
ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Return only agents of a particular type.
- AgentType AgentType `protobuf:"varint,4,opt,name=agent_type,json=agentType,proto3,enum=inventory.v1.AgentType" json:"agent_type,omitempty"`
+ AgentType AgentType `protobuf:"varint,4,opt,name=agent_type,json=agentType,proto3,enum=inventory.v1.AgentType" json:"agent_type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentsRequest) Reset() {
@@ -2743,10 +2727,7 @@ func (x *ListAgentsRequest) GetAgentType() AgentType {
}
type ListAgentsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PmmAgent []*PMMAgent `protobuf:"bytes,1,rep,name=pmm_agent,json=pmmAgent,proto3" json:"pmm_agent,omitempty"`
VmAgent []*VMAgent `protobuf:"bytes,2,rep,name=vm_agent,json=vmAgent,proto3" json:"vm_agent,omitempty"`
NodeExporter []*NodeExporter `protobuf:"bytes,3,rep,name=node_exporter,json=nodeExporter,proto3" json:"node_exporter,omitempty"`
@@ -2762,6 +2743,8 @@ type ListAgentsResponse struct {
ExternalExporter []*ExternalExporter `protobuf:"bytes,13,rep,name=external_exporter,json=externalExporter,proto3" json:"external_exporter,omitempty"`
RdsExporter []*RDSExporter `protobuf:"bytes,14,rep,name=rds_exporter,json=rdsExporter,proto3" json:"rds_exporter,omitempty"`
AzureDatabaseExporter []*AzureDatabaseExporter `protobuf:"bytes,15,rep,name=azure_database_exporter,json=azureDatabaseExporter,proto3" json:"azure_database_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentsResponse) Reset() {
@@ -2900,12 +2883,11 @@ func (x *ListAgentsResponse) GetAzureDatabaseExporter() []*AzureDatabaseExporter
}
type GetAgentRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetAgentRequest) Reset() {
@@ -2946,11 +2928,8 @@ func (x *GetAgentRequest) GetAgentId() string {
}
type GetAgentResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Agent:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Agent:
//
// *GetAgentResponse_PmmAgent
// *GetAgentResponse_Vmagent
@@ -2967,7 +2946,9 @@ type GetAgentResponse struct {
// *GetAgentResponse_ExternalExporter
// *GetAgentResponse_RdsExporter
// *GetAgentResponse_AzureDatabaseExporter
- Agent isGetAgentResponse_Agent `protobuf_oneof:"agent"`
+ Agent isGetAgentResponse_Agent `protobuf_oneof:"agent"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetAgentResponse) Reset() {
@@ -3000,114 +2981,144 @@ func (*GetAgentResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_agents_proto_rawDescGZIP(), []int{19}
}
-func (m *GetAgentResponse) GetAgent() isGetAgentResponse_Agent {
- if m != nil {
- return m.Agent
+func (x *GetAgentResponse) GetAgent() isGetAgentResponse_Agent {
+ if x != nil {
+ return x.Agent
}
return nil
}
func (x *GetAgentResponse) GetPmmAgent() *PMMAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_PmmAgent); ok {
- return x.PmmAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_PmmAgent); ok {
+ return x.PmmAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetVmagent() *VMAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_Vmagent); ok {
- return x.Vmagent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_Vmagent); ok {
+ return x.Vmagent
+ }
}
return nil
}
func (x *GetAgentResponse) GetNodeExporter() *NodeExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_NodeExporter); ok {
- return x.NodeExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_NodeExporter); ok {
+ return x.NodeExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetMysqldExporter() *MySQLdExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_MysqldExporter); ok {
- return x.MysqldExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_MysqldExporter); ok {
+ return x.MysqldExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetMongodbExporter() *MongoDBExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_MongodbExporter); ok {
- return x.MongodbExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_MongodbExporter); ok {
+ return x.MongodbExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetPostgresExporter() *PostgresExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_PostgresExporter); ok {
- return x.PostgresExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_PostgresExporter); ok {
+ return x.PostgresExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetProxysqlExporter() *ProxySQLExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_ProxysqlExporter); ok {
- return x.ProxysqlExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_ProxysqlExporter); ok {
+ return x.ProxysqlExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetQanMysqlPerfschemaAgent() *QANMySQLPerfSchemaAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_QanMysqlPerfschemaAgent); ok {
- return x.QanMysqlPerfschemaAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_QanMysqlPerfschemaAgent); ok {
+ return x.QanMysqlPerfschemaAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetQanMysqlSlowlogAgent() *QANMySQLSlowlogAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_QanMysqlSlowlogAgent); ok {
- return x.QanMysqlSlowlogAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_QanMysqlSlowlogAgent); ok {
+ return x.QanMysqlSlowlogAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetQanMongodbProfilerAgent() *QANMongoDBProfilerAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_QanMongodbProfilerAgent); ok {
- return x.QanMongodbProfilerAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_QanMongodbProfilerAgent); ok {
+ return x.QanMongodbProfilerAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetQanPostgresqlPgstatementsAgent() *QANPostgreSQLPgStatementsAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_QanPostgresqlPgstatementsAgent); ok {
- return x.QanPostgresqlPgstatementsAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_QanPostgresqlPgstatementsAgent); ok {
+ return x.QanPostgresqlPgstatementsAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetQanPostgresqlPgstatmonitorAgent() *QANPostgreSQLPgStatMonitorAgent {
- if x, ok := x.GetAgent().(*GetAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
- return x.QanPostgresqlPgstatmonitorAgent
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
+ return x.QanPostgresqlPgstatmonitorAgent
+ }
}
return nil
}
func (x *GetAgentResponse) GetExternalExporter() *ExternalExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_ExternalExporter); ok {
- return x.ExternalExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_ExternalExporter); ok {
+ return x.ExternalExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetRdsExporter() *RDSExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_RdsExporter); ok {
- return x.RdsExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_RdsExporter); ok {
+ return x.RdsExporter
+ }
}
return nil
}
func (x *GetAgentResponse) GetAzureDatabaseExporter() *AzureDatabaseExporter {
- if x, ok := x.GetAgent().(*GetAgentResponse_AzureDatabaseExporter); ok {
- return x.AzureDatabaseExporter
+ if x != nil {
+ if x, ok := x.Agent.(*GetAgentResponse_AzureDatabaseExporter); ok {
+ return x.AzureDatabaseExporter
+ }
}
return nil
}
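
The regenerated oneof getters above add an explicit nil-receiver guard and type-assert the Agent field directly instead of going through GetAgent(); calling them on a nil *GetAgentResponse still simply returns nil. A minimal sketch of that getter shape with hypothetical types (message, textPayload) that do not exist in this API:

```go
// Hypothetical types illustrating the nil-receiver-safe oneof getter shape
// used by the regenerated code; none of these names are part of the API.
package main

import "fmt"

type payload interface{ isPayload() }

type textPayload struct{ Text string }

func (*textPayload) isPayload() {}

type message struct{ Payload payload }

// GetText mirrors the generated shape: guard the nil receiver, then assert
// the concrete oneof wrapper before reading its field.
func (m *message) GetText() string {
	if m != nil {
		if p, ok := m.Payload.(*textPayload); ok {
			return p.Text
		}
	}
	return ""
}

func main() {
	var m *message
	fmt.Println(m.GetText() == "") // true: safe to call on a nil pointer
}
```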
@@ -3207,14 +3218,13 @@ func (*GetAgentResponse_RdsExporter) isGetAgentResponse_Agent() {}
func (*GetAgentResponse_AzureDatabaseExporter) isGetAgentResponse_Agent() {}
type GetAgentLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Limit the number of log lines to this value. Pass 0 for no limit.
- Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetAgentLogsRequest) Reset() {
@@ -3262,12 +3272,11 @@ func (x *GetAgentLogsRequest) GetLimit() uint32 {
}
type GetAgentLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Logs []string `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
- AgentConfigLogLinesCount uint32 `protobuf:"varint,2,opt,name=agent_config_log_lines_count,json=agentConfigLogLinesCount,proto3" json:"agent_config_log_lines_count,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Logs []string `protobuf:"bytes,1,rep,name=logs,proto3" json:"logs,omitempty"`
+ AgentConfigLogLinesCount uint32 `protobuf:"varint,2,opt,name=agent_config_log_lines_count,json=agentConfigLogLinesCount,proto3" json:"agent_config_log_lines_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetAgentLogsResponse) Reset() {
@@ -3315,11 +3324,8 @@ func (x *GetAgentLogsResponse) GetAgentConfigLogLinesCount() uint32 {
}
type AddAgentRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Agent:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Agent:
//
// *AddAgentRequest_PmmAgent
// *AddAgentRequest_NodeExporter
@@ -3335,7 +3341,9 @@ type AddAgentRequest struct {
// *AddAgentRequest_QanMongodbProfilerAgent
// *AddAgentRequest_QanPostgresqlPgstatementsAgent
// *AddAgentRequest_QanPostgresqlPgstatmonitorAgent
- Agent isAddAgentRequest_Agent `protobuf_oneof:"agent"`
+ Agent isAddAgentRequest_Agent `protobuf_oneof:"agent"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAgentRequest) Reset() {
@@ -3368,107 +3376,135 @@ func (*AddAgentRequest) Descriptor() ([]byte, []int) {
return file_inventory_v1_agents_proto_rawDescGZIP(), []int{22}
}
-func (m *AddAgentRequest) GetAgent() isAddAgentRequest_Agent {
- if m != nil {
- return m.Agent
+func (x *AddAgentRequest) GetAgent() isAddAgentRequest_Agent {
+ if x != nil {
+ return x.Agent
}
return nil
}
func (x *AddAgentRequest) GetPmmAgent() *AddPMMAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_PmmAgent); ok {
- return x.PmmAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_PmmAgent); ok {
+ return x.PmmAgent
+ }
}
return nil
}
func (x *AddAgentRequest) GetNodeExporter() *AddNodeExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_NodeExporter); ok {
- return x.NodeExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_NodeExporter); ok {
+ return x.NodeExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetMysqldExporter() *AddMySQLdExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_MysqldExporter); ok {
- return x.MysqldExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_MysqldExporter); ok {
+ return x.MysqldExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetMongodbExporter() *AddMongoDBExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_MongodbExporter); ok {
- return x.MongodbExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_MongodbExporter); ok {
+ return x.MongodbExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetPostgresExporter() *AddPostgresExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_PostgresExporter); ok {
- return x.PostgresExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_PostgresExporter); ok {
+ return x.PostgresExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetProxysqlExporter() *AddProxySQLExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_ProxysqlExporter); ok {
- return x.ProxysqlExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_ProxysqlExporter); ok {
+ return x.ProxysqlExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetExternalExporter() *AddExternalExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_ExternalExporter); ok {
- return x.ExternalExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_ExternalExporter); ok {
+ return x.ExternalExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetRdsExporter() *AddRDSExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_RdsExporter); ok {
- return x.RdsExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_RdsExporter); ok {
+ return x.RdsExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetAzureDatabaseExporter() *AddAzureDatabaseExporterParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_AzureDatabaseExporter); ok {
- return x.AzureDatabaseExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_AzureDatabaseExporter); ok {
+ return x.AzureDatabaseExporter
+ }
}
return nil
}
func (x *AddAgentRequest) GetQanMysqlPerfschemaAgent() *AddQANMySQLPerfSchemaAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_QanMysqlPerfschemaAgent); ok {
- return x.QanMysqlPerfschemaAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_QanMysqlPerfschemaAgent); ok {
+ return x.QanMysqlPerfschemaAgent
+ }
}
return nil
}
func (x *AddAgentRequest) GetQanMysqlSlowlogAgent() *AddQANMySQLSlowlogAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_QanMysqlSlowlogAgent); ok {
- return x.QanMysqlSlowlogAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_QanMysqlSlowlogAgent); ok {
+ return x.QanMysqlSlowlogAgent
+ }
}
return nil
}
func (x *AddAgentRequest) GetQanMongodbProfilerAgent() *AddQANMongoDBProfilerAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_QanMongodbProfilerAgent); ok {
- return x.QanMongodbProfilerAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_QanMongodbProfilerAgent); ok {
+ return x.QanMongodbProfilerAgent
+ }
}
return nil
}
func (x *AddAgentRequest) GetQanPostgresqlPgstatementsAgent() *AddQANPostgreSQLPgStatementsAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_QanPostgresqlPgstatementsAgent); ok {
- return x.QanPostgresqlPgstatementsAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_QanPostgresqlPgstatementsAgent); ok {
+ return x.QanPostgresqlPgstatementsAgent
+ }
}
return nil
}
func (x *AddAgentRequest) GetQanPostgresqlPgstatmonitorAgent() *AddQANPostgreSQLPgStatMonitorAgentParams {
- if x, ok := x.GetAgent().(*AddAgentRequest_QanPostgresqlPgstatmonitorAgent); ok {
- return x.QanPostgresqlPgstatmonitorAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentRequest_QanPostgresqlPgstatmonitorAgent); ok {
+ return x.QanPostgresqlPgstatmonitorAgent
+ }
}
return nil
}
@@ -3562,11 +3598,8 @@ func (*AddAgentRequest_QanPostgresqlPgstatementsAgent) isAddAgentRequest_Agent()
func (*AddAgentRequest_QanPostgresqlPgstatmonitorAgent) isAddAgentRequest_Agent() {}
type AddAgentResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Agent:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Agent:
//
// *AddAgentResponse_PmmAgent
// *AddAgentResponse_NodeExporter
@@ -3582,7 +3615,9 @@ type AddAgentResponse struct {
// *AddAgentResponse_QanMongodbProfilerAgent
// *AddAgentResponse_QanPostgresqlPgstatementsAgent
// *AddAgentResponse_QanPostgresqlPgstatmonitorAgent
- Agent isAddAgentResponse_Agent `protobuf_oneof:"agent"`
+ Agent isAddAgentResponse_Agent `protobuf_oneof:"agent"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAgentResponse) Reset() {
@@ -3615,107 +3650,135 @@ func (*AddAgentResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_agents_proto_rawDescGZIP(), []int{23}
}
-func (m *AddAgentResponse) GetAgent() isAddAgentResponse_Agent {
- if m != nil {
- return m.Agent
+func (x *AddAgentResponse) GetAgent() isAddAgentResponse_Agent {
+ if x != nil {
+ return x.Agent
}
return nil
}
func (x *AddAgentResponse) GetPmmAgent() *PMMAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_PmmAgent); ok {
- return x.PmmAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_PmmAgent); ok {
+ return x.PmmAgent
+ }
}
return nil
}
func (x *AddAgentResponse) GetNodeExporter() *NodeExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_NodeExporter); ok {
- return x.NodeExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_NodeExporter); ok {
+ return x.NodeExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetMysqldExporter() *MySQLdExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_MysqldExporter); ok {
- return x.MysqldExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_MysqldExporter); ok {
+ return x.MysqldExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetMongodbExporter() *MongoDBExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_MongodbExporter); ok {
- return x.MongodbExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_MongodbExporter); ok {
+ return x.MongodbExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetPostgresExporter() *PostgresExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_PostgresExporter); ok {
- return x.PostgresExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_PostgresExporter); ok {
+ return x.PostgresExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetProxysqlExporter() *ProxySQLExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_ProxysqlExporter); ok {
- return x.ProxysqlExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_ProxysqlExporter); ok {
+ return x.ProxysqlExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetExternalExporter() *ExternalExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_ExternalExporter); ok {
- return x.ExternalExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_ExternalExporter); ok {
+ return x.ExternalExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetRdsExporter() *RDSExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_RdsExporter); ok {
- return x.RdsExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_RdsExporter); ok {
+ return x.RdsExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetAzureDatabaseExporter() *AzureDatabaseExporter {
- if x, ok := x.GetAgent().(*AddAgentResponse_AzureDatabaseExporter); ok {
- return x.AzureDatabaseExporter
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_AzureDatabaseExporter); ok {
+ return x.AzureDatabaseExporter
+ }
}
return nil
}
func (x *AddAgentResponse) GetQanMysqlPerfschemaAgent() *QANMySQLPerfSchemaAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_QanMysqlPerfschemaAgent); ok {
- return x.QanMysqlPerfschemaAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_QanMysqlPerfschemaAgent); ok {
+ return x.QanMysqlPerfschemaAgent
+ }
}
return nil
}
func (x *AddAgentResponse) GetQanMysqlSlowlogAgent() *QANMySQLSlowlogAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_QanMysqlSlowlogAgent); ok {
- return x.QanMysqlSlowlogAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_QanMysqlSlowlogAgent); ok {
+ return x.QanMysqlSlowlogAgent
+ }
}
return nil
}
func (x *AddAgentResponse) GetQanMongodbProfilerAgent() *QANMongoDBProfilerAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_QanMongodbProfilerAgent); ok {
- return x.QanMongodbProfilerAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_QanMongodbProfilerAgent); ok {
+ return x.QanMongodbProfilerAgent
+ }
}
return nil
}
func (x *AddAgentResponse) GetQanPostgresqlPgstatementsAgent() *QANPostgreSQLPgStatementsAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_QanPostgresqlPgstatementsAgent); ok {
- return x.QanPostgresqlPgstatementsAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_QanPostgresqlPgstatementsAgent); ok {
+ return x.QanPostgresqlPgstatementsAgent
+ }
}
return nil
}
func (x *AddAgentResponse) GetQanPostgresqlPgstatmonitorAgent() *QANPostgreSQLPgStatMonitorAgent {
- if x, ok := x.GetAgent().(*AddAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
- return x.QanPostgresqlPgstatmonitorAgent
+ if x != nil {
+ if x, ok := x.Agent.(*AddAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
+ return x.QanPostgresqlPgstatmonitorAgent
+ }
}
return nil
}
@@ -3809,12 +3872,9 @@ func (*AddAgentResponse_QanPostgresqlPgstatementsAgent) isAddAgentResponse_Agent
func (*AddAgentResponse_QanPostgresqlPgstatmonitorAgent) isAddAgentResponse_Agent() {}
type ChangeAgentRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
- // Types that are assignable to Agent:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ // Types that are valid to be assigned to Agent:
//
// *ChangeAgentRequest_NodeExporter
// *ChangeAgentRequest_MysqldExporter
@@ -3829,7 +3889,9 @@ type ChangeAgentRequest struct {
// *ChangeAgentRequest_QanMongodbProfilerAgent
// *ChangeAgentRequest_QanPostgresqlPgstatementsAgent
// *ChangeAgentRequest_QanPostgresqlPgstatmonitorAgent
- Agent isChangeAgentRequest_Agent `protobuf_oneof:"agent"`
+ Agent isChangeAgentRequest_Agent `protobuf_oneof:"agent"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAgentRequest) Reset() {
@@ -3869,100 +3931,126 @@ func (x *ChangeAgentRequest) GetAgentId() string {
return ""
}
-func (m *ChangeAgentRequest) GetAgent() isChangeAgentRequest_Agent {
- if m != nil {
- return m.Agent
+func (x *ChangeAgentRequest) GetAgent() isChangeAgentRequest_Agent {
+ if x != nil {
+ return x.Agent
}
return nil
}
func (x *ChangeAgentRequest) GetNodeExporter() *ChangeNodeExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_NodeExporter); ok {
- return x.NodeExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_NodeExporter); ok {
+ return x.NodeExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetMysqldExporter() *ChangeMySQLdExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_MysqldExporter); ok {
- return x.MysqldExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_MysqldExporter); ok {
+ return x.MysqldExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetMongodbExporter() *ChangeMongoDBExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_MongodbExporter); ok {
- return x.MongodbExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_MongodbExporter); ok {
+ return x.MongodbExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetPostgresExporter() *ChangePostgresExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_PostgresExporter); ok {
- return x.PostgresExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_PostgresExporter); ok {
+ return x.PostgresExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetProxysqlExporter() *ChangeProxySQLExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_ProxysqlExporter); ok {
- return x.ProxysqlExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_ProxysqlExporter); ok {
+ return x.ProxysqlExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetExternalExporter() *ChangeExternalExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_ExternalExporter); ok {
- return x.ExternalExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_ExternalExporter); ok {
+ return x.ExternalExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetRdsExporter() *ChangeRDSExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_RdsExporter); ok {
- return x.RdsExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_RdsExporter); ok {
+ return x.RdsExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetAzureDatabaseExporter() *ChangeAzureDatabaseExporterParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_AzureDatabaseExporter); ok {
- return x.AzureDatabaseExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_AzureDatabaseExporter); ok {
+ return x.AzureDatabaseExporter
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetQanMysqlPerfschemaAgent() *ChangeQANMySQLPerfSchemaAgentParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_QanMysqlPerfschemaAgent); ok {
- return x.QanMysqlPerfschemaAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_QanMysqlPerfschemaAgent); ok {
+ return x.QanMysqlPerfschemaAgent
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetQanMysqlSlowlogAgent() *ChangeQANMySQLSlowlogAgentParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_QanMysqlSlowlogAgent); ok {
- return x.QanMysqlSlowlogAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_QanMysqlSlowlogAgent); ok {
+ return x.QanMysqlSlowlogAgent
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetQanMongodbProfilerAgent() *ChangeQANMongoDBProfilerAgentParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_QanMongodbProfilerAgent); ok {
- return x.QanMongodbProfilerAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_QanMongodbProfilerAgent); ok {
+ return x.QanMongodbProfilerAgent
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetQanPostgresqlPgstatementsAgent() *ChangeQANPostgreSQLPgStatementsAgentParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_QanPostgresqlPgstatementsAgent); ok {
- return x.QanPostgresqlPgstatementsAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_QanPostgresqlPgstatementsAgent); ok {
+ return x.QanPostgresqlPgstatementsAgent
+ }
}
return nil
}
func (x *ChangeAgentRequest) GetQanPostgresqlPgstatmonitorAgent() *ChangeQANPostgreSQLPgStatMonitorAgentParams {
- if x, ok := x.GetAgent().(*ChangeAgentRequest_QanPostgresqlPgstatmonitorAgent); ok {
- return x.QanPostgresqlPgstatmonitorAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentRequest_QanPostgresqlPgstatmonitorAgent); ok {
+ return x.QanPostgresqlPgstatmonitorAgent
+ }
}
return nil
}
@@ -4050,11 +4138,8 @@ func (*ChangeAgentRequest_QanPostgresqlPgstatementsAgent) isChangeAgentRequest_A
func (*ChangeAgentRequest_QanPostgresqlPgstatmonitorAgent) isChangeAgentRequest_Agent() {}
type ChangeAgentResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Agent:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Agent:
//
// *ChangeAgentResponse_NodeExporter
// *ChangeAgentResponse_MysqldExporter
@@ -4069,7 +4154,9 @@ type ChangeAgentResponse struct {
// *ChangeAgentResponse_QanMongodbProfilerAgent
// *ChangeAgentResponse_QanPostgresqlPgstatementsAgent
// *ChangeAgentResponse_QanPostgresqlPgstatmonitorAgent
- Agent isChangeAgentResponse_Agent `protobuf_oneof:"agent"`
+ Agent isChangeAgentResponse_Agent `protobuf_oneof:"agent"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAgentResponse) Reset() {
@@ -4102,100 +4189,126 @@ func (*ChangeAgentResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_agents_proto_rawDescGZIP(), []int{25}
}
-func (m *ChangeAgentResponse) GetAgent() isChangeAgentResponse_Agent {
- if m != nil {
- return m.Agent
+func (x *ChangeAgentResponse) GetAgent() isChangeAgentResponse_Agent {
+ if x != nil {
+ return x.Agent
}
return nil
}
func (x *ChangeAgentResponse) GetNodeExporter() *NodeExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_NodeExporter); ok {
- return x.NodeExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_NodeExporter); ok {
+ return x.NodeExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetMysqldExporter() *MySQLdExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_MysqldExporter); ok {
- return x.MysqldExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_MysqldExporter); ok {
+ return x.MysqldExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetMongodbExporter() *MongoDBExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_MongodbExporter); ok {
- return x.MongodbExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_MongodbExporter); ok {
+ return x.MongodbExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetPostgresExporter() *PostgresExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_PostgresExporter); ok {
- return x.PostgresExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_PostgresExporter); ok {
+ return x.PostgresExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetProxysqlExporter() *ProxySQLExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_ProxysqlExporter); ok {
- return x.ProxysqlExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_ProxysqlExporter); ok {
+ return x.ProxysqlExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetExternalExporter() *ExternalExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_ExternalExporter); ok {
- return x.ExternalExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_ExternalExporter); ok {
+ return x.ExternalExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetRdsExporter() *RDSExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_RdsExporter); ok {
- return x.RdsExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_RdsExporter); ok {
+ return x.RdsExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetAzureDatabaseExporter() *AzureDatabaseExporter {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_AzureDatabaseExporter); ok {
- return x.AzureDatabaseExporter
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_AzureDatabaseExporter); ok {
+ return x.AzureDatabaseExporter
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetQanMysqlPerfschemaAgent() *QANMySQLPerfSchemaAgent {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_QanMysqlPerfschemaAgent); ok {
- return x.QanMysqlPerfschemaAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_QanMysqlPerfschemaAgent); ok {
+ return x.QanMysqlPerfschemaAgent
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetQanMysqlSlowlogAgent() *QANMySQLSlowlogAgent {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_QanMysqlSlowlogAgent); ok {
- return x.QanMysqlSlowlogAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_QanMysqlSlowlogAgent); ok {
+ return x.QanMysqlSlowlogAgent
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetQanMongodbProfilerAgent() *QANMongoDBProfilerAgent {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_QanMongodbProfilerAgent); ok {
- return x.QanMongodbProfilerAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_QanMongodbProfilerAgent); ok {
+ return x.QanMongodbProfilerAgent
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetQanPostgresqlPgstatementsAgent() *QANPostgreSQLPgStatementsAgent {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_QanPostgresqlPgstatementsAgent); ok {
- return x.QanPostgresqlPgstatementsAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_QanPostgresqlPgstatementsAgent); ok {
+ return x.QanPostgresqlPgstatementsAgent
+ }
}
return nil
}
func (x *ChangeAgentResponse) GetQanPostgresqlPgstatmonitorAgent() *QANPostgreSQLPgStatMonitorAgent {
- if x, ok := x.GetAgent().(*ChangeAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
- return x.QanPostgresqlPgstatmonitorAgent
+ if x != nil {
+ if x, ok := x.Agent.(*ChangeAgentResponse_QanPostgresqlPgstatmonitorAgent); ok {
+ return x.QanPostgresqlPgstatmonitorAgent
+ }
}
return nil
}
@@ -4283,14 +4396,13 @@ func (*ChangeAgentResponse_QanPostgresqlPgstatementsAgent) isChangeAgentResponse
func (*ChangeAgentResponse_QanPostgresqlPgstatmonitorAgent) isChangeAgentResponse_Agent() {}
type AddPMMAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier where this instance runs.
RunsOnNodeId string `protobuf:"bytes,1,opt,name=runs_on_node_id,json=runsOnNodeId,proto3" json:"runs_on_node_id,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,2,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,2,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddPMMAgentParams) Reset() {
@@ -4338,14 +4450,11 @@ func (x *AddPMMAgentParams) GetCustomLabels() map[string]string {
}
type AddNodeExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,2,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,2,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Enables push metrics mode for exporter.
PushMetrics bool `protobuf:"varint,3,opt,name=push_metrics,json=pushMetrics,proto3" json:"push_metrics,omitempty"`
// List of collector names to disable in this exporter.
@@ -4354,6 +4463,8 @@ type AddNodeExporterParams struct {
LogLevel LogLevel `protobuf:"varint,5,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Expose the node_exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,6,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddNodeExporterParams) Reset() {
@@ -4429,10 +4540,7 @@ func (x *AddNodeExporterParams) GetExposeExporter() bool {
}
type ChangeNodeExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -4441,6 +4549,8 @@ type ChangeNodeExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeNodeExporterParams) Reset() {
@@ -4502,10 +4612,7 @@ func (x *ChangeNodeExporterParams) GetMetricsResolutions() *common.MetricsResolu
}
type AddMySQLdExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -4529,7 +4636,7 @@ type AddMySQLdExporterParams struct {
// Negative value means tablestats group collectors are always disabled.
TablestatsGroupTableLimit int32 `protobuf:"varint,10,opt,name=tablestats_group_table_limit,json=tablestatsGroupTableLimit,proto3" json:"tablestats_group_table_limit,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,12,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Enables push metrics mode for exporter.
@@ -4542,6 +4649,8 @@ type AddMySQLdExporterParams struct {
LogLevel LogLevel `protobuf:"varint,16,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,17,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMySQLdExporterParams) Reset() {
@@ -4694,10 +4803,7 @@ func (x *AddMySQLdExporterParams) GetExposeExporter() bool {
}
type ChangeMySQLdExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -4706,6 +4812,8 @@ type ChangeMySQLdExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeMySQLdExporterParams) Reset() {
@@ -4767,10 +4875,7 @@ func (x *ChangeMySQLdExporterParams) GetMetricsResolutions() *common.MetricsReso
}
type AddMongoDBExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -4790,7 +4895,7 @@ type AddMongoDBExporterParams struct {
// Certificate Authority certificate chain.
TlsCa string `protobuf:"bytes,9,opt,name=tls_ca,json=tlsCa,proto3" json:"tls_ca,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,11,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Enables push metrics mode for exporter.
@@ -4814,6 +4919,8 @@ type AddMongoDBExporterParams struct {
LogLevel LogLevel `protobuf:"varint,19,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,20,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMongoDBExporterParams) Reset() {
@@ -4987,10 +5094,7 @@ func (x *AddMongoDBExporterParams) GetExposeExporter() bool {
}
type ChangeMongoDBExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -4999,6 +5103,8 @@ type ChangeMongoDBExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeMongoDBExporterParams) Reset() {
@@ -5060,10 +5166,7 @@ func (x *ChangeMongoDBExporterParams) GetMetricsResolutions() *common.MetricsRes
}
type AddPostgresExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -5077,7 +5180,7 @@ type AddPostgresExporterParams struct {
// Skip TLS certificate and hostname validation. Uses sslmode=required instead of verify-full.
TlsSkipVerify bool `protobuf:"varint,6,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,8,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Enables push metrics mode for exporter.
@@ -5100,6 +5203,8 @@ type AddPostgresExporterParams struct {
ExposeExporter bool `protobuf:"varint,17,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Maximum number of connections that exporter can open to the database instance.
MaxExporterConnections int32 `protobuf:"varint,18,opt,name=max_exporter_connections,json=maxExporterConnections,proto3" json:"max_exporter_connections,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddPostgresExporterParams) Reset() {
@@ -5259,10 +5364,7 @@ func (x *AddPostgresExporterParams) GetMaxExporterConnections() int32 {
}
type ChangePostgresExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -5271,6 +5373,8 @@ type ChangePostgresExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangePostgresExporterParams) Reset() {
@@ -5332,10 +5436,7 @@ func (x *ChangePostgresExporterParams) GetMetricsResolutions() *common.MetricsRe
}
type AddProxySQLExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -5349,7 +5450,7 @@ type AddProxySQLExporterParams struct {
// Skip TLS certificate and hostname validation.
TlsSkipVerify bool `protobuf:"varint,6,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,8,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Enables push metrics mode for exporter.
@@ -5362,6 +5463,8 @@ type AddProxySQLExporterParams struct {
LogLevel LogLevel `protobuf:"varint,12,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,13,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddProxySQLExporterParams) Reset() {
@@ -5486,10 +5589,7 @@ func (x *AddProxySQLExporterParams) GetExposeExporter() bool {
}
type ChangeProxySQLExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -5498,6 +5598,8 @@ type ChangeProxySQLExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeProxySQLExporterParams) Reset() {
@@ -5559,10 +5661,7 @@ func (x *ChangeProxySQLExporterParams) GetMetricsResolutions() *common.MetricsRe
}
type AddQANMySQLPerfSchemaAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -5586,13 +5685,15 @@ type AddQANMySQLPerfSchemaAgentParams struct {
// Disable query examples.
DisableQueryExamples bool `protobuf:"varint,11,opt,name=disable_query_examples,json=disableQueryExamples,proto3" json:"disable_query_examples,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,13,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
DisableCommentsParsing bool `protobuf:"varint,14,opt,name=disable_comments_parsing,json=disableCommentsParsing,proto3" json:"disable_comments_parsing,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddQANMySQLPerfSchemaAgentParams) Reset() {
@@ -5731,10 +5832,7 @@ func (x *AddQANMySQLPerfSchemaAgentParams) GetLogLevel() LogLevel {
}
type ChangeQANMySQLPerfSchemaAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -5743,6 +5841,8 @@ type ChangeQANMySQLPerfSchemaAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeQANMySQLPerfSchemaAgentParams) Reset() {
@@ -5804,10 +5904,7 @@ func (x *ChangeQANMySQLPerfSchemaAgentParams) GetMetricsResolutions() *common.Me
}
type AddQANMySQLSlowlogAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -5834,13 +5931,15 @@ type AddQANMySQLSlowlogAgentParams struct {
// Use zero or negative value to disable rotation.
MaxSlowlogFileSize int64 `protobuf:"varint,12,opt,name=max_slowlog_file_size,json=maxSlowlogFileSize,proto3" json:"max_slowlog_file_size,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,13,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,13,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,14,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
DisableCommentsParsing bool `protobuf:"varint,15,opt,name=disable_comments_parsing,json=disableCommentsParsing,proto3" json:"disable_comments_parsing,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,16,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,16,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddQANMySQLSlowlogAgentParams) Reset() {
@@ -5986,10 +6085,7 @@ func (x *AddQANMySQLSlowlogAgentParams) GetLogLevel() LogLevel {
}
type ChangeQANMySQLSlowlogAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -5998,6 +6094,8 @@ type ChangeQANMySQLSlowlogAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeQANMySQLSlowlogAgentParams) Reset() {
@@ -6059,10 +6157,7 @@ func (x *ChangeQANMySQLSlowlogAgentParams) GetMetricsResolutions() *common.Metri
}
type AddQANMongoDBProfilerAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -6084,7 +6179,7 @@ type AddQANMongoDBProfilerAgentParams struct {
// Limit query length in QAN (default: server-defined; -1: no limit).
MaxQueryLength int32 `protobuf:"varint,10,opt,name=max_query_length,json=maxQueryLength,proto3" json:"max_query_length,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,12,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Authentication mechanism.
@@ -6094,7 +6189,9 @@ type AddQANMongoDBProfilerAgentParams struct {
// Authentication database.
AuthenticationDatabase string `protobuf:"bytes,14,opt,name=authentication_database,json=authenticationDatabase,proto3" json:"authentication_database,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddQANMongoDBProfilerAgentParams) Reset() {
@@ -6233,10 +6330,7 @@ func (x *AddQANMongoDBProfilerAgentParams) GetLogLevel() LogLevel {
}
type ChangeQANMongoDBProfilerAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -6245,6 +6339,8 @@ type ChangeQANMongoDBProfilerAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeQANMongoDBProfilerAgentParams) Reset() {
@@ -6306,10 +6402,7 @@ func (x *ChangeQANMongoDBProfilerAgentParams) GetMetricsResolutions() *common.Me
}
type AddQANPostgreSQLPgStatementsAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -6323,7 +6416,7 @@ type AddQANPostgreSQLPgStatementsAgentParams struct {
// Skip TLS certificate and hostname validation.
TlsSkipVerify bool `protobuf:"varint,6,opt,name=tls_skip_verify,json=tlsSkipVerify,proto3" json:"tls_skip_verify,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,8,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
@@ -6337,7 +6430,9 @@ type AddQANPostgreSQLPgStatementsAgentParams struct {
// TLS Certificate Key.
TlsKey string `protobuf:"bytes,13,opt,name=tls_key,json=tlsKey,proto3" json:"tls_key,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,14,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,14,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddQANPostgreSQLPgStatementsAgentParams) Reset() {
@@ -6469,10 +6564,7 @@ func (x *AddQANPostgreSQLPgStatementsAgentParams) GetLogLevel() LogLevel {
}
type ChangeQANPostgreSQLPgStatementsAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -6481,6 +6573,8 @@ type ChangeQANPostgreSQLPgStatementsAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeQANPostgreSQLPgStatementsAgentParams) Reset() {
@@ -6542,10 +6636,7 @@ func (x *ChangeQANPostgreSQLPgStatementsAgentParams) GetMetricsResolutions() *co
}
type AddQANPostgreSQLPgStatMonitorAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Service identifier.
@@ -6563,7 +6654,7 @@ type AddQANPostgreSQLPgStatMonitorAgentParams struct {
// Disable query examples.
DisableQueryExamples bool `protobuf:"varint,8,opt,name=disable_query_examples,json=disableQueryExamples,proto3" json:"disable_query_examples,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,10,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
@@ -6575,7 +6666,9 @@ type AddQANPostgreSQLPgStatMonitorAgentParams struct {
// TLS Certificate Key.
TlsKey string `protobuf:"bytes,14,opt,name=tls_key,json=tlsKey,proto3" json:"tls_key,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,15,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddQANPostgreSQLPgStatMonitorAgentParams) Reset() {
@@ -6714,10 +6807,7 @@ func (x *AddQANPostgreSQLPgStatMonitorAgentParams) GetLogLevel() LogLevel {
}
type ChangeQANPostgreSQLPgStatMonitorAgentParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -6726,6 +6816,8 @@ type ChangeQANPostgreSQLPgStatMonitorAgentParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeQANPostgreSQLPgStatMonitorAgentParams) Reset() {
@@ -6787,10 +6879,7 @@ func (x *ChangeQANPostgreSQLPgStatMonitorAgentParams) GetMetricsResolutions() *c
}
type AddRDSExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Node identifier.
@@ -6800,7 +6889,7 @@ type AddRDSExporterParams struct {
// AWS Secret Key.
AwsSecretKey string `protobuf:"bytes,4,opt,name=aws_secret_key,json=awsSecretKey,proto3" json:"aws_secret_key,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,5,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,5,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,6,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable basic metrics.
@@ -6810,7 +6899,9 @@ type AddRDSExporterParams struct {
// Enables push metrics mode for exporter.
PushMetrics bool `protobuf:"varint,9,opt,name=push_metrics,json=pushMetrics,proto3" json:"push_metrics,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,10,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,10,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddRDSExporterParams) Reset() {
@@ -6914,10 +7005,7 @@ func (x *AddRDSExporterParams) GetLogLevel() LogLevel {
}
type ChangeRDSExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -6926,6 +7014,8 @@ type ChangeRDSExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeRDSExporterParams) Reset() {
@@ -6987,10 +7077,7 @@ func (x *ChangeRDSExporterParams) GetMetricsResolutions() *common.MetricsResolut
}
type AddExternalExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The node identifier where this instance is run.
RunsOnNodeId string `protobuf:"bytes,1,opt,name=runs_on_node_id,json=runsOnNodeId,proto3" json:"runs_on_node_id,omitempty"`
// Service identifier.
@@ -7006,9 +7093,11 @@ type AddExternalExporterParams struct {
// Listen port for scraping metrics.
ListenPort uint32 `protobuf:"varint,8,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Enables push metrics mode for exporter.
- PushMetrics bool `protobuf:"varint,11,opt,name=push_metrics,json=pushMetrics,proto3" json:"push_metrics,omitempty"`
+ PushMetrics bool `protobuf:"varint,11,opt,name=push_metrics,json=pushMetrics,proto3" json:"push_metrics,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddExternalExporterParams) Reset() {
@@ -7105,10 +7194,7 @@ func (x *AddExternalExporterParams) GetPushMetrics() bool {
}
type ChangeExternalExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -7117,6 +7203,8 @@ type ChangeExternalExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeExternalExporterParams) Reset() {
@@ -7178,10 +7266,7 @@ func (x *ChangeExternalExporterParams) GetMetricsResolutions() *common.MetricsRe
}
type AddAzureDatabaseExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The pmm-agent identifier which runs this instance.
PmmAgentId string `protobuf:"bytes,1,opt,name=pmm_agent_id,json=pmmAgentId,proto3" json:"pmm_agent_id,omitempty"`
// Node identifier.
@@ -7199,13 +7284,15 @@ type AddAzureDatabaseExporterParams struct {
// Azure resource type (mysql, maria, postgres)
AzureDatabaseResourceType string `protobuf:"bytes,8,opt,name=azure_database_resource_type,json=azureDatabaseResourceType,proto3" json:"azure_database_resource_type,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,10,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Enables push metrics mode for exporter.
PushMetrics bool `protobuf:"varint,11,opt,name=push_metrics,json=pushMetrics,proto3" json:"push_metrics,omitempty"`
// Log level for exporter.
- LogLevel LogLevel `protobuf:"varint,12,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ LogLevel LogLevel `protobuf:"varint,12,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAzureDatabaseExporterParams) Reset() {
@@ -7323,10 +7410,7 @@ func (x *AddAzureDatabaseExporterParams) GetLogLevel() LogLevel {
}
type ChangeAzureDatabaseExporterParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Enable this Agent. Agents are enabled by default when they get added.
Enable *bool `protobuf:"varint,1,opt,name=enable,proto3,oneof" json:"enable,omitempty"`
// Replace all custom user-assigned labels.
@@ -7335,6 +7419,8 @@ type ChangeAzureDatabaseExporterParams struct {
EnablePushMetrics *bool `protobuf:"varint,3,opt,name=enable_push_metrics,json=enablePushMetrics,proto3,oneof" json:"enable_push_metrics,omitempty"`
// Metrics resolution for this agent.
MetricsResolutions *common.MetricsResolutions `protobuf:"bytes,4,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeAzureDatabaseExporterParams) Reset() {
@@ -7396,13 +7482,12 @@ func (x *ChangeAzureDatabaseExporterParams) GetMetricsResolutions() *common.Metr
}
type RemoveAgentRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Remove agent with all dependencies.
- Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveAgentRequest) Reset() {
@@ -7450,9 +7535,9 @@ func (x *RemoveAgentRequest) GetForce() bool {
}
type RemoveAgentResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveAgentResponse) Reset() {
@@ -7487,7 +7572,7 @@ func (*RemoveAgentResponse) Descriptor() ([]byte, []int) {
var File_inventory_v1_agents_proto protoreflect.FileDescriptor
-var file_inventory_v1_agents_proto_rawDesc = []byte{
+var file_inventory_v1_agents_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x69, 0x6e, 0x76,
0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x1a, 0x13, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
@@ -9659,16 +9744,16 @@ var file_inventory_v1_agents_proto_rawDesc = []byte{
0x79, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x0d, 0x49, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_inventory_v1_agents_proto_rawDescOnce sync.Once
- file_inventory_v1_agents_proto_rawDescData = file_inventory_v1_agents_proto_rawDesc
+ file_inventory_v1_agents_proto_rawDescData []byte
)
func file_inventory_v1_agents_proto_rawDescGZIP() []byte {
file_inventory_v1_agents_proto_rawDescOnce.Do(func() {
- file_inventory_v1_agents_proto_rawDescData = protoimpl.X.CompressGZIP(file_inventory_v1_agents_proto_rawDescData)
+ file_inventory_v1_agents_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_inventory_v1_agents_proto_rawDesc), len(file_inventory_v1_agents_proto_rawDesc)))
})
return file_inventory_v1_agents_proto_rawDescData
}
@@ -10078,7 +10163,7 @@ func file_inventory_v1_agents_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_inventory_v1_agents_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_inventory_v1_agents_proto_rawDesc), len(file_inventory_v1_agents_proto_rawDesc)),
NumEnums: 1,
NumMessages: 83,
NumExtensions: 0,
@@ -10090,7 +10175,6 @@ func file_inventory_v1_agents_proto_init() {
MessageInfos: file_inventory_v1_agents_proto_msgTypes,
}.Build()
File_inventory_v1_agents_proto = out.File
- file_inventory_v1_agents_proto_rawDesc = nil
file_inventory_v1_agents_proto_goTypes = nil
file_inventory_v1_agents_proto_depIdxs = nil
}
diff --git a/api/inventory/v1/agents.pb.gw.go b/api/inventory/v1/agents.pb.gw.go
index ea823e5f0f..371c0a593f 100644
--- a/api/inventory/v1/agents.pb.gw.go
+++ b/api/inventory/v1/agents.pb.gw.go
@@ -10,6 +10,7 @@ package inventoryv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,6 +29,7 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
@@ -36,81 +38,67 @@ var (
var filter_AgentsService_ListAgents_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_AgentsService_ListAgents_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentsRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_ListAgents_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListAgents(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_ListAgents_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentsRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_ListAgents_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListAgents(ctx, &protoReq)
return msg, metadata, err
}
func request_AgentsService_GetAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetAgentRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
msg, err := client.GetAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_GetAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetAgentRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
msg, err := server.GetAgent(ctx, &protoReq)
return msg, metadata, err
}
@@ -118,147 +106,115 @@ func local_request_AgentsService_GetAgent_0(ctx context.Context, marshaler runti
var filter_AgentsService_GetAgentLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"agent_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_AgentsService_GetAgentLogs_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetAgentLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetAgentLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_GetAgentLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetAgentLogs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_GetAgentLogs_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetAgentLogsRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetAgentLogsRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_GetAgentLogs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetAgentLogs(ctx, &protoReq)
return msg, metadata, err
}
func request_AgentsService_AddAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAgentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAgentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_AddAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAgentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAgentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddAgent(ctx, &protoReq)
return msg, metadata, err
}
func request_AgentsService_ChangeAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeAgentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
msg, err := client.ChangeAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_ChangeAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeAgentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
msg, err := server.ChangeAgent(ctx, &protoReq)
return msg, metadata, err
}
@@ -266,65 +222,49 @@ func local_request_AgentsService_ChangeAgent_0(ctx context.Context, marshaler ru
var filter_AgentsService_RemoveAgent_0 = &utilities.DoubleArray{Encoding: map[string]int{"agent_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_AgentsService_RemoveAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveAgentRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_RemoveAgent_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RemoveAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AgentsService_RemoveAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveAgentRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveAgentRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["agent_id"]
+ val, ok := pathParams["agent_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "agent_id")
}
-
protoReq.AgentId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "agent_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AgentsService_RemoveAgent_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RemoveAgent(ctx, &protoReq)
return msg, metadata, err
}
@@ -335,15 +275,13 @@ func local_request_AgentsService_RemoveAgent_0(ctx context.Context, marshaler ru
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAgentsServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AgentsServiceServer) error {
- mux.Handle("GET", pattern_AgentsService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/ListAgents", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/ListAgents", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -355,19 +293,15 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_ListAgents_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentsService_GetAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_GetAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -379,19 +313,15 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_GetAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentsService_GetAgentLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_GetAgentLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgentLogs", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}/logs"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgentLogs", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -403,19 +333,15 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_GetAgentLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AgentsService_AddAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentsService_AddAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/AddAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/AddAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -427,19 +353,15 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_AddAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AgentsService_ChangeAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AgentsService_ChangeAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/ChangeAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/ChangeAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -451,19 +373,15 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_ChangeAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AgentsService_RemoveAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AgentsService_RemoveAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/RemoveAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.AgentsService/RemoveAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -475,7 +393,6 @@ func RegisterAgentsServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_RemoveAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -503,7 +420,6 @@ func RegisterAgentsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.
}
}()
}()
-
return RegisterAgentsServiceHandler(ctx, mux, conn)
}
@@ -519,13 +435,11 @@ func RegisterAgentsServiceHandler(ctx context.Context, mux *runtime.ServeMux, co
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AgentsServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AgentsServiceClient) error {
- mux.Handle("GET", pattern_AgentsService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/ListAgents", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/ListAgents", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -536,17 +450,13 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_ListAgents_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentsService_GetAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_GetAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -557,17 +467,13 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_GetAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_AgentsService_GetAgentLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_AgentsService_GetAgentLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgentLogs", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}/logs"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/GetAgentLogs", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}/logs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -578,17 +484,13 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_GetAgentLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_AgentsService_AddAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_AgentsService_AddAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/AddAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/AddAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -599,17 +501,13 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_AddAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_AgentsService_ChangeAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_AgentsService_ChangeAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/ChangeAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/ChangeAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -620,17 +518,13 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_ChangeAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_AgentsService_RemoveAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_AgentsService_RemoveAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/RemoveAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.AgentsService/RemoveAgent", runtime.WithHTTPPathPattern("/v1/inventory/agents/{agent_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -641,37 +535,25 @@ func RegisterAgentsServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_AgentsService_RemoveAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_AgentsService_ListAgents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "agents"}, ""))
-
- pattern_AgentsService_GetAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
-
+ pattern_AgentsService_ListAgents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "agents"}, ""))
+ pattern_AgentsService_GetAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
pattern_AgentsService_GetAgentLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "inventory", "agents", "agent_id", "logs"}, ""))
-
- pattern_AgentsService_AddAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "agents"}, ""))
-
- pattern_AgentsService_ChangeAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
-
- pattern_AgentsService_RemoveAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
+ pattern_AgentsService_AddAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "agents"}, ""))
+ pattern_AgentsService_ChangeAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
+ pattern_AgentsService_RemoveAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "agents", "agent_id"}, ""))
)
var (
- forward_AgentsService_ListAgents_0 = runtime.ForwardResponseMessage
-
- forward_AgentsService_GetAgent_0 = runtime.ForwardResponseMessage
-
+ forward_AgentsService_ListAgents_0 = runtime.ForwardResponseMessage
+ forward_AgentsService_GetAgent_0 = runtime.ForwardResponseMessage
forward_AgentsService_GetAgentLogs_0 = runtime.ForwardResponseMessage
-
- forward_AgentsService_AddAgent_0 = runtime.ForwardResponseMessage
-
- forward_AgentsService_ChangeAgent_0 = runtime.ForwardResponseMessage
-
- forward_AgentsService_RemoveAgent_0 = runtime.ForwardResponseMessage
+ forward_AgentsService_AddAgent_0 = runtime.ForwardResponseMessage
+ forward_AgentsService_ChangeAgent_0 = runtime.ForwardResponseMessage
+ forward_AgentsService_RemoveAgent_0 = runtime.ForwardResponseMessage
)
diff --git a/api/inventory/v1/agents.pb.validate.go b/api/inventory/v1/agents.pb.validate.go
index e856653f05..9010f1a668 100644
--- a/api/inventory/v1/agents.pb.validate.go
+++ b/api/inventory/v1/agents.pb.validate.go
@@ -80,7 +80,7 @@ type PMMAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PMMAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -188,7 +188,7 @@ type VMAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m VMAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -336,7 +336,7 @@ type NodeExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m NodeExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -505,7 +505,7 @@ type MySQLdExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MySQLdExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -666,7 +666,7 @@ type MongoDBExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MongoDBExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -827,7 +827,7 @@ type PostgresExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PostgresExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -984,7 +984,7 @@ type ProxySQLExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ProxySQLExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1118,7 +1118,7 @@ type QANMySQLPerfSchemaAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANMySQLPerfSchemaAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1256,7 +1256,7 @@ type QANMySQLSlowlogAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANMySQLSlowlogAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1382,7 +1382,7 @@ type QANMongoDBProfilerAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANMongoDBProfilerAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1510,7 +1510,7 @@ type QANPostgreSQLPgStatementsAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANPostgreSQLPgStatementsAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1641,7 +1641,7 @@ type QANPostgreSQLPgStatMonitorAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QANPostgreSQLPgStatMonitorAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1800,7 +1800,7 @@ type RDSExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RDSExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1951,7 +1951,7 @@ type ExternalExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ExternalExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2104,7 +2104,7 @@ type AzureDatabaseExporterMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AzureDatabaseExporterMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2274,7 +2274,7 @@ type ChangeCommonAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeCommonAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2384,7 +2384,7 @@ type ListAgentsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2996,7 +2996,7 @@ type ListAgentsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3109,7 +3109,7 @@ type GetAgentRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetAgentRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3829,7 +3829,7 @@ type GetAgentResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetAgentResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3942,7 +3942,7 @@ type GetAgentLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetAgentLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4046,7 +4046,7 @@ type GetAgentLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetAgentLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4727,7 +4727,7 @@ type AddAgentRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAgentRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -5406,7 +5406,7 @@ type AddAgentResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAgentResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6055,7 +6055,7 @@ type ChangeAgentRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAgentRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6695,7 +6695,7 @@ type ChangeAgentResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAgentResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6810,7 +6810,7 @@ type AddPMMAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddPMMAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -6931,7 +6931,7 @@ type AddNodeExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddNodeExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7101,7 +7101,7 @@ type ChangeNodeExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeNodeExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7262,7 +7262,7 @@ type AddMySQLdExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMySQLdExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7432,7 +7432,7 @@ type ChangeMySQLdExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeMySQLdExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7588,7 +7588,7 @@ type AddMongoDBExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMongoDBExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7758,7 +7758,7 @@ type ChangeMongoDBExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeMongoDBExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -7922,7 +7922,7 @@ type AddPostgresExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddPostgresExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8092,7 +8092,7 @@ type ChangePostgresExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangePostgresExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8246,7 +8246,7 @@ type AddProxySQLExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddProxySQLExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8416,7 +8416,7 @@ type ChangeProxySQLExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeProxySQLExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8578,7 +8578,7 @@ type AddQANMySQLPerfSchemaAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddQANMySQLPerfSchemaAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8751,7 +8751,7 @@ type ChangeQANMySQLPerfSchemaAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeQANMySQLPerfSchemaAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -8913,7 +8913,7 @@ type AddQANMySQLSlowlogAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddQANMySQLSlowlogAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9086,7 +9086,7 @@ type ChangeQANMySQLSlowlogAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeQANMySQLSlowlogAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9239,7 +9239,7 @@ type AddQANMongoDBProfilerAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddQANMongoDBProfilerAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9412,7 +9412,7 @@ type ChangeQANMongoDBProfilerAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeQANMongoDBProfilerAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9573,7 +9573,7 @@ type AddQANPostgreSQLPgStatementsAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddQANPostgreSQLPgStatementsAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9747,7 +9747,7 @@ type ChangeQANPostgreSQLPgStatementsAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeQANPostgreSQLPgStatementsAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -9910,7 +9910,7 @@ type AddQANPostgreSQLPgStatMonitorAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddQANPostgreSQLPgStatMonitorAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10084,7 +10084,7 @@ type ChangeQANPostgreSQLPgStatMonitorAgentParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeQANPostgreSQLPgStatMonitorAgentParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10225,7 +10225,7 @@ type AddRDSExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddRDSExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10395,7 +10395,7 @@ type ChangeRDSExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeRDSExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10533,7 +10533,7 @@ type AddExternalExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddExternalExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10703,7 +10703,7 @@ type ChangeExternalExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeExternalExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -10857,7 +10857,7 @@ type AddAzureDatabaseExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAzureDatabaseExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11030,7 +11030,7 @@ type ChangeAzureDatabaseExporterParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeAzureDatabaseExporterParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11146,7 +11146,7 @@ type RemoveAgentRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveAgentRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -11248,7 +11248,7 @@ type RemoveAgentResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveAgentResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/inventory/v1/log_level.pb.go b/api/inventory/v1/log_level.pb.go
index addb457bf8..b7697a84c1 100644
--- a/api/inventory/v1/log_level.pb.go
+++ b/api/inventory/v1/log_level.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: inventory/v1/log_level.proto
@@ -9,6 +9,7 @@ package inventoryv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -83,7 +84,7 @@ func (LogLevel) EnumDescriptor() ([]byte, []int) {
var File_inventory_v1_log_level_proto protoreflect.FileDescriptor
-var file_inventory_v1_log_level_proto_rawDesc = []byte{
+var file_inventory_v1_log_level_proto_rawDesc = string([]byte{
0x0a, 0x1c, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6c,
0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2a, 0x8c, 0x01, 0x0a,
@@ -107,16 +108,16 @@ var file_inventory_v1_log_level_proto_rawDesc = []byte{
0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x49, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72,
0x79, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_inventory_v1_log_level_proto_rawDescOnce sync.Once
- file_inventory_v1_log_level_proto_rawDescData = file_inventory_v1_log_level_proto_rawDesc
+ file_inventory_v1_log_level_proto_rawDescData []byte
)
func file_inventory_v1_log_level_proto_rawDescGZIP() []byte {
file_inventory_v1_log_level_proto_rawDescOnce.Do(func() {
- file_inventory_v1_log_level_proto_rawDescData = protoimpl.X.CompressGZIP(file_inventory_v1_log_level_proto_rawDescData)
+ file_inventory_v1_log_level_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_inventory_v1_log_level_proto_rawDesc), len(file_inventory_v1_log_level_proto_rawDesc)))
})
return file_inventory_v1_log_level_proto_rawDescData
}
@@ -145,7 +146,7 @@ func file_inventory_v1_log_level_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_inventory_v1_log_level_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_inventory_v1_log_level_proto_rawDesc), len(file_inventory_v1_log_level_proto_rawDesc)),
NumEnums: 1,
NumMessages: 0,
NumExtensions: 0,
@@ -156,7 +157,6 @@ func file_inventory_v1_log_level_proto_init() {
EnumInfos: file_inventory_v1_log_level_proto_enumTypes,
}.Build()
File_inventory_v1_log_level_proto = out.File
- file_inventory_v1_log_level_proto_rawDesc = nil
file_inventory_v1_log_level_proto_goTypes = nil
file_inventory_v1_log_level_proto_depIdxs = nil
}
diff --git a/api/inventory/v1/nodes.pb.go b/api/inventory/v1/nodes.pb.go
index a2ffc9e83d..2d76aa0f6c 100644
--- a/api/inventory/v1/nodes.pb.go
+++ b/api/inventory/v1/nodes.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: inventory/v1/nodes.proto
@@ -9,6 +9,7 @@ package inventoryv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -85,10 +86,7 @@ func (NodeType) EnumDescriptor() ([]byte, []int) {
// GenericNode represents a bare metal server or virtual machine.
type GenericNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Unique across all Nodes user-defined name.
@@ -106,7 +104,9 @@ type GenericNode struct {
// Node availability zone.
Az string `protobuf:"bytes,8,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GenericNode) Reset() {
@@ -204,10 +204,7 @@ func (x *GenericNode) GetCustomLabels() map[string]string {
// ContainerNode represents a Docker container.
type ContainerNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Unique across all Nodes user-defined name.
@@ -227,7 +224,9 @@ type ContainerNode struct {
// Node availability zone.
Az string `protobuf:"bytes,9,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ContainerNode) Reset() {
@@ -332,10 +331,7 @@ func (x *ContainerNode) GetCustomLabels() map[string]string {
// RemoteNode represents generic remote Node. It's a node where we don't run pmm-agents. Only external exporters can run on Remote Nodes.
type RemoteNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Unique across all Nodes user-defined name.
@@ -349,7 +345,9 @@ type RemoteNode struct {
// Node availability zone.
Az string `protobuf:"bytes,6,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoteNode) Reset() {
@@ -433,10 +431,7 @@ func (x *RemoteNode) GetCustomLabels() map[string]string {
// RemoteRDSNode represents remote RDS Node. Agents can't run on Remote RDS Nodes.
type RemoteRDSNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Unique across all Nodes user-defined name.
@@ -450,7 +445,9 @@ type RemoteRDSNode struct {
// Node availability zone.
Az string `protobuf:"bytes,6,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoteRDSNode) Reset() {
@@ -534,10 +531,7 @@ func (x *RemoteRDSNode) GetCustomLabels() map[string]string {
// RemoteAzureDatabaseNode represents remote AzureDatabase Node. Agents can't run on Remote AzureDatabase Nodes.
type RemoteAzureDatabaseNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Unique across all Nodes user-defined name.
@@ -551,7 +545,9 @@ type RemoteAzureDatabaseNode struct {
// Node availability zone.
Az string `protobuf:"bytes,6,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoteAzureDatabaseNode) Reset() {
@@ -634,12 +630,11 @@ func (x *RemoteAzureDatabaseNode) GetCustomLabels() map[string]string {
}
type ListNodesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Return only Nodes with matching Node type.
- NodeType NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
+ NodeType NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListNodesRequest) Reset() {
@@ -680,15 +675,14 @@ func (x *ListNodesRequest) GetNodeType() NodeType {
}
type ListNodesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Generic []*GenericNode `protobuf:"bytes,1,rep,name=generic,proto3" json:"generic,omitempty"`
Container []*ContainerNode `protobuf:"bytes,2,rep,name=container,proto3" json:"container,omitempty"`
Remote []*RemoteNode `protobuf:"bytes,3,rep,name=remote,proto3" json:"remote,omitempty"`
RemoteRds []*RemoteRDSNode `protobuf:"bytes,4,rep,name=remote_rds,json=remoteRds,proto3" json:"remote_rds,omitempty"`
RemoteAzureDatabase []*RemoteAzureDatabaseNode `protobuf:"bytes,5,rep,name=remote_azure_database,json=remoteAzureDatabase,proto3" json:"remote_azure_database,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListNodesResponse) Reset() {
@@ -757,12 +751,11 @@ func (x *ListNodesResponse) GetRemoteAzureDatabase() []*RemoteAzureDatabaseNode
}
type GetNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
- NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetNodeRequest) Reset() {
@@ -803,18 +796,17 @@ func (x *GetNodeRequest) GetNodeId() string {
}
type GetNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Node:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Node:
//
// *GetNodeResponse_Generic
// *GetNodeResponse_Container
// *GetNodeResponse_Remote
// *GetNodeResponse_RemoteRds
// *GetNodeResponse_RemoteAzureDatabase
- Node isGetNodeResponse_Node `protobuf_oneof:"node"`
+ Node isGetNodeResponse_Node `protobuf_oneof:"node"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetNodeResponse) Reset() {
@@ -847,44 +839,54 @@ func (*GetNodeResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_nodes_proto_rawDescGZIP(), []int{8}
}
-func (m *GetNodeResponse) GetNode() isGetNodeResponse_Node {
- if m != nil {
- return m.Node
+func (x *GetNodeResponse) GetNode() isGetNodeResponse_Node {
+ if x != nil {
+ return x.Node
}
return nil
}
func (x *GetNodeResponse) GetGeneric() *GenericNode {
- if x, ok := x.GetNode().(*GetNodeResponse_Generic); ok {
- return x.Generic
+ if x != nil {
+ if x, ok := x.Node.(*GetNodeResponse_Generic); ok {
+ return x.Generic
+ }
}
return nil
}
func (x *GetNodeResponse) GetContainer() *ContainerNode {
- if x, ok := x.GetNode().(*GetNodeResponse_Container); ok {
- return x.Container
+ if x != nil {
+ if x, ok := x.Node.(*GetNodeResponse_Container); ok {
+ return x.Container
+ }
}
return nil
}
func (x *GetNodeResponse) GetRemote() *RemoteNode {
- if x, ok := x.GetNode().(*GetNodeResponse_Remote); ok {
- return x.Remote
+ if x != nil {
+ if x, ok := x.Node.(*GetNodeResponse_Remote); ok {
+ return x.Remote
+ }
}
return nil
}
func (x *GetNodeResponse) GetRemoteRds() *RemoteRDSNode {
- if x, ok := x.GetNode().(*GetNodeResponse_RemoteRds); ok {
- return x.RemoteRds
+ if x != nil {
+ if x, ok := x.Node.(*GetNodeResponse_RemoteRds); ok {
+ return x.RemoteRds
+ }
}
return nil
}
func (x *GetNodeResponse) GetRemoteAzureDatabase() *RemoteAzureDatabaseNode {
- if x, ok := x.GetNode().(*GetNodeResponse_RemoteAzureDatabase); ok {
- return x.RemoteAzureDatabase
+ if x != nil {
+ if x, ok := x.Node.(*GetNodeResponse_RemoteAzureDatabase); ok {
+ return x.RemoteAzureDatabase
+ }
}
return nil
}
@@ -924,18 +926,17 @@ func (*GetNodeResponse_RemoteRds) isGetNodeResponse_Node() {}
func (*GetNodeResponse_RemoteAzureDatabase) isGetNodeResponse_Node() {}
type AddNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Node:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Node:
//
// *AddNodeRequest_Generic
// *AddNodeRequest_Container
// *AddNodeRequest_Remote
// *AddNodeRequest_RemoteRds
// *AddNodeRequest_RemoteAzure
- Node isAddNodeRequest_Node `protobuf_oneof:"node"`
+ Node isAddNodeRequest_Node `protobuf_oneof:"node"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddNodeRequest) Reset() {
@@ -968,44 +969,54 @@ func (*AddNodeRequest) Descriptor() ([]byte, []int) {
return file_inventory_v1_nodes_proto_rawDescGZIP(), []int{9}
}
-func (m *AddNodeRequest) GetNode() isAddNodeRequest_Node {
- if m != nil {
- return m.Node
+func (x *AddNodeRequest) GetNode() isAddNodeRequest_Node {
+ if x != nil {
+ return x.Node
}
return nil
}
func (x *AddNodeRequest) GetGeneric() *AddGenericNodeParams {
- if x, ok := x.GetNode().(*AddNodeRequest_Generic); ok {
- return x.Generic
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeRequest_Generic); ok {
+ return x.Generic
+ }
}
return nil
}
func (x *AddNodeRequest) GetContainer() *AddContainerNodeParams {
- if x, ok := x.GetNode().(*AddNodeRequest_Container); ok {
- return x.Container
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeRequest_Container); ok {
+ return x.Container
+ }
}
return nil
}
func (x *AddNodeRequest) GetRemote() *AddRemoteNodeParams {
- if x, ok := x.GetNode().(*AddNodeRequest_Remote); ok {
- return x.Remote
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeRequest_Remote); ok {
+ return x.Remote
+ }
}
return nil
}
func (x *AddNodeRequest) GetRemoteRds() *AddRemoteRDSNodeParams {
- if x, ok := x.GetNode().(*AddNodeRequest_RemoteRds); ok {
- return x.RemoteRds
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeRequest_RemoteRds); ok {
+ return x.RemoteRds
+ }
}
return nil
}
func (x *AddNodeRequest) GetRemoteAzure() *AddRemoteAzureNodeParams {
- if x, ok := x.GetNode().(*AddNodeRequest_RemoteAzure); ok {
- return x.RemoteAzure
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeRequest_RemoteAzure); ok {
+ return x.RemoteAzure
+ }
}
return nil
}
@@ -1045,18 +1056,17 @@ func (*AddNodeRequest_RemoteRds) isAddNodeRequest_Node() {}
func (*AddNodeRequest_RemoteAzure) isAddNodeRequest_Node() {}
type AddNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Node:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Node:
//
// *AddNodeResponse_Generic
// *AddNodeResponse_Container
// *AddNodeResponse_Remote
// *AddNodeResponse_RemoteRds
// *AddNodeResponse_RemoteAzureDatabase
- Node isAddNodeResponse_Node `protobuf_oneof:"node"`
+ Node isAddNodeResponse_Node `protobuf_oneof:"node"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddNodeResponse) Reset() {
@@ -1089,44 +1099,54 @@ func (*AddNodeResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_nodes_proto_rawDescGZIP(), []int{10}
}
-func (m *AddNodeResponse) GetNode() isAddNodeResponse_Node {
- if m != nil {
- return m.Node
+func (x *AddNodeResponse) GetNode() isAddNodeResponse_Node {
+ if x != nil {
+ return x.Node
}
return nil
}
func (x *AddNodeResponse) GetGeneric() *GenericNode {
- if x, ok := x.GetNode().(*AddNodeResponse_Generic); ok {
- return x.Generic
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeResponse_Generic); ok {
+ return x.Generic
+ }
}
return nil
}
func (x *AddNodeResponse) GetContainer() *ContainerNode {
- if x, ok := x.GetNode().(*AddNodeResponse_Container); ok {
- return x.Container
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeResponse_Container); ok {
+ return x.Container
+ }
}
return nil
}
func (x *AddNodeResponse) GetRemote() *RemoteNode {
- if x, ok := x.GetNode().(*AddNodeResponse_Remote); ok {
- return x.Remote
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeResponse_Remote); ok {
+ return x.Remote
+ }
}
return nil
}
func (x *AddNodeResponse) GetRemoteRds() *RemoteRDSNode {
- if x, ok := x.GetNode().(*AddNodeResponse_RemoteRds); ok {
- return x.RemoteRds
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeResponse_RemoteRds); ok {
+ return x.RemoteRds
+ }
}
return nil
}
func (x *AddNodeResponse) GetRemoteAzureDatabase() *RemoteAzureDatabaseNode {
- if x, ok := x.GetNode().(*AddNodeResponse_RemoteAzureDatabase); ok {
- return x.RemoteAzureDatabase
+ if x != nil {
+ if x, ok := x.Node.(*AddNodeResponse_RemoteAzureDatabase); ok {
+ return x.RemoteAzureDatabase
+ }
}
return nil
}
@@ -1166,10 +1186,7 @@ func (*AddNodeResponse_RemoteRds) isAddNodeResponse_Node() {}
func (*AddNodeResponse_RemoteAzureDatabase) isAddNodeResponse_Node() {}
type AddGenericNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Nodes user-defined name.
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// Node address (DNS name or IP).
@@ -1185,7 +1202,9 @@ type AddGenericNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,7,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddGenericNodeParams) Reset() {
@@ -1275,10 +1294,7 @@ func (x *AddGenericNodeParams) GetCustomLabels() map[string]string {
}
type AddContainerNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Nodes user-defined name.
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// Node address (DNS name or IP).
@@ -1296,7 +1312,9 @@ type AddContainerNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,8,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddContainerNodeParams) Reset() {
@@ -1393,10 +1411,7 @@ func (x *AddContainerNodeParams) GetCustomLabels() map[string]string {
}
type AddRemoteNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Nodes user-defined name.
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// Node address (DNS name or IP).
@@ -1408,7 +1423,9 @@ type AddRemoteNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,5,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddRemoteNodeParams) Reset() {
@@ -1484,10 +1501,7 @@ func (x *AddRemoteNodeParams) GetCustomLabels() map[string]string {
}
type AddRemoteRDSNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Nodes user-defined name.
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// DB instance identifier.
@@ -1499,7 +1513,9 @@ type AddRemoteRDSNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,5,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddRemoteRDSNodeParams) Reset() {
@@ -1575,10 +1591,7 @@ func (x *AddRemoteRDSNodeParams) GetCustomLabels() map[string]string {
}
type AddRemoteAzureNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Nodes user-defined name.
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// DB instance identifier.
@@ -1590,7 +1603,9 @@ type AddRemoteAzureNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,5,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddRemoteAzureNodeParams) Reset() {
@@ -1666,14 +1681,13 @@ func (x *AddRemoteAzureNodeParams) GetCustomLabels() map[string]string {
}
type RemoveNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Remove node with all dependencies.
- Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveNodeRequest) Reset() {
@@ -1721,9 +1735,9 @@ func (x *RemoveNodeRequest) GetForce() bool {
}
type RemoveNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveNodeResponse) Reset() {
@@ -1758,7 +1772,7 @@ func (*RemoveNodeResponse) Descriptor() ([]byte, []int) {
var File_inventory_v1_nodes_proto protoreflect.FileDescriptor
-var file_inventory_v1_nodes_proto_rawDesc = []byte{
+var file_inventory_v1_nodes_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6e,
0x6f, 0x64, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x69, 0x6e, 0x76, 0x65,
0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
@@ -2156,16 +2170,16 @@ var file_inventory_v1_nodes_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d,
0x49, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_inventory_v1_nodes_proto_rawDescOnce sync.Once
- file_inventory_v1_nodes_proto_rawDescData = file_inventory_v1_nodes_proto_rawDesc
+ file_inventory_v1_nodes_proto_rawDescData []byte
)
func file_inventory_v1_nodes_proto_rawDescGZIP() []byte {
file_inventory_v1_nodes_proto_rawDescOnce.Do(func() {
- file_inventory_v1_nodes_proto_rawDescData = protoimpl.X.CompressGZIP(file_inventory_v1_nodes_proto_rawDescData)
+ file_inventory_v1_nodes_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_inventory_v1_nodes_proto_rawDesc), len(file_inventory_v1_nodes_proto_rawDesc)))
})
return file_inventory_v1_nodes_proto_rawDescData
}
@@ -2283,7 +2297,7 @@ func file_inventory_v1_nodes_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_inventory_v1_nodes_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_inventory_v1_nodes_proto_rawDesc), len(file_inventory_v1_nodes_proto_rawDesc)),
NumEnums: 1,
NumMessages: 28,
NumExtensions: 0,
@@ -2295,7 +2309,6 @@ func file_inventory_v1_nodes_proto_init() {
MessageInfos: file_inventory_v1_nodes_proto_msgTypes,
}.Build()
File_inventory_v1_nodes_proto = out.File
- file_inventory_v1_nodes_proto_rawDesc = nil
file_inventory_v1_nodes_proto_goTypes = nil
file_inventory_v1_nodes_proto_depIdxs = nil
}
diff --git a/api/inventory/v1/nodes.pb.gw.go b/api/inventory/v1/nodes.pb.gw.go
index b6ee5b6c38..baf4ebdb25 100644
--- a/api/inventory/v1/nodes.pb.gw.go
+++ b/api/inventory/v1/nodes.pb.gw.go
@@ -10,6 +10,7 @@ package inventoryv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,6 +29,7 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
@@ -36,105 +38,91 @@ var (
var filter_NodesService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_NodesService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client NodesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListNodesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListNodesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_NodesService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_NodesService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server NodesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListNodesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListNodesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_NodesService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListNodes(ctx, &protoReq)
return msg, metadata, err
}
func request_NodesService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, client NodesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
msg, err := client.GetNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_NodesService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, server NodesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
msg, err := server.GetNode(ctx, &protoReq)
return msg, metadata, err
}
func request_NodesService_AddNode_0(ctx context.Context, marshaler runtime.Marshaler, client NodesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddNodeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddNodeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_NodesService_AddNode_0(ctx context.Context, marshaler runtime.Marshaler, server NodesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddNodeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddNodeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -142,65 +130,49 @@ func local_request_NodesService_AddNode_0(ctx context.Context, marshaler runtime
var filter_NodesService_RemoveNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_NodesService_RemoveNode_0(ctx context.Context, marshaler runtime.Marshaler, client NodesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_NodesService_RemoveNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RemoveNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_NodesService_RemoveNode_0(ctx context.Context, marshaler runtime.Marshaler, server NodesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_NodesService_RemoveNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RemoveNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -211,15 +183,13 @@ func local_request_NodesService_RemoveNode_0(ctx context.Context, marshaler runt
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterNodesServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterNodesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server NodesServiceServer) error {
- mux.Handle("GET", pattern_NodesService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_NodesService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/ListNodes", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/ListNodes", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -231,19 +201,15 @@ func RegisterNodesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_NodesService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_NodesService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/GetNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/GetNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -255,19 +221,15 @@ func RegisterNodesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_NodesService_AddNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_NodesService_AddNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/AddNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/AddNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -279,19 +241,15 @@ func RegisterNodesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_AddNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_NodesService_RemoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_NodesService_RemoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/RemoveNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.NodesService/RemoveNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -303,7 +261,6 @@ func RegisterNodesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_RemoveNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -331,7 +288,6 @@ func RegisterNodesServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.S
}
}()
}()
-
return RegisterNodesServiceHandler(ctx, mux, conn)
}
@@ -347,13 +303,11 @@ func RegisterNodesServiceHandler(ctx context.Context, mux *runtime.ServeMux, con
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "NodesServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterNodesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client NodesServiceClient) error {
- mux.Handle("GET", pattern_NodesService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_NodesService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/ListNodes", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/ListNodes", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -364,17 +318,13 @@ func RegisterNodesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_NodesService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_NodesService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/GetNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/GetNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -385,17 +335,13 @@ func RegisterNodesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_NodesService_AddNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_NodesService_AddNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/AddNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/AddNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -406,17 +352,13 @@ func RegisterNodesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_AddNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_NodesService_RemoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_NodesService_RemoveNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/RemoveNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.NodesService/RemoveNode", runtime.WithHTTPPathPattern("/v1/inventory/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -427,29 +369,21 @@ func RegisterNodesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_NodesService_RemoveNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_NodesService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "nodes"}, ""))
-
- pattern_NodesService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "nodes", "node_id"}, ""))
-
- pattern_NodesService_AddNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "nodes"}, ""))
-
+ pattern_NodesService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "nodes"}, ""))
+ pattern_NodesService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "nodes", "node_id"}, ""))
+ pattern_NodesService_AddNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "nodes"}, ""))
pattern_NodesService_RemoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "nodes", "node_id"}, ""))
)
var (
- forward_NodesService_ListNodes_0 = runtime.ForwardResponseMessage
-
- forward_NodesService_GetNode_0 = runtime.ForwardResponseMessage
-
- forward_NodesService_AddNode_0 = runtime.ForwardResponseMessage
-
+ forward_NodesService_ListNodes_0 = runtime.ForwardResponseMessage
+ forward_NodesService_GetNode_0 = runtime.ForwardResponseMessage
+ forward_NodesService_AddNode_0 = runtime.ForwardResponseMessage
forward_NodesService_RemoveNode_0 = runtime.ForwardResponseMessage
)
diff --git a/api/inventory/v1/nodes.pb.validate.go b/api/inventory/v1/nodes.pb.validate.go
index fe3c397efa..672a96fb3c 100644
--- a/api/inventory/v1/nodes.pb.validate.go
+++ b/api/inventory/v1/nodes.pb.validate.go
@@ -88,7 +88,7 @@ type GenericNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GenericNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -208,7 +208,7 @@ type ContainerNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ContainerNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -321,7 +321,7 @@ type RemoteNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoteNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -435,7 +435,7 @@ type RemoteRDSNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoteRDSNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -549,7 +549,7 @@ type RemoteAzureDatabaseNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoteAzureDatabaseNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -653,7 +653,7 @@ type ListNodesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListNodesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -923,7 +923,7 @@ type ListNodesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListNodesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1036,7 +1036,7 @@ type GetNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1346,7 +1346,7 @@ type GetNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1656,7 +1656,7 @@ type AddNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1966,7 +1966,7 @@ type AddNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2100,7 +2100,7 @@ type AddGenericNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddGenericNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2238,7 +2238,7 @@ type AddContainerNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddContainerNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2370,7 +2370,7 @@ type AddRemoteNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddRemoteNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2511,7 +2511,7 @@ type AddRemoteRDSNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddRemoteRDSNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2652,7 +2652,7 @@ type AddRemoteAzureNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddRemoteAzureNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2767,7 +2767,7 @@ type RemoveNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2869,7 +2869,7 @@ type RemoveNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/inventory/v1/services.pb.go b/api/inventory/v1/services.pb.go
index ee6842b032..ccc9c7e58f 100644
--- a/api/inventory/v1/services.pb.go
+++ b/api/inventory/v1/services.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: inventory/v1/services.proto
@@ -9,6 +9,7 @@ package inventoryv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -90,10 +91,7 @@ func (ServiceType) EnumDescriptor() ([]byte, []int) {
// MySQLService represents a generic MySQL instance.
type MySQLService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -116,9 +114,11 @@ type MySQLService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,9,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// MySQL version.
- Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MySQLService) Reset() {
@@ -230,10 +230,7 @@ func (x *MySQLService) GetVersion() string {
// MongoDBService represents a generic MongoDB instance.
type MongoDBService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -256,9 +253,11 @@ type MongoDBService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,9,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// MongoDB version.
- Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MongoDBService) Reset() {
@@ -370,10 +369,7 @@ func (x *MongoDBService) GetVersion() string {
// PostgreSQLService represents a generic PostgreSQL instance.
type PostgreSQLService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -398,11 +394,13 @@ type PostgreSQLService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,10,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// PostgreSQL version.
Version string `protobuf:"bytes,12,opt,name=version,proto3" json:"version,omitempty"`
// Limit of databases for auto-discovery.
AutoDiscoveryLimit int32 `protobuf:"varint,13,opt,name=auto_discovery_limit,json=autoDiscoveryLimit,proto3" json:"auto_discovery_limit,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PostgreSQLService) Reset() {
@@ -528,10 +526,7 @@ func (x *PostgreSQLService) GetAutoDiscoveryLimit() int32 {
// ProxySQLService represents a generic ProxySQL instance.
type ProxySQLService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -554,9 +549,11 @@ type ProxySQLService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,9,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// ProxySQL version.
- Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ProxySQLService) Reset() {
@@ -668,10 +665,7 @@ func (x *ProxySQLService) GetVersion() string {
// HAProxyService represents a generic HAProxy service instance.
type HAProxyService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -685,7 +679,9 @@ type HAProxyService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,6,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HAProxyService) Reset() {
@@ -769,10 +765,7 @@ func (x *HAProxyService) GetCustomLabels() map[string]string {
// ExternalService represents a generic External service instance.
type ExternalService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Unique across all Services user-defined name.
@@ -786,9 +779,11 @@ type ExternalService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,6,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,7,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Group name of external service.
- Group string `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"`
+ Group string `protobuf:"bytes,8,opt,name=group,proto3" json:"group,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExternalService) Reset() {
@@ -878,16 +873,15 @@ func (x *ExternalService) GetGroup() string {
}
type ListServicesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Return only Services running on that Node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Return only services filtered by service type.
ServiceType ServiceType `protobuf:"varint,2,opt,name=service_type,json=serviceType,proto3,enum=inventory.v1.ServiceType" json:"service_type,omitempty"`
// Return only services in this external group.
ExternalGroup string `protobuf:"bytes,3,opt,name=external_group,json=externalGroup,proto3" json:"external_group,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListServicesRequest) Reset() {
@@ -942,16 +936,15 @@ func (x *ListServicesRequest) GetExternalGroup() string {
}
type ListServicesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Mysql []*MySQLService `protobuf:"bytes,1,rep,name=mysql,proto3" json:"mysql,omitempty"`
+ Mongodb []*MongoDBService `protobuf:"bytes,2,rep,name=mongodb,proto3" json:"mongodb,omitempty"`
+ Postgresql []*PostgreSQLService `protobuf:"bytes,3,rep,name=postgresql,proto3" json:"postgresql,omitempty"`
+ Proxysql []*ProxySQLService `protobuf:"bytes,4,rep,name=proxysql,proto3" json:"proxysql,omitempty"`
+ Haproxy []*HAProxyService `protobuf:"bytes,5,rep,name=haproxy,proto3" json:"haproxy,omitempty"`
+ External []*ExternalService `protobuf:"bytes,6,rep,name=external,proto3" json:"external,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Mysql []*MySQLService `protobuf:"bytes,1,rep,name=mysql,proto3" json:"mysql,omitempty"`
- Mongodb []*MongoDBService `protobuf:"bytes,2,rep,name=mongodb,proto3" json:"mongodb,omitempty"`
- Postgresql []*PostgreSQLService `protobuf:"bytes,3,rep,name=postgresql,proto3" json:"postgresql,omitempty"`
- Proxysql []*ProxySQLService `protobuf:"bytes,4,rep,name=proxysql,proto3" json:"proxysql,omitempty"`
- Haproxy []*HAProxyService `protobuf:"bytes,5,rep,name=haproxy,proto3" json:"haproxy,omitempty"`
- External []*ExternalService `protobuf:"bytes,6,rep,name=external,proto3" json:"external,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListServicesResponse) Reset() {
@@ -1027,9 +1020,9 @@ func (x *ListServicesResponse) GetExternal() []*ExternalService {
}
type ListActiveServiceTypesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListActiveServiceTypesRequest) Reset() {
@@ -1063,11 +1056,10 @@ func (*ListActiveServiceTypesRequest) Descriptor() ([]byte, []int) {
}
type ListActiveServiceTypesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceTypes []ServiceType `protobuf:"varint,1,rep,packed,name=service_types,json=serviceTypes,proto3,enum=inventory.v1.ServiceType" json:"service_types,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ServiceTypes []ServiceType `protobuf:"varint,1,rep,packed,name=service_types,json=serviceTypes,proto3,enum=inventory.v1.ServiceType" json:"service_types,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListActiveServiceTypesResponse) Reset() {
@@ -1108,12 +1100,11 @@ func (x *ListActiveServiceTypesResponse) GetServiceTypes() []ServiceType {
}
type GetServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier.
- ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetServiceRequest) Reset() {
@@ -1154,11 +1145,8 @@ func (x *GetServiceRequest) GetServiceId() string {
}
type GetServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *GetServiceResponse_Mysql
// *GetServiceResponse_Mongodb
@@ -1166,7 +1154,9 @@ type GetServiceResponse struct {
// *GetServiceResponse_Proxysql
// *GetServiceResponse_Haproxy
// *GetServiceResponse_External
- Service isGetServiceResponse_Service `protobuf_oneof:"service"`
+ Service isGetServiceResponse_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetServiceResponse) Reset() {
@@ -1199,51 +1189,63 @@ func (*GetServiceResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_services_proto_rawDescGZIP(), []int{11}
}
-func (m *GetServiceResponse) GetService() isGetServiceResponse_Service {
- if m != nil {
- return m.Service
+func (x *GetServiceResponse) GetService() isGetServiceResponse_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *GetServiceResponse) GetMysql() *MySQLService {
- if x, ok := x.GetService().(*GetServiceResponse_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *GetServiceResponse) GetMongodb() *MongoDBService {
- if x, ok := x.GetService().(*GetServiceResponse_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *GetServiceResponse) GetPostgresql() *PostgreSQLService {
- if x, ok := x.GetService().(*GetServiceResponse_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *GetServiceResponse) GetProxysql() *ProxySQLService {
- if x, ok := x.GetService().(*GetServiceResponse_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *GetServiceResponse) GetHaproxy() *HAProxyService {
- if x, ok := x.GetService().(*GetServiceResponse_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *GetServiceResponse) GetExternal() *ExternalService {
- if x, ok := x.GetService().(*GetServiceResponse_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*GetServiceResponse_External); ok {
+ return x.External
+ }
}
return nil
}
@@ -1289,11 +1291,8 @@ func (*GetServiceResponse_Haproxy) isGetServiceResponse_Service() {}
func (*GetServiceResponse_External) isGetServiceResponse_Service() {}
type AddServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *AddServiceRequest_Mysql
// *AddServiceRequest_Mongodb
@@ -1301,7 +1300,9 @@ type AddServiceRequest struct {
// *AddServiceRequest_Proxysql
// *AddServiceRequest_Haproxy
// *AddServiceRequest_External
- Service isAddServiceRequest_Service `protobuf_oneof:"service"`
+ Service isAddServiceRequest_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddServiceRequest) Reset() {
@@ -1334,51 +1335,63 @@ func (*AddServiceRequest) Descriptor() ([]byte, []int) {
return file_inventory_v1_services_proto_rawDescGZIP(), []int{12}
}
-func (m *AddServiceRequest) GetService() isAddServiceRequest_Service {
- if m != nil {
- return m.Service
+func (x *AddServiceRequest) GetService() isAddServiceRequest_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *AddServiceRequest) GetMysql() *AddMySQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *AddServiceRequest) GetMongodb() *AddMongoDBServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *AddServiceRequest) GetPostgresql() *AddPostgreSQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *AddServiceRequest) GetProxysql() *AddProxySQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *AddServiceRequest) GetHaproxy() *AddHAProxyServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *AddServiceRequest) GetExternal() *AddExternalServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_External); ok {
+ return x.External
+ }
}
return nil
}
@@ -1424,11 +1437,8 @@ func (*AddServiceRequest_Haproxy) isAddServiceRequest_Service() {}
func (*AddServiceRequest_External) isAddServiceRequest_Service() {}
type AddServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *AddServiceResponse_Mysql
// *AddServiceResponse_Mongodb
@@ -1436,7 +1446,9 @@ type AddServiceResponse struct {
// *AddServiceResponse_Proxysql
// *AddServiceResponse_Haproxy
// *AddServiceResponse_External
- Service isAddServiceResponse_Service `protobuf_oneof:"service"`
+ Service isAddServiceResponse_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddServiceResponse) Reset() {
@@ -1469,51 +1481,63 @@ func (*AddServiceResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_services_proto_rawDescGZIP(), []int{13}
}
-func (m *AddServiceResponse) GetService() isAddServiceResponse_Service {
- if m != nil {
- return m.Service
+func (x *AddServiceResponse) GetService() isAddServiceResponse_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *AddServiceResponse) GetMysql() *MySQLService {
- if x, ok := x.GetService().(*AddServiceResponse_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *AddServiceResponse) GetMongodb() *MongoDBService {
- if x, ok := x.GetService().(*AddServiceResponse_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *AddServiceResponse) GetPostgresql() *PostgreSQLService {
- if x, ok := x.GetService().(*AddServiceResponse_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *AddServiceResponse) GetProxysql() *ProxySQLService {
- if x, ok := x.GetService().(*AddServiceResponse_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *AddServiceResponse) GetHaproxy() *HAProxyService {
- if x, ok := x.GetService().(*AddServiceResponse_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *AddServiceResponse) GetExternal() *ExternalService {
- if x, ok := x.GetService().(*AddServiceResponse_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_External); ok {
+ return x.External
+ }
}
return nil
}
@@ -1559,10 +1583,7 @@ func (*AddServiceResponse_Haproxy) isAddServiceResponse_Service() {}
func (*AddServiceResponse_External) isAddServiceResponse_Service() {}
type AddMySQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -1583,7 +1604,9 @@ type AddMySQLServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,8,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMySQLServiceParams) Reset() {
@@ -1680,10 +1703,7 @@ func (x *AddMySQLServiceParams) GetCustomLabels() map[string]string {
}
type AddMongoDBServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -1704,7 +1724,9 @@ type AddMongoDBServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,8,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMongoDBServiceParams) Reset() {
@@ -1801,10 +1823,7 @@ func (x *AddMongoDBServiceParams) GetCustomLabels() map[string]string {
}
type AddPostgreSQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -1825,9 +1844,11 @@ type AddPostgreSQLServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,8,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Limit of databases for auto-discovery.
AutoDiscoveryLimit int32 `protobuf:"varint,10,opt,name=auto_discovery_limit,json=autoDiscoveryLimit,proto3" json:"auto_discovery_limit,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddPostgreSQLServiceParams) Reset() {
@@ -1931,10 +1952,7 @@ func (x *AddPostgreSQLServiceParams) GetAutoDiscoveryLimit() int32 {
}
type AddProxySQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -1955,7 +1973,9 @@ type AddProxySQLServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,8,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,9,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddProxySQLServiceParams) Reset() {
@@ -2052,10 +2072,7 @@ func (x *AddProxySQLServiceParams) GetCustomLabels() map[string]string {
}
type AddHAProxyServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -2067,7 +2084,9 @@ type AddHAProxyServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,5,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddHAProxyServiceParams) Reset() {
@@ -2143,10 +2162,7 @@ func (x *AddHAProxyServiceParams) GetCustomLabels() map[string]string {
}
type AddExternalServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique across all Services user-defined name. Required.
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Node identifier where this instance runs. Required.
@@ -2158,9 +2174,11 @@ type AddExternalServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,5,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,6,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Group name of external service.
- Group string `protobuf:"bytes,7,opt,name=group,proto3" json:"group,omitempty"`
+ Group string `protobuf:"bytes,7,opt,name=group,proto3" json:"group,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddExternalServiceParams) Reset() {
@@ -2243,14 +2261,13 @@ func (x *AddExternalServiceParams) GetGroup() string {
}
type RemoveServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique randomly generated instance identifier. Required.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Remove service with all dependencies.
- Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveServiceRequest) Reset() {
@@ -2298,9 +2315,9 @@ func (x *RemoveServiceRequest) GetForce() bool {
}
type RemoveServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveServiceResponse) Reset() {
@@ -2334,17 +2351,16 @@ func (*RemoveServiceResponse) Descriptor() ([]byte, []int) {
}
type ChangeServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- Environment *string `protobuf:"bytes,2,opt,name=environment,proto3,oneof" json:"environment,omitempty"`
- Cluster *string `protobuf:"bytes,3,opt,name=cluster,proto3,oneof" json:"cluster,omitempty"`
- ReplicationSet *string `protobuf:"bytes,4,opt,name=replication_set,json=replicationSet,proto3,oneof" json:"replication_set,omitempty"`
- ExternalGroup *string `protobuf:"bytes,5,opt,name=external_group,json=externalGroup,proto3,oneof" json:"external_group,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ Environment *string `protobuf:"bytes,2,opt,name=environment,proto3,oneof" json:"environment,omitempty"`
+ Cluster *string `protobuf:"bytes,3,opt,name=cluster,proto3,oneof" json:"cluster,omitempty"`
+ ReplicationSet *string `protobuf:"bytes,4,opt,name=replication_set,json=replicationSet,proto3,oneof" json:"replication_set,omitempty"`
+ ExternalGroup *string `protobuf:"bytes,5,opt,name=external_group,json=externalGroup,proto3,oneof" json:"external_group,omitempty"`
// Replace all custom user-assigned labels.
- CustomLabels *common.StringMap `protobuf:"bytes,6,opt,name=custom_labels,json=customLabels,proto3,oneof" json:"custom_labels,omitempty"`
+ CustomLabels *common.StringMap `protobuf:"bytes,6,opt,name=custom_labels,json=customLabels,proto3,oneof" json:"custom_labels,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeServiceRequest) Reset() {
@@ -2420,11 +2436,8 @@ func (x *ChangeServiceRequest) GetCustomLabels() *common.StringMap {
}
type ChangeServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *ChangeServiceResponse_Mysql
// *ChangeServiceResponse_Mongodb
@@ -2432,7 +2445,9 @@ type ChangeServiceResponse struct {
// *ChangeServiceResponse_Proxysql
// *ChangeServiceResponse_Haproxy
// *ChangeServiceResponse_External
- Service isChangeServiceResponse_Service `protobuf_oneof:"service"`
+ Service isChangeServiceResponse_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeServiceResponse) Reset() {
@@ -2465,51 +2480,63 @@ func (*ChangeServiceResponse) Descriptor() ([]byte, []int) {
return file_inventory_v1_services_proto_rawDescGZIP(), []int{23}
}
-func (m *ChangeServiceResponse) GetService() isChangeServiceResponse_Service {
- if m != nil {
- return m.Service
+func (x *ChangeServiceResponse) GetService() isChangeServiceResponse_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *ChangeServiceResponse) GetMysql() *MySQLService {
- if x, ok := x.GetService().(*ChangeServiceResponse_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *ChangeServiceResponse) GetMongodb() *MongoDBService {
- if x, ok := x.GetService().(*ChangeServiceResponse_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *ChangeServiceResponse) GetPostgresql() *PostgreSQLService {
- if x, ok := x.GetService().(*ChangeServiceResponse_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *ChangeServiceResponse) GetProxysql() *ProxySQLService {
- if x, ok := x.GetService().(*ChangeServiceResponse_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *ChangeServiceResponse) GetHaproxy() *HAProxyService {
- if x, ok := x.GetService().(*ChangeServiceResponse_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *ChangeServiceResponse) GetExternal() *ExternalService {
- if x, ok := x.GetService().(*ChangeServiceResponse_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*ChangeServiceResponse_External); ok {
+ return x.External
+ }
}
return nil
}
@@ -2556,7 +2583,7 @@ func (*ChangeServiceResponse_External) isChangeServiceResponse_Service() {}
var File_inventory_v1_services_proto protoreflect.FileDescriptor
-var file_inventory_v1_services_proto_rawDesc = []byte{
+var file_inventory_v1_services_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x69,
0x6e, 0x76, 0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x1a, 0x13, 0x63, 0x6f, 0x6d,
@@ -3178,16 +3205,16 @@ var file_inventory_v1_services_proto_rawDesc = []byte{
0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x49, 0x6e, 0x76,
0x65, 0x6e, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-}
+})
var (
file_inventory_v1_services_proto_rawDescOnce sync.Once
- file_inventory_v1_services_proto_rawDescData = file_inventory_v1_services_proto_rawDesc
+ file_inventory_v1_services_proto_rawDescData []byte
)
func file_inventory_v1_services_proto_rawDescGZIP() []byte {
file_inventory_v1_services_proto_rawDescOnce.Do(func() {
- file_inventory_v1_services_proto_rawDescData = protoimpl.X.CompressGZIP(file_inventory_v1_services_proto_rawDescData)
+ file_inventory_v1_services_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_inventory_v1_services_proto_rawDesc), len(file_inventory_v1_services_proto_rawDesc)))
})
return file_inventory_v1_services_proto_rawDescData
}
@@ -3344,7 +3371,7 @@ func file_inventory_v1_services_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_inventory_v1_services_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_inventory_v1_services_proto_rawDesc), len(file_inventory_v1_services_proto_rawDesc)),
NumEnums: 1,
NumMessages: 36,
NumExtensions: 0,
@@ -3356,7 +3383,6 @@ func file_inventory_v1_services_proto_init() {
MessageInfos: file_inventory_v1_services_proto_msgTypes,
}.Build()
File_inventory_v1_services_proto = out.File
- file_inventory_v1_services_proto_rawDesc = nil
file_inventory_v1_services_proto_goTypes = nil
file_inventory_v1_services_proto_depIdxs = nil
}
diff --git a/api/inventory/v1/services.pb.gw.go b/api/inventory/v1/services.pb.gw.go
index 77a826da39..8a9f8fd62d 100644
--- a/api/inventory/v1/services.pb.gw.go
+++ b/api/inventory/v1/services.pb.gw.go
@@ -10,6 +10,7 @@ package inventoryv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,6 +29,7 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
@@ -36,129 +38,115 @@ var (
var filter_ServicesService_ListServices_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ServicesService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListServicesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServicesService_ListServices_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListServicesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServicesService_ListServices_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListServices(ctx, &protoReq)
return msg, metadata, err
}
func request_ServicesService_ListActiveServiceTypes_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListActiveServiceTypesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ListActiveServiceTypesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListActiveServiceTypes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_ListActiveServiceTypes_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListActiveServiceTypesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ListActiveServiceTypesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListActiveServiceTypes(ctx, &protoReq)
return msg, metadata, err
}
func request_ServicesService_GetService_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
msg, err := client.GetService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_GetService_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
msg, err := server.GetService(ctx, &protoReq)
return msg, metadata, err
}
func request_ServicesService_AddService_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddServiceRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_AddService_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddServiceRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddService(ctx, &protoReq)
return msg, metadata, err
}
@@ -166,123 +154,91 @@ func local_request_ServicesService_AddService_0(ctx context.Context, marshaler r
var filter_ServicesService_RemoveService_0 = &utilities.DoubleArray{Encoding: map[string]int{"service_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_ServicesService_RemoveService_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServicesService_RemoveService_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RemoveService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_RemoveService_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServicesService_RemoveService_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RemoveService(ctx, &protoReq)
return msg, metadata, err
}
func request_ServicesService_ChangeService_0(ctx context.Context, marshaler runtime.Marshaler, client ServicesServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
msg, err := client.ChangeService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServicesService_ChangeService_0(ctx context.Context, marshaler runtime.Marshaler, server ServicesServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq ChangeServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
msg, err := server.ChangeService(ctx, &protoReq)
return msg, metadata, err
}
@@ -293,15 +249,13 @@ func local_request_ServicesService_ChangeService_0(ctx context.Context, marshale
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterServicesServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ServicesServiceServer) error {
- mux.Handle("GET", pattern_ServicesService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServicesService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ListServices", runtime.WithHTTPPathPattern("/v1/inventory/services"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ListServices", runtime.WithHTTPPathPattern("/v1/inventory/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -313,19 +267,15 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServicesService_ListActiveServiceTypes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServicesService_ListActiveServiceTypes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ListActiveServiceTypes", runtime.WithHTTPPathPattern("/v1/inventory/services:getTypes"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ListActiveServiceTypes", runtime.WithHTTPPathPattern("/v1/inventory/services:getTypes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -337,19 +287,15 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ListActiveServiceTypes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServicesService_GetService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServicesService_GetService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/GetService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/GetService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -361,19 +307,15 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_GetService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServicesService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServicesService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/AddService", runtime.WithHTTPPathPattern("/v1/inventory/services"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/AddService", runtime.WithHTTPPathPattern("/v1/inventory/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -385,19 +327,15 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_AddService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ServicesService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ServicesService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/RemoveService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/RemoveService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -409,19 +347,15 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_RemoveService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_ServicesService_ChangeService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_ServicesService_ChangeService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ChangeService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/inventory.v1.ServicesService/ChangeService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -433,7 +367,6 @@ func RegisterServicesServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ChangeService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -461,7 +394,6 @@ func RegisterServicesServiceHandlerFromEndpoint(ctx context.Context, mux *runtim
}
}()
}()
-
return RegisterServicesServiceHandler(ctx, mux, conn)
}
@@ -477,13 +409,11 @@ func RegisterServicesServiceHandler(ctx context.Context, mux *runtime.ServeMux,
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ServicesServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ServicesServiceClient) error {
- mux.Handle("GET", pattern_ServicesService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServicesService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ListServices", runtime.WithHTTPPathPattern("/v1/inventory/services"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ListServices", runtime.WithHTTPPathPattern("/v1/inventory/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -494,17 +424,13 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServicesService_ListActiveServiceTypes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServicesService_ListActiveServiceTypes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ListActiveServiceTypes", runtime.WithHTTPPathPattern("/v1/inventory/services:getTypes"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ListActiveServiceTypes", runtime.WithHTTPPathPattern("/v1/inventory/services:getTypes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -515,17 +441,13 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ListActiveServiceTypes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServicesService_GetService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServicesService_GetService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/GetService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/GetService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -536,17 +458,13 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_GetService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServicesService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServicesService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/AddService", runtime.WithHTTPPathPattern("/v1/inventory/services"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/AddService", runtime.WithHTTPPathPattern("/v1/inventory/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -557,17 +475,13 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_AddService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ServicesService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ServicesService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/RemoveService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/RemoveService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -578,17 +492,13 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_RemoveService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_ServicesService_ChangeService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_ServicesService_ChangeService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ChangeService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/inventory.v1.ServicesService/ChangeService", runtime.WithHTTPPathPattern("/v1/inventory/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -599,37 +509,25 @@ func RegisterServicesServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServicesService_ChangeService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_ServicesService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "services"}, ""))
-
+ pattern_ServicesService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "services"}, ""))
pattern_ServicesService_ListActiveServiceTypes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "services"}, "getTypes"))
-
- pattern_ServicesService_GetService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
-
- pattern_ServicesService_AddService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "services"}, ""))
-
- pattern_ServicesService_RemoveService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
-
- pattern_ServicesService_ChangeService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
+ pattern_ServicesService_GetService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
+ pattern_ServicesService_AddService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "inventory", "services"}, ""))
+ pattern_ServicesService_RemoveService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
+ pattern_ServicesService_ChangeService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "inventory", "services", "service_id"}, ""))
)
var (
- forward_ServicesService_ListServices_0 = runtime.ForwardResponseMessage
-
+ forward_ServicesService_ListServices_0 = runtime.ForwardResponseMessage
forward_ServicesService_ListActiveServiceTypes_0 = runtime.ForwardResponseMessage
-
- forward_ServicesService_GetService_0 = runtime.ForwardResponseMessage
-
- forward_ServicesService_AddService_0 = runtime.ForwardResponseMessage
-
- forward_ServicesService_RemoveService_0 = runtime.ForwardResponseMessage
-
- forward_ServicesService_ChangeService_0 = runtime.ForwardResponseMessage
+ forward_ServicesService_GetService_0 = runtime.ForwardResponseMessage
+ forward_ServicesService_AddService_0 = runtime.ForwardResponseMessage
+ forward_ServicesService_RemoveService_0 = runtime.ForwardResponseMessage
+ forward_ServicesService_ChangeService_0 = runtime.ForwardResponseMessage
)
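
The regenerated gateway handlers above make two mechanical changes per route: the bare HTTP verb strings ("GET", "POST", ...) become the net/http method constants, and the pre-declared err/annotatedContext pair collapses into a single short declaration. A minimal, runnable illustration (not part of the patch) that the constants are the same strings, so routing behaviour is unchanged:

	package main

	import (
		"fmt"
		"net/http"
	)

	func main() {
		// http.MethodGet etc. are plain string constants, so swapping the
		// literals for the constants changes no runtime behaviour.
		fmt.Println(http.MethodGet == "GET")       // true
		fmt.Println(http.MethodPost == "POST")     // true
		fmt.Println(http.MethodPut == "PUT")       // true
		fmt.Println(http.MethodDelete == "DELETE") // true
	}
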
diff --git a/api/inventory/v1/services.pb.validate.go b/api/inventory/v1/services.pb.validate.go
index 1bca110f71..21d69ee370 100644
--- a/api/inventory/v1/services.pb.validate.go
+++ b/api/inventory/v1/services.pb.validate.go
@@ -92,7 +92,7 @@ type MySQLServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MySQLServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -214,7 +214,7 @@ type MongoDBServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MongoDBServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -340,7 +340,7 @@ type PostgreSQLServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PostgreSQLServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -464,7 +464,7 @@ type ProxySQLServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ProxySQLServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -578,7 +578,7 @@ type HAProxyServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HAProxyServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -694,7 +694,7 @@ type ExternalServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ExternalServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -800,7 +800,7 @@ type ListServicesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListServicesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1106,7 +1106,7 @@ type ListServicesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListServicesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1208,7 +1208,7 @@ type ListActiveServiceTypesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListActiveServiceTypesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1311,7 +1311,7 @@ type ListActiveServiceTypesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListActiveServiceTypesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1425,7 +1425,7 @@ type GetServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1778,7 +1778,7 @@ type GetServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2131,7 +2131,7 @@ type AddServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2484,7 +2484,7 @@ type AddServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2622,7 +2622,7 @@ type AddMySQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMySQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2760,7 +2760,7 @@ type AddMongoDBServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMongoDBServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2900,7 +2900,7 @@ type AddPostgreSQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddPostgreSQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3038,7 +3038,7 @@ type AddProxySQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddProxySQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3170,7 +3170,7 @@ type AddHAProxyServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddHAProxyServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3304,7 +3304,7 @@ type AddExternalServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddExternalServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3419,7 +3419,7 @@ type RemoveServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3521,7 +3521,7 @@ type RemoveServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3681,7 +3681,7 @@ type ChangeServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -4034,7 +4034,7 @@ type ChangeServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
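
Every *MultiError.Error() in the validate files now preallocates its message slice with capacity len(m) instead of starting from a nil slice, so the appends in the loop never reallocate. A small self-contained sketch of the same shape (the multiError type and the "; " separator here are illustrative, not copied from the generated file):

	package main

	import (
		"errors"
		"fmt"
		"strings"
	)

	// multiError mimics the shape of the generated *MultiError types: a slice
	// of errors whose Error() concatenates the wrapped messages.
	type multiError []error

	func (m multiError) Error() string {
		// Capacity is known up front, so the appends below never reallocate;
		// this is the change applied throughout services.pb.validate.go.
		msgs := make([]string, 0, len(m))
		for _, err := range m {
			msgs = append(msgs, err.Error())
		}
		return strings.Join(msgs, "; ")
	}

	func main() {
		m := multiError{errors.New("first"), errors.New("second")}
		fmt.Println(m.Error()) // first; second
	}
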
diff --git a/api/management/v1/agent.pb.go b/api/management/v1/agent.pb.go
index 97850fb8d0..8041b5d8d8 100644
--- a/api/management/v1/agent.pb.go
+++ b/api/management/v1/agent.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/agent.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -84,10 +85,7 @@ func (UpdateSeverity) EnumDescriptor() ([]byte, []int) {
}
type UniversalAgent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique agent identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// True if the agent password is set.
@@ -103,7 +101,7 @@ type UniversalAgent struct {
// Creation timestamp.
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
// Custom user-assigned labels.
- CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,8,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Desired Agent status: enabled (false) or disabled (true).
Disabled bool `protobuf:"varint,9,opt,name=disabled,proto3" json:"disabled,omitempty"`
// List of disabled collector names.
@@ -170,6 +168,8 @@ type UniversalAgent struct {
IsConnected bool `protobuf:"varint,39,opt,name=is_connected,json=isConnected,proto3" json:"is_connected,omitempty"`
// True if an exporter agent is exposed on all host addresses.
ExposeExporter bool `protobuf:"varint,40,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalAgent) Reset() {
@@ -483,14 +483,13 @@ func (x *UniversalAgent) GetExposeExporter() bool {
}
type ListAgentsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Return only Agents that relate to a specific ServiceID.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Return only Agents that relate to a specific NodeID.
- NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentsRequest) Reset() {
@@ -538,12 +537,11 @@ func (x *ListAgentsRequest) GetNodeId() string {
}
type ListAgentsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// List of Agents.
- Agents []*UniversalAgent `protobuf:"bytes,1,rep,name=agents,proto3" json:"agents,omitempty"`
+ Agents []*UniversalAgent `protobuf:"bytes,1,rep,name=agents,proto3" json:"agents,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentsResponse) Reset() {
@@ -584,10 +582,7 @@ func (x *ListAgentsResponse) GetAgents() []*UniversalAgent {
}
type AgentVersions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Agent ID.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Agent version.
@@ -595,7 +590,9 @@ type AgentVersions struct {
// Node name where the agent runs.
NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// Update severity.
- Severity UpdateSeverity `protobuf:"varint,4,opt,name=severity,proto3,enum=management.v1.UpdateSeverity" json:"severity,omitempty"`
+ Severity UpdateSeverity `protobuf:"varint,4,opt,name=severity,proto3,enum=management.v1.UpdateSeverity" json:"severity,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AgentVersions) Reset() {
@@ -657,9 +654,9 @@ func (x *AgentVersions) GetSeverity() UpdateSeverity {
}
type ListAgentVersionsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentVersionsRequest) Reset() {
@@ -693,12 +690,11 @@ func (*ListAgentVersionsRequest) Descriptor() ([]byte, []int) {
}
type ListAgentVersionsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// List of Agent versions.
AgentVersions []*AgentVersions `protobuf:"bytes,1,rep,name=agent_versions,json=agentVersions,proto3" json:"agent_versions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListAgentVersionsResponse) Reset() {
@@ -739,12 +735,11 @@ func (x *ListAgentVersionsResponse) GetAgentVersions() []*AgentVersions {
}
type UniversalAgent_MySQLOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// True if TLS key is set.
- IsTlsKeySet bool `protobuf:"varint,1,opt,name=is_tls_key_set,json=isTlsKeySet,proto3" json:"is_tls_key_set,omitempty"`
+ IsTlsKeySet bool `protobuf:"varint,1,opt,name=is_tls_key_set,json=isTlsKeySet,proto3" json:"is_tls_key_set,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalAgent_MySQLOptions) Reset() {
@@ -785,10 +780,7 @@ func (x *UniversalAgent_MySQLOptions) GetIsTlsKeySet() bool {
}
type UniversalAgent_AzureOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Azure client ID.
ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
// True if Azure client secret is set.
@@ -798,7 +790,9 @@ type UniversalAgent_AzureOptions struct {
// Azure subscription ID.
SubscriptionId string `protobuf:"bytes,4,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"`
// Azure tenant ID.
- TenantId string `protobuf:"bytes,5,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"`
+ TenantId string `protobuf:"bytes,5,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalAgent_AzureOptions) Reset() {
@@ -867,10 +861,7 @@ func (x *UniversalAgent_AzureOptions) GetTenantId() string {
}
type UniversalAgent_MongoDBOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// True if TLS certificate is set.
IsTlsCertificateKeySet bool `protobuf:"varint,1,opt,name=is_tls_certificate_key_set,json=isTlsCertificateKeySet,proto3" json:"is_tls_certificate_key_set,omitempty"`
// True if TLS certificate file password is set.
@@ -885,6 +876,8 @@ type UniversalAgent_MongoDBOptions struct {
CollectionsLimit int32 `protobuf:"varint,6,opt,name=collections_limit,json=collectionsLimit,proto3" json:"collections_limit,omitempty"`
// True if all collectors are enabled.
EnableAllCollectors bool `protobuf:"varint,7,opt,name=enable_all_collectors,json=enableAllCollectors,proto3" json:"enable_all_collectors,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalAgent_MongoDBOptions) Reset() {
@@ -967,16 +960,15 @@ func (x *UniversalAgent_MongoDBOptions) GetEnableAllCollectors() bool {
}
type UniversalAgent_PostgreSQLOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// True if TLS key is set.
IsSslKeySet bool `protobuf:"varint,1,opt,name=is_ssl_key_set,json=isSslKeySet,proto3" json:"is_ssl_key_set,omitempty"`
// Limit of databases for auto-discovery.
AutoDiscoveryLimit int32 `protobuf:"varint,2,opt,name=auto_discovery_limit,json=autoDiscoveryLimit,proto3" json:"auto_discovery_limit,omitempty"`
// Maximum number of connections from exporter to PostgreSQL instance.
MaxExporterConnections int32 `protobuf:"varint,3,opt,name=max_exporter_connections,json=maxExporterConnections,proto3" json:"max_exporter_connections,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalAgent_PostgreSQLOptions) Reset() {
@@ -1032,7 +1024,7 @@ func (x *UniversalAgent_PostgreSQLOptions) GetMaxExporterConnections() int32 {
var File_management_v1_agent_proto protoreflect.FileDescriptor
-var file_management_v1_agent_proto_rawDesc = []byte{
+var file_management_v1_agent_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
@@ -1266,16 +1258,16 @@ var file_management_v1_agent_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e,
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_agent_proto_rawDescOnce sync.Once
- file_management_v1_agent_proto_rawDescData = file_management_v1_agent_proto_rawDesc
+ file_management_v1_agent_proto_rawDescData []byte
)
func file_management_v1_agent_proto_rawDescGZIP() []byte {
file_management_v1_agent_proto_rawDescOnce.Do(func() {
- file_management_v1_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_agent_proto_rawDescData)
+ file_management_v1_agent_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_agent_proto_rawDesc), len(file_management_v1_agent_proto_rawDesc)))
})
return file_management_v1_agent_proto_rawDescData
}
@@ -1329,7 +1321,7 @@ func file_management_v1_agent_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_agent_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_agent_proto_rawDesc), len(file_management_v1_agent_proto_rawDesc)),
NumEnums: 1,
NumMessages: 11,
NumExtensions: 0,
@@ -1341,7 +1333,6 @@ func file_management_v1_agent_proto_init() {
MessageInfos: file_management_v1_agent_proto_msgTypes,
}.Build()
File_management_v1_agent_proto = out.File
- file_management_v1_agent_proto_rawDesc = nil
file_management_v1_agent_proto_goTypes = nil
file_management_v1_agent_proto_depIdxs = nil
}
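
In the regenerated agent.pb.go (and the other *.pb.go files that follow), protoc-gen-go v1.36.4 stores the raw file descriptor in a string rather than a []byte, tags the message state field with `protogen:"open.v1"`, moves the internal unknownFields/sizeCache fields to the end of each struct, and builds zero-copy []byte views with unsafe.Slice(unsafe.StringData(...), len(...)) where the runtime still needs bytes. A minimal, runnable illustration of that conversion (the names here are made up; the aliased slice must never be written to):

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		// desc stands in for the raw descriptor string; the generated code
		// derives a zero-copy []byte view of it the same way (Go 1.20+).
		// The view aliases the string's storage and must never be modified.
		desc := "example descriptor bytes"
		view := unsafe.Slice(unsafe.StringData(desc), len(desc))
		fmt.Println(len(view), string(view) == desc) // 24 true
	}
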
diff --git a/api/management/v1/agent.pb.validate.go b/api/management/v1/agent.pb.validate.go
index 214119090c..d8b670df43 100644
--- a/api/management/v1/agent.pb.validate.go
+++ b/api/management/v1/agent.pb.validate.go
@@ -315,7 +315,7 @@ type UniversalAgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalAgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -419,7 +419,7 @@ type ListAgentsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -555,7 +555,7 @@ type ListAgentsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -665,7 +665,7 @@ type AgentVersionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AgentVersionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -765,7 +765,7 @@ type ListAgentVersionsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentVersionsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -901,7 +901,7 @@ type ListAgentVersionsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListAgentVersionsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1005,7 +1005,7 @@ type UniversalAgent_MySQLOptionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalAgent_MySQLOptionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1118,7 +1118,7 @@ type UniversalAgent_AzureOptionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalAgent_AzureOptionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1233,7 +1233,7 @@ type UniversalAgent_MongoDBOptionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalAgent_MongoDBOptionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1344,7 +1344,7 @@ type UniversalAgent_PostgreSQLOptionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalAgent_PostgreSQLOptionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/annotation.pb.go b/api/management/v1/annotation.pb.go
index 2eecdfbebc..f86b8bee5a 100644
--- a/api/management/v1/annotation.pb.go
+++ b/api/management/v1/annotation.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/annotation.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -24,10 +25,7 @@ const (
// AddAnnotationRequest is a params to add new annotation.
type AddAnnotationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// An annotation description. Required.
Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
// Tags are used to filter annotations.
@@ -35,7 +33,9 @@ type AddAnnotationRequest struct {
// Used for annotating a node.
NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
// Used for annotating services.
- ServiceNames []string `protobuf:"bytes,4,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
+ ServiceNames []string `protobuf:"bytes,4,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAnnotationRequest) Reset() {
@@ -97,9 +97,9 @@ func (x *AddAnnotationRequest) GetServiceNames() []string {
}
type AddAnnotationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAnnotationResponse) Reset() {
@@ -134,7 +134,7 @@ func (*AddAnnotationResponse) Descriptor() ([]byte, []int) {
var File_management_v1_annotation_proto protoreflect.FileDescriptor
-var file_management_v1_annotation_proto_rawDesc = []byte{
+var file_management_v1_annotation_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a,
@@ -162,16 +162,16 @@ var file_management_v1_annotation_proto_rawDesc = []byte{
0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea,
0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_annotation_proto_rawDescOnce sync.Once
- file_management_v1_annotation_proto_rawDescData = file_management_v1_annotation_proto_rawDesc
+ file_management_v1_annotation_proto_rawDescData []byte
)
func file_management_v1_annotation_proto_rawDescGZIP() []byte {
file_management_v1_annotation_proto_rawDescOnce.Do(func() {
- file_management_v1_annotation_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_annotation_proto_rawDescData)
+ file_management_v1_annotation_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_annotation_proto_rawDesc), len(file_management_v1_annotation_proto_rawDesc)))
})
return file_management_v1_annotation_proto_rawDescData
}
@@ -201,7 +201,7 @@ func file_management_v1_annotation_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_annotation_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_annotation_proto_rawDesc), len(file_management_v1_annotation_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
@@ -212,7 +212,6 @@ func file_management_v1_annotation_proto_init() {
MessageInfos: file_management_v1_annotation_proto_msgTypes,
}.Build()
File_management_v1_annotation_proto = out.File
- file_management_v1_annotation_proto_rawDesc = nil
file_management_v1_annotation_proto_goTypes = nil
file_management_v1_annotation_proto_depIdxs = nil
}
diff --git a/api/management/v1/annotation.pb.validate.go b/api/management/v1/annotation.pb.validate.go
index 8a12dd7f65..2a157cc372 100644
--- a/api/management/v1/annotation.pb.validate.go
+++ b/api/management/v1/annotation.pb.validate.go
@@ -84,7 +84,7 @@ type AddAnnotationRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAnnotationRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -186,7 +186,7 @@ type AddAnnotationResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAnnotationResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/azure.pb.go b/api/management/v1/azure.pb.go
index 5df29f0eed..6b3af46c49 100644
--- a/api/management/v1/azure.pb.go
+++ b/api/management/v1/azure.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/azure.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -76,10 +77,7 @@ func (DiscoverAzureDatabaseType) EnumDescriptor() ([]byte, []int) {
// DiscoverAzureDatabaseRequest discover azure databases request.
type DiscoverAzureDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Azure client ID.
AzureClientId string `protobuf:"bytes,1,opt,name=azure_client_id,json=azureClientId,proto3" json:"azure_client_id,omitempty"`
// Azure client secret.
@@ -88,6 +86,8 @@ type DiscoverAzureDatabaseRequest struct {
AzureTenantId string `protobuf:"bytes,3,opt,name=azure_tenant_id,json=azureTenantId,proto3" json:"azure_tenant_id,omitempty"`
// Azure subscription ID.
AzureSubscriptionId string `protobuf:"bytes,4,opt,name=azure_subscription_id,json=azureSubscriptionId,proto3" json:"azure_subscription_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverAzureDatabaseRequest) Reset() {
@@ -150,10 +150,7 @@ func (x *DiscoverAzureDatabaseRequest) GetAzureSubscriptionId() string {
 // DiscoverAzureDatabaseInstance models a unique Azure Database instance for the list of instances returned by Discovery.
type DiscoverAzureDatabaseInstance struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Azure database instance ID.
InstanceId string `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
// Azure database location.
@@ -174,7 +171,9 @@ type DiscoverAzureDatabaseInstance struct {
Az string `protobuf:"bytes,9,opt,name=az,proto3" json:"az,omitempty"`
// Represents a purchasable Stock Keeping Unit (SKU) under a product.
// https://docs.microsoft.com/en-us/partner-center/develop/product-resources#sku.
- NodeModel string `protobuf:"bytes,10,opt,name=node_model,json=nodeModel,proto3" json:"node_model,omitempty"`
+ NodeModel string `protobuf:"bytes,10,opt,name=node_model,json=nodeModel,proto3" json:"node_model,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverAzureDatabaseInstance) Reset() {
@@ -279,11 +278,10 @@ func (x *DiscoverAzureDatabaseInstance) GetNodeModel() string {
// DiscoverAzureDatabaseResponse discover azure databases response.
type DiscoverAzureDatabaseResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
AzureDatabaseInstance []*DiscoverAzureDatabaseInstance `protobuf:"bytes,1,rep,name=azure_database_instance,json=azureDatabaseInstance,proto3" json:"azure_database_instance,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverAzureDatabaseResponse) Reset() {
@@ -324,10 +322,7 @@ func (x *DiscoverAzureDatabaseResponse) GetAzureDatabaseInstance() []*DiscoverAz
}
type AddAzureDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Azure database location.
Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"`
// Azure database availability zone.
@@ -366,7 +361,7 @@ type AddAzureDatabaseRequest struct {
// If true, adds qan-mysql-perfschema-agent or qan-postgresql-pgstatements-agent.
Qan bool `protobuf:"varint,18,opt,name=qan,proto3" json:"qan,omitempty"`
// Custom user-assigned labels for Node and Service.
- CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,20,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Use TLS for database connections.
@@ -380,7 +375,9 @@ type AddAzureDatabaseRequest struct {
// Use negative value to disable them.
TablestatsGroupTableLimit int32 `protobuf:"varint,24,opt,name=tablestats_group_table_limit,json=tablestatsGroupTableLimit,proto3" json:"tablestats_group_table_limit,omitempty"`
// Azure database resource type (mysql, maria, postgres)
- Type DiscoverAzureDatabaseType `protobuf:"varint,25,opt,name=type,proto3,enum=management.v1.DiscoverAzureDatabaseType" json:"type,omitempty"`
+ Type DiscoverAzureDatabaseType `protobuf:"varint,25,opt,name=type,proto3,enum=management.v1.DiscoverAzureDatabaseType" json:"type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAzureDatabaseRequest) Reset() {
@@ -589,9 +586,9 @@ func (x *AddAzureDatabaseRequest) GetType() DiscoverAzureDatabaseType {
}
type AddAzureDatabaseResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddAzureDatabaseResponse) Reset() {
@@ -626,7 +623,7 @@ func (*AddAzureDatabaseResponse) Descriptor() ([]byte, []int) {
var File_management_v1_azure_proto protoreflect.FileDescriptor
-var file_management_v1_azure_proto_rawDesc = []byte{
+var file_management_v1_azure_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x61, 0x7a, 0x75, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69,
@@ -776,16 +773,16 @@ var file_management_v1_azure_proto_rawDesc = []byte{
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_azure_proto_rawDescOnce sync.Once
- file_management_v1_azure_proto_rawDescData = file_management_v1_azure_proto_rawDesc
+ file_management_v1_azure_proto_rawDescData []byte
)
func file_management_v1_azure_proto_rawDescGZIP() []byte {
file_management_v1_azure_proto_rawDescOnce.Do(func() {
- file_management_v1_azure_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_azure_proto_rawDescData)
+ file_management_v1_azure_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_azure_proto_rawDesc), len(file_management_v1_azure_proto_rawDesc)))
})
return file_management_v1_azure_proto_rawDescData
}
@@ -825,7 +822,7 @@ func file_management_v1_azure_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_azure_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_azure_proto_rawDesc), len(file_management_v1_azure_proto_rawDesc)),
NumEnums: 1,
NumMessages: 6,
NumExtensions: 0,
@@ -837,7 +834,6 @@ func file_management_v1_azure_proto_init() {
MessageInfos: file_management_v1_azure_proto_msgTypes,
}.Build()
File_management_v1_azure_proto = out.File
- file_management_v1_azure_proto_rawDesc = nil
file_management_v1_azure_proto_goTypes = nil
file_management_v1_azure_proto_depIdxs = nil
}
diff --git a/api/management/v1/azure.pb.validate.go b/api/management/v1/azure.pb.validate.go
index 4e01f41eb7..6d5787b268 100644
--- a/api/management/v1/azure.pb.validate.go
+++ b/api/management/v1/azure.pb.validate.go
@@ -115,7 +115,7 @@ type DiscoverAzureDatabaseRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverAzureDatabaseRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -238,7 +238,7 @@ type DiscoverAzureDatabaseInstanceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverAzureDatabaseInstanceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -375,7 +375,7 @@ type DiscoverAzureDatabaseResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverAzureDatabaseResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -618,7 +618,7 @@ type AddAzureDatabaseRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAzureDatabaseRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -720,7 +720,7 @@ type AddAzureDatabaseResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddAzureDatabaseResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/external.pb.go b/api/management/v1/external.pb.go
index 21bf55c8ac..5d6e3895f5 100644
--- a/api/management/v1/external.pb.go
+++ b/api/management/v1/external.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/external.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddExternalServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
 	// Node identifier on which an external exporter is running.
// runs_on_node_id should always be passed with node_id.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
@@ -65,7 +63,7 @@ type AddExternalServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,14,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,15,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,15,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Group name of external service.
Group string `protobuf:"bytes,16,opt,name=group,proto3" json:"group,omitempty"`
// Defines metrics flow model for this exporter.
@@ -76,6 +74,8 @@ type AddExternalServiceParams struct {
MetricsMode MetricsMode `protobuf:"varint,17,opt,name=metrics_mode,json=metricsMode,proto3,enum=management.v1.MetricsMode" json:"metrics_mode,omitempty"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,18,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddExternalServiceParams) Reset() {
@@ -235,12 +235,11 @@ func (x *AddExternalServiceParams) GetSkipConnectionCheck() bool {
}
type ExternalServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Service *v1.ExternalService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- ExternalExporter *v1.ExternalExporter `protobuf:"bytes,2,opt,name=external_exporter,json=externalExporter,proto3" json:"external_exporter,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Service *v1.ExternalService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ ExternalExporter *v1.ExternalExporter `protobuf:"bytes,2,opt,name=external_exporter,json=externalExporter,proto3" json:"external_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExternalServiceResult) Reset() {
@@ -289,7 +288,7 @@ func (x *ExternalServiceResult) GetExternalExporter() *v1.ExternalExporter {
var File_management_v1_external_proto protoreflect.FileDescriptor
-var file_management_v1_external_proto_rawDesc = []byte{
+var file_management_v1_external_proto_rawDesc = string([]byte{
0x0a, 0x1c, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69,
@@ -374,16 +373,16 @@ var file_management_v1_external_proto_rawDesc = []byte{
0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_external_proto_rawDescOnce sync.Once
- file_management_v1_external_proto_rawDescData = file_management_v1_external_proto_rawDesc
+ file_management_v1_external_proto_rawDescData []byte
)
func file_management_v1_external_proto_rawDescGZIP() []byte {
file_management_v1_external_proto_rawDescOnce.Do(func() {
- file_management_v1_external_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_external_proto_rawDescData)
+ file_management_v1_external_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_external_proto_rawDesc), len(file_management_v1_external_proto_rawDesc)))
})
return file_management_v1_external_proto_rawDescData
}
@@ -425,7 +424,7 @@ func file_management_v1_external_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_external_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_external_proto_rawDesc), len(file_management_v1_external_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -436,7 +435,6 @@ func file_management_v1_external_proto_init() {
MessageInfos: file_management_v1_external_proto_msgTypes,
}.Build()
File_management_v1_external_proto = out.File
- file_management_v1_external_proto_rawDesc = nil
file_management_v1_external_proto_goTypes = nil
file_management_v1_external_proto_depIdxs = nil
}
diff --git a/api/management/v1/external.pb.validate.go b/api/management/v1/external.pb.validate.go
index 78f9c05d9a..15c3c4f542 100644
--- a/api/management/v1/external.pb.validate.go
+++ b/api/management/v1/external.pb.validate.go
@@ -152,7 +152,7 @@ type AddExternalServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddExternalServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -312,7 +312,7 @@ type ExternalServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ExternalServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/haproxy.pb.go b/api/management/v1/haproxy.pb.go
index e271046bc7..0202989ef3 100644
--- a/api/management/v1/haproxy.pb.go
+++ b/api/management/v1/haproxy.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/haproxy.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddHAProxyServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier on which an external exporter is been running.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
@@ -61,7 +59,7 @@ type AddHAProxyServiceParams struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,13,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Defines metrics flow model for this exporter.
// Metrics could be pushed to the server with vmagent,
// pulled by the server, or the server could choose behavior automatically.
@@ -70,6 +68,8 @@ type AddHAProxyServiceParams struct {
MetricsMode MetricsMode `protobuf:"varint,15,opt,name=metrics_mode,json=metricsMode,proto3,enum=management.v1.MetricsMode" json:"metrics_mode,omitempty"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,16,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddHAProxyServiceParams) Reset() {
@@ -215,12 +215,11 @@ func (x *AddHAProxyServiceParams) GetSkipConnectionCheck() bool {
}
type HAProxyServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Service *v1.HAProxyService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- ExternalExporter *v1.ExternalExporter `protobuf:"bytes,2,opt,name=external_exporter,json=externalExporter,proto3" json:"external_exporter,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Service *v1.HAProxyService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ ExternalExporter *v1.ExternalExporter `protobuf:"bytes,2,opt,name=external_exporter,json=externalExporter,proto3" json:"external_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HAProxyServiceResult) Reset() {
@@ -269,7 +268,7 @@ func (x *HAProxyServiceResult) GetExternalExporter() *v1.ExternalExporter {
var File_management_v1_haproxy_proto protoreflect.FileDescriptor
-var file_management_v1_haproxy_proto_rawDesc = []byte{
+var file_management_v1_haproxy_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x68, 0x61, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69, 0x6e,
@@ -350,16 +349,16 @@ var file_management_v1_haproxy_proto_rawDesc = []byte{
0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d, 0x61,
0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_haproxy_proto_rawDescOnce sync.Once
- file_management_v1_haproxy_proto_rawDescData = file_management_v1_haproxy_proto_rawDesc
+ file_management_v1_haproxy_proto_rawDescData []byte
)
func file_management_v1_haproxy_proto_rawDescGZIP() []byte {
file_management_v1_haproxy_proto_rawDescOnce.Do(func() {
- file_management_v1_haproxy_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_haproxy_proto_rawDescData)
+ file_management_v1_haproxy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_haproxy_proto_rawDesc), len(file_management_v1_haproxy_proto_rawDesc)))
})
return file_management_v1_haproxy_proto_rawDescData
}
@@ -401,7 +400,7 @@ func file_management_v1_haproxy_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_haproxy_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_haproxy_proto_rawDesc), len(file_management_v1_haproxy_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -412,7 +411,6 @@ func file_management_v1_haproxy_proto_init() {
MessageInfos: file_management_v1_haproxy_proto_msgTypes,
}.Build()
File_management_v1_haproxy_proto = out.File
- file_management_v1_haproxy_proto_rawDesc = nil
file_management_v1_haproxy_proto_goTypes = nil
file_management_v1_haproxy_proto_depIdxs = nil
}
diff --git a/api/management/v1/haproxy.pb.validate.go b/api/management/v1/haproxy.pb.validate.go
index fcad64aa63..b8e5637545 100644
--- a/api/management/v1/haproxy.pb.validate.go
+++ b/api/management/v1/haproxy.pb.validate.go
@@ -148,7 +148,7 @@ type AddHAProxyServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddHAProxyServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -308,7 +308,7 @@ type HAProxyServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HAProxyServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/metrics.pb.go b/api/management/v1/metrics.pb.go
index 27faa2b842..23859f2e0a 100644
--- a/api/management/v1/metrics.pb.go
+++ b/api/management/v1/metrics.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/metrics.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -75,7 +76,7 @@ func (MetricsMode) EnumDescriptor() ([]byte, []int) {
var File_management_v1_metrics_proto protoreflect.FileDescriptor
-var file_management_v1_metrics_proto_rawDesc = []byte{
+var file_management_v1_metrics_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2a, 0x59, 0x0a, 0x0b,
@@ -96,16 +97,16 @@ var file_management_v1_metrics_proto_rawDesc = []byte{
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d,
0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_metrics_proto_rawDescOnce sync.Once
- file_management_v1_metrics_proto_rawDescData = file_management_v1_metrics_proto_rawDesc
+ file_management_v1_metrics_proto_rawDescData []byte
)
func file_management_v1_metrics_proto_rawDescGZIP() []byte {
file_management_v1_metrics_proto_rawDescOnce.Do(func() {
- file_management_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_metrics_proto_rawDescData)
+ file_management_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_metrics_proto_rawDesc), len(file_management_v1_metrics_proto_rawDesc)))
})
return file_management_v1_metrics_proto_rawDescData
}
@@ -134,7 +135,7 @@ func file_management_v1_metrics_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_metrics_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_metrics_proto_rawDesc), len(file_management_v1_metrics_proto_rawDesc)),
NumEnums: 1,
NumMessages: 0,
NumExtensions: 0,
@@ -145,7 +146,6 @@ func file_management_v1_metrics_proto_init() {
EnumInfos: file_management_v1_metrics_proto_enumTypes,
}.Build()
File_management_v1_metrics_proto = out.File
- file_management_v1_metrics_proto_rawDesc = nil
file_management_v1_metrics_proto_goTypes = nil
file_management_v1_metrics_proto_depIdxs = nil
}
diff --git a/api/management/v1/mongodb.pb.go b/api/management/v1/mongodb.pb.go
index 48ebbb5909..1e25d3203e 100644
--- a/api/management/v1/mongodb.pb.go
+++ b/api/management/v1/mongodb.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/mongodb.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddMongoDBServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier on which a service is been running.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
@@ -64,7 +62,7 @@ type AddMongoDBServiceParams struct {
// If true, adds qan-mongodb-profiler-agent for provided service.
QanMongodbProfiler bool `protobuf:"varint,15,opt,name=qan_mongodb_profiler,json=qanMongodbProfiler,proto3" json:"qan_mongodb_profiler,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,16,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,16,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,17,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Use TLS for database connections.
@@ -104,6 +102,8 @@ type AddMongoDBServiceParams struct {
LogLevel v1.LogLevel `protobuf:"varint,33,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,34,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMongoDBServiceParams) Reset() {
@@ -361,13 +361,12 @@ func (x *AddMongoDBServiceParams) GetExposeExporter() bool {
}
type MongoDBServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Service *v1.MongoDBService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
MongodbExporter *v1.MongoDBExporter `protobuf:"bytes,2,opt,name=mongodb_exporter,json=mongodbExporter,proto3" json:"mongodb_exporter,omitempty"`
QanMongodbProfiler *v1.QANMongoDBProfilerAgent `protobuf:"bytes,3,opt,name=qan_mongodb_profiler,json=qanMongodbProfiler,proto3" json:"qan_mongodb_profiler,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MongoDBServiceResult) Reset() {
@@ -423,7 +422,7 @@ func (x *MongoDBServiceResult) GetQanMongodbProfiler() *v1.QANMongoDBProfilerAge
var File_management_v1_mongodb_proto protoreflect.FileDescriptor
-var file_management_v1_mongodb_proto_rawDesc = []byte{
+var file_management_v1_mongodb_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x6f, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69, 0x6e,
@@ -558,16 +557,16 @@ var file_management_v1_mongodb_proto_rawDesc = []byte{
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-}
+})
var (
file_management_v1_mongodb_proto_rawDescOnce sync.Once
- file_management_v1_mongodb_proto_rawDescData = file_management_v1_mongodb_proto_rawDesc
+ file_management_v1_mongodb_proto_rawDescData []byte
)
func file_management_v1_mongodb_proto_rawDescGZIP() []byte {
file_management_v1_mongodb_proto_rawDescOnce.Do(func() {
- file_management_v1_mongodb_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_mongodb_proto_rawDescData)
+ file_management_v1_mongodb_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_mongodb_proto_rawDesc), len(file_management_v1_mongodb_proto_rawDesc)))
})
return file_management_v1_mongodb_proto_rawDescData
}
@@ -613,7 +612,7 @@ func file_management_v1_mongodb_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_mongodb_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_mongodb_proto_rawDesc), len(file_management_v1_mongodb_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -624,7 +623,6 @@ func file_management_v1_mongodb_proto_init() {
MessageInfos: file_management_v1_mongodb_proto_msgTypes,
}.Build()
File_management_v1_mongodb_proto = out.File
- file_management_v1_mongodb_proto_rawDesc = nil
file_management_v1_mongodb_proto_goTypes = nil
file_management_v1_mongodb_proto_depIdxs = nil
}
diff --git a/api/management/v1/mongodb.pb.validate.go b/api/management/v1/mongodb.pb.validate.go
index 59fed4e4ae..2e2c39337c 100644
--- a/api/management/v1/mongodb.pb.validate.go
+++ b/api/management/v1/mongodb.pb.validate.go
@@ -180,7 +180,7 @@ type AddMongoDBServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMongoDBServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -369,7 +369,7 @@ type MongoDBServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MongoDBServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/mysql.pb.go b/api/management/v1/mysql.pb.go
index 28b64588a0..e4206df9ea 100644
--- a/api/management/v1/mysql.pb.go
+++ b/api/management/v1/mysql.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/mysql.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddMySQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier on which a service is been running.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
@@ -66,7 +64,7 @@ type AddMySQLServiceParams struct {
// If true, adds qan-mysql-slowlog-agent for provided service.
QanMysqlSlowlog bool `protobuf:"varint,15,opt,name=qan_mysql_slowlog,json=qanMysqlSlowlog,proto3" json:"qan_mysql_slowlog,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,16,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,16,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,17,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
@@ -105,6 +103,8 @@ type AddMySQLServiceParams struct {
LogLevel v1.LogLevel `protobuf:"varint,31,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,32,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddMySQLServiceParams) Reset() {
@@ -362,16 +362,15 @@ func (x *AddMySQLServiceParams) GetExposeExporter() bool {
}
type MySQLServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Service *v1.MySQLService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
MysqldExporter *v1.MySQLdExporter `protobuf:"bytes,2,opt,name=mysqld_exporter,json=mysqldExporter,proto3" json:"mysqld_exporter,omitempty"`
QanMysqlPerfschema *v1.QANMySQLPerfSchemaAgent `protobuf:"bytes,3,opt,name=qan_mysql_perfschema,json=qanMysqlPerfschema,proto3" json:"qan_mysql_perfschema,omitempty"`
QanMysqlSlowlog *v1.QANMySQLSlowlogAgent `protobuf:"bytes,4,opt,name=qan_mysql_slowlog,json=qanMysqlSlowlog,proto3" json:"qan_mysql_slowlog,omitempty"`
// Actual table count at the moment of adding.
- TableCount int32 `protobuf:"varint,5,opt,name=table_count,json=tableCount,proto3" json:"table_count,omitempty"`
+ TableCount int32 `protobuf:"varint,5,opt,name=table_count,json=tableCount,proto3" json:"table_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MySQLServiceResult) Reset() {
@@ -441,7 +440,7 @@ func (x *MySQLServiceResult) GetTableCount() int32 {
var File_management_v1_mysql_proto protoreflect.FileDescriptor
-var file_management_v1_mysql_proto_rawDesc = []byte{
+var file_management_v1_mysql_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x79, 0x73, 0x71, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69, 0x6e, 0x76, 0x65,
@@ -577,16 +576,16 @@ var file_management_v1_mysql_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e,
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_mysql_proto_rawDescOnce sync.Once
- file_management_v1_mysql_proto_rawDescData = file_management_v1_mysql_proto_rawDesc
+ file_management_v1_mysql_proto_rawDescData []byte
)
func file_management_v1_mysql_proto_rawDescGZIP() []byte {
file_management_v1_mysql_proto_rawDescOnce.Do(func() {
- file_management_v1_mysql_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_mysql_proto_rawDescData)
+ file_management_v1_mysql_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_mysql_proto_rawDesc), len(file_management_v1_mysql_proto_rawDesc)))
})
return file_management_v1_mysql_proto_rawDescData
}
@@ -634,7 +633,7 @@ func file_management_v1_mysql_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_mysql_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_mysql_proto_rawDesc), len(file_management_v1_mysql_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -645,7 +644,6 @@ func file_management_v1_mysql_proto_init() {
MessageInfos: file_management_v1_mysql_proto_msgTypes,
}.Build()
File_management_v1_mysql_proto = out.File
- file_management_v1_mysql_proto_rawDesc = nil
file_management_v1_mysql_proto_goTypes = nil
file_management_v1_mysql_proto_depIdxs = nil
}
diff --git a/api/management/v1/mysql.pb.validate.go b/api/management/v1/mysql.pb.validate.go
index a4ed3375d4..2555a10356 100644
--- a/api/management/v1/mysql.pb.validate.go
+++ b/api/management/v1/mysql.pb.validate.go
@@ -191,7 +191,7 @@ type AddMySQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddMySQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -411,7 +411,7 @@ type MySQLServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MySQLServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/node.pb.go b/api/management/v1/node.pb.go
index 6a5622ded1..4b394fe3b8 100644
--- a/api/management/v1/node.pb.go
+++ b/api/management/v1/node.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/node.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -84,10 +85,7 @@ func (UniversalNode_Status) EnumDescriptor() ([]byte, []int) {
// AddNodeParams holds node params and is used to add new node to inventory while adding new service.
type AddNodeParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node type to be registered.
NodeType v1.NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
// Unique across all Nodes user-defined name.
@@ -107,7 +105,9 @@ type AddNodeParams struct {
// Node availability zone.
Az string `protobuf:"bytes,9,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels for Node.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddNodeParams) Reset() {
@@ -211,10 +211,7 @@ func (x *AddNodeParams) GetCustomLabels() map[string]string {
}
type RegisterNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node type to be registered.
NodeType v1.NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
// A user-defined name unique across all Nodes.
@@ -236,7 +233,7 @@ type RegisterNodeRequest struct {
// Node availability zone.
Az string `protobuf:"bytes,10,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels for Node.
- CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,11,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// If true, and Node with that name already exist, it will be removed with all dependent Services and Agents.
Reregister bool `protobuf:"varint,12,opt,name=reregister,proto3" json:"reregister,omitempty"`
// Defines metrics flow model for node_exporter being added by this request.
@@ -249,6 +246,8 @@ type RegisterNodeRequest struct {
AgentPassword string `protobuf:"bytes,15,opt,name=agent_password,json=agentPassword,proto3" json:"agent_password,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,16,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RegisterNodeRequest) Reset() {
@@ -394,17 +393,16 @@ func (x *RegisterNodeRequest) GetExposeExporter() bool {
}
type RegisterNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- GenericNode *v1.GenericNode `protobuf:"bytes,1,opt,name=generic_node,json=genericNode,proto3" json:"generic_node,omitempty"`
- ContainerNode *v1.ContainerNode `protobuf:"bytes,2,opt,name=container_node,json=containerNode,proto3" json:"container_node,omitempty"`
- PmmAgent *v1.PMMAgent `protobuf:"bytes,3,opt,name=pmm_agent,json=pmmAgent,proto3" json:"pmm_agent,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ GenericNode *v1.GenericNode `protobuf:"bytes,1,opt,name=generic_node,json=genericNode,proto3" json:"generic_node,omitempty"`
+ ContainerNode *v1.ContainerNode `protobuf:"bytes,2,opt,name=container_node,json=containerNode,proto3" json:"container_node,omitempty"`
+ PmmAgent *v1.PMMAgent `protobuf:"bytes,3,opt,name=pmm_agent,json=pmmAgent,proto3" json:"pmm_agent,omitempty"`
// Token represents token for vmagent auth config.
Token string `protobuf:"bytes,4,opt,name=token,proto3" json:"token,omitempty"`
// Warning message.
- Warning string `protobuf:"bytes,5,opt,name=warning,proto3" json:"warning,omitempty"`
+ Warning string `protobuf:"bytes,5,opt,name=warning,proto3" json:"warning,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RegisterNodeResponse) Reset() {
@@ -473,14 +471,13 @@ func (x *RegisterNodeResponse) GetWarning() string {
}
type UnregisterNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node_id to be unregistered.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Force delete node, related service account, even if it has more service tokens attached.
- Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UnregisterNodeRequest) Reset() {
@@ -528,12 +525,11 @@ func (x *UnregisterNodeRequest) GetForce() bool {
}
type UnregisterNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Warning message if there are more service tokens attached to service account.
- Warning string `protobuf:"bytes,1,opt,name=warning,proto3" json:"warning,omitempty"`
+ Warning string `protobuf:"bytes,1,opt,name=warning,proto3" json:"warning,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UnregisterNodeResponse) Reset() {
@@ -574,10 +570,7 @@ func (x *UnregisterNodeResponse) GetWarning() string {
}
type UniversalNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Node identifier.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Node type.
@@ -601,7 +594,7 @@ type UniversalNode struct {
// Node availability zone.
Az string `protobuf:"bytes,11,opt,name=az,proto3" json:"az,omitempty"`
// Custom user-assigned labels for Node.
- CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,12,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Creation timestamp.
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
// Last update timestamp.
@@ -611,7 +604,9 @@ type UniversalNode struct {
// List of agents related to this node.
Agents []*UniversalNode_Agent `protobuf:"bytes,16,rep,name=agents,proto3" json:"agents,omitempty"`
// List of services running on this node.
- Services []*UniversalNode_Service `protobuf:"bytes,17,rep,name=services,proto3" json:"services,omitempty"`
+ Services []*UniversalNode_Service `protobuf:"bytes,17,rep,name=services,proto3" json:"services,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalNode) Reset() {
@@ -764,12 +759,11 @@ func (x *UniversalNode) GetServices() []*UniversalNode_Service {
}
type ListNodesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node type to be filtered out.
- NodeType v1.NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
+ NodeType v1.NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=inventory.v1.NodeType" json:"node_type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListNodesRequest) Reset() {
@@ -810,11 +804,10 @@ func (x *ListNodesRequest) GetNodeType() v1.NodeType {
}
type ListNodesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Nodes []*UniversalNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Nodes []*UniversalNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListNodesResponse) Reset() {
@@ -855,12 +848,11 @@ func (x *ListNodesResponse) GetNodes() []*UniversalNode {
}
type GetNodeRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Node identifier.
- NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetNodeRequest) Reset() {
@@ -901,11 +893,10 @@ func (x *GetNodeRequest) GetNodeId() string {
}
type GetNodeResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Node *UniversalNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Node *UniversalNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetNodeResponse) Reset() {
@@ -947,16 +938,15 @@ func (x *GetNodeResponse) GetNode() *UniversalNode {
// Service represents a service running on a node.
type UniversalNode_Service struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Service identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Service type.
ServiceType string `protobuf:"bytes,2,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
// Service name.
- ServiceName string `protobuf:"bytes,3,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ ServiceName string `protobuf:"bytes,3,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalNode_Service) Reset() {
@@ -1011,10 +1001,7 @@ func (x *UniversalNode_Service) GetServiceName() string {
}
type UniversalNode_Agent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique Agent identifier.
AgentId string `protobuf:"bytes,1,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"`
// Agent type.
@@ -1022,7 +1009,9 @@ type UniversalNode_Agent struct {
// Actual Agent status.
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
// True if Agent is running and connected to pmm-managed.
- IsConnected bool `protobuf:"varint,4,opt,name=is_connected,json=isConnected,proto3" json:"is_connected,omitempty"`
+ IsConnected bool `protobuf:"varint,4,opt,name=is_connected,json=isConnected,proto3" json:"is_connected,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalNode_Agent) Reset() {
@@ -1085,7 +1074,7 @@ func (x *UniversalNode_Agent) GetIsConnected() bool {
var File_management_v1_node_proto protoreflect.FileDescriptor
-var file_management_v1_node_proto_rawDesc = []byte{
+var file_management_v1_node_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
@@ -1293,16 +1282,16 @@ var file_management_v1_node_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e,
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_node_proto_rawDescOnce sync.Once
- file_management_v1_node_proto_rawDescData = file_management_v1_node_proto_rawDesc
+ file_management_v1_node_proto_rawDescData []byte
)
func file_management_v1_node_proto_rawDescGZIP() []byte {
file_management_v1_node_proto_rawDescOnce.Do(func() {
- file_management_v1_node_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_node_proto_rawDescData)
+ file_management_v1_node_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_node_proto_rawDesc), len(file_management_v1_node_proto_rawDesc)))
})
return file_management_v1_node_proto_rawDescData
}
@@ -1371,7 +1360,7 @@ func file_management_v1_node_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_node_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_node_proto_rawDesc), len(file_management_v1_node_proto_rawDesc)),
NumEnums: 1,
NumMessages: 15,
NumExtensions: 0,
@@ -1383,7 +1372,6 @@ func file_management_v1_node_proto_init() {
MessageInfos: file_management_v1_node_proto_msgTypes,
}.Build()
File_management_v1_node_proto = out.File
- file_management_v1_node_proto_rawDesc = nil
file_management_v1_node_proto_goTypes = nil
file_management_v1_node_proto_depIdxs = nil
}
diff --git a/api/management/v1/node.pb.validate.go b/api/management/v1/node.pb.validate.go
index fb7d985f9a..b8adc9fac4 100644
--- a/api/management/v1/node.pb.validate.go
+++ b/api/management/v1/node.pb.validate.go
@@ -104,7 +104,7 @@ type AddNodeParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddNodeParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -243,7 +243,7 @@ type RegisterNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RegisterNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -436,7 +436,7 @@ type RegisterNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RegisterNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -551,7 +551,7 @@ type UnregisterNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UnregisterNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -655,7 +655,7 @@ type UnregisterNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UnregisterNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -909,7 +909,7 @@ type UniversalNodeMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalNodeMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1011,7 +1011,7 @@ type ListNodesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListNodesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1145,7 +1145,7 @@ type ListNodesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListNodesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1258,7 +1258,7 @@ type GetNodeRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetNodeRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1387,7 +1387,7 @@ type GetNodeResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetNodeResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1493,7 +1493,7 @@ type UniversalNode_ServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalNode_ServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1603,7 +1603,7 @@ type UniversalNode_AgentMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalNode_AgentMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/postgresql.pb.go b/api/management/v1/postgresql.pb.go
index 08c3bee477..8a992be330 100644
--- a/api/management/v1/postgresql.pb.go
+++ b/api/management/v1/postgresql.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/postgresql.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddPostgreSQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier on which a service is been running.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
@@ -72,7 +70,7 @@ type AddPostgreSQLServiceParams struct {
// Disable query examples.
DisableQueryExamples bool `protobuf:"varint,18,opt,name=disable_query_examples,json=disableQueryExamples,proto3" json:"disable_query_examples,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,20,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Disable parsing comments from queries and showing them in QAN.
@@ -103,6 +101,8 @@ type AddPostgreSQLServiceParams struct {
ExposeExporter bool `protobuf:"varint,32,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
// Maximum number of connections that exporter can open to the database instance.
MaxExporterConnections int32 `protobuf:"varint,33,opt,name=max_exporter_connections,json=maxExporterConnections,proto3" json:"max_exporter_connections,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddPostgreSQLServiceParams) Reset() {
@@ -367,16 +367,15 @@ func (x *AddPostgreSQLServiceParams) GetMaxExporterConnections() int32 {
}
type PostgreSQLServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Service *v1.PostgreSQLService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
PostgresExporter *v1.PostgresExporter `protobuf:"bytes,2,opt,name=postgres_exporter,json=postgresExporter,proto3" json:"postgres_exporter,omitempty"`
QanPostgresqlPgstatementsAgent *v1.QANPostgreSQLPgStatementsAgent `protobuf:"bytes,3,opt,name=qan_postgresql_pgstatements_agent,json=qanPostgresqlPgstatementsAgent,proto3" json:"qan_postgresql_pgstatements_agent,omitempty"`
QanPostgresqlPgstatmonitorAgent *v1.QANPostgreSQLPgStatMonitorAgent `protobuf:"bytes,4,opt,name=qan_postgresql_pgstatmonitor_agent,json=qanPostgresqlPgstatmonitorAgent,proto3" json:"qan_postgresql_pgstatmonitor_agent,omitempty"`
// Warning message.
- Warning string `protobuf:"bytes,5,opt,name=warning,proto3" json:"warning,omitempty"`
+ Warning string `protobuf:"bytes,5,opt,name=warning,proto3" json:"warning,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *PostgreSQLServiceResult) Reset() {
@@ -446,7 +445,7 @@ func (x *PostgreSQLServiceResult) GetWarning() string {
var File_management_v1_postgresql_proto protoreflect.FileDescriptor
-var file_management_v1_postgresql_proto_rawDesc = []byte{
+var file_management_v1_postgresql_proto_rawDesc = string([]byte{
0x0a, 0x1e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x70, 0x6f, 0x73, 0x74, 0x67, 0x72, 0x65, 0x73, 0x71, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a,
@@ -593,16 +592,16 @@ var file_management_v1_postgresql_proto_rawDesc = []byte{
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e,
0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_postgresql_proto_rawDescOnce sync.Once
- file_management_v1_postgresql_proto_rawDescData = file_management_v1_postgresql_proto_rawDesc
+ file_management_v1_postgresql_proto_rawDescData []byte
)
func file_management_v1_postgresql_proto_rawDescGZIP() []byte {
file_management_v1_postgresql_proto_rawDescOnce.Do(func() {
- file_management_v1_postgresql_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_postgresql_proto_rawDescData)
+ file_management_v1_postgresql_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_postgresql_proto_rawDesc), len(file_management_v1_postgresql_proto_rawDesc)))
})
return file_management_v1_postgresql_proto_rawDescData
}
@@ -650,7 +649,7 @@ func file_management_v1_postgresql_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_postgresql_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_postgresql_proto_rawDesc), len(file_management_v1_postgresql_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -661,7 +660,6 @@ func file_management_v1_postgresql_proto_init() {
MessageInfos: file_management_v1_postgresql_proto_msgTypes,
}.Build()
File_management_v1_postgresql_proto = out.File
- file_management_v1_postgresql_proto_rawDesc = nil
file_management_v1_postgresql_proto_goTypes = nil
file_management_v1_postgresql_proto_depIdxs = nil
}
diff --git a/api/management/v1/postgresql.pb.validate.go b/api/management/v1/postgresql.pb.validate.go
index 79da043015..bb9a930bb4 100644
--- a/api/management/v1/postgresql.pb.validate.go
+++ b/api/management/v1/postgresql.pb.validate.go
@@ -193,7 +193,7 @@ type AddPostgreSQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddPostgreSQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -413,7 +413,7 @@ type PostgreSQLServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PostgreSQLServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/proxysql.pb.go b/api/management/v1/proxysql.pb.go
index 0360313e81..522156a1d7 100644
--- a/api/management/v1/proxysql.pb.go
+++ b/api/management/v1/proxysql.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/proxysql.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,10 +26,7 @@ const (
)
type AddProxySQLServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Node identifier on which a service is been running.
// Exactly one of these parameters should be present: node_id, node_name, add_node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
@@ -62,7 +60,7 @@ type AddProxySQLServiceParams struct {
// ProxySQL password for scraping metrics.
Password string `protobuf:"bytes,13,opt,name=password,proto3" json:"password,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,14,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,15,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Use TLS for database connections.
@@ -81,6 +79,8 @@ type AddProxySQLServiceParams struct {
LogLevel v1.LogLevel `protobuf:"varint,22,opt,name=log_level,json=logLevel,proto3,enum=inventory.v1.LogLevel" json:"log_level,omitempty"`
// Optionally expose the exporter process on all public interfaces
ExposeExporter bool `protobuf:"varint,23,opt,name=expose_exporter,json=exposeExporter,proto3" json:"expose_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddProxySQLServiceParams) Reset() {
@@ -268,12 +268,11 @@ func (x *AddProxySQLServiceParams) GetExposeExporter() bool {
}
type ProxySQLServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Service *v1.ProxySQLService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- ProxysqlExporter *v1.ProxySQLExporter `protobuf:"bytes,2,opt,name=proxysql_exporter,json=proxysqlExporter,proto3" json:"proxysql_exporter,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Service *v1.ProxySQLService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ ProxysqlExporter *v1.ProxySQLExporter `protobuf:"bytes,2,opt,name=proxysql_exporter,json=proxysqlExporter,proto3" json:"proxysql_exporter,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ProxySQLServiceResult) Reset() {
@@ -322,7 +321,7 @@ func (x *ProxySQLServiceResult) GetProxysqlExporter() *v1.ProxySQLExporter {
var File_management_v1_proxysql_proto protoreflect.FileDescriptor
-var file_management_v1_proxysql_proto_rawDesc = []byte{
+var file_management_v1_proxysql_proto_rawDesc = string([]byte{
0x0a, 0x1c, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x70, 0x72, 0x6f, 0x78, 0x79, 0x73, 0x71, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69,
@@ -419,16 +418,16 @@ var file_management_v1_proxysql_proto_rawDesc = []byte{
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e,
0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_proxysql_proto_rawDescOnce sync.Once
- file_management_v1_proxysql_proto_rawDescData = file_management_v1_proxysql_proto_rawDesc
+ file_management_v1_proxysql_proto_rawDescData []byte
)
func file_management_v1_proxysql_proto_rawDescGZIP() []byte {
file_management_v1_proxysql_proto_rawDescOnce.Do(func() {
- file_management_v1_proxysql_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_proxysql_proto_rawDescData)
+ file_management_v1_proxysql_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_proxysql_proto_rawDesc), len(file_management_v1_proxysql_proto_rawDesc)))
})
return file_management_v1_proxysql_proto_rawDescData
}
@@ -472,7 +471,7 @@ func file_management_v1_proxysql_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_proxysql_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_proxysql_proto_rawDesc), len(file_management_v1_proxysql_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -483,7 +482,6 @@ func file_management_v1_proxysql_proto_init() {
MessageInfos: file_management_v1_proxysql_proto_msgTypes,
}.Build()
File_management_v1_proxysql_proto = out.File
- file_management_v1_proxysql_proto_rawDesc = nil
file_management_v1_proxysql_proto_goTypes = nil
file_management_v1_proxysql_proto_depIdxs = nil
}
diff --git a/api/management/v1/proxysql.pb.validate.go b/api/management/v1/proxysql.pb.validate.go
index e427dd86aa..d9fea3aff8 100644
--- a/api/management/v1/proxysql.pb.validate.go
+++ b/api/management/v1/proxysql.pb.validate.go
@@ -171,7 +171,7 @@ type AddProxySQLServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddProxySQLServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -331,7 +331,7 @@ type ProxySQLServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ProxySQLServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/rds.pb.go b/api/management/v1/rds.pb.go
index d493683cda..666341b1cd 100644
--- a/api/management/v1/rds.pb.go
+++ b/api/management/v1/rds.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/rds.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -76,10 +77,7 @@ func (DiscoverRDSEngine) EnumDescriptor() ([]byte, []int) {
// DiscoverRDSInstance models an unique RDS instance for the list of instances returned by Discovery.
type DiscoverRDSInstance struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// AWS region.
Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"`
// AWS availability zone.
@@ -96,6 +94,8 @@ type DiscoverRDSInstance struct {
Engine DiscoverRDSEngine `protobuf:"varint,7,opt,name=engine,proto3,enum=management.v1.DiscoverRDSEngine" json:"engine,omitempty"`
// Engine version.
EngineVersion string `protobuf:"bytes,8,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverRDSInstance) Reset() {
@@ -185,14 +185,13 @@ func (x *DiscoverRDSInstance) GetEngineVersion() string {
}
type DiscoverRDSRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// AWS Access key. Optional.
AwsAccessKey string `protobuf:"bytes,1,opt,name=aws_access_key,json=awsAccessKey,proto3" json:"aws_access_key,omitempty"`
// AWS Secret key. Optional.
- AwsSecretKey string `protobuf:"bytes,2,opt,name=aws_secret_key,json=awsSecretKey,proto3" json:"aws_secret_key,omitempty"`
+ AwsSecretKey string `protobuf:"bytes,2,opt,name=aws_secret_key,json=awsSecretKey,proto3" json:"aws_secret_key,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverRDSRequest) Reset() {
@@ -240,11 +239,10 @@ func (x *DiscoverRDSRequest) GetAwsSecretKey() string {
}
type DiscoverRDSResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ RdsInstances []*DiscoverRDSInstance `protobuf:"bytes,1,rep,name=rds_instances,json=rdsInstances,proto3" json:"rds_instances,omitempty"`
unknownFields protoimpl.UnknownFields
-
- RdsInstances []*DiscoverRDSInstance `protobuf:"bytes,1,rep,name=rds_instances,json=rdsInstances,proto3" json:"rds_instances,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *DiscoverRDSResponse) Reset() {
@@ -285,10 +283,7 @@ func (x *DiscoverRDSResponse) GetRdsInstances() []*DiscoverRDSInstance {
}
type AddRDSServiceParams struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// AWS region.
Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"`
// AWS availability zone.
@@ -328,7 +323,7 @@ type AddRDSServiceParams struct {
// If true, adds qan-mysql-perfschema-agent.
QanMysqlPerfschema bool `protobuf:"varint,18,opt,name=qan_mysql_perfschema,json=qanMysqlPerfschema,proto3" json:"qan_mysql_perfschema,omitempty"`
// Custom user-assigned labels for Node and Service.
- CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,19,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Skip connection check.
SkipConnectionCheck bool `protobuf:"varint,20,opt,name=skip_connection_check,json=skipConnectionCheck,proto3" json:"skip_connection_check,omitempty"`
// Use TLS for database connections.
@@ -360,6 +355,8 @@ type AddRDSServiceParams struct {
DisableCommentsParsing bool `protobuf:"varint,32,opt,name=disable_comments_parsing,json=disableCommentsParsing,proto3" json:"disable_comments_parsing,omitempty"`
// Maximum number of exporter connections to PostgreSQL instance.
MaxPostgresqlExporterConnections int32 `protobuf:"varint,33,opt,name=max_postgresql_exporter_connections,json=maxPostgresqlExporterConnections,proto3" json:"max_postgresql_exporter_connections,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddRDSServiceParams) Reset() {
@@ -631,10 +628,7 @@ func (x *AddRDSServiceParams) GetMaxPostgresqlExporterConnections() int32 {
}
type RDSServiceResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Node *v1.RemoteRDSNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
RdsExporter *v1.RDSExporter `protobuf:"bytes,2,opt,name=rds_exporter,json=rdsExporter,proto3" json:"rds_exporter,omitempty"`
Mysql *v1.MySQLService `protobuf:"bytes,3,opt,name=mysql,proto3" json:"mysql,omitempty"`
@@ -643,6 +637,8 @@ type RDSServiceResult struct {
Postgresql *v1.PostgreSQLService `protobuf:"bytes,6,opt,name=postgresql,proto3" json:"postgresql,omitempty"`
PostgresqlExporter *v1.PostgresExporter `protobuf:"bytes,7,opt,name=postgresql_exporter,json=postgresqlExporter,proto3" json:"postgresql_exporter,omitempty"`
QanPostgresqlPgstatements *v1.QANPostgreSQLPgStatementsAgent `protobuf:"bytes,8,opt,name=qan_postgresql_pgstatements,json=qanPostgresqlPgstatements,proto3" json:"qan_postgresql_pgstatements,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RDSServiceResult) Reset() {
@@ -733,7 +729,7 @@ func (x *RDSServiceResult) GetQanPostgresqlPgstatements() *v1.QANPostgreSQLPgSta
var File_management_v1_rds_proto protoreflect.FileDescriptor
-var file_management_v1_rds_proto_rawDesc = []byte{
+var file_management_v1_rds_proto_rawDesc = string([]byte{
0x0a, 0x17, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x72, 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67,
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x69, 0x6e, 0x76, 0x65, 0x6e, 0x74,
@@ -929,16 +925,16 @@ var file_management_v1_rds_proto_rawDesc = []byte{
0x74, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x0e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_rds_proto_rawDescOnce sync.Once
- file_management_v1_rds_proto_rawDescData = file_management_v1_rds_proto_rawDesc
+ file_management_v1_rds_proto_rawDescData []byte
)
func file_management_v1_rds_proto_rawDescGZIP() []byte {
file_management_v1_rds_proto_rawDescOnce.Do(func() {
- file_management_v1_rds_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_rds_proto_rawDescData)
+ file_management_v1_rds_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_rds_proto_rawDesc), len(file_management_v1_rds_proto_rawDesc)))
})
return file_management_v1_rds_proto_rawDescData
}
@@ -997,7 +993,7 @@ func file_management_v1_rds_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_rds_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_rds_proto_rawDesc), len(file_management_v1_rds_proto_rawDesc)),
NumEnums: 1,
NumMessages: 6,
NumExtensions: 0,
@@ -1009,7 +1005,6 @@ func file_management_v1_rds_proto_init() {
MessageInfos: file_management_v1_rds_proto_msgTypes,
}.Build()
File_management_v1_rds_proto = out.File
- file_management_v1_rds_proto_rawDesc = nil
file_management_v1_rds_proto_goTypes = nil
file_management_v1_rds_proto_depIdxs = nil
}
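
protoc-gen-go v1.36 emits the raw file descriptor as a string and converts it to a byte slice only when needed, using `unsafe.Slice(unsafe.StringData(...))` so no copy is made and the descriptor cannot be mutated through a package-level variable. A minimal sketch of that zero-copy conversion, with a made-up `rawDesc` value standing in for the generated descriptor:

```go
package main

import (
	"fmt"
	"unsafe"
)

// rawDesc stands in for a generated file_*_proto_rawDesc value, which is now
// emitted as a string rather than a mutable []byte variable.
var rawDesc = string([]byte{0x0a, 0x17, 0x6d, 0x61, 0x6e})

// bytesOf returns a []byte view over the string without copying, the same
// unsafe.Slice(unsafe.StringData(...)) idiom the generated code uses when a
// byte slice is required (e.g. for gzip compression or the TypeBuilder).
// The caller must not modify the returned slice.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := bytesOf(rawDesc)
	fmt.Printf("%d bytes, first byte 0x%02x\n", len(b), b[0])
}
```
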
diff --git a/api/management/v1/rds.pb.validate.go b/api/management/v1/rds.pb.validate.go
index 12b616d354..920bb97dbf 100644
--- a/api/management/v1/rds.pb.validate.go
+++ b/api/management/v1/rds.pb.validate.go
@@ -87,7 +87,7 @@ type DiscoverRDSInstanceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverRDSInstanceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -193,7 +193,7 @@ type DiscoverRDSRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverRDSRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -329,7 +329,7 @@ type DiscoverRDSResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DiscoverRDSResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -544,7 +544,7 @@ type AddRDSServiceParamsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddRDSServiceParamsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -878,7 +878,7 @@ type RDSServiceResultMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RDSServiceResultMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/management/v1/service.pb.go b/api/management/v1/service.pb.go
index d3b46ad63b..81f8391f8b 100644
--- a/api/management/v1/service.pb.go
+++ b/api/management/v1/service.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/service.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -84,11 +85,8 @@ func (UniversalService_Status) EnumDescriptor() ([]byte, []int) {
}
type AddServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *AddServiceRequest_Mysql
// *AddServiceRequest_Mongodb
@@ -97,7 +95,9 @@ type AddServiceRequest struct {
// *AddServiceRequest_Haproxy
// *AddServiceRequest_External
// *AddServiceRequest_Rds
- Service isAddServiceRequest_Service `protobuf_oneof:"service"`
+ Service isAddServiceRequest_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddServiceRequest) Reset() {
@@ -130,58 +130,72 @@ func (*AddServiceRequest) Descriptor() ([]byte, []int) {
return file_management_v1_service_proto_rawDescGZIP(), []int{0}
}
-func (m *AddServiceRequest) GetService() isAddServiceRequest_Service {
- if m != nil {
- return m.Service
+func (x *AddServiceRequest) GetService() isAddServiceRequest_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *AddServiceRequest) GetMysql() *AddMySQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *AddServiceRequest) GetMongodb() *AddMongoDBServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *AddServiceRequest) GetPostgresql() *AddPostgreSQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *AddServiceRequest) GetProxysql() *AddProxySQLServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *AddServiceRequest) GetHaproxy() *AddHAProxyServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *AddServiceRequest) GetExternal() *AddExternalServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_External); ok {
+ return x.External
+ }
}
return nil
}
func (x *AddServiceRequest) GetRds() *AddRDSServiceParams {
- if x, ok := x.GetService().(*AddServiceRequest_Rds); ok {
- return x.Rds
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceRequest_Rds); ok {
+ return x.Rds
+ }
}
return nil
}
@@ -233,11 +247,8 @@ func (*AddServiceRequest_External) isAddServiceRequest_Service() {}
func (*AddServiceRequest_Rds) isAddServiceRequest_Service() {}
type AddServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Service:
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Service:
//
// *AddServiceResponse_Mysql
// *AddServiceResponse_Mongodb
@@ -246,7 +257,9 @@ type AddServiceResponse struct {
// *AddServiceResponse_Haproxy
// *AddServiceResponse_External
// *AddServiceResponse_Rds
- Service isAddServiceResponse_Service `protobuf_oneof:"service"`
+ Service isAddServiceResponse_Service `protobuf_oneof:"service"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AddServiceResponse) Reset() {
@@ -279,58 +292,72 @@ func (*AddServiceResponse) Descriptor() ([]byte, []int) {
return file_management_v1_service_proto_rawDescGZIP(), []int{1}
}
-func (m *AddServiceResponse) GetService() isAddServiceResponse_Service {
- if m != nil {
- return m.Service
+func (x *AddServiceResponse) GetService() isAddServiceResponse_Service {
+ if x != nil {
+ return x.Service
}
return nil
}
func (x *AddServiceResponse) GetMysql() *MySQLServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Mysql); ok {
- return x.Mysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Mysql); ok {
+ return x.Mysql
+ }
}
return nil
}
func (x *AddServiceResponse) GetMongodb() *MongoDBServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Mongodb); ok {
- return x.Mongodb
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Mongodb); ok {
+ return x.Mongodb
+ }
}
return nil
}
func (x *AddServiceResponse) GetPostgresql() *PostgreSQLServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Postgresql); ok {
- return x.Postgresql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Postgresql); ok {
+ return x.Postgresql
+ }
}
return nil
}
func (x *AddServiceResponse) GetProxysql() *ProxySQLServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Proxysql); ok {
- return x.Proxysql
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Proxysql); ok {
+ return x.Proxysql
+ }
}
return nil
}
func (x *AddServiceResponse) GetHaproxy() *HAProxyServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Haproxy); ok {
- return x.Haproxy
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Haproxy); ok {
+ return x.Haproxy
+ }
}
return nil
}
func (x *AddServiceResponse) GetExternal() *ExternalServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_External); ok {
- return x.External
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_External); ok {
+ return x.External
+ }
}
return nil
}
func (x *AddServiceResponse) GetRds() *RDSServiceResult {
- if x, ok := x.GetService().(*AddServiceResponse_Rds); ok {
- return x.Rds
+ if x != nil {
+ if x, ok := x.Service.(*AddServiceResponse_Rds); ok {
+ return x.Rds
+ }
}
return nil
}
@@ -382,14 +409,13 @@ func (*AddServiceResponse_External) isAddServiceResponse_Service() {}
func (*AddServiceResponse_Rds) isAddServiceResponse_Service() {}
type RemoveServiceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Either a Service ID or a Service Name.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Service type.
- ServiceType v1.ServiceType `protobuf:"varint,2,opt,name=service_type,json=serviceType,proto3,enum=inventory.v1.ServiceType" json:"service_type,omitempty"`
+ ServiceType v1.ServiceType `protobuf:"varint,2,opt,name=service_type,json=serviceType,proto3,enum=inventory.v1.ServiceType" json:"service_type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveServiceRequest) Reset() {
@@ -437,9 +463,9 @@ func (x *RemoveServiceRequest) GetServiceType() v1.ServiceType {
}
type RemoveServiceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *RemoveServiceResponse) Reset() {
@@ -473,10 +499,7 @@ func (*RemoveServiceResponse) Descriptor() ([]byte, []int) {
}
type UniversalService struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unique service identifier.
ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Service type.
@@ -496,7 +519,7 @@ type UniversalService struct {
// Replication set name.
ReplicationSet string `protobuf:"bytes,9,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
// Custom user-assigned labels for Service.
- CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CustomLabels map[string]string `protobuf:"bytes,10,rep,name=custom_labels,json=customLabels,proto3" json:"custom_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// External group name.
ExternalGroup string `protobuf:"bytes,11,opt,name=external_group,json=externalGroup,proto3" json:"external_group,omitempty"`
// Access address (DNS name or IP).
@@ -517,7 +540,9 @@ type UniversalService struct {
// The health status of the service.
Status UniversalService_Status `protobuf:"varint,18,opt,name=status,proto3,enum=management.v1.UniversalService_Status" json:"status,omitempty"`
// The service/database version.
- Version string `protobuf:"bytes,19,opt,name=version,proto3" json:"version,omitempty"`
+ Version string `protobuf:"bytes,19,opt,name=version,proto3" json:"version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UniversalService) Reset() {
@@ -684,16 +709,15 @@ func (x *UniversalService) GetVersion() string {
}
type ListServicesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Return only Services running on that Node.
NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
// Return only services filtered by service type.
ServiceType v1.ServiceType `protobuf:"varint,2,opt,name=service_type,json=serviceType,proto3,enum=inventory.v1.ServiceType" json:"service_type,omitempty"`
// Return only services in this external group.
ExternalGroup string `protobuf:"bytes,3,opt,name=external_group,json=externalGroup,proto3" json:"external_group,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListServicesRequest) Reset() {
@@ -748,12 +772,11 @@ func (x *ListServicesRequest) GetExternalGroup() string {
}
type ListServicesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// List of Services.
- Services []*UniversalService `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+ Services []*UniversalService `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListServicesResponse) Reset() {
@@ -795,7 +818,7 @@ func (x *ListServicesResponse) GetServices() []*UniversalService {
var File_management_v1_service_proto protoreflect.FileDescriptor
-var file_management_v1_service_proto_rawDesc = []byte{
+var file_management_v1_service_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
@@ -1137,16 +1160,16 @@ var file_management_v1_service_proto_rawDesc = []byte{
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x4d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_service_proto_rawDescOnce sync.Once
- file_management_v1_service_proto_rawDescData = file_management_v1_service_proto_rawDesc
+ file_management_v1_service_proto_rawDescData []byte
)
func file_management_v1_service_proto_rawDescGZIP() []byte {
file_management_v1_service_proto_rawDescOnce.Do(func() {
- file_management_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_service_proto_rawDescData)
+ file_management_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_service_proto_rawDesc), len(file_management_v1_service_proto_rawDesc)))
})
return file_management_v1_service_proto_rawDescData
}
@@ -1298,7 +1321,7 @@ func file_management_v1_service_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_service_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_service_proto_rawDesc), len(file_management_v1_service_proto_rawDesc)),
NumEnums: 1,
NumMessages: 8,
NumExtensions: 0,
@@ -1310,7 +1333,6 @@ func file_management_v1_service_proto_init() {
MessageInfos: file_management_v1_service_proto_msgTypes,
}.Build()
File_management_v1_service_proto = out.File
- file_management_v1_service_proto_rawDesc = nil
file_management_v1_service_proto_goTypes = nil
file_management_v1_service_proto_depIdxs = nil
}
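
The regenerated oneof getters now guard the receiver and type-assert the field directly instead of going through an intermediate GetService() call, so they remain safe on a nil receiver. A reduced sketch with illustrative types (not the generated AddServiceRequest API):

```go
package main

import "fmt"

// addServiceRequest models a message with a oneof field: Service holds
// exactly one of several wrapper types.
type addServiceRequest struct {
	Service isService
}

type isService interface{ isService() }

type serviceMysql struct{ Name string }

func (*serviceMysql) isService() {}

// GetMysql follows the v1.36 getter shape: check the receiver for nil first,
// then type-assert the oneof field directly.
func (x *addServiceRequest) GetMysql() *serviceMysql {
	if x != nil {
		if s, ok := x.Service.(*serviceMysql); ok {
			return s
		}
	}
	return nil
}

func main() {
	var nilReq *addServiceRequest
	fmt.Println(nilReq.GetMysql() == nil) // true: safe on a nil receiver

	req := &addServiceRequest{Service: &serviceMysql{Name: "mysql-prod"}}
	fmt.Println(req.GetMysql().Name) // mysql-prod
}
```
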
diff --git a/api/management/v1/service.pb.gw.go b/api/management/v1/service.pb.gw.go
index b71f27c3f6..f5624411a9 100644
--- a/api/management/v1/service.pb.gw.go
+++ b/api/management/v1/service.pb.gw.go
@@ -10,6 +10,7 @@ package managementv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,31 +29,32 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_ManagementService_AddAnnotation_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAnnotationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAnnotationRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddAnnotation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_AddAnnotation_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAnnotationRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAnnotationRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddAnnotation(ctx, &protoReq)
return msg, metadata, err
}
@@ -60,71 +62,73 @@ func local_request_ManagementService_AddAnnotation_0(ctx context.Context, marsha
var filter_ManagementService_ListAgents_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ManagementService_ListAgents_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentsRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListAgents_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListAgents(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_ListAgents_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentsRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListAgents_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListAgents(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_ListAgentVersions_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentVersionsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentVersionsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListAgentVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_ListAgentVersions_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListAgentVersionsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListAgentVersionsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListAgentVersions(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RegisterNodeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq RegisterNodeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RegisterNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RegisterNodeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq RegisterNodeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RegisterNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -132,65 +136,49 @@ func local_request_ManagementService_RegisterNode_0(ctx context.Context, marshal
var filter_ManagementService_UnregisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_ManagementService_UnregisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UnregisterNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UnregisterNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_UnregisterNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UnregisterNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_UnregisterNode_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UnregisterNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq UnregisterNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_UnregisterNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UnregisterNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -198,105 +186,91 @@ func local_request_ManagementService_UnregisterNode_0(ctx context.Context, marsh
var filter_ManagementService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ManagementService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListNodesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListNodesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListNodesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListNodesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListNodes(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
msg, err := client.GetNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_GetNode_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetNodeRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetNodeRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["node_id"]
+ val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
-
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
-
msg, err := server.GetNode(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_AddService_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddServiceRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_AddService_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddServiceRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddServiceRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddService(ctx, &protoReq)
return msg, metadata, err
}
@@ -304,103 +278,103 @@ func local_request_ManagementService_AddService_0(ctx context.Context, marshaler
var filter_ManagementService_ListServices_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ManagementService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListServicesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListServices_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListServicesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListServicesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_ListServices_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ListServices(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_DiscoverRDS_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DiscoverRDSRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DiscoverRDSRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DiscoverRDS(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_DiscoverRDS_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DiscoverRDSRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DiscoverRDSRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DiscoverRDS(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_DiscoverAzureDatabase_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DiscoverAzureDatabaseRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DiscoverAzureDatabaseRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.DiscoverAzureDatabase(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_DiscoverAzureDatabase_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DiscoverAzureDatabaseRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DiscoverAzureDatabaseRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.DiscoverAzureDatabase(ctx, &protoReq)
return msg, metadata, err
}
func request_ManagementService_AddAzureDatabase_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAzureDatabaseRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAzureDatabaseRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.AddAzureDatabase(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_AddAzureDatabase_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq AddAzureDatabaseRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq AddAzureDatabaseRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.AddAzureDatabase(ctx, &protoReq)
return msg, metadata, err
}
@@ -408,65 +382,49 @@ func local_request_ManagementService_AddAzureDatabase_0(ctx context.Context, mar
var filter_ManagementService_RemoveService_0 = &utilities.DoubleArray{Encoding: map[string]int{"service_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
func request_ManagementService_RemoveService_0(ctx context.Context, marshaler runtime.Marshaler, client ManagementServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_RemoveService_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.RemoveService(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ManagementService_RemoveService_0(ctx context.Context, marshaler runtime.Marshaler, server ManagementServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq RemoveServiceRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq RemoveServiceRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["service_id"]
+ val, ok := pathParams["service_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
}
-
protoReq.ServiceId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
}
-
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ManagementService_RemoveService_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.RemoveService(ctx, &protoReq)
return msg, metadata, err
}
@@ -477,15 +435,13 @@ func local_request_ManagementService_RemoveService_0(ctx context.Context, marsha
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterManagementServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ManagementServiceServer) error {
- mux.Handle("POST", pattern_ManagementService_AddAnnotation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddAnnotation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddAnnotation", runtime.WithHTTPPathPattern("/v1/management/annotations"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddAnnotation", runtime.WithHTTPPathPattern("/v1/management/annotations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -497,19 +453,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddAnnotation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListAgents", runtime.WithHTTPPathPattern("/v1/management/agents"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListAgents", runtime.WithHTTPPathPattern("/v1/management/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -521,19 +473,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListAgents_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListAgentVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListAgentVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListAgentVersions", runtime.WithHTTPPathPattern("/v1/management/agents/versions"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListAgentVersions", runtime.WithHTTPPathPattern("/v1/management/agents/versions"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -545,19 +493,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListAgentVersions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/RegisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/RegisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -569,19 +513,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_RegisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ManagementService_UnregisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ManagementService_UnregisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/UnregisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/UnregisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -593,19 +533,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_UnregisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListNodes", runtime.WithHTTPPathPattern("/v1/management/nodes"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListNodes", runtime.WithHTTPPathPattern("/v1/management/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -617,19 +553,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/GetNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/GetNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -641,19 +573,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddService", runtime.WithHTTPPathPattern("/v1/management/services"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddService", runtime.WithHTTPPathPattern("/v1/management/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -665,19 +593,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListServices", runtime.WithHTTPPathPattern("/v1/management/services"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/ListServices", runtime.WithHTTPPathPattern("/v1/management/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -689,19 +613,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_DiscoverRDS_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_DiscoverRDS_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverRDS", runtime.WithHTTPPathPattern("/v1/management/services:discoverRDS"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverRDS", runtime.WithHTTPPathPattern("/v1/management/services:discoverRDS"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -713,19 +633,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_DiscoverRDS_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_DiscoverAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_DiscoverAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services:discoverAzure"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services:discoverAzure"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -737,19 +653,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_DiscoverAzureDatabase_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_AddAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services/azure"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/AddAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services/azure"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -761,19 +673,15 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddAzureDatabase_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ManagementService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ManagementService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/RemoveService", runtime.WithHTTPPathPattern("/v1/management/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/management.v1.ManagementService/RemoveService", runtime.WithHTTPPathPattern("/v1/management/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -785,7 +693,6 @@ func RegisterManagementServiceHandlerServer(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_RemoveService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -813,7 +720,6 @@ func RegisterManagementServiceHandlerFromEndpoint(ctx context.Context, mux *runt
}
}()
}()
-
return RegisterManagementServiceHandler(ctx, mux, conn)
}
@@ -829,13 +735,11 @@ func RegisterManagementServiceHandler(ctx context.Context, mux *runtime.ServeMux
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ManagementServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ManagementServiceClient) error {
- mux.Handle("POST", pattern_ManagementService_AddAnnotation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddAnnotation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddAnnotation", runtime.WithHTTPPathPattern("/v1/management/annotations"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddAnnotation", runtime.WithHTTPPathPattern("/v1/management/annotations"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -846,17 +750,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddAnnotation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListAgents_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListAgents", runtime.WithHTTPPathPattern("/v1/management/agents"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListAgents", runtime.WithHTTPPathPattern("/v1/management/agents"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -867,17 +767,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListAgents_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListAgentVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListAgentVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListAgentVersions", runtime.WithHTTPPathPattern("/v1/management/agents/versions"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListAgentVersions", runtime.WithHTTPPathPattern("/v1/management/agents/versions"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -888,17 +784,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListAgentVersions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/RegisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/RegisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -909,17 +801,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_RegisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ManagementService_UnregisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ManagementService_UnregisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/UnregisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/UnregisterNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -930,17 +818,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_UnregisterNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListNodes", runtime.WithHTTPPathPattern("/v1/management/nodes"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListNodes", runtime.WithHTTPPathPattern("/v1/management/nodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -951,17 +835,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_GetNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/GetNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/GetNode", runtime.WithHTTPPathPattern("/v1/management/nodes/{node_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -972,17 +852,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_GetNode_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddService", runtime.WithHTTPPathPattern("/v1/management/services"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddService", runtime.WithHTTPPathPattern("/v1/management/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -993,17 +869,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ManagementService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ManagementService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListServices", runtime.WithHTTPPathPattern("/v1/management/services"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/ListServices", runtime.WithHTTPPathPattern("/v1/management/services"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -1014,17 +886,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_DiscoverRDS_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_DiscoverRDS_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverRDS", runtime.WithHTTPPathPattern("/v1/management/services:discoverRDS"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverRDS", runtime.WithHTTPPathPattern("/v1/management/services:discoverRDS"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -1035,17 +903,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_DiscoverRDS_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_DiscoverAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_DiscoverAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services:discoverAzure"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/DiscoverAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services:discoverAzure"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -1056,17 +920,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_DiscoverAzureDatabase_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ManagementService_AddAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ManagementService_AddAzureDatabase_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services/azure"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/AddAzureDatabase", runtime.WithHTTPPathPattern("/v1/management/services/azure"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -1077,17 +937,13 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_AddAzureDatabase_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("DELETE", pattern_ManagementService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodDelete, pattern_ManagementService_RemoveService_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/RemoveService", runtime.WithHTTPPathPattern("/v1/management/services/{service_id}"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/management.v1.ManagementService/RemoveService", runtime.WithHTTPPathPattern("/v1/management/services/{service_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -1098,65 +954,39 @@ func RegisterManagementServiceHandlerClient(ctx context.Context, mux *runtime.Se
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ManagementService_RemoveService_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_ManagementService_AddAnnotation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "annotations"}, ""))
-
- pattern_ManagementService_ListAgents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "agents"}, ""))
-
- pattern_ManagementService_ListAgentVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "agents", "versions"}, ""))
-
- pattern_ManagementService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "nodes"}, ""))
-
- pattern_ManagementService_UnregisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "nodes", "node_id"}, ""))
-
- pattern_ManagementService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "nodes"}, ""))
-
- pattern_ManagementService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "nodes", "node_id"}, ""))
-
- pattern_ManagementService_AddService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, ""))
-
- pattern_ManagementService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, ""))
-
- pattern_ManagementService_DiscoverRDS_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, "discoverRDS"))
-
+ pattern_ManagementService_AddAnnotation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "annotations"}, ""))
+ pattern_ManagementService_ListAgents_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "agents"}, ""))
+ pattern_ManagementService_ListAgentVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "agents", "versions"}, ""))
+ pattern_ManagementService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "nodes"}, ""))
+ pattern_ManagementService_UnregisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "nodes", "node_id"}, ""))
+ pattern_ManagementService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "nodes"}, ""))
+ pattern_ManagementService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "nodes", "node_id"}, ""))
+ pattern_ManagementService_AddService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, ""))
+ pattern_ManagementService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, ""))
+ pattern_ManagementService_DiscoverRDS_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, "discoverRDS"))
pattern_ManagementService_DiscoverAzureDatabase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "management", "services"}, "discoverAzure"))
-
- pattern_ManagementService_AddAzureDatabase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "services", "azure"}, ""))
-
- pattern_ManagementService_RemoveService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "services", "service_id"}, ""))
+ pattern_ManagementService_AddAzureDatabase_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "management", "services", "azure"}, ""))
+ pattern_ManagementService_RemoveService_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "management", "services", "service_id"}, ""))
)
var (
- forward_ManagementService_AddAnnotation_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_ListAgents_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_ListAgentVersions_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_RegisterNode_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_UnregisterNode_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_ListNodes_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_GetNode_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_AddService_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_ListServices_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_DiscoverRDS_0 = runtime.ForwardResponseMessage
-
+ forward_ManagementService_AddAnnotation_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_ListAgents_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_ListAgentVersions_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_RegisterNode_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_UnregisterNode_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_ListNodes_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_GetNode_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_AddService_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_ListServices_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_DiscoverRDS_0 = runtime.ForwardResponseMessage
forward_ManagementService_DiscoverAzureDatabase_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_AddAzureDatabase_0 = runtime.ForwardResponseMessage
-
- forward_ManagementService_RemoveService_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_AddAzureDatabase_0 = runtime.ForwardResponseMessage
+ forward_ManagementService_RemoveService_0 = runtime.ForwardResponseMessage
)
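
Aside, not part of the patch: the hunks above only change how the generated handlers are written (net/http method constants instead of string literals, a single `annotatedContext, err := ...` declaration, fewer blank lines); behaviour is unchanged. For orientation, a minimal usage sketch of the generated registration entry point, assuming the grpc-gateway v2 runtime and that the package is importable as github.com/percona/pmm/api/management/v1 — the import path, endpoint address and listen port are placeholders, not taken from this change:

package main

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    managementv1 "github.com/percona/pmm/api/management/v1"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // RegisterManagementServiceHandlerFromEndpoint dials the gRPC endpoint and
    // mounts the REST routes (GET /v1/management/nodes, POST /v1/management/services, ...)
    // on the ServeMux.
    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    if err := managementv1.RegisterManagementServiceHandlerFromEndpoint(ctx, mux, "127.0.0.1:7771", opts); err != nil {
        log.Fatal(err)
    }
    log.Fatal(http.ListenAndServe(":8081", mux))
}
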
diff --git a/api/management/v1/service.pb.validate.go b/api/management/v1/service.pb.validate.go
index 7f4218604e..a0ab91b6ea 100644
--- a/api/management/v1/service.pb.validate.go
+++ b/api/management/v1/service.pb.validate.go
@@ -367,7 +367,7 @@ type AddServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -761,7 +761,7 @@ type AddServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AddServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -867,7 +867,7 @@ type RemoveServiceRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveServiceRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -969,7 +969,7 @@ type RemoveServiceResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RemoveServiceResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1195,7 +1195,7 @@ type UniversalServiceMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UniversalServiceMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1301,7 +1301,7 @@ type ListServicesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListServicesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1437,7 +1437,7 @@ type ListServicesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListServicesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
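
Aside, not part of the patch: every regenerated MultiError.Error now sizes its message slice up front instead of starting from a nil slice. A minimal sketch of the same idiom, under the assumption that the generated code joins the collected messages with a separator (the multiError type and the "; " separator here are illustrative, not copied from the generated file):

package main

import (
    "errors"
    "fmt"
    "strings"
)

type multiError []error

func (m multiError) Error() string {
    msgs := make([]string, 0, len(m)) // capacity known up front: one entry per wrapped error
    for _, err := range m {
        msgs = append(msgs, err.Error())
    }
    return strings.Join(msgs, "; ")
}

func main() {
    m := multiError{errors.New("field A is required"), errors.New("field B is too long")}
    fmt.Println(m.Error()) // field A is required; field B is too long
}

The output is identical to the previous `var msgs []string` form; the capacity hint only saves the re-allocations that append would otherwise perform.
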
diff --git a/api/management/v1/severity.pb.go b/api/management/v1/severity.pb.go
index 5701f74476..1d56b323d1 100644
--- a/api/management/v1/severity.pb.go
+++ b/api/management/v1/severity.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: management/v1/severity.proto
@@ -9,6 +9,7 @@ package managementv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -91,7 +92,7 @@ func (Severity) EnumDescriptor() ([]byte, []int) {
var File_management_v1_severity_proto protoreflect.FileDescriptor
-var file_management_v1_severity_proto_rawDesc = []byte{
+var file_management_v1_severity_proto_rawDesc = string([]byte{
0x0a, 0x1c, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f,
0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2a, 0xcd, 0x01,
@@ -120,16 +121,16 @@ var file_management_v1_severity_proto_rawDesc = []byte{
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e,
0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_management_v1_severity_proto_rawDescOnce sync.Once
- file_management_v1_severity_proto_rawDescData = file_management_v1_severity_proto_rawDesc
+ file_management_v1_severity_proto_rawDescData []byte
)
func file_management_v1_severity_proto_rawDescGZIP() []byte {
file_management_v1_severity_proto_rawDescOnce.Do(func() {
- file_management_v1_severity_proto_rawDescData = protoimpl.X.CompressGZIP(file_management_v1_severity_proto_rawDescData)
+ file_management_v1_severity_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_management_v1_severity_proto_rawDesc), len(file_management_v1_severity_proto_rawDesc)))
})
return file_management_v1_severity_proto_rawDescData
}
@@ -158,7 +159,7 @@ func file_management_v1_severity_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_management_v1_severity_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_management_v1_severity_proto_rawDesc), len(file_management_v1_severity_proto_rawDesc)),
NumEnums: 1,
NumMessages: 0,
NumExtensions: 0,
@@ -169,7 +170,6 @@ func file_management_v1_severity_proto_init() {
EnumInfos: file_management_v1_severity_proto_enumTypes,
}.Build()
File_management_v1_severity_proto = out.File
- file_management_v1_severity_proto_rawDesc = nil
file_management_v1_severity_proto_goTypes = nil
file_management_v1_severity_proto_depIdxs = nil
}
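
Aside, not part of the patch: protoc-gen-go v1.36 emits the raw file descriptor as a string and converts it to bytes only when needed. A minimal sketch of the zero-copy conversion used above, assuming Go 1.20+ for unsafe.StringData (the two sample bytes are placeholders, not the real descriptor):

package main

import (
    "fmt"
    "unsafe"
)

// bytesView exposes the string's backing bytes as a []byte without copying.
// The result aliases immutable string data, so it must be treated as read-only.
func bytesView(s string) []byte {
    return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
    rawDesc := string([]byte{0x0a, 0x1c}) // stand-in for the generated descriptor bytes
    b := bytesView(rawDesc)
    fmt.Println(len(b), b[0]) // 2 10
}

Because the descriptor now lives in an immutable string, the init function above also stops nilling the variable out after the type builder has run.
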
diff --git a/api/platform/v1/platform.pb.go b/api/platform/v1/platform.pb.go
index c62f94c581..27cccee525 100644
--- a/api/platform/v1/platform.pb.go
+++ b/api/platform/v1/platform.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: platform/v1/platform.proto
@@ -9,6 +9,7 @@ package platformv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
@@ -26,14 +27,13 @@ const (
)
type ConnectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// User defined human readable PMM Server Name.
ServerName string `protobuf:"bytes,1,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"`
// Personal Access Token that the user obtains from Percona Portal.
PersonalAccessToken string `protobuf:"bytes,2,opt,name=personal_access_token,json=personalAccessToken,proto3" json:"personal_access_token,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ConnectRequest) Reset() {
@@ -81,9 +81,9 @@ func (x *ConnectRequest) GetPersonalAccessToken() string {
}
type ConnectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ConnectResponse) Reset() {
@@ -117,12 +117,11 @@ func (*ConnectResponse) Descriptor() ([]byte, []int) {
}
type DisconnectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Forces the cleanup process for connected PMM instances regardless of the Portal API response
- Force bool `protobuf:"varint,1,opt,name=force,proto3" json:"force,omitempty"`
+ Force bool `protobuf:"varint,1,opt,name=force,proto3" json:"force,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DisconnectRequest) Reset() {
@@ -163,9 +162,9 @@ func (x *DisconnectRequest) GetForce() bool {
}
type DisconnectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DisconnectResponse) Reset() {
@@ -199,9 +198,9 @@ func (*DisconnectResponse) Descriptor() ([]byte, []int) {
}
type SearchOrganizationTicketsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SearchOrganizationTicketsRequest) Reset() {
@@ -235,12 +234,11 @@ func (*SearchOrganizationTicketsRequest) Descriptor() ([]byte, []int) {
}
type SearchOrganizationTicketsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Support tickets belonging to the Percona Portal Organization.
- Tickets []*OrganizationTicket `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"`
+ Tickets []*OrganizationTicket `protobuf:"bytes,1,rep,name=tickets,proto3" json:"tickets,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SearchOrganizationTicketsResponse) Reset() {
@@ -282,10 +280,7 @@ func (x *SearchOrganizationTicketsResponse) GetTickets() []*OrganizationTicket {
// OrganizationTicket contains information about the support ticket.
type OrganizationTicket struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Ticket number.
Number string `protobuf:"bytes,1,opt,name=number,proto3" json:"number,omitempty"`
// Ticket short description.
@@ -303,7 +298,9 @@ type OrganizationTicket struct {
// Task type.
TaskType string `protobuf:"bytes,8,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"`
// Ticket url.
- Url string `protobuf:"bytes,9,opt,name=url,proto3" json:"url,omitempty"`
+ Url string `protobuf:"bytes,9,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *OrganizationTicket) Reset() {
@@ -400,9 +397,9 @@ func (x *OrganizationTicket) GetUrl() string {
}
type SearchOrganizationEntitlementsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SearchOrganizationEntitlementsRequest) Reset() {
@@ -436,11 +433,10 @@ func (*SearchOrganizationEntitlementsRequest) Descriptor() ([]byte, []int) {
}
type SearchOrganizationEntitlementsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Entitlements []*OrganizationEntitlement `protobuf:"bytes,1,rep,name=entitlements,proto3" json:"entitlements,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Entitlements []*OrganizationEntitlement `protobuf:"bytes,1,rep,name=entitlements,proto3" json:"entitlements,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *SearchOrganizationEntitlementsResponse) Reset() {
@@ -482,10 +478,7 @@ func (x *SearchOrganizationEntitlementsResponse) GetEntitlements() []*Organizati
// OrganizationEntitlement contains information about Organization entitlement.
type OrganizationEntitlement struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Entitlement number.
Number string `protobuf:"bytes,1,opt,name=number,proto3" json:"number,omitempty"`
// Entitlement name.
@@ -509,7 +502,9 @@ type OrganizationEntitlement struct {
// Note: only date is used here but not time.
EndDate *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=end_date,json=endDate,proto3" json:"end_date,omitempty"`
// Platform specific options covered by this entitlement.
- Platform *OrganizationEntitlement_Platform `protobuf:"bytes,11,opt,name=platform,proto3" json:"platform,omitempty"`
+ Platform *OrganizationEntitlement_Platform `protobuf:"bytes,11,opt,name=platform,proto3" json:"platform,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *OrganizationEntitlement) Reset() {
@@ -620,9 +615,9 @@ func (x *OrganizationEntitlement) GetPlatform() *OrganizationEntitlement_Platfor
}
type GetContactInformationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetContactInformationRequest) Reset() {
@@ -656,13 +651,12 @@ func (*GetContactInformationRequest) Descriptor() ([]byte, []int) {
}
type GetContactInformationResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
CustomerSuccess *GetContactInformationResponse_CustomerSuccess `protobuf:"bytes,1,opt,name=customer_success,json=customerSuccess,proto3" json:"customer_success,omitempty"`
// URL to open a new support ticket.
- NewTicketUrl string `protobuf:"bytes,2,opt,name=new_ticket_url,json=newTicketUrl,proto3" json:"new_ticket_url,omitempty"`
+ NewTicketUrl string `protobuf:"bytes,2,opt,name=new_ticket_url,json=newTicketUrl,proto3" json:"new_ticket_url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetContactInformationResponse) Reset() {
@@ -710,9 +704,9 @@ func (x *GetContactInformationResponse) GetNewTicketUrl() string {
}
type ServerInfoRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerInfoRequest) Reset() {
@@ -746,14 +740,13 @@ func (*ServerInfoRequest) Descriptor() ([]byte, []int) {
}
type ServerInfoResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- PmmServerName string `protobuf:"bytes,1,opt,name=pmm_server_name,json=pmmServerName,proto3" json:"pmm_server_name,omitempty"`
- PmmServerId string `protobuf:"bytes,2,opt,name=pmm_server_id,json=pmmServerId,proto3" json:"pmm_server_id,omitempty"`
- PmmServerTelemetryId string `protobuf:"bytes,3,opt,name=pmm_server_telemetry_id,json=pmmServerTelemetryId,proto3" json:"pmm_server_telemetry_id,omitempty"`
- ConnectedToPortal bool `protobuf:"varint,4,opt,name=connected_to_portal,json=connectedToPortal,proto3" json:"connected_to_portal,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ PmmServerName string `protobuf:"bytes,1,opt,name=pmm_server_name,json=pmmServerName,proto3" json:"pmm_server_name,omitempty"`
+ PmmServerId string `protobuf:"bytes,2,opt,name=pmm_server_id,json=pmmServerId,proto3" json:"pmm_server_id,omitempty"`
+ PmmServerTelemetryId string `protobuf:"bytes,3,opt,name=pmm_server_telemetry_id,json=pmmServerTelemetryId,proto3" json:"pmm_server_telemetry_id,omitempty"`
+ ConnectedToPortal bool `protobuf:"varint,4,opt,name=connected_to_portal,json=connectedToPortal,proto3" json:"connected_to_portal,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ServerInfoResponse) Reset() {
@@ -815,9 +808,9 @@ func (x *ServerInfoResponse) GetConnectedToPortal() bool {
}
type UserStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UserStatusRequest) Reset() {
@@ -851,11 +844,10 @@ func (*UserStatusRequest) Descriptor() ([]byte, []int) {
}
type UserStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- IsPlatformUser bool `protobuf:"varint,1,opt,name=is_platform_user,json=isPlatformUser,proto3" json:"is_platform_user,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ IsPlatformUser bool `protobuf:"varint,1,opt,name=is_platform_user,json=isPlatformUser,proto3" json:"is_platform_user,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UserStatusResponse) Reset() {
@@ -897,14 +889,13 @@ func (x *UserStatusResponse) GetIsPlatformUser() bool {
// Platform indicates platform specific entitlements.
type OrganizationEntitlement_Platform struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Flag indicates that security advisors are covered by this entitlement.
SecurityAdvisor *string `protobuf:"bytes,1,opt,name=security_advisor,json=securityAdvisor,proto3,oneof" json:"security_advisor,omitempty"`
// Flag indicates that config advisors are covered by this entitlement.
ConfigAdvisor *string `protobuf:"bytes,2,opt,name=config_advisor,json=configAdvisor,proto3,oneof" json:"config_advisor,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *OrganizationEntitlement_Platform) Reset() {
@@ -953,12 +944,11 @@ func (x *OrganizationEntitlement_Platform) GetConfigAdvisor() string {
// CustomerSuccess contains the contact details of the customer success employee assigned to a customer's account.
type GetContactInformationResponse_CustomerSuccess struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetContactInformationResponse_CustomerSuccess) Reset() {
@@ -1007,7 +997,7 @@ func (x *GetContactInformationResponse_CustomerSuccess) GetEmail() string {
var File_platform_v1_platform_proto protoreflect.FileDescriptor
-var file_platform_v1_platform_proto_rawDesc = []byte{
+var file_platform_v1_platform_proto_rawDesc = string([]byte{
0x0a, 0x1a, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6c,
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x70, 0x6c,
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
@@ -1271,16 +1261,16 @@ var file_platform_v1_platform_proto_rawDesc = []byte{
0x72, 0x6d, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0xea, 0x02, 0x0c, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x3a, 0x3a, 0x56, 0x31,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_platform_v1_platform_proto_rawDescOnce sync.Once
- file_platform_v1_platform_proto_rawDescData = file_platform_v1_platform_proto_rawDesc
+ file_platform_v1_platform_proto_rawDescData []byte
)
func file_platform_v1_platform_proto_rawDescGZIP() []byte {
file_platform_v1_platform_proto_rawDescOnce.Do(func() {
- file_platform_v1_platform_proto_rawDescData = protoimpl.X.CompressGZIP(file_platform_v1_platform_proto_rawDescData)
+ file_platform_v1_platform_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_platform_v1_platform_proto_rawDesc), len(file_platform_v1_platform_proto_rawDesc)))
})
return file_platform_v1_platform_proto_rawDescData
}
@@ -1350,7 +1340,7 @@ func file_platform_v1_platform_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_platform_v1_platform_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_platform_v1_platform_proto_rawDesc), len(file_platform_v1_platform_proto_rawDesc)),
NumEnums: 0,
NumMessages: 18,
NumExtensions: 0,
@@ -1361,7 +1351,6 @@ func file_platform_v1_platform_proto_init() {
MessageInfos: file_platform_v1_platform_proto_msgTypes,
}.Build()
File_platform_v1_platform_proto = out.File
- file_platform_v1_platform_proto_rawDesc = nil
file_platform_v1_platform_proto_goTypes = nil
file_platform_v1_platform_proto_depIdxs = nil
}
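
Aside, not part of the patch: the regenerated structs move the internal sizeCache and unknownFields fields after the exported ones and tag state with `protogen:"open.v1"`. The reorder is source-compatible because callers construct and read messages by field name, and wire-compatible because the protobuf field numbers in the struct tags, not the Go field order, drive marshalling. A small sketch with an illustrative struct (not the generated ConnectRequest):

package main

import "fmt"

// Same exported fields, different position of the internal bookkeeping field:
// keyed literals and selectors do not depend on field order.
type connectRequestOld struct {
    internal            int // stand-in for state/sizeCache/unknownFields
    ServerName          string
    PersonalAccessToken string
}

type connectRequestNew struct {
    ServerName          string
    PersonalAccessToken string
    internal            int
}

func main() {
    a := connectRequestOld{ServerName: "pmm", PersonalAccessToken: "token"}
    b := connectRequestNew{ServerName: "pmm", PersonalAccessToken: "token"}
    fmt.Println(a.ServerName == b.ServerName && a.PersonalAccessToken == b.PersonalAccessToken) // true
}
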
diff --git a/api/platform/v1/platform.pb.gw.go b/api/platform/v1/platform.pb.gw.go
index 06cfee4c9e..1aa01798e2 100644
--- a/api/platform/v1/platform.pb.gw.go
+++ b/api/platform/v1/platform.pb.gw.go
@@ -10,6 +10,7 @@ package platformv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,135 +29,146 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_PlatformService_Connect_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ConnectRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ConnectRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Connect(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_Connect_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ConnectRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ConnectRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Connect(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_Disconnect_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DisconnectRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DisconnectRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Disconnect(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_Disconnect_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq DisconnectRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq DisconnectRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Disconnect(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_SearchOrganizationTickets_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SearchOrganizationTicketsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq SearchOrganizationTicketsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.SearchOrganizationTickets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_SearchOrganizationTickets_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SearchOrganizationTicketsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq SearchOrganizationTicketsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.SearchOrganizationTickets(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_SearchOrganizationEntitlements_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SearchOrganizationEntitlementsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq SearchOrganizationEntitlementsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.SearchOrganizationEntitlements(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_SearchOrganizationEntitlements_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SearchOrganizationEntitlementsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq SearchOrganizationEntitlementsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.SearchOrganizationEntitlements(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_GetContactInformation_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetContactInformationRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetContactInformationRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.GetContactInformation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_GetContactInformation_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetContactInformationRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetContactInformationRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.GetContactInformation(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_ServerInfo_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ServerInfoRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ServerInfoRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ServerInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_ServerInfo_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ServerInfoRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ServerInfoRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ServerInfo(ctx, &protoReq)
return msg, metadata, err
}
func request_PlatformService_UserStatus_0(ctx context.Context, marshaler runtime.Marshaler, client PlatformServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UserStatusRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq UserStatusRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.UserStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_PlatformService_UserStatus_0(ctx context.Context, marshaler runtime.Marshaler, server PlatformServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UserStatusRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq UserStatusRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.UserStatus(ctx, &protoReq)
return msg, metadata, err
}
@@ -167,15 +179,13 @@ func local_request_PlatformService_UserStatus_0(ctx context.Context, marshaler r
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterPlatformServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server PlatformServiceServer) error {
- mux.Handle("POST", pattern_PlatformService_Connect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_PlatformService_Connect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/Connect", runtime.WithHTTPPathPattern("/v1/platform:connect"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/Connect", runtime.WithHTTPPathPattern("/v1/platform:connect"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -187,19 +197,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_Connect_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_PlatformService_Disconnect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_PlatformService_Disconnect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/Disconnect", runtime.WithHTTPPathPattern("/v1/platform:disconnect"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/Disconnect", runtime.WithHTTPPathPattern("/v1/platform:disconnect"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -211,19 +217,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_Disconnect_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_SearchOrganizationTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_SearchOrganizationTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationTickets", runtime.WithHTTPPathPattern("/v1/platform/organization/tickets"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationTickets", runtime.WithHTTPPathPattern("/v1/platform/organization/tickets"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -235,19 +237,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_SearchOrganizationTickets_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_SearchOrganizationEntitlements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_SearchOrganizationEntitlements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationEntitlements", runtime.WithHTTPPathPattern("/v1/platform/organization/entitlements"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationEntitlements", runtime.WithHTTPPathPattern("/v1/platform/organization/entitlements"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -259,19 +257,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_SearchOrganizationEntitlements_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_GetContactInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_GetContactInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/GetContactInformation", runtime.WithHTTPPathPattern("/v1/platform/contact"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/GetContactInformation", runtime.WithHTTPPathPattern("/v1/platform/contact"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -283,19 +277,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_GetContactInformation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_ServerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_ServerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/ServerInfo", runtime.WithHTTPPathPattern("/v1/platform/server"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/ServerInfo", runtime.WithHTTPPathPattern("/v1/platform/server"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -307,19 +297,15 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_ServerInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_UserStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_UserStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/UserStatus", runtime.WithHTTPPathPattern("/v1/platform/user"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/platform.v1.PlatformService/UserStatus", runtime.WithHTTPPathPattern("/v1/platform/user"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -331,7 +317,6 @@ func RegisterPlatformServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_UserStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -359,7 +344,6 @@ func RegisterPlatformServiceHandlerFromEndpoint(ctx context.Context, mux *runtim
}
}()
}()
-
return RegisterPlatformServiceHandler(ctx, mux, conn)
}
@@ -375,13 +359,11 @@ func RegisterPlatformServiceHandler(ctx context.Context, mux *runtime.ServeMux,
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "PlatformServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PlatformServiceClient) error {
- mux.Handle("POST", pattern_PlatformService_Connect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_PlatformService_Connect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/Connect", runtime.WithHTTPPathPattern("/v1/platform:connect"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/Connect", runtime.WithHTTPPathPattern("/v1/platform:connect"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -392,17 +374,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_Connect_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_PlatformService_Disconnect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_PlatformService_Disconnect_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/Disconnect", runtime.WithHTTPPathPattern("/v1/platform:disconnect"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/Disconnect", runtime.WithHTTPPathPattern("/v1/platform:disconnect"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -413,17 +391,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_Disconnect_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_SearchOrganizationTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_SearchOrganizationTickets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationTickets", runtime.WithHTTPPathPattern("/v1/platform/organization/tickets"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationTickets", runtime.WithHTTPPathPattern("/v1/platform/organization/tickets"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -434,17 +408,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_SearchOrganizationTickets_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_SearchOrganizationEntitlements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_SearchOrganizationEntitlements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationEntitlements", runtime.WithHTTPPathPattern("/v1/platform/organization/entitlements"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/SearchOrganizationEntitlements", runtime.WithHTTPPathPattern("/v1/platform/organization/entitlements"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -455,17 +425,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_SearchOrganizationEntitlements_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_GetContactInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_GetContactInformation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/GetContactInformation", runtime.WithHTTPPathPattern("/v1/platform/contact"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/GetContactInformation", runtime.WithHTTPPathPattern("/v1/platform/contact"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -476,17 +442,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_GetContactInformation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_ServerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_ServerInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/ServerInfo", runtime.WithHTTPPathPattern("/v1/platform/server"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/ServerInfo", runtime.WithHTTPPathPattern("/v1/platform/server"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -497,17 +459,13 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_ServerInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_PlatformService_UserStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_PlatformService_UserStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/UserStatus", runtime.WithHTTPPathPattern("/v1/platform/user"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/platform.v1.PlatformService/UserStatus", runtime.WithHTTPPathPattern("/v1/platform/user"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -518,41 +476,27 @@ func RegisterPlatformServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_PlatformService_UserStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_PlatformService_Connect_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "platform"}, "connect"))
-
- pattern_PlatformService_Disconnect_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "platform"}, "disconnect"))
-
- pattern_PlatformService_SearchOrganizationTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "platform", "organization", "tickets"}, ""))
-
+ pattern_PlatformService_Connect_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "platform"}, "connect"))
+ pattern_PlatformService_Disconnect_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "platform"}, "disconnect"))
+ pattern_PlatformService_SearchOrganizationTickets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "platform", "organization", "tickets"}, ""))
pattern_PlatformService_SearchOrganizationEntitlements_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "platform", "organization", "entitlements"}, ""))
-
- pattern_PlatformService_GetContactInformation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "contact"}, ""))
-
- pattern_PlatformService_ServerInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "server"}, ""))
-
- pattern_PlatformService_UserStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "user"}, ""))
+ pattern_PlatformService_GetContactInformation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "contact"}, ""))
+ pattern_PlatformService_ServerInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "server"}, ""))
+ pattern_PlatformService_UserStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "platform", "user"}, ""))
)
var (
- forward_PlatformService_Connect_0 = runtime.ForwardResponseMessage
-
- forward_PlatformService_Disconnect_0 = runtime.ForwardResponseMessage
-
- forward_PlatformService_SearchOrganizationTickets_0 = runtime.ForwardResponseMessage
-
+ forward_PlatformService_Connect_0 = runtime.ForwardResponseMessage
+ forward_PlatformService_Disconnect_0 = runtime.ForwardResponseMessage
+ forward_PlatformService_SearchOrganizationTickets_0 = runtime.ForwardResponseMessage
forward_PlatformService_SearchOrganizationEntitlements_0 = runtime.ForwardResponseMessage
-
- forward_PlatformService_GetContactInformation_0 = runtime.ForwardResponseMessage
-
- forward_PlatformService_ServerInfo_0 = runtime.ForwardResponseMessage
-
- forward_PlatformService_UserStatus_0 = runtime.ForwardResponseMessage
+ forward_PlatformService_GetContactInformation_0 = runtime.ForwardResponseMessage
+ forward_PlatformService_ServerInfo_0 = runtime.ForwardResponseMessage
+ forward_PlatformService_UserStatus_0 = runtime.ForwardResponseMessage
)
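
The regenerated gateway code above keeps the note that RegisterPlatformServiceHandlerServer bypasses gRPC interceptors and that RegisterPlatformServiceHandlerFromEndpoint is the preferred entry point. A minimal sketch of wiring the gateway through that endpoint-based registration is below; the import path, listen addresses, and dial options are assumptions for illustration, not part of this patch.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated package in this repo.
	platformv1 "github.com/percona/pmm/api/platform/v1"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	// Hypothetical gRPC endpoint; the real address depends on the deployment.
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if err := platformv1.RegisterPlatformServiceHandlerFromEndpoint(ctx, mux, "127.0.0.1:7771", opts); err != nil {
		log.Fatal(err)
	}

	// Serve the REST/JSON gateway; requests are proxied to the gRPC endpoint above.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
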
diff --git a/api/platform/v1/platform.pb.validate.go b/api/platform/v1/platform.pb.validate.go
index cd4bec7526..e738d7f3ab 100644
--- a/api/platform/v1/platform.pb.validate.go
+++ b/api/platform/v1/platform.pb.validate.go
@@ -84,7 +84,7 @@ type ConnectRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ConnectRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -184,7 +184,7 @@ type ConnectResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ConnectResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -286,7 +286,7 @@ type DisconnectRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DisconnectRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -388,7 +388,7 @@ type DisconnectResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DisconnectResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -492,7 +492,7 @@ type SearchOrganizationTicketsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SearchOrganizationTicketsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -631,7 +631,7 @@ type SearchOrganizationTicketsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SearchOrganizationTicketsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -779,7 +779,7 @@ type OrganizationTicketMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m OrganizationTicketMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -883,7 +883,7 @@ type SearchOrganizationEntitlementsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SearchOrganizationEntitlementsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1023,7 +1023,7 @@ type SearchOrganizationEntitlementsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SearchOrganizationEntitlementsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1235,7 +1235,7 @@ type OrganizationEntitlementMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m OrganizationEntitlementMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1337,7 +1337,7 @@ type GetContactInformationRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetContactInformationRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1471,7 +1471,7 @@ type GetContactInformationResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetContactInformationResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1574,7 +1574,7 @@ type ServerInfoRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServerInfoRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1684,7 +1684,7 @@ type ServerInfoResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ServerInfoResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1786,7 +1786,7 @@ type UserStatusRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UserStatusRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1890,7 +1890,7 @@ type UserStatusResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UserStatusResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2002,7 +2002,7 @@ type OrganizationEntitlement_PlatformMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m OrganizationEntitlement_PlatformMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2112,7 +2112,7 @@ type GetContactInformationResponse_CustomerSuccessMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetContactInformationResponse_CustomerSuccessMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
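
The change repeated throughout platform.pb.validate.go replaces `var msgs []string` with `msgs := make([]string, 0, len(m))`, preallocating the slice to the number of wrapped errors so the append loop never reallocates. A standalone sketch of the same pattern follows; the multiError type here is a stand-in for the generated *MultiError types, not the generated code itself.

package main

import (
	"fmt"
	"strings"
)

// multiError mirrors the generated pattern: a slice of errors whose
// Error() concatenates the individual messages.
type multiError []error

func (m multiError) Error() string {
	// Preallocate with capacity len(m) so the appends below never grow the slice.
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	m := multiError{fmt.Errorf("first failure"), fmt.Errorf("second failure")}
	fmt.Println(m.Error()) // first failure; second failure
}
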
diff --git a/api/qan/v1/collector.pb.go b/api/qan/v1/collector.pb.go
index 2a2a08ad52..eeb15a34d3 100644
--- a/api/qan/v1/collector.pb.go
+++ b/api/qan/v1/collector.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/collector.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "google.golang.org/genproto/googleapis/api/visibility"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -25,11 +26,10 @@ const (
)
type CollectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MetricsBucket []*MetricsBucket `protobuf:"bytes,1,rep,name=metrics_bucket,json=metricsBucket,proto3" json:"metrics_bucket,omitempty"`
unknownFields protoimpl.UnknownFields
-
- MetricsBucket []*MetricsBucket `protobuf:"bytes,1,rep,name=metrics_bucket,json=metricsBucket,proto3" json:"metrics_bucket,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *CollectRequest) Reset() {
@@ -72,10 +72,7 @@ func (x *CollectRequest) GetMetricsBucket() []*MetricsBucket {
// MetricsBucket is aggregated message created by pmm-agent.
// Contains information about one query selected in defined way from query class in specific period of time.
type MetricsBucket struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// md5 of digest_text/fingerprint.
Queryid string `protobuf:"bytes,1,opt,name=queryid,proto3" json:"queryid,omitempty"`
// contains fingerprint prepared by sql parser, which can be different than fingerprint.
@@ -131,7 +128,7 @@ type MetricsBucket struct {
// Metrics source.
AgentType v1.AgentType `protobuf:"varint,35,opt,name=agent_type,json=agentType,proto3,enum=inventory.v1.AgentType" json:"agent_type,omitempty"`
// Custom labels names:values.
- Labels map[string]string `protobuf:"bytes,36,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Labels map[string]string `protobuf:"bytes,36,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// Time when collection of bucket started.
PeriodStartUnixSecs uint32 `protobuf:"varint,40,opt,name=period_start_unix_secs,json=periodStartUnixSecs,proto3" json:"period_start_unix_secs,omitempty"`
// Duration of bucket.
@@ -146,11 +143,11 @@ type MetricsBucket struct {
// How many queries was with warnings in bucket.
NumQueriesWithWarnings float32 `protobuf:"fixed32,50,opt,name=num_queries_with_warnings,json=numQueriesWithWarnings,proto3" json:"num_queries_with_warnings,omitempty"`
// List of warnings: {code: count}.
- Warnings map[uint64]uint64 `protobuf:"bytes,51,rep,name=warnings,proto3" json:"warnings,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ Warnings map[uint64]uint64 `protobuf:"bytes,51,rep,name=warnings,proto3" json:"warnings,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
// How many queries was with error in bucket.
NumQueriesWithErrors float32 `protobuf:"fixed32,52,opt,name=num_queries_with_errors,json=numQueriesWithErrors,proto3" json:"num_queries_with_errors,omitempty"`
// List of errors: {code: count}.
- Errors map[uint64]uint64 `protobuf:"bytes,53,rep,name=errors,proto3" json:"errors,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ Errors map[uint64]uint64 `protobuf:"bytes,53,rep,name=errors,proto3" json:"errors,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
// Amount queries in this bucket.
NumQueries float32 `protobuf:"fixed32,54,opt,name=num_queries,json=numQueries,proto3" json:"num_queries,omitempty"`
// How many times query_time was found.
@@ -324,10 +321,12 @@ type MetricsBucket struct {
MResponseLengthP99 float32 `protobuf:"fixed32,199,opt,name=m_response_length_p99,json=mResponseLengthP99,proto3" json:"m_response_length_p99,omitempty"`
MDocsScannedCnt float32 `protobuf:"fixed32,200,opt,name=m_docs_scanned_cnt,json=mDocsScannedCnt,proto3" json:"m_docs_scanned_cnt,omitempty"`
// The number of scanned documents.
- MDocsScannedSum float32 `protobuf:"fixed32,201,opt,name=m_docs_scanned_sum,json=mDocsScannedSum,proto3" json:"m_docs_scanned_sum,omitempty"`
- MDocsScannedMin float32 `protobuf:"fixed32,202,opt,name=m_docs_scanned_min,json=mDocsScannedMin,proto3" json:"m_docs_scanned_min,omitempty"`
- MDocsScannedMax float32 `protobuf:"fixed32,203,opt,name=m_docs_scanned_max,json=mDocsScannedMax,proto3" json:"m_docs_scanned_max,omitempty"`
- MDocsScannedP99 float32 `protobuf:"fixed32,204,opt,name=m_docs_scanned_p99,json=mDocsScannedP99,proto3" json:"m_docs_scanned_p99,omitempty"`
+ MDocsScannedSum float32 `protobuf:"fixed32,201,opt,name=m_docs_scanned_sum,json=mDocsScannedSum,proto3" json:"m_docs_scanned_sum,omitempty"`
+ MDocsScannedMin float32 `protobuf:"fixed32,202,opt,name=m_docs_scanned_min,json=mDocsScannedMin,proto3" json:"m_docs_scanned_min,omitempty"`
+ MDocsScannedMax float32 `protobuf:"fixed32,203,opt,name=m_docs_scanned_max,json=mDocsScannedMax,proto3" json:"m_docs_scanned_max,omitempty"`
+ MDocsScannedP99 float32 `protobuf:"fixed32,204,opt,name=m_docs_scanned_p99,json=mDocsScannedP99,proto3" json:"m_docs_scanned_p99,omitempty"`
+ // Plan summary type (COLLSCAN, IXSCAN, etc).
+ PlanSummary string `protobuf:"bytes,205,opt,name=plan_summary,json=planSummary,proto3" json:"plan_summary,omitempty"`
MSharedBlksHitCnt float32 `protobuf:"fixed32,210,opt,name=m_shared_blks_hit_cnt,json=mSharedBlksHitCnt,proto3" json:"m_shared_blks_hit_cnt,omitempty"`
// Total number of shared block cache hits by the statement.
MSharedBlksHitSum float32 `protobuf:"fixed32,211,opt,name=m_shared_blks_hit_sum,json=mSharedBlksHitSum,proto3" json:"m_shared_blks_hit_sum,omitempty"`
@@ -413,6 +412,8 @@ type MetricsBucket struct {
Planid string `protobuf:"bytes,266,opt,name=planid,proto3" json:"planid,omitempty"`
QueryPlan string `protobuf:"bytes,267,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
HistogramItems []string `protobuf:"bytes,268,rep,name=histogram_items,json=histogramItems,proto3" json:"histogram_items,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsBucket) Reset() {
@@ -1656,6 +1657,13 @@ func (x *MetricsBucket) GetMDocsScannedP99() float32 {
return 0
}
+func (x *MetricsBucket) GetPlanSummary() string {
+ if x != nil {
+ return x.PlanSummary
+ }
+ return ""
+}
+
func (x *MetricsBucket) GetMSharedBlksHitCnt() float32 {
if x != nil {
return x.MSharedBlksHitCnt
@@ -2042,9 +2050,9 @@ func (x *MetricsBucket) GetHistogramItems() []string {
}
type CollectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CollectResponse) Reset() {
@@ -2079,7 +2087,7 @@ func (*CollectResponse) Descriptor() ([]byte, []int) {
var File_qan_v1_collector_proto protoreflect.FileDescriptor
-var file_qan_v1_collector_proto_rawDesc = []byte{
+var file_qan_v1_collector_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31,
0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x69, 0x73,
@@ -2091,7 +2099,7 @@ var file_qan_v1_collector_proto_rawDesc = []byte{
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65,
0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0d, 0x6d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0xac, 0x53, 0x0a, 0x0d, 0x4d,
+ 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0xd0, 0x53, 0x0a, 0x0d, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07,
0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x71,
0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x69,
@@ -2581,210 +2589,212 @@ var file_qan_v1_collector_proto_rawDesc = []byte{
0x65, 0x64, 0x4d, 0x61, 0x78, 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f,
0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x39, 0x39, 0x18, 0xcc, 0x01, 0x20, 0x01,
0x28, 0x02, 0x52, 0x0f, 0x6d, 0x44, 0x6f, 0x63, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64,
- 0x50, 0x39, 0x39, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
- 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xd2, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73,
- 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72,
- 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18,
- 0xd3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42,
- 0x6c, 0x6b, 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x5f, 0x73,
- 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
- 0x63, 0x6e, 0x74, 0x18, 0xd4, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53, 0x68, 0x61,
- 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x33,
- 0x0a, 0x16, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f,
- 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xd5, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x12, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64,
- 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
- 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74,
- 0x18, 0xd6, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
- 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x39,
- 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f,
- 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xd7, 0x01, 0x20, 0x01,
- 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x44,
- 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73,
- 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74,
- 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xd8, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d,
- 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65,
- 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64,
- 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75,
- 0x6d, 0x18, 0xd9, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65,
- 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12,
- 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f,
- 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xda, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10,
- 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74,
- 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73,
- 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xdb, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x48, 0x69, 0x74, 0x53, 0x75,
- 0x6d, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b,
- 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xdc, 0x01, 0x20, 0x01, 0x28,
- 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61,
- 0x64, 0x43, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
- 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xdd, 0x01,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73,
- 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f,
- 0x63, 0x6e, 0x74, 0x18, 0xde, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63,
- 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x43, 0x6e, 0x74,
- 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73,
- 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xdf, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x44,
- 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65,
- 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe0, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c,
- 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43,
- 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c,
- 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xe1,
- 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
- 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2f, 0x0a, 0x14, 0x6d,
- 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
- 0x63, 0x6e, 0x74, 0x18, 0xe2, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65, 0x6d,
- 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x14,
- 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xe3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x54, 0x65,
- 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x35, 0x0a,
- 0x17, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe4, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x13, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65,
- 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62,
- 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18,
- 0xe5, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b,
- 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12, 0x2d, 0x0a, 0x13, 0x6d,
- 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
- 0x6e, 0x74, 0x18, 0xe6, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x42, 0x6c, 0x6b, 0x52,
- 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x5f,
- 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75,
- 0x6d, 0x18, 0xe7, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x42, 0x6c, 0x6b, 0x52, 0x65,
- 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x62,
- 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e,
- 0x74, 0x18, 0xe8, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x42, 0x6c, 0x6b, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f,
- 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73,
- 0x75, 0x6d, 0x18, 0xe9, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x42, 0x6c, 0x6b, 0x57,
- 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x1a, 0x6d,
+ 0x50, 0x39, 0x39, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0xcd, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e,
+ 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74,
+ 0x18, 0xd2, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x42, 0x6c, 0x6b, 0x73, 0x48, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f,
+ 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0xd3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x53, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x48, 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x33, 0x0a,
+ 0x16, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xd4, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12,
+ 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43,
+ 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62,
+ 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xd5, 0x01, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x12, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73,
+ 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64,
+ 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xd6, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68,
+ 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x43,
+ 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62,
+ 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18,
+ 0xd7, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42,
+ 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x39, 0x0a,
+ 0x19, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77,
+ 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xd8, 0x01, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x5f, 0x73, 0x68,
+ 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65,
+ 0x6e, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xd9, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e,
+ 0x53, 0x75, 0x6d, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
+ 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xda, 0x01, 0x20, 0x01,
+ 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x48, 0x69,
+ 0x74, 0x43, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
+ 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x68, 0x69, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xdb, 0x01, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x48,
+ 0x69, 0x74, 0x53, 0x75, 0x6d, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xdc,
+ 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b,
+ 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x75,
+ 0x6d, 0x18, 0xdd, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a, 0x18, 0x6d,
+ 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74,
+ 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xde, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14,
+ 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65,
+ 0x64, 0x43, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
+ 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0xdf, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42,
+ 0x6c, 0x6b, 0x73, 0x44, 0x69, 0x72, 0x74, 0x69, 0x65, 0x64, 0x53, 0x75, 0x6d, 0x12, 0x37, 0x0a,
+ 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe0, 0x01, 0x20, 0x01, 0x28, 0x02,
+ 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74,
+ 0x74, 0x65, 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73,
+ 0x75, 0x6d, 0x18, 0xe1, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12,
+ 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe2, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10,
+ 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6e, 0x74,
+ 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xe3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x10, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x52, 0x65, 0x61, 0x64, 0x53, 0x75,
+ 0x6d, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73,
+ 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe4, 0x01, 0x20,
+ 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54, 0x65, 0x6d, 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x43, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x5f, 0x74, 0x65,
+ 0x6d, 0x70, 0x5f, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0xe5, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x54, 0x65, 0x6d,
+ 0x70, 0x42, 0x6c, 0x6b, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x75, 0x6d, 0x12,
+ 0x2d, 0x0a, 0x13, 0x6d, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe6, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d,
+ 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2d,
+ 0x0a, 0x13, 0x6d, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xe7, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x42,
+ 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2f, 0x0a,
+ 0x14, 0x6d, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xe8, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d, 0x42,
+ 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2f,
+ 0x0a, 0x14, 0x6d, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xe9, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x6d,
+ 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12,
+ 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xea, 0x01,
+ 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b,
+ 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x6d,
0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xea, 0x01, 0x20, 0x01, 0x28, 0x02,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xeb, 0x01, 0x20, 0x01, 0x28, 0x02,
0x52, 0x15, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64,
- 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x73, 0x68, 0x61,
- 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xeb, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x53,
- 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65,
- 0x53, 0x75, 0x6d, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f,
- 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
- 0x6e, 0x74, 0x18, 0xec, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72,
- 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e,
- 0x74, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c,
- 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d,
- 0x18, 0xed, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x16, 0x6d, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xec, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x16, 0x6d,
+ 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65,
+ 0x64, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xed, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x16, 0x6d, 0x53, 0x68,
+ 0x61, 0x72, 0x65, 0x64, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x75, 0x6d, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
+ 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74,
+ 0x18, 0xee, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42,
+ 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x38, 0x0a,
+ 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61,
+ 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xef, 0x01, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xf0, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x43, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
+ 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75,
+ 0x6d, 0x18, 0xf1, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12,
- 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72,
- 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xee, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65,
- 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xef, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d,
- 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65,
- 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62,
- 0x6c, 0x6b, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e,
- 0x74, 0x18, 0xf0, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c,
- 0x42, 0x6c, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12,
- 0x3a, 0x0a, 0x1a, 0x6d, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6b, 0x5f, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xf1, 0x01,
- 0x20, 0x01, 0x28, 0x02, 0x52, 0x15, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x6c, 0x6b, 0x57,
- 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2d, 0x0a, 0x13, 0x6d,
- 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
- 0x6e, 0x74, 0x18, 0xf2, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43, 0x70, 0x75, 0x55,
- 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x5f,
- 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75,
- 0x6d, 0x18, 0xf3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43, 0x70, 0x75, 0x55, 0x73,
- 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x12, 0x6d, 0x5f, 0x63,
- 0x70, 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18,
- 0xf4, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73, 0x54,
- 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f,
- 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xf5, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73, 0x54, 0x69, 0x6d, 0x65,
- 0x53, 0x75, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6d, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0xf6, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x2d, 0x0a, 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xf2, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d,
+ 0x43, 0x70, 0x75, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2d,
+ 0x0a, 0x13, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xf3, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x6d, 0x43,
+ 0x70, 0x75, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x2b, 0x0a,
+ 0x12, 0x6d, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x63, 0x6e, 0x74, 0x18, 0xf4, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75,
+ 0x53, 0x79, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x6d, 0x5f,
+ 0x63, 0x70, 0x75, 0x5f, 0x73, 0x79, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0xf5, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x43, 0x70, 0x75, 0x53, 0x79, 0x73,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6d, 0x64, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0xf6, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xfa, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x0e, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x73, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x53, 0x75, 0x6d, 0x12,
0x2a, 0x0a, 0x11, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73,
- 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xfa, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c,
- 0x61, 0x6e, 0x73, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x11, 0x6d,
- 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6e, 0x74,
- 0x18, 0xfb, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x73, 0x43,
- 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c,
- 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xfc, 0x01, 0x20,
+ 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xfb, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x50, 0x6c,
+ 0x61, 0x6e, 0x73, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6d,
+ 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x73, 0x75, 0x6d,
+ 0x18, 0xfc, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x57, 0x61, 0x6c, 0x52, 0x65, 0x63,
+ 0x6f, 0x72, 0x64, 0x73, 0x53, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c,
+ 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xfd, 0x01, 0x20,
0x01, 0x28, 0x02, 0x52, 0x0e, 0x6d, 0x57, 0x61, 0x6c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73,
- 0x53, 0x75, 0x6d, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xfd, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
- 0x0e, 0x6d, 0x57, 0x61, 0x6c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6e, 0x74, 0x12,
- 0x22, 0x0a, 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x73, 0x75, 0x6d,
- 0x18, 0xfe, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69,
- 0x53, 0x75, 0x6d, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69,
- 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xff, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61,
- 0x6c, 0x46, 0x70, 0x69, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x80, 0x02, 0x20, 0x01, 0x28,
- 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x12,
- 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63,
- 0x6e, 0x74, 0x18, 0x81, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42,
- 0x79, 0x74, 0x65, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61,
- 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x82, 0x02, 0x20, 0x01, 0x28,
- 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x75, 0x6d, 0x12,
- 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
- 0x6e, 0x74, 0x18, 0x83, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e,
- 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61,
- 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x84, 0x02, 0x20, 0x01, 0x28,
- 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x69, 0x6e, 0x12,
- 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d,
- 0x61, 0x78, 0x18, 0x85, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e,
- 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x61, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x5f, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x87, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74,
- 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x6f, 0x70,
- 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x88, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
- 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x89, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e,
- 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x18, 0x8a, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x8b, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x28, 0x0a, 0x0f,
- 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18,
- 0x8c, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
- 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39,
- 0x0a, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x11, 0x0a, 0x0f, 0x43, 0x6f, 0x6c,
- 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x60, 0x0a, 0x10,
- 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x12, 0x3a, 0x0a, 0x07, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x16, 0x2e, 0x71, 0x61,
- 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6c,
- 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x10, 0xfa, 0xd2,
- 0xe4, 0x93, 0x02, 0x0a, 0x12, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x42, 0x7e,
- 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f,
- 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27,
- 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x6f,
- 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x71, 0x61, 0x6e, 0x2f, 0x76,
- 0x31, 0x3b, 0x71, 0x61, 0x6e, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x51, 0x58, 0x58, 0xaa, 0x02, 0x06,
- 0x51, 0x61, 0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x06, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0xe2,
- 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+ 0x43, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x66, 0x70, 0x69,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0xfe, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x6d, 0x57, 0x61,
+ 0x6c, 0x46, 0x70, 0x69, 0x53, 0x75, 0x6d, 0x12, 0x22, 0x0a, 0x0d, 0x6d, 0x5f, 0x77, 0x61, 0x6c,
+ 0x5f, 0x66, 0x70, 0x69, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0xff, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52,
+ 0x0a, 0x6d, 0x57, 0x61, 0x6c, 0x46, 0x70, 0x69, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d,
+ 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x80,
+ 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x57, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73,
+ 0x53, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x77, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x81, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d,
+ 0x57, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d,
+ 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x82,
+ 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x83, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d,
+ 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x43, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d,
+ 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x84,
+ 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65,
+ 0x4d, 0x69, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x85, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d,
+ 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x4d, 0x61, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x74,
+ 0x6f, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x18, 0x87, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x69, 0x64, 0x12, 0x1c, 0x0a,
+ 0x09, 0x74, 0x6f, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x88, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x61,
+ 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x89, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69,
+ 0x64, 0x18, 0x8a, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x61, 0x6e, 0x69, 0x64,
+ 0x12, 0x1e, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x8b,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e,
+ 0x12, 0x28, 0x0a, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x69, 0x74,
+ 0x65, 0x6d, 0x73, 0x18, 0x8c, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x11, 0x0a,
+ 0x0f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x32, 0x60, 0x0a, 0x10, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x12,
+ 0x16, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31,
+ 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x1a, 0x10, 0xfa, 0xd2, 0xe4, 0x93, 0x02, 0x0a, 0x12, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e,
+ 0x41, 0x4c, 0x42, 0x7e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31,
+ 0x42, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70,
+ 0x65, 0x72, 0x63, 0x6f, 0x6e, 0x61, 0x2f, 0x70, 0x6d, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x71,
+ 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x71, 0x61, 0x6e, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x51, 0x58,
+ 0x58, 0xaa, 0x02, 0x06, 0x51, 0x61, 0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x06, 0x51, 0x61, 0x6e,
+ 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+})
var (
file_qan_v1_collector_proto_rawDescOnce sync.Once
- file_qan_v1_collector_proto_rawDescData = file_qan_v1_collector_proto_rawDesc
+ file_qan_v1_collector_proto_rawDescData []byte
)
func file_qan_v1_collector_proto_rawDescGZIP() []byte {
file_qan_v1_collector_proto_rawDescOnce.Do(func() {
- file_qan_v1_collector_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_collector_proto_rawDescData)
+ file_qan_v1_collector_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_collector_proto_rawDesc), len(file_qan_v1_collector_proto_rawDesc)))
})
return file_qan_v1_collector_proto_rawDescData
}
@@ -2829,7 +2839,7 @@ func file_qan_v1_collector_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_collector_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_collector_proto_rawDesc), len(file_qan_v1_collector_proto_rawDesc)),
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
@@ -2840,7 +2850,6 @@ func file_qan_v1_collector_proto_init() {
MessageInfos: file_qan_v1_collector_proto_msgTypes,
}.Build()
File_qan_v1_collector_proto = out.File
- file_qan_v1_collector_proto_rawDesc = nil
file_qan_v1_collector_proto_goTypes = nil
file_qan_v1_collector_proto_depIdxs = nil
}
diff --git a/api/qan/v1/collector.pb.validate.go b/api/qan/v1/collector.pb.validate.go
index b7ad2a95b3..af5993304f 100644
--- a/api/qan/v1/collector.pb.validate.go
+++ b/api/qan/v1/collector.pb.validate.go
@@ -109,7 +109,7 @@ type CollectRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CollectRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -539,6 +539,8 @@ func (m *MetricsBucket) validate(all bool) error {
// no validation rules for MDocsScannedP99
+ // no validation rules for PlanSummary
+
// no validation rules for MSharedBlksHitCnt
// no validation rules for MSharedBlksHitSum
@@ -661,7 +663,7 @@ type MetricsBucketMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsBucketMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -761,7 +763,7 @@ type CollectResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CollectResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/qan/v1/collector.proto b/api/qan/v1/collector.proto
index 2a1f61f5af..5e44414e0f 100644
--- a/api/qan/v1/collector.proto
+++ b/api/qan/v1/collector.proto
@@ -304,6 +304,9 @@ message MetricsBucket {
float m_docs_scanned_min = 202;
float m_docs_scanned_max = 203;
float m_docs_scanned_p99 = 204;
+ // Plan summary type (COLLSCAN, IXSCAN, etc.).
+ string plan_summary = 205;
+
//
// PostgreSQL metrics.
//
diff --git a/api/qan/v1/filters.pb.go b/api/qan/v1/filters.pb.go
index 8449e2d557..1a295b4616 100644
--- a/api/qan/v1/filters.pb.go
+++ b/api/qan/v1/filters.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/filters.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,14 +25,13 @@ const (
// GetFilteredMetricsNamesRequest contains period for which we need filters.
type GetFilteredMetricsNamesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
MainMetricName string `protobuf:"bytes,3,opt,name=main_metric_name,json=mainMetricName,proto3" json:"main_metric_name,omitempty"`
Labels []*MapFieldEntry `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetFilteredMetricsNamesRequest) Reset() {
@@ -95,11 +95,10 @@ func (x *GetFilteredMetricsNamesRequest) GetLabels() []*MapFieldEntry {
// GetFilteredMetricsNamesResponse is map of labels for given period by key.
// Key is label's name and value is label's value and how many times it occurs.
type GetFilteredMetricsNamesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Labels map[string]*ListLabels `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- Labels map[string]*ListLabels `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetFilteredMetricsNamesResponse) Reset() {
@@ -141,11 +140,10 @@ func (x *GetFilteredMetricsNamesResponse) GetLabels() map[string]*ListLabels {
// ListLabels is list of label's values: duplicates are impossible.
type ListLabels struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name []*Values `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name []*Values `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListLabels) Reset() {
@@ -187,13 +185,12 @@ func (x *ListLabels) GetName() []*Values {
// Values is label values and main metric percent and per second.
type Values struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- MainMetricPercent float32 `protobuf:"fixed32,2,opt,name=main_metric_percent,json=mainMetricPercent,proto3" json:"main_metric_percent,omitempty"`
- MainMetricPerSec float32 `protobuf:"fixed32,3,opt,name=main_metric_per_sec,json=mainMetricPerSec,proto3" json:"main_metric_per_sec,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ MainMetricPercent float32 `protobuf:"fixed32,2,opt,name=main_metric_percent,json=mainMetricPercent,proto3" json:"main_metric_percent,omitempty"`
+ MainMetricPerSec float32 `protobuf:"fixed32,3,opt,name=main_metric_per_sec,json=mainMetricPerSec,proto3" json:"main_metric_per_sec,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Values) Reset() {
@@ -249,7 +246,7 @@ func (x *Values) GetMainMetricPerSec() float32 {
var File_qan_v1_filters_proto protoreflect.FileDescriptor
-var file_qan_v1_filters_proto_rawDesc = []byte{
+var file_qan_v1_filters_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
@@ -303,16 +300,16 @@ var file_qan_v1_filters_proto_rawDesc = []byte{
0xca, 0x02, 0x06, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c,
0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_qan_v1_filters_proto_rawDescOnce sync.Once
- file_qan_v1_filters_proto_rawDescData = file_qan_v1_filters_proto_rawDesc
+ file_qan_v1_filters_proto_rawDescData []byte
)
func file_qan_v1_filters_proto_rawDescGZIP() []byte {
file_qan_v1_filters_proto_rawDescOnce.Do(func() {
- file_qan_v1_filters_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_filters_proto_rawDescData)
+ file_qan_v1_filters_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_filters_proto_rawDesc), len(file_qan_v1_filters_proto_rawDesc)))
})
return file_qan_v1_filters_proto_rawDescData
}
@@ -354,7 +351,7 @@ func file_qan_v1_filters_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_filters_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_filters_proto_rawDesc), len(file_qan_v1_filters_proto_rawDesc)),
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
@@ -365,7 +362,6 @@ func file_qan_v1_filters_proto_init() {
MessageInfos: file_qan_v1_filters_proto_msgTypes,
}.Build()
File_qan_v1_filters_proto = out.File
- file_qan_v1_filters_proto_rawDesc = nil
file_qan_v1_filters_proto_goTypes = nil
file_qan_v1_filters_proto_depIdxs = nil
}
diff --git a/api/qan/v1/filters.pb.validate.go b/api/qan/v1/filters.pb.validate.go
index 2507a79aee..bdd9c5d42d 100644
--- a/api/qan/v1/filters.pb.validate.go
+++ b/api/qan/v1/filters.pb.validate.go
@@ -165,7 +165,7 @@ type GetFilteredMetricsNamesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetFilteredMetricsNamesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -314,7 +314,7 @@ type GetFilteredMetricsNamesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetFilteredMetricsNamesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -450,7 +450,7 @@ type ListLabelsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListLabelsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -554,7 +554,7 @@ type ValuesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ValuesMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/qan/v1/object_details.pb.go b/api/qan/v1/object_details.pb.go
index af39a8e43a..fbc0e96324 100644
--- a/api/qan/v1/object_details.pb.go
+++ b/api/qan/v1/object_details.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/object_details.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,10 +25,7 @@ const (
// GetMetricsRequest defines filtering of metrics for specific value of dimension (ex.: host=hostname1 or queryid=1D410B4BE5060972).
type GetMetricsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
// dimension value: ex: queryid - 1D410B4BE5060972.
@@ -37,7 +35,9 @@ type GetMetricsRequest struct {
Labels []*MapFieldEntry `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"`
IncludeOnlyFields []string `protobuf:"bytes,6,rep,name=include_only_fields,json=includeOnlyFields,proto3" json:"include_only_fields,omitempty"`
// retrieve only values for totals, excluding N/A values
- Totals bool `protobuf:"varint,7,opt,name=totals,proto3" json:"totals,omitempty"`
+ Totals bool `protobuf:"varint,7,opt,name=totals,proto3" json:"totals,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetMetricsRequest) Reset() {
@@ -121,16 +121,15 @@ func (x *GetMetricsRequest) GetTotals() bool {
// GetMetricsResponse defines metrics for specific value of dimension (ex.: host=hostname1 or queryid=1D410B4BE5060972).
type GetMetricsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Metrics map[string]*MetricValues `protobuf:"bytes,3,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ TextMetrics map[string]string `protobuf:"bytes,7,rep,name=text_metrics,json=textMetrics,proto3" json:"text_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Sparkline []*Point `protobuf:"bytes,4,rep,name=sparkline,proto3" json:"sparkline,omitempty"`
+ Totals map[string]*MetricValues `protobuf:"bytes,5,rep,name=totals,proto3" json:"totals,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"`
+ Metadata *GetSelectedQueryMetadataResponse `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Metrics map[string]*MetricValues `protobuf:"bytes,3,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- TextMetrics map[string]string `protobuf:"bytes,7,rep,name=text_metrics,json=textMetrics,proto3" json:"text_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Sparkline []*Point `protobuf:"bytes,4,rep,name=sparkline,proto3" json:"sparkline,omitempty"`
- Totals map[string]*MetricValues `protobuf:"bytes,5,rep,name=totals,proto3" json:"totals,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"`
- Metadata *GetSelectedQueryMetadataResponse `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetMetricsResponse) Reset() {
@@ -207,18 +206,17 @@ func (x *GetMetricsResponse) GetMetadata() *GetSelectedQueryMetadataResponse {
// MetricValues is statistics of specific metric.
type MetricValues struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Rate float32 `protobuf:"fixed32,1,opt,name=rate,proto3" json:"rate,omitempty"`
- Cnt float32 `protobuf:"fixed32,2,opt,name=cnt,proto3" json:"cnt,omitempty"`
- Sum float32 `protobuf:"fixed32,3,opt,name=sum,proto3" json:"sum,omitempty"`
- Min float32 `protobuf:"fixed32,4,opt,name=min,proto3" json:"min,omitempty"`
- Max float32 `protobuf:"fixed32,5,opt,name=max,proto3" json:"max,omitempty"`
- Avg float32 `protobuf:"fixed32,6,opt,name=avg,proto3" json:"avg,omitempty"`
- P99 float32 `protobuf:"fixed32,7,opt,name=p99,proto3" json:"p99,omitempty"`
- PercentOfTotal float32 `protobuf:"fixed32,8,opt,name=percent_of_total,json=percentOfTotal,proto3" json:"percent_of_total,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Rate float32 `protobuf:"fixed32,1,opt,name=rate,proto3" json:"rate,omitempty"`
+ Cnt float32 `protobuf:"fixed32,2,opt,name=cnt,proto3" json:"cnt,omitempty"`
+ Sum float32 `protobuf:"fixed32,3,opt,name=sum,proto3" json:"sum,omitempty"`
+ Min float32 `protobuf:"fixed32,4,opt,name=min,proto3" json:"min,omitempty"`
+ Max float32 `protobuf:"fixed32,5,opt,name=max,proto3" json:"max,omitempty"`
+ Avg float32 `protobuf:"fixed32,6,opt,name=avg,proto3" json:"avg,omitempty"`
+ P99 float32 `protobuf:"fixed32,7,opt,name=p99,proto3" json:"p99,omitempty"`
+ PercentOfTotal float32 `protobuf:"fixed32,8,opt,name=percent_of_total,json=percentOfTotal,proto3" json:"percent_of_total,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricValues) Reset() {
@@ -309,11 +307,10 @@ func (x *MetricValues) GetPercentOfTotal() float32 {
// Labels are list of labels or dimensions values.
type Labels struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Labels) Reset() {
@@ -356,18 +353,17 @@ func (x *Labels) GetValue() []string {
// GetQueryExampleRequest defines filtering of query examples for specific value of
// dimension (ex.: host=hostname1 or queryid=1D410B4BE5060972).
type GetQueryExampleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
// dimension value: ex: queryid - 1D410B4BE5060972.
FilterBy string `protobuf:"bytes,3,opt,name=filter_by,json=filterBy,proto3" json:"filter_by,omitempty"`
// one of dimension: queryid | host ...
- GroupBy string `protobuf:"bytes,4,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
- Labels []*MapFieldEntry `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"`
- Limit uint32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"`
+ GroupBy string `protobuf:"bytes,4,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
+ Labels []*MapFieldEntry `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"`
+ Limit uint32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetQueryExampleRequest) Reset() {
@@ -444,11 +440,10 @@ func (x *GetQueryExampleRequest) GetLimit() uint32 {
// GetQueryExampleResponse is a list of query examples.
type GetQueryExampleResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ QueryExamples []*QueryExample `protobuf:"bytes,1,rep,name=query_examples,json=queryExamples,proto3" json:"query_examples,omitempty"`
unknownFields protoimpl.UnknownFields
-
- QueryExamples []*QueryExample `protobuf:"bytes,1,rep,name=query_examples,json=queryExamples,proto3" json:"query_examples,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetQueryExampleResponse) Reset() {
@@ -490,21 +485,20 @@ func (x *GetQueryExampleResponse) GetQueryExamples() []*QueryExample {
// QueryExample shows query examples and their metrics.
type QueryExample struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Example string `protobuf:"bytes,1,opt,name=example,proto3" json:"example,omitempty"`
- ExampleType ExampleType `protobuf:"varint,2,opt,name=example_type,json=exampleType,proto3,enum=qan.v1.ExampleType" json:"example_type,omitempty"`
- IsTruncated uint32 `protobuf:"varint,3,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"`
- PlaceholdersCount uint32 `protobuf:"varint,4,opt,name=placeholders_count,json=placeholdersCount,proto3" json:"placeholders_count,omitempty"`
- ExplainFingerprint string `protobuf:"bytes,5,opt,name=explain_fingerprint,json=explainFingerprint,proto3" json:"explain_fingerprint,omitempty"`
- QueryId string `protobuf:"bytes,6,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
- ExampleMetrics string `protobuf:"bytes,7,opt,name=example_metrics,json=exampleMetrics,proto3" json:"example_metrics,omitempty"`
- ServiceId string `protobuf:"bytes,8,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- ServiceType string `protobuf:"bytes,9,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
- Schema string `protobuf:"bytes,10,opt,name=schema,proto3" json:"schema,omitempty"`
- Tables []string `protobuf:"bytes,11,rep,name=tables,proto3" json:"tables,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Example string `protobuf:"bytes,1,opt,name=example,proto3" json:"example,omitempty"`
+ ExampleType ExampleType `protobuf:"varint,2,opt,name=example_type,json=exampleType,proto3,enum=qan.v1.ExampleType" json:"example_type,omitempty"`
+ IsTruncated uint32 `protobuf:"varint,3,opt,name=is_truncated,json=isTruncated,proto3" json:"is_truncated,omitempty"`
+ PlaceholdersCount uint32 `protobuf:"varint,4,opt,name=placeholders_count,json=placeholdersCount,proto3" json:"placeholders_count,omitempty"`
+ ExplainFingerprint string `protobuf:"bytes,5,opt,name=explain_fingerprint,json=explainFingerprint,proto3" json:"explain_fingerprint,omitempty"`
+ QueryId string `protobuf:"bytes,6,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
+ ExampleMetrics string `protobuf:"bytes,7,opt,name=example_metrics,json=exampleMetrics,proto3" json:"example_metrics,omitempty"`
+ ServiceId string `protobuf:"bytes,8,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ ServiceType string `protobuf:"bytes,9,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
+ Schema string `protobuf:"bytes,10,opt,name=schema,proto3" json:"schema,omitempty"`
+ Tables []string `protobuf:"bytes,11,rep,name=tables,proto3" json:"tables,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *QueryExample) Reset() {
@@ -617,16 +611,15 @@ func (x *QueryExample) GetTables() []string {
// GetLabelsRequest defines filtering of object detail's labels for specific value of
// dimension (ex.: host=hostname1 or queryid=1D410B4BE5060972).
type GetLabelsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
// dimension value: ex: queryid - 1D410B4BE5060972.
FilterBy string `protobuf:"bytes,3,opt,name=filter_by,json=filterBy,proto3" json:"filter_by,omitempty"`
// one of dimension: queryid | host ...
- GroupBy string `protobuf:"bytes,4,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
+ GroupBy string `protobuf:"bytes,4,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetLabelsRequest) Reset() {
@@ -689,11 +682,10 @@ func (x *GetLabelsRequest) GetGroupBy() string {
// GetLabelsResponse is a map of labels names as keys and labels values as a list.
type GetLabelsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Labels map[string]*ListLabelValues `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- Labels map[string]*ListLabelValues `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetLabelsResponse) Reset() {
@@ -735,11 +727,10 @@ func (x *GetLabelsResponse) GetLabels() map[string]*ListLabelValues {
// ListLabelValues is list of label's values.
type ListLabelValues struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListLabelValues) Reset() {
@@ -781,11 +772,10 @@ func (x *ListLabelValues) GetValues() []string {
// GetQueryPlanRequest defines filtering by queryid.
type GetQueryPlanRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Queryid string `protobuf:"bytes,1,opt,name=queryid,proto3" json:"queryid,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Queryid string `protobuf:"bytes,1,opt,name=queryid,proto3" json:"queryid,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetQueryPlanRequest) Reset() {
@@ -827,12 +817,11 @@ func (x *GetQueryPlanRequest) GetQueryid() string {
// GetQueryPlanResponse contains planid and query_plan.
type GetQueryPlanResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Planid string `protobuf:"bytes,1,opt,name=planid,proto3" json:"planid,omitempty"`
+ QueryPlan string `protobuf:"bytes,2,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Planid string `protobuf:"bytes,1,opt,name=planid,proto3" json:"planid,omitempty"`
- QueryPlan string `protobuf:"bytes,2,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetQueryPlanResponse) Reset() {
@@ -881,14 +870,13 @@ func (x *GetQueryPlanResponse) GetQueryPlan() string {
// GetHistogramRequest defines filtering by time range, labels and queryid.
type GetHistogramRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
Labels []*MapFieldEntry `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"`
Queryid string `protobuf:"bytes,4,opt,name=queryid,proto3" json:"queryid,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetHistogramRequest) Reset() {
@@ -951,11 +939,10 @@ func (x *GetHistogramRequest) GetQueryid() string {
// GetHistogramResponse is histogram items as a list.
type GetHistogramResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- HistogramItems []*HistogramItem `protobuf:"bytes,1,rep,name=histogram_items,json=histogramItems,proto3" json:"histogram_items,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ HistogramItems []*HistogramItem `protobuf:"bytes,1,rep,name=histogram_items,json=histogramItems,proto3" json:"histogram_items,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetHistogramResponse) Reset() {
@@ -997,12 +984,11 @@ func (x *GetHistogramResponse) GetHistogramItems() []*HistogramItem {
// HistogramItem represents one item in histogram.
type HistogramItem struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Range string `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
+ Frequency uint32 `protobuf:"varint,2,opt,name=frequency,proto3" json:"frequency,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Range string `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"`
- Frequency uint32 `protobuf:"varint,2,opt,name=frequency,proto3" json:"frequency,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *HistogramItem) Reset() {
@@ -1051,12 +1037,11 @@ func (x *HistogramItem) GetFrequency() uint32 {
// QueryExistsRequest check if provided query exists or not.
type QueryExistsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Serviceid string `protobuf:"bytes,1,opt,name=serviceid,proto3" json:"serviceid,omitempty"`
+ Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Serviceid string `protobuf:"bytes,1,opt,name=serviceid,proto3" json:"serviceid,omitempty"`
- Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *QueryExistsRequest) Reset() {
@@ -1105,11 +1090,10 @@ func (x *QueryExistsRequest) GetQuery() string {
// QueryExistsResponse returns true if query exists.
type QueryExistsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *QueryExistsResponse) Reset() {
@@ -1151,12 +1135,11 @@ func (x *QueryExistsResponse) GetExists() bool {
// SchemaByQueryIDRequest returns schema for given query ID and service ID.
type SchemaByQueryIDRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ QueryId string `protobuf:"bytes,2,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- QueryId string `protobuf:"bytes,2,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *SchemaByQueryIDRequest) Reset() {
@@ -1205,11 +1188,10 @@ func (x *SchemaByQueryIDRequest) GetQueryId() string {
// SchemaByQueryIDResponse is schema for given query ID and service ID.
type SchemaByQueryIDResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *SchemaByQueryIDResponse) Reset() {
@@ -1251,12 +1233,11 @@ func (x *SchemaByQueryIDResponse) GetSchema() string {
// ExplainFingerprintByQueryIDRequest gets explain fingerprint for given query ID.
type ExplainFingerprintByQueryIDRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Serviceid string `protobuf:"bytes,1,opt,name=serviceid,proto3" json:"serviceid,omitempty"`
+ QueryId string `protobuf:"bytes,2,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Serviceid string `protobuf:"bytes,1,opt,name=serviceid,proto3" json:"serviceid,omitempty"`
- QueryId string `protobuf:"bytes,2,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ExplainFingerprintByQueryIDRequest) Reset() {
@@ -1305,12 +1286,11 @@ func (x *ExplainFingerprintByQueryIDRequest) GetQueryId() string {
// ExplainFingerprintByQueryIDResponse is explain fingerprint and placeholders count for given query ID.
type ExplainFingerprintByQueryIDResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ExplainFingerprint string `protobuf:"bytes,1,opt,name=explain_fingerprint,json=explainFingerprint,proto3" json:"explain_fingerprint,omitempty"`
- PlaceholdersCount uint32 `protobuf:"varint,2,opt,name=placeholders_count,json=placeholdersCount,proto3" json:"placeholders_count,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ExplainFingerprint string `protobuf:"bytes,1,opt,name=explain_fingerprint,json=explainFingerprint,proto3" json:"explain_fingerprint,omitempty"`
+ PlaceholdersCount uint32 `protobuf:"varint,2,opt,name=placeholders_count,json=placeholdersCount,proto3" json:"placeholders_count,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExplainFingerprintByQueryIDResponse) Reset() {
@@ -1359,22 +1339,21 @@ func (x *ExplainFingerprintByQueryIDResponse) GetPlaceholdersCount() uint32 {
// GetSelectedQueryMetadataResponse contains selected query metadata to show in details for the given query ID.
type GetSelectedQueryMetadataResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
- Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
- Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"`
- Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"`
- ReplicationSet string `protobuf:"bytes,5,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
- Cluster string `protobuf:"bytes,6,opt,name=cluster,proto3" json:"cluster,omitempty"`
- ServiceType string `protobuf:"bytes,7,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
- ServiceId string `protobuf:"bytes,8,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- Environment string `protobuf:"bytes,9,opt,name=environment,proto3" json:"environment,omitempty"`
- NodeId string `protobuf:"bytes,10,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
- NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
- NodeType string `protobuf:"bytes,12,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
+ Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"`
+ Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"`
+ ReplicationSet string `protobuf:"bytes,5,opt,name=replication_set,json=replicationSet,proto3" json:"replication_set,omitempty"`
+ Cluster string `protobuf:"bytes,6,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ ServiceType string `protobuf:"bytes,7,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
+ ServiceId string `protobuf:"bytes,8,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ Environment string `protobuf:"bytes,9,opt,name=environment,proto3" json:"environment,omitempty"`
+ NodeId string `protobuf:"bytes,10,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ NodeName string `protobuf:"bytes,11,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
+ NodeType string `protobuf:"bytes,12,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetSelectedQueryMetadataResponse) Reset() {
@@ -1493,7 +1472,7 @@ func (x *GetSelectedQueryMetadataResponse) GetNodeType() string {
var File_qan_v1_object_details_proto protoreflect.FileDescriptor
-var file_qan_v1_object_details_proto_rawDesc = []byte{
+var file_qan_v1_object_details_proto_rawDesc = string([]byte{
0x0a, 0x1b, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x71,
0x61, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
@@ -1746,16 +1725,16 @@ var file_qan_v1_object_details_proto_rawDesc = []byte{
0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50,
0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x51, 0x61, 0x6e, 0x3a,
0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_qan_v1_object_details_proto_rawDescOnce sync.Once
- file_qan_v1_object_details_proto_rawDescData = file_qan_v1_object_details_proto_rawDesc
+ file_qan_v1_object_details_proto_rawDescData []byte
)
func file_qan_v1_object_details_proto_rawDescGZIP() []byte {
file_qan_v1_object_details_proto_rawDescOnce.Do(func() {
- file_qan_v1_object_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_object_details_proto_rawDescData)
+ file_qan_v1_object_details_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_object_details_proto_rawDesc), len(file_qan_v1_object_details_proto_rawDesc)))
})
return file_qan_v1_object_details_proto_rawDescData
}
@@ -1837,7 +1816,7 @@ func file_qan_v1_object_details_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_object_details_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_object_details_proto_rawDesc), len(file_qan_v1_object_details_proto_rawDesc)),
NumEnums: 0,
NumMessages: 26,
NumExtensions: 0,
@@ -1848,7 +1827,6 @@ func file_qan_v1_object_details_proto_init() {
MessageInfos: file_qan_v1_object_details_proto_msgTypes,
}.Build()
File_qan_v1_object_details_proto = out.File
- file_qan_v1_object_details_proto_rawDesc = nil
file_qan_v1_object_details_proto_goTypes = nil
file_qan_v1_object_details_proto_depIdxs = nil
}
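
The regenerated object_details.pb.go stores the raw file descriptor as a string and converts it back to a byte slice on demand with unsafe.Slice(unsafe.StringData(...), len(...)), which aliases the string's backing array instead of copying it. Below is a minimal standalone sketch of that zero-copy conversion (it assumes Go 1.20+ for unsafe.StringData; bytesOf is a hypothetical helper, and the slice it returns must be treated as read-only):

package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a []byte view over s without copying. Because the slice
// aliases the string's backing array, callers must never write to it.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	// Illustrative prefix only: tag 0x0a, length 0x1b (27), then the file name,
	// mirroring the first bytes of file_qan_v1_object_details_proto_rawDesc.
	raw := "\x0a\x1bqan/v1/object_details.proto"
	b := bytesOf(raw)
	fmt.Println(len(b), b[0]) // 29 10
}
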
diff --git a/api/qan/v1/object_details.pb.validate.go b/api/qan/v1/object_details.pb.validate.go
index 7a46c21c7e..59861ff0fd 100644
--- a/api/qan/v1/object_details.pb.validate.go
+++ b/api/qan/v1/object_details.pb.validate.go
@@ -169,7 +169,7 @@ type GetMetricsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetMetricsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -430,7 +430,7 @@ type GetMetricsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetMetricsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -547,7 +547,7 @@ type MetricValuesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricValuesMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -645,7 +645,7 @@ type LabelsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LabelsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -843,7 +843,7 @@ type GetQueryExampleRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetQueryExampleRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -979,7 +979,7 @@ type GetQueryExampleResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetQueryExampleResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1100,7 +1100,7 @@ type QueryExampleMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryExampleMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1262,7 +1262,7 @@ type GetLabelsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetLabelsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1408,7 +1408,7 @@ type GetLabelsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetLabelsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1510,7 +1510,7 @@ type ListLabelValuesMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListLabelValuesMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1612,7 +1612,7 @@ type GetQueryPlanRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetQueryPlanRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1718,7 +1718,7 @@ type GetQueryPlanResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetQueryPlanResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1914,7 +1914,7 @@ type GetHistogramRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetHistogramRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2050,7 +2050,7 @@ type GetHistogramResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetHistogramResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2156,7 +2156,7 @@ type HistogramItemMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HistogramItemMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2260,7 +2260,7 @@ type QueryExistsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryExistsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2364,7 +2364,7 @@ type QueryExistsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m QueryExistsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2470,7 +2470,7 @@ type SchemaByQueryIDRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SchemaByQueryIDRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2574,7 +2574,7 @@ type SchemaByQueryIDResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SchemaByQueryIDResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2682,7 +2682,7 @@ type ExplainFingerprintByQueryIDRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ExplainFingerprintByQueryIDRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2791,7 +2791,7 @@ type ExplainFingerprintByQueryIDResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ExplainFingerprintByQueryIDResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2920,7 +2920,7 @@ type GetSelectedQueryMetadataResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetSelectedQueryMetadataResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
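
The validate file now sizes msgs up front with make([]string, 0, len(m)), since every wrapped error contributes exactly one message; that avoids the repeated slice growth of a bare var msgs []string with append. A small standalone sketch of the same pattern (multiError and the "; " separator are stand-ins, not the generated type):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// multiError is a stand-in for the generated XxxMultiError types.
type multiError []error

// Error concatenates the wrapped messages; the capacity is known in advance,
// so the backing array is allocated once.
func (m multiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	m := multiError{
		errors.New("first validation error"),
		errors.New("second validation error"),
	}
	fmt.Println(m.Error())
}
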
diff --git a/api/qan/v1/profile.pb.go b/api/qan/v1/profile.pb.go
index 49c017d4a7..6a6fad2928 100644
--- a/api/qan/v1/profile.pb.go
+++ b/api/qan/v1/profile.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/profile.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,10 +25,7 @@ const (
// ReportRequest defines filtering of the metrics report for a db server or other dimensions.
type GetReportRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
PeriodStartFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=period_start_from,json=periodStartFrom,proto3" json:"period_start_from,omitempty"`
PeriodStartTo *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=period_start_to,json=periodStartTo,proto3" json:"period_start_to,omitempty"`
GroupBy string `protobuf:"bytes,3,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
@@ -38,6 +36,8 @@ type GetReportRequest struct {
Limit uint32 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"`
MainMetric string `protobuf:"bytes,9,opt,name=main_metric,json=mainMetric,proto3" json:"main_metric,omitempty"`
Search string `protobuf:"bytes,10,opt,name=search,proto3" json:"search,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetReportRequest) Reset() {
@@ -142,12 +142,11 @@ func (x *GetReportRequest) GetSearch() string {
// ReportMapFieldEntry allows passing labels/dimensions in a form like {"server": ["db1", "db2"...]}.
type ReportMapFieldEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value,proto3" json:"value,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ReportMapFieldEntry) Reset() {
@@ -196,14 +195,13 @@ func (x *ReportMapFieldEntry) GetValue() []string {
// ReportReply is a list of reports per query ID, host, etc.
type GetReportResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ TotalRows uint32 `protobuf:"varint,1,opt,name=total_rows,json=totalRows,proto3" json:"total_rows,omitempty"`
+ Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ Rows []*Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"`
unknownFields protoimpl.UnknownFields
-
- TotalRows uint32 `protobuf:"varint,1,opt,name=total_rows,json=totalRows,proto3" json:"total_rows,omitempty"`
- Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
- Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
- Rows []*Row `protobuf:"bytes,4,rep,name=rows,proto3" json:"rows,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetReportResponse) Reset() {
@@ -266,19 +264,18 @@ func (x *GetReportResponse) GetRows() []*Row {
// Row defines metrics for the selected dimension.
type Row struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Rank uint32 `protobuf:"varint,1,opt,name=rank,proto3" json:"rank,omitempty"`
+ Dimension string `protobuf:"bytes,2,opt,name=dimension,proto3" json:"dimension,omitempty"`
+ Database string `protobuf:"bytes,3,opt,name=database,proto3" json:"database,omitempty"`
+ Metrics map[string]*Metric `protobuf:"bytes,4,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Sparkline []*Point `protobuf:"bytes,5,rep,name=sparkline,proto3" json:"sparkline,omitempty"`
+ Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"`
+ NumQueries uint32 `protobuf:"varint,7,opt,name=num_queries,json=numQueries,proto3" json:"num_queries,omitempty"`
+ Qps float32 `protobuf:"fixed32,8,opt,name=qps,proto3" json:"qps,omitempty"`
+ Load float32 `protobuf:"fixed32,9,opt,name=load,proto3" json:"load,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Rank uint32 `protobuf:"varint,1,opt,name=rank,proto3" json:"rank,omitempty"`
- Dimension string `protobuf:"bytes,2,opt,name=dimension,proto3" json:"dimension,omitempty"`
- Database string `protobuf:"bytes,3,opt,name=database,proto3" json:"database,omitempty"`
- Metrics map[string]*Metric `protobuf:"bytes,4,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Sparkline []*Point `protobuf:"bytes,5,rep,name=sparkline,proto3" json:"sparkline,omitempty"`
- Fingerprint string `protobuf:"bytes,6,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"`
- NumQueries uint32 `protobuf:"varint,7,opt,name=num_queries,json=numQueries,proto3" json:"num_queries,omitempty"`
- Qps float32 `protobuf:"fixed32,8,opt,name=qps,proto3" json:"qps,omitempty"`
- Load float32 `protobuf:"fixed32,9,opt,name=load,proto3" json:"load,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Row) Reset() {
@@ -376,11 +373,10 @@ func (x *Row) GetLoad() float32 {
// Metric cell.
type Metric struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Stats *Stat `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Stats *Stat `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Metric) Reset() {
@@ -422,18 +418,17 @@ func (x *Metric) GetStats() *Stat {
// Stat is statistics of specific metric.
type Stat struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Rate float32 `protobuf:"fixed32,1,opt,name=rate,proto3" json:"rate,omitempty"`
+ Cnt float32 `protobuf:"fixed32,2,opt,name=cnt,proto3" json:"cnt,omitempty"`
+ Sum float32 `protobuf:"fixed32,3,opt,name=sum,proto3" json:"sum,omitempty"`
+ Min float32 `protobuf:"fixed32,4,opt,name=min,proto3" json:"min,omitempty"`
+ Max float32 `protobuf:"fixed32,5,opt,name=max,proto3" json:"max,omitempty"`
+ P99 float32 `protobuf:"fixed32,6,opt,name=p99,proto3" json:"p99,omitempty"`
+ Avg float32 `protobuf:"fixed32,7,opt,name=avg,proto3" json:"avg,omitempty"`
+ SumPerSec float32 `protobuf:"fixed32,8,opt,name=sum_per_sec,json=sumPerSec,proto3" json:"sum_per_sec,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Rate float32 `protobuf:"fixed32,1,opt,name=rate,proto3" json:"rate,omitempty"`
- Cnt float32 `protobuf:"fixed32,2,opt,name=cnt,proto3" json:"cnt,omitempty"`
- Sum float32 `protobuf:"fixed32,3,opt,name=sum,proto3" json:"sum,omitempty"`
- Min float32 `protobuf:"fixed32,4,opt,name=min,proto3" json:"min,omitempty"`
- Max float32 `protobuf:"fixed32,5,opt,name=max,proto3" json:"max,omitempty"`
- P99 float32 `protobuf:"fixed32,6,opt,name=p99,proto3" json:"p99,omitempty"`
- Avg float32 `protobuf:"fixed32,7,opt,name=avg,proto3" json:"avg,omitempty"`
- SumPerSec float32 `protobuf:"fixed32,8,opt,name=sum_per_sec,json=sumPerSec,proto3" json:"sum_per_sec,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *Stat) Reset() {
@@ -524,7 +519,7 @@ func (x *Stat) GetSumPerSec() float32 {
var File_qan_v1_profile_proto protoreflect.FileDescriptor
-var file_qan_v1_profile_proto_rawDesc = []byte{
+var file_qan_v1_profile_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
@@ -612,16 +607,16 @@ var file_qan_v1_profile_proto_rawDesc = []byte{
0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a,
0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_qan_v1_profile_proto_rawDescOnce sync.Once
- file_qan_v1_profile_proto_rawDescData = file_qan_v1_profile_proto_rawDesc
+ file_qan_v1_profile_proto_rawDescData []byte
)
func file_qan_v1_profile_proto_rawDescGZIP() []byte {
file_qan_v1_profile_proto_rawDescOnce.Do(func() {
- file_qan_v1_profile_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_profile_proto_rawDescData)
+ file_qan_v1_profile_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_profile_proto_rawDesc), len(file_qan_v1_profile_proto_rawDesc)))
})
return file_qan_v1_profile_proto_rawDescData
}
@@ -667,7 +662,7 @@ func file_qan_v1_profile_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_profile_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_profile_proto_rawDesc), len(file_qan_v1_profile_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@@ -678,7 +673,6 @@ func file_qan_v1_profile_proto_init() {
MessageInfos: file_qan_v1_profile_proto_msgTypes,
}.Build()
File_qan_v1_profile_proto = out.File
- file_qan_v1_profile_proto_rawDesc = nil
file_qan_v1_profile_proto_goTypes = nil
file_qan_v1_profile_proto_depIdxs = nil
}
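
The struct reshuffle in profile.pb.go (state tagged protogen:"open.v1" first, unknownFields and sizeCache moved to the end) only changes the internal layout of the generated messages; exported fields and getters are untouched, so callers need no edits. A minimal usage sketch follows (the import path is an assumption; adjust it to the real module path):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"

	qanv1 "github.com/percona/pmm/api/qan/v1" // assumed import path
)

func main() {
	req := &qanv1.GetReportRequest{
		PeriodStartFrom: timestamppb.Now(),
		PeriodStartTo:   timestamppb.Now(),
		GroupBy:         "queryid",
		Limit:           10,
	}
	// Generated getters are unchanged by the regeneration and are nil-safe.
	fmt.Println(req.GetGroupBy(), req.GetLimit())
}
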
diff --git a/api/qan/v1/profile.pb.validate.go b/api/qan/v1/profile.pb.validate.go
index 81b895190f..fe2eb0e702 100644
--- a/api/qan/v1/profile.pb.validate.go
+++ b/api/qan/v1/profile.pb.validate.go
@@ -175,7 +175,7 @@ type GetReportRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetReportRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -277,7 +277,7 @@ type ReportMapFieldEntryMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ReportMapFieldEntryMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -419,7 +419,7 @@ type GetReportResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetReportResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -613,7 +613,7 @@ type RowMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m RowMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -740,7 +740,7 @@ type MetricMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -854,7 +854,7 @@ type StatMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StatMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/qan/v1/qan.pb.go b/api/qan/v1/qan.pb.go
index 49eb549dbc..c9e26788a5 100644
--- a/api/qan/v1/qan.pb.go
+++ b/api/qan/v1/qan.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/qan.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -80,10 +81,7 @@ func (ExampleType) EnumDescriptor() ([]byte, []int) {
// Point contains values that represent the abscissa (time) and ordinate (volume etc.)
// of every point in a coordinate system of Sparklines.
type Point struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The serial number of the chart point from the largest time in the time interval to the lowest time in the time range.
Point uint32 `protobuf:"varint,1,opt,name=point,proto3" json:"point,omitempty"`
// Duration between two points.
@@ -216,6 +214,8 @@ type Point struct {
MWalBytesSumPerSec float32 `protobuf:"fixed32,83,opt,name=m_wal_bytes_sum_per_sec,json=mWalBytesSumPerSec,proto3" json:"m_wal_bytes_sum_per_sec,omitempty"`
// Plan time in per seconds.
MPlanTimeSumPerSec float32 `protobuf:"fixed32,84,opt,name=m_plan_time_sum_per_sec,json=mPlanTimeSumPerSec,proto3" json:"m_plan_time_sum_per_sec,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Point) Reset() {
@@ -712,12 +712,11 @@ func (x *Point) GetMPlanTimeSumPerSec() float32 {
// MapFieldEntry allows passing labels/dimensions in a form like {"server": ["db1", "db2"...]}.
type MapFieldEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value,proto3" json:"value,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value,proto3" json:"value,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *MapFieldEntry) Reset() {
@@ -766,7 +765,7 @@ func (x *MapFieldEntry) GetValue() []string {
var File_qan_v1_qan_proto protoreflect.FileDescriptor
-var file_qan_v1_qan_proto_rawDesc = []byte{
+var file_qan_v1_qan_proto_rawDesc = string([]byte{
0x0a, 0x10, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x06, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xea, 0x1e, 0x0a, 0x05, 0x50,
0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20,
@@ -1037,16 +1036,16 @@ var file_qan_v1_qan_proto_rawDesc = []byte{
0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a,
0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_qan_v1_qan_proto_rawDescOnce sync.Once
- file_qan_v1_qan_proto_rawDescData = file_qan_v1_qan_proto_rawDesc
+ file_qan_v1_qan_proto_rawDescData []byte
)
func file_qan_v1_qan_proto_rawDescGZIP() []byte {
file_qan_v1_qan_proto_rawDescOnce.Do(func() {
- file_qan_v1_qan_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_qan_proto_rawDescData)
+ file_qan_v1_qan_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_qan_proto_rawDesc), len(file_qan_v1_qan_proto_rawDesc)))
})
return file_qan_v1_qan_proto_rawDescData
}
@@ -1078,7 +1077,7 @@ func file_qan_v1_qan_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_qan_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_qan_proto_rawDesc), len(file_qan_v1_qan_proto_rawDesc)),
NumEnums: 1,
NumMessages: 2,
NumExtensions: 0,
@@ -1090,7 +1089,6 @@ func file_qan_v1_qan_proto_init() {
MessageInfos: file_qan_v1_qan_proto_msgTypes,
}.Build()
File_qan_v1_qan_proto = out.File
- file_qan_v1_qan_proto_rawDesc = nil
file_qan_v1_qan_proto_goTypes = nil
file_qan_v1_qan_proto_depIdxs = nil
}
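
Because the raw descriptor is now kept as a string, the generated init code no longer nils it out after registration; the gzipped copy is instead built lazily, exactly once, guarded by sync.Once. A standalone sketch of that lazy-compress pattern (gzipBytes is a hypothetical stand-in for protoimpl.X.CompressGZIP):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDesc     = "example descriptor bytes" // the generated files hold the full file descriptor here
	rawDescOnce sync.Once
	rawDescGZ   []byte
)

// gzipBytes compresses b in memory; writes to a bytes.Buffer cannot fail,
// so the returned errors are ignored for brevity.
func gzipBytes(b []byte) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write(b)
	_ = zw.Close()
	return buf.Bytes()
}

// rawDescGZIP mirrors the generated file_*_rawDescGZIP helpers: compress on
// first use, then hand back the cached result.
func rawDescGZIP() []byte {
	rawDescOnce.Do(func() {
		rawDescGZ = gzipBytes([]byte(rawDesc))
	})
	return rawDescGZ
}

func main() {
	fmt.Println(len(rawDescGZIP()), len(rawDescGZIP())) // the second call reuses the cached bytes
}
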
diff --git a/api/qan/v1/qan.pb.validate.go b/api/qan/v1/qan.pb.validate.go
index c1f201b4f3..a1a00eec33 100644
--- a/api/qan/v1/qan.pb.validate.go
+++ b/api/qan/v1/qan.pb.validate.go
@@ -201,7 +201,7 @@ type PointMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PointMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -303,7 +303,7 @@ type MapFieldEntryMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MapFieldEntryMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/qan/v1/service.pb.go b/api/qan/v1/service.pb.go
index 5787f81592..e8c9b9b0c9 100644
--- a/api/qan/v1/service.pb.go
+++ b/api/qan/v1/service.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: qan/v1/service.proto
@@ -9,6 +9,7 @@ package qanv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -25,9 +26,9 @@ const (
// MetricsNamesRequest is empty.
type GetMetricsNamesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetMetricsNamesRequest) Reset() {
@@ -64,11 +65,10 @@ func (*GetMetricsNamesRequest) Descriptor() ([]byte, []int) {
// key is the root of the metric name in the db (e.g. [m_]query_time[_sum]);
// value is the human-readable name of the metric.
type GetMetricsNamesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Data map[string]string `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- Data map[string]string `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetMetricsNamesResponse) Reset() {
@@ -110,7 +110,7 @@ func (x *GetMetricsNamesResponse) GetData() map[string]string {
var File_qan_v1_service_proto protoreflect.FileDescriptor
-var file_qan_v1_service_proto_rawDesc = []byte{
+var file_qan_v1_service_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x71, 0x61, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x71, 0x61, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
@@ -271,16 +271,16 @@ var file_qan_v1_service_proto_rawDesc = []byte{
0xca, 0x02, 0x06, 0x51, 0x61, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x51, 0x61, 0x6e, 0x5c,
0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x07, 0x51, 0x61, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_qan_v1_service_proto_rawDescOnce sync.Once
- file_qan_v1_service_proto_rawDescData = file_qan_v1_service_proto_rawDesc
+ file_qan_v1_service_proto_rawDescData []byte
)
func file_qan_v1_service_proto_rawDescGZIP() []byte {
file_qan_v1_service_proto_rawDescOnce.Do(func() {
- file_qan_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_qan_v1_service_proto_rawDescData)
+ file_qan_v1_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_qan_v1_service_proto_rawDesc), len(file_qan_v1_service_proto_rawDesc)))
})
return file_qan_v1_service_proto_rawDescData
}
@@ -357,7 +357,7 @@ func file_qan_v1_service_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_qan_v1_service_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_qan_v1_service_proto_rawDesc), len(file_qan_v1_service_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@@ -368,7 +368,6 @@ func file_qan_v1_service_proto_init() {
MessageInfos: file_qan_v1_service_proto_msgTypes,
}.Build()
File_qan_v1_service_proto = out.File
- file_qan_v1_service_proto_rawDesc = nil
file_qan_v1_service_proto_goTypes = nil
file_qan_v1_service_proto_depIdxs = nil
}
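
In service.pb.go the map field's key/value sub-tags drop the redundant ",proto3" suffix, but neither the wire format nor the generated accessor changes. A small usage sketch of the Data map (same assumed import path as above):

package main

import (
	"fmt"

	qanv1 "github.com/percona/pmm/api/qan/v1" // assumed import path
)

func main() {
	resp := &qanv1.GetMetricsNamesResponse{
		Data: map[string]string{
			"query_time": "Query Time", // illustrative entries only
			"lock_time":  "Lock Time",
		},
	}
	for key, label := range resp.GetData() { // GetData returns nil on a nil receiver
		fmt.Printf("%s => %s\n", key, label)
	}
}
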
diff --git a/api/qan/v1/service.pb.gw.go b/api/qan/v1/service.pb.gw.go
index 7046112486..ef684a7789 100644
--- a/api/qan/v1/service.pb.gw.go
+++ b/api/qan/v1/service.pb.gw.go
@@ -10,6 +10,7 @@ package qanv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,297 +29,284 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_QANService_GetReport_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetReportRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetReportRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetReport(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetReport_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetReportRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetReportRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetReport(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetFilteredMetricsNames_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetFilteredMetricsNamesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetFilteredMetricsNamesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetFilteredMetricsNames(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetFilteredMetricsNames_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetFilteredMetricsNamesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetFilteredMetricsNamesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetFilteredMetricsNames(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetMetricsNames_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetMetricsNamesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetMetricsNamesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetMetricsNames(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetMetricsNames_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetMetricsNamesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetMetricsNamesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetMetricsNames(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetMetricsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetMetricsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetMetrics_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetMetricsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetMetricsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetMetrics(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetLabels_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetLabelsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetLabelsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetLabels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetLabels_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetLabelsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetLabelsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetLabels(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetHistogram_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetHistogramRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetHistogramRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetHistogram(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetHistogram_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetHistogramRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetHistogramRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetHistogram(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_ExplainFingerprintByQueryID_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ExplainFingerprintByQueryIDRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ExplainFingerprintByQueryIDRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ExplainFingerprintByQueryID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_ExplainFingerprintByQueryID_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ExplainFingerprintByQueryIDRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ExplainFingerprintByQueryIDRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ExplainFingerprintByQueryID(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetQueryPlan_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetQueryPlanRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetQueryPlanRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["queryid"]
+ val, ok := pathParams["queryid"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "queryid")
}
-
protoReq.Queryid, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "queryid", err)
}
-
msg, err := client.GetQueryPlan(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetQueryPlan_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetQueryPlanRequest
- var metadata runtime.ServerMetadata
-
var (
- val string
- ok bool
- err error
- _ = err
+ protoReq GetQueryPlanRequest
+ metadata runtime.ServerMetadata
+ err error
)
-
- val, ok = pathParams["queryid"]
+ val, ok := pathParams["queryid"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "queryid")
}
-
protoReq.Queryid, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "queryid", err)
}
-
msg, err := server.GetQueryPlan(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_QueryExists_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryExistsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq QueryExistsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.QueryExists(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_QueryExists_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryExistsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq QueryExistsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.QueryExists(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_SchemaByQueryID_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SchemaByQueryIDRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq SchemaByQueryIDRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.SchemaByQueryID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_SchemaByQueryID_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq SchemaByQueryIDRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq SchemaByQueryIDRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.SchemaByQueryID(ctx, &protoReq)
return msg, metadata, err
}
func request_QANService_GetQueryExample_0(ctx context.Context, marshaler runtime.Marshaler, client QANServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetQueryExampleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetQueryExampleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.GetQueryExample(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_QANService_GetQueryExample_0(ctx context.Context, marshaler runtime.Marshaler, server QANServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetQueryExampleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq GetQueryExampleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.GetQueryExample(ctx, &protoReq)
return msg, metadata, err
}
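
The regenerated gateway handlers replace err != io.EOF with !errors.Is(err, io.EOF), so an EOF that arrives wrapped in another error is still treated as an empty request body rather than a decode failure. A minimal standalone illustration of the difference:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("decode body: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: the old comparison misses wrapped EOFs
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: the new check unwraps and matches the sentinel
}
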
@@ -329,15 +317,13 @@ func local_request_QANService_GetQueryExample_0(ctx context.Context, marshaler r
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQANServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QANServiceServer) error {
- mux.Handle("POST", pattern_QANService_GetReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetReport", runtime.WithHTTPPathPattern("/v1/qan/metrics:getReport"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetReport", runtime.WithHTTPPathPattern("/v1/qan/metrics:getReport"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
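
The route registrations now use the net/http method constants instead of bare string literals; the values are identical, but a mistyped identifier fails to compile while a mistyped string would not. A tiny standalone check:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	fmt.Println(http.MethodPost == "POST", http.MethodGet == "GET") // true true
}
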
@@ -349,19 +335,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetReport_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetFilteredMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetFilteredMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetFilteredMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getFilters"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetFilteredMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getFilters"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -373,19 +355,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetFilteredMetricsNames_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getNames"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getNames"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -397,19 +375,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetMetricsNames_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetMetrics", runtime.WithHTTPPathPattern("/v1/qan:getMetrics"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetMetrics", runtime.WithHTTPPathPattern("/v1/qan:getMetrics"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -421,19 +395,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetMetrics_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetLabels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetLabels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetLabels", runtime.WithHTTPPathPattern("/v1/qan:getLabels"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetLabels", runtime.WithHTTPPathPattern("/v1/qan:getLabels"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -445,19 +415,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetLabels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetHistogram_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetHistogram_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetHistogram", runtime.WithHTTPPathPattern("/v1/qan:getHistogram"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetHistogram", runtime.WithHTTPPathPattern("/v1/qan:getHistogram"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -469,19 +435,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetHistogram_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_ExplainFingerprintByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_ExplainFingerprintByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/ExplainFingerprintByQueryID", runtime.WithHTTPPathPattern("/v1/qan:explainFingerprint"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/ExplainFingerprintByQueryID", runtime.WithHTTPPathPattern("/v1/qan:explainFingerprint"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -493,19 +455,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_ExplainFingerprintByQueryID_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_QANService_GetQueryPlan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_QANService_GetQueryPlan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetQueryPlan", runtime.WithHTTPPathPattern("/v1/qan/query/{queryid}/plan"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetQueryPlan", runtime.WithHTTPPathPattern("/v1/qan/query/{queryid}/plan"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -517,19 +475,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetQueryPlan_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_QueryExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_QueryExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/QueryExists", runtime.WithHTTPPathPattern("/v1/qan/query:exists"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/QueryExists", runtime.WithHTTPPathPattern("/v1/qan/query:exists"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -541,19 +495,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_QueryExists_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_SchemaByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_SchemaByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/SchemaByQueryID", runtime.WithHTTPPathPattern("/v1/qan/query:getSchema"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/SchemaByQueryID", runtime.WithHTTPPathPattern("/v1/qan/query:getSchema"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -565,19 +515,15 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_SchemaByQueryID_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetQueryExample_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetQueryExample_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetQueryExample", runtime.WithHTTPPathPattern("/v1/qan/query:getExample"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/qan.v1.QANService/GetQueryExample", runtime.WithHTTPPathPattern("/v1/qan/query:getExample"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -589,7 +535,6 @@ func RegisterQANServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetQueryExample_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -617,7 +562,6 @@ func RegisterQANServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.Ser
}
}()
}()
-
return RegisterQANServiceHandler(ctx, mux, conn)
}
@@ -633,13 +577,11 @@ func RegisterQANServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "QANServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QANServiceClient) error {
- mux.Handle("POST", pattern_QANService_GetReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetReport", runtime.WithHTTPPathPattern("/v1/qan/metrics:getReport"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetReport", runtime.WithHTTPPathPattern("/v1/qan/metrics:getReport"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -650,17 +592,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetReport_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetFilteredMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetFilteredMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetFilteredMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getFilters"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetFilteredMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getFilters"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -671,17 +609,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetFilteredMetricsNames_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetMetricsNames_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getNames"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetMetricsNames", runtime.WithHTTPPathPattern("/v1/qan/metrics:getNames"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -692,17 +626,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetMetricsNames_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetMetrics", runtime.WithHTTPPathPattern("/v1/qan:getMetrics"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetMetrics", runtime.WithHTTPPathPattern("/v1/qan:getMetrics"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -713,17 +643,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetMetrics_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetLabels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetLabels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetLabels", runtime.WithHTTPPathPattern("/v1/qan:getLabels"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetLabels", runtime.WithHTTPPathPattern("/v1/qan:getLabels"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -734,17 +660,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetLabels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetHistogram_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetHistogram_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetHistogram", runtime.WithHTTPPathPattern("/v1/qan:getHistogram"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetHistogram", runtime.WithHTTPPathPattern("/v1/qan:getHistogram"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -755,17 +677,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetHistogram_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_ExplainFingerprintByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_ExplainFingerprintByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/ExplainFingerprintByQueryID", runtime.WithHTTPPathPattern("/v1/qan:explainFingerprint"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/ExplainFingerprintByQueryID", runtime.WithHTTPPathPattern("/v1/qan:explainFingerprint"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -776,17 +694,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_ExplainFingerprintByQueryID_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_QANService_GetQueryPlan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_QANService_GetQueryPlan_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetQueryPlan", runtime.WithHTTPPathPattern("/v1/qan/query/{queryid}/plan"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetQueryPlan", runtime.WithHTTPPathPattern("/v1/qan/query/{queryid}/plan"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -797,17 +711,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetQueryPlan_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_QueryExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_QueryExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/QueryExists", runtime.WithHTTPPathPattern("/v1/qan/query:exists"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/QueryExists", runtime.WithHTTPPathPattern("/v1/qan/query:exists"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -818,17 +728,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_QueryExists_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_SchemaByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_SchemaByQueryID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/SchemaByQueryID", runtime.WithHTTPPathPattern("/v1/qan/query:getSchema"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/SchemaByQueryID", runtime.WithHTTPPathPattern("/v1/qan/query:getSchema"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -839,17 +745,13 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_SchemaByQueryID_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_QANService_GetQueryExample_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_QANService_GetQueryExample_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetQueryExample", runtime.WithHTTPPathPattern("/v1/qan/query:getExample"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/qan.v1.QANService/GetQueryExample", runtime.WithHTTPPathPattern("/v1/qan/query:getExample"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -860,57 +762,35 @@ func RegisterQANServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_QANService_GetQueryExample_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_QANService_GetReport_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getReport"))
-
- pattern_QANService_GetFilteredMetricsNames_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getFilters"))
-
- pattern_QANService_GetMetricsNames_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getNames"))
-
- pattern_QANService_GetMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getMetrics"))
-
- pattern_QANService_GetLabels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getLabels"))
-
- pattern_QANService_GetHistogram_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getHistogram"))
-
+ pattern_QANService_GetReport_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getReport"))
+ pattern_QANService_GetFilteredMetricsNames_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getFilters"))
+ pattern_QANService_GetMetricsNames_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "metrics"}, "getNames"))
+ pattern_QANService_GetMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getMetrics"))
+ pattern_QANService_GetLabels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getLabels"))
+ pattern_QANService_GetHistogram_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "getHistogram"))
pattern_QANService_ExplainFingerprintByQueryID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "qan"}, "explainFingerprint"))
-
- pattern_QANService_GetQueryPlan_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "qan", "query", "queryid", "plan"}, ""))
-
- pattern_QANService_QueryExists_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "exists"))
-
- pattern_QANService_SchemaByQueryID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "getSchema"))
-
- pattern_QANService_GetQueryExample_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "getExample"))
+ pattern_QANService_GetQueryPlan_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "qan", "query", "queryid", "plan"}, ""))
+ pattern_QANService_QueryExists_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "exists"))
+ pattern_QANService_SchemaByQueryID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "getSchema"))
+ pattern_QANService_GetQueryExample_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "qan", "query"}, "getExample"))
)
var (
- forward_QANService_GetReport_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetFilteredMetricsNames_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetMetricsNames_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetMetrics_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetLabels_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetHistogram_0 = runtime.ForwardResponseMessage
-
+ forward_QANService_GetReport_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetFilteredMetricsNames_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetMetricsNames_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetMetrics_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetLabels_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetHistogram_0 = runtime.ForwardResponseMessage
forward_QANService_ExplainFingerprintByQueryID_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetQueryPlan_0 = runtime.ForwardResponseMessage
-
- forward_QANService_QueryExists_0 = runtime.ForwardResponseMessage
-
- forward_QANService_SchemaByQueryID_0 = runtime.ForwardResponseMessage
-
- forward_QANService_GetQueryExample_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetQueryPlan_0 = runtime.ForwardResponseMessage
+ forward_QANService_QueryExists_0 = runtime.ForwardResponseMessage
+ forward_QANService_SchemaByQueryID_0 = runtime.ForwardResponseMessage
+ forward_QANService_GetQueryExample_0 = runtime.ForwardResponseMessage
)
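
The regenerated gateway code above makes two mechanical changes per handler: the raw "POST"/"GET" literals become the net/http method constants, and the separate err/annotatedContext declarations collapse into one short variable declaration. A minimal sketch of the constant-vs-literal idiom, using the stdlib ServeMux as a stand-in for the gateway's runtime.ServeMux (route and handler are illustrative, not part of the generated API):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	mux := http.NewServeMux()
    	// Same idea as the generated mux.Handle(http.MethodPost, ...) calls:
    	// the typed stdlib constant replaces the raw "POST" string literal.
    	mux.HandleFunc("/v1/example", func(w http.ResponseWriter, r *http.Request) {
    		if r.Method != http.MethodPost {
    			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
    			return
    		}
    		fmt.Fprintln(w, "ok")
    	})
    	fmt.Println(http.MethodPost) // prints "POST"
    }
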
diff --git a/api/qan/v1/service.pb.validate.go b/api/qan/v1/service.pb.validate.go
index b25ffedc2b..ce719a826e 100644
--- a/api/qan/v1/service.pb.validate.go
+++ b/api/qan/v1/service.pb.validate.go
@@ -71,7 +71,7 @@ type GetMetricsNamesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetMetricsNamesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -175,7 +175,7 @@ type GetMetricsNamesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetMetricsNamesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
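
The service.pb.validate.go hunks above all apply one pattern: the message slice in each MultiError's Error method is preallocated to the known length instead of starting nil. A standalone sketch of that preallocation, with a stand-in multiError type rather than the generated one (the "; " separator is assumed for illustration):

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    )

    // multiError mirrors the shape of the generated *MultiError types: a slice
    // of wrapped errors whose Error method joins the individual messages.
    type multiError []error

    func (m multiError) Error() string {
    	// Preallocating with the known capacity avoids the repeated slice growth
    	// that `var msgs []string` plus append would incur.
    	msgs := make([]string, 0, len(m))
    	for _, err := range m {
    		msgs = append(msgs, err.Error())
    	}
    	return strings.Join(msgs, "; ")
    }

    func main() {
    	errs := multiError{errors.New("first"), errors.New("second")}
    	fmt.Println(errs.Error()) // first; second
    }
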
diff --git a/api/server/v1/httperror.pb.go b/api/server/v1/httperror.pb.go
index 15550a0c7f..f60fbc03d6 100644
--- a/api/server/v1/httperror.pb.go
+++ b/api/server/v1/httperror.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: server/v1/httperror.proto
@@ -9,6 +9,7 @@ package serverv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -24,17 +25,16 @@ const (
// Error is the generic error returned from unary RPCs.
type HttpError struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
- Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
- Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
- Details []*anypb.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
+ Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ Details []*anypb.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *HttpError) Reset() {
@@ -97,7 +97,7 @@ func (x *HttpError) GetDetails() []*anypb.Any {
var File_server_v1_httperror_proto protoreflect.FileDescriptor
-var file_server_v1_httperror_proto_rawDesc = []byte{
+var file_server_v1_httperror_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x68, 0x74, 0x74, 0x70,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x65, 0x72,
0x76, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
@@ -120,16 +120,16 @@ var file_server_v1_httperror_proto_rawDesc = []byte{
0x5c, 0x56, 0x31, 0xe2, 0x02, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c,
0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x53, 0x65,
0x72, 0x76, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_server_v1_httperror_proto_rawDescOnce sync.Once
- file_server_v1_httperror_proto_rawDescData = file_server_v1_httperror_proto_rawDesc
+ file_server_v1_httperror_proto_rawDescData []byte
)
func file_server_v1_httperror_proto_rawDescGZIP() []byte {
file_server_v1_httperror_proto_rawDescOnce.Do(func() {
- file_server_v1_httperror_proto_rawDescData = protoimpl.X.CompressGZIP(file_server_v1_httperror_proto_rawDescData)
+ file_server_v1_httperror_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_server_v1_httperror_proto_rawDesc), len(file_server_v1_httperror_proto_rawDesc)))
})
return file_server_v1_httperror_proto_rawDescData
}
@@ -160,7 +160,7 @@ func file_server_v1_httperror_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_server_v1_httperror_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_server_v1_httperror_proto_rawDesc), len(file_server_v1_httperror_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -171,7 +171,6 @@ func file_server_v1_httperror_proto_init() {
MessageInfos: file_server_v1_httperror_proto_msgTypes,
}.Build()
File_server_v1_httperror_proto = out.File
- file_server_v1_httperror_proto_rawDesc = nil
file_server_v1_httperror_proto_goTypes = nil
file_server_v1_httperror_proto_depIdxs = nil
}
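
protoc-gen-go v1.36 keeps the raw file descriptor as an immutable string and converts it to []byte without copying via unsafe.StringData and unsafe.Slice (Go 1.20+), which is also why the file_..._rawDesc = nil reset disappears from the init function above. A hedged, self-contained sketch of that zero-copy conversion (the descriptor bytes below are an illustrative prefix, not the real descriptor):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // bytesOf returns a []byte view over the string's backing array without
    // copying (Go 1.20+). The result must be treated as read-only, since
    // strings are immutable; this mirrors how the regenerated *.pb.go files
    // hand the string-typed descriptor to protoimpl.
    func bytesOf(s string) []byte {
    	return unsafe.Slice(unsafe.StringData(s), len(s))
    }

    func main() {
    	const rawDesc = "\x0a\x19server/v1/httperror.proto" // illustrative prefix only
    	b := bytesOf(rawDesc)
    	fmt.Println(len(b), b[0]) // 27 10
    }
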
diff --git a/api/server/v1/httperror.pb.validate.go b/api/server/v1/httperror.pb.validate.go
index dc6a5fea3c..81217e496f 100644
--- a/api/server/v1/httperror.pb.validate.go
+++ b/api/server/v1/httperror.pb.validate.go
@@ -110,7 +110,7 @@ type HttpErrorMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HttpErrorMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/server/v1/server.pb.go b/api/server/v1/server.pb.go
index 5afb755095..6ef852521f 100644
--- a/api/server/v1/server.pb.go
+++ b/api/server/v1/server.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: server/v1/server.proto
@@ -9,6 +9,7 @@ package serverv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -88,16 +89,15 @@ func (DistributionMethod) EnumDescriptor() ([]byte, []int) {
// VersionInfo describes component version, or PMM Server as a whole.
type VersionInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// User-visible version.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Full version for debugging.
FullVersion string `protobuf:"bytes,2,opt,name=full_version,json=fullVersion,proto3" json:"full_version,omitempty"`
// Build or release date.
- Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *VersionInfo) Reset() {
@@ -152,12 +152,11 @@ func (x *VersionInfo) GetTimestamp() *timestamppb.Timestamp {
}
type VersionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Dummy parameter for internal testing. Do not use.
- Dummy string `protobuf:"bytes,1,opt,name=dummy,proto3" json:"dummy,omitempty"`
+ Dummy string `protobuf:"bytes,1,opt,name=dummy,proto3" json:"dummy,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *VersionRequest) Reset() {
@@ -198,10 +197,7 @@ func (x *VersionRequest) GetDummy() string {
}
type VersionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// PMM Server version.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Detailed PMM Server version information.
@@ -210,6 +206,8 @@ type VersionResponse struct {
Managed *VersionInfo `protobuf:"bytes,3,opt,name=managed,proto3" json:"managed,omitempty"`
// PMM Server distribution method.
DistributionMethod DistributionMethod `protobuf:"varint,4,opt,name=distribution_method,json=distributionMethod,proto3,enum=server.v1.DistributionMethod" json:"distribution_method,omitempty"` // TODO Versions and statuses of Grafana, Prometheus, PostgreSQL, qan-api2, ClickHouse, pmm-agent, etc.
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *VersionResponse) Reset() {
@@ -271,9 +269,9 @@ func (x *VersionResponse) GetDistributionMethod() DistributionMethod {
}
type ReadinessRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ReadinessRequest) Reset() {
@@ -307,9 +305,9 @@ func (*ReadinessRequest) Descriptor() ([]byte, []int) {
}
type ReadinessResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ReadinessResponse) Reset() {
@@ -343,9 +341,9 @@ func (*ReadinessResponse) Descriptor() ([]byte, []int) {
}
type LeaderHealthCheckRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *LeaderHealthCheckRequest) Reset() {
@@ -379,9 +377,9 @@ func (*LeaderHealthCheckRequest) Descriptor() ([]byte, []int) {
}
type LeaderHealthCheckResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *LeaderHealthCheckResponse) Reset() {
@@ -415,14 +413,13 @@ func (*LeaderHealthCheckResponse) Descriptor() ([]byte, []int) {
}
type CheckUpdatesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// If false, cached information may be returned.
Force bool `protobuf:"varint,1,opt,name=force,proto3" json:"force,omitempty"`
// If true, only installed version will be in response.
OnlyInstalledVersion bool `protobuf:"varint,2,opt,name=only_installed_version,json=onlyInstalledVersion,proto3" json:"only_installed_version,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckUpdatesRequest) Reset() {
@@ -470,10 +467,7 @@ func (x *CheckUpdatesRequest) GetOnlyInstalledVersion() bool {
}
type DockerVersionInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// PMM Version.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Docker image tag.
@@ -484,6 +478,8 @@ type DockerVersionInfo struct {
ReleaseNotesUrl string `protobuf:"bytes,4,opt,name=release_notes_url,json=releaseNotesUrl,proto3" json:"release_notes_url,omitempty"`
// Release notes text for the version (if available).
ReleaseNotesText string `protobuf:"bytes,5,opt,name=release_notes_text,json=releaseNotesText,proto3" json:"release_notes_text,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DockerVersionInfo) Reset() {
@@ -552,10 +548,7 @@ func (x *DockerVersionInfo) GetReleaseNotesText() string {
}
type CheckUpdatesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Currently installed PMM Server version.
Installed *VersionInfo `protobuf:"bytes,1,opt,name=installed,proto3" json:"installed,omitempty"`
// Latest available PMM Server version.
@@ -565,7 +558,9 @@ type CheckUpdatesResponse struct {
// Latest available PMM Server release announcement URL.
LatestNewsUrl string `protobuf:"bytes,4,opt,name=latest_news_url,json=latestNewsUrl,proto3" json:"latest_news_url,omitempty"`
// Last check time.
- LastCheck *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_check,json=lastCheck,proto3" json:"last_check,omitempty"`
+ LastCheck *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_check,json=lastCheck,proto3" json:"last_check,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *CheckUpdatesResponse) Reset() {
@@ -634,9 +629,9 @@ func (x *CheckUpdatesResponse) GetLastCheck() *timestamppb.Timestamp {
}
type ListChangeLogsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListChangeLogsRequest) Reset() {
@@ -670,14 +665,13 @@ func (*ListChangeLogsRequest) Descriptor() ([]byte, []int) {
}
type ListChangeLogsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// List of available updates.
Updates []*DockerVersionInfo `protobuf:"bytes,1,rep,name=updates,proto3" json:"updates,omitempty"`
// Last check time.
- LastCheck *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_check,json=lastCheck,proto3" json:"last_check,omitempty"`
+ LastCheck *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_check,json=lastCheck,proto3" json:"last_check,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListChangeLogsResponse) Reset() {
@@ -725,11 +719,10 @@ func (x *ListChangeLogsResponse) GetLastCheck() *timestamppb.Timestamp {
}
type StartUpdateRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ NewImage string `protobuf:"bytes,1,opt,name=new_image,json=newImage,proto3" json:"new_image,omitempty"`
unknownFields protoimpl.UnknownFields
-
- NewImage string `protobuf:"bytes,1,opt,name=new_image,json=newImage,proto3" json:"new_image,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *StartUpdateRequest) Reset() {
@@ -770,14 +763,13 @@ func (x *StartUpdateRequest) GetNewImage() string {
}
type StartUpdateResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Authentication token for getting update statuses.
AuthToken string `protobuf:"bytes,1,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"`
// Progress log offset.
- LogOffset uint32 `protobuf:"varint,2,opt,name=log_offset,json=logOffset,proto3" json:"log_offset,omitempty"`
+ LogOffset uint32 `protobuf:"varint,2,opt,name=log_offset,json=logOffset,proto3" json:"log_offset,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StartUpdateResponse) Reset() {
@@ -825,14 +817,13 @@ func (x *StartUpdateResponse) GetLogOffset() uint32 {
}
type UpdateStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Authentication token.
AuthToken string `protobuf:"bytes,1,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"`
// Progress log offset.
- LogOffset uint32 `protobuf:"varint,2,opt,name=log_offset,json=logOffset,proto3" json:"log_offset,omitempty"`
+ LogOffset uint32 `protobuf:"varint,2,opt,name=log_offset,json=logOffset,proto3" json:"log_offset,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateStatusRequest) Reset() {
@@ -880,16 +871,15 @@ func (x *UpdateStatusRequest) GetLogOffset() uint32 {
}
type UpdateStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Progress log lines.
LogLines []string `protobuf:"bytes,1,rep,name=log_lines,json=logLines,proto3" json:"log_lines,omitempty"`
// Progress log offset for the next request.
LogOffset uint32 `protobuf:"varint,2,opt,name=log_offset,json=logOffset,proto3" json:"log_offset,omitempty"`
// True when update is done.
- Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateStatusResponse) Reset() {
@@ -945,16 +935,15 @@ func (x *UpdateStatusResponse) GetDone() bool {
// MetricsResolutions represents Prometheus exporters metrics resolutions.
type MetricsResolutions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// High resolution. Should have a suffix in JSON: 1s, 1m, 1h.
Hr *durationpb.Duration `protobuf:"bytes,1,opt,name=hr,proto3" json:"hr,omitempty"`
// Medium resolution. Should have a suffix in JSON: 1s, 1m, 1h.
Mr *durationpb.Duration `protobuf:"bytes,2,opt,name=mr,proto3" json:"mr,omitempty"`
// Low resolution. Should have a suffix in JSON: 1s, 1m, 1h.
- Lr *durationpb.Duration `protobuf:"bytes,3,opt,name=lr,proto3" json:"lr,omitempty"`
+ Lr *durationpb.Duration `protobuf:"bytes,3,opt,name=lr,proto3" json:"lr,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *MetricsResolutions) Reset() {
@@ -1010,16 +999,15 @@ func (x *MetricsResolutions) GetLr() *durationpb.Duration {
// AdvisorRunIntervals represents intervals between each run of Advisor checks.
type AdvisorRunIntervals struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Standard check interval.
StandardInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=standard_interval,json=standardInterval,proto3" json:"standard_interval,omitempty"`
// Interval for rare check runs.
RareInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=rare_interval,json=rareInterval,proto3" json:"rare_interval,omitempty"`
// Interval for frequent check runs.
FrequentInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=frequent_interval,json=frequentInterval,proto3" json:"frequent_interval,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *AdvisorRunIntervals) Reset() {
@@ -1075,10 +1063,7 @@ func (x *AdvisorRunIntervals) GetFrequentInterval() *durationpb.Duration {
// Settings represents PMM Server settings.
type Settings struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// True if updates are enabled.
UpdatesEnabled bool `protobuf:"varint,1,opt,name=updates_enabled,json=updatesEnabled,proto3" json:"updates_enabled,omitempty"`
// True if telemetry is enabled.
@@ -1109,6 +1094,8 @@ type Settings struct {
EnableAccessControl bool `protobuf:"varint,17,opt,name=enable_access_control,json=enableAccessControl,proto3" json:"enable_access_control,omitempty"`
// Default Access Control role ID for new users.
DefaultRoleId uint32 `protobuf:"varint,18,opt,name=default_role_id,json=defaultRoleId,proto3" json:"default_role_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Settings) Reset() {
@@ -1261,9 +1248,9 @@ func (x *Settings) GetDefaultRoleId() uint32 {
}
type GetSettingsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetSettingsRequest) Reset() {
@@ -1297,11 +1284,10 @@ func (*GetSettingsRequest) Descriptor() ([]byte, []int) {
}
type GetSettingsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Settings *Settings `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Settings *Settings `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *GetSettingsResponse) Reset() {
@@ -1342,13 +1328,10 @@ func (x *GetSettingsResponse) GetSettings() *Settings {
}
type ChangeSettingsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- EnableUpdates *bool `protobuf:"varint,1,opt,name=enable_updates,json=enableUpdates,proto3,oneof" json:"enable_updates,omitempty"`
- EnableTelemetry *bool `protobuf:"varint,2,opt,name=enable_telemetry,json=enableTelemetry,proto3,oneof" json:"enable_telemetry,omitempty"`
- MetricsResolutions *MetricsResolutions `protobuf:"bytes,3,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ EnableUpdates *bool `protobuf:"varint,1,opt,name=enable_updates,json=enableUpdates,proto3,oneof" json:"enable_updates,omitempty"`
+ EnableTelemetry *bool `protobuf:"varint,2,opt,name=enable_telemetry,json=enableTelemetry,proto3,oneof" json:"enable_telemetry,omitempty"`
+ MetricsResolutions *MetricsResolutions `protobuf:"bytes,3,opt,name=metrics_resolutions,json=metricsResolutions,proto3" json:"metrics_resolutions,omitempty"`
// A number of full days for Prometheus and QAN data retention. Should have a suffix in JSON: 2592000s, 43200m, 720h.
DataRetention *durationpb.Duration `protobuf:"bytes,4,opt,name=data_retention,json=dataRetention,proto3" json:"data_retention,omitempty"`
SshKey *string `protobuf:"bytes,5,opt,name=ssh_key,json=sshKey,proto3,oneof" json:"ssh_key,omitempty"`
@@ -1367,6 +1350,8 @@ type ChangeSettingsRequest struct {
EnableBackupManagement *bool `protobuf:"varint,12,opt,name=enable_backup_management,json=enableBackupManagement,proto3,oneof" json:"enable_backup_management,omitempty"`
// Enable Access Control
EnableAccessControl *bool `protobuf:"varint,13,opt,name=enable_access_control,json=enableAccessControl,proto3,oneof" json:"enable_access_control,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeSettingsRequest) Reset() {
@@ -1491,11 +1476,10 @@ func (x *ChangeSettingsRequest) GetEnableAccessControl() bool {
}
type ChangeSettingsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Settings *Settings `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Settings *Settings `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ChangeSettingsResponse) Reset() {
@@ -1537,7 +1521,7 @@ func (x *ChangeSettingsResponse) GetSettings() *Settings {
var File_server_v1_server_proto protoreflect.FileDescriptor
-var file_server_v1_server_proto_rawDesc = []byte{
+var file_server_v1_server_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76,
0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x1a, 0x13, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
@@ -1934,16 +1918,16 @@ var file_server_v1_server_proto_rawDesc = []byte{
0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0xea, 0x02, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_server_v1_server_proto_rawDescOnce sync.Once
- file_server_v1_server_proto_rawDescData = file_server_v1_server_proto_rawDesc
+ file_server_v1_server_proto_rawDescData []byte
)
func file_server_v1_server_proto_rawDescGZIP() []byte {
file_server_v1_server_proto_rawDescOnce.Do(func() {
- file_server_v1_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_server_v1_server_proto_rawDescData)
+ file_server_v1_server_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_server_v1_server_proto_rawDesc), len(file_server_v1_server_proto_rawDesc)))
})
return file_server_v1_server_proto_rawDescData
}
@@ -2043,7 +2027,7 @@ func file_server_v1_server_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_server_v1_server_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_server_v1_server_proto_rawDesc), len(file_server_v1_server_proto_rawDesc)),
NumEnums: 1,
NumMessages: 23,
NumExtensions: 0,
@@ -2055,7 +2039,6 @@ func file_server_v1_server_proto_init() {
MessageInfos: file_server_v1_server_proto_msgTypes,
}.Build()
File_server_v1_server_proto = out.File
- file_server_v1_server_proto_rawDesc = nil
file_server_v1_server_proto_goTypes = nil
file_server_v1_server_proto_depIdxs = nil
}
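
The server.pb.gw.go diff that follows shows the other recurring gateway change: body-decoding request funcs now compare against io.EOF with errors.Is instead of !=, so a wrapped EOF still counts as an empty body. A small sketch of that idiom outside the generated code (the JSON decode below is illustrative, not the generated request function):

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    	"io"
    	"strings"
    )

    func main() {
    	var payload struct{ Name string }
    	err := json.NewDecoder(strings.NewReader("")).Decode(&payload) // empty body
    	// errors.Is walks the wrap chain, so it still matches a wrapped io.EOF;
    	// the old `err != io.EOF` comparison only matched the bare sentinel.
    	if err != nil && !errors.Is(err, io.EOF) {
    		fmt.Println("decode failed:", err)
    		return
    	}
    	fmt.Println("empty body treated as no payload")
    }
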
diff --git a/api/server/v1/server.pb.gw.go b/api/server/v1/server.pb.gw.go
index 26253aedbb..0ac06c5e58 100644
--- a/api/server/v1/server.pb.gw.go
+++ b/api/server/v1/server.pb.gw.go
@@ -10,6 +10,7 @@ package serverv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,6 +29,7 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
@@ -36,63 +38,67 @@ var (
var filter_ServerService_Version_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ServerService_Version_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq VersionRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq VersionRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServerService_Version_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Version(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_Version_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq VersionRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq VersionRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServerService_Version_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Version(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_Readiness_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ReadinessRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ReadinessRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.Readiness(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_Readiness_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ReadinessRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ReadinessRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.Readiness(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_LeaderHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq LeaderHealthCheckRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq LeaderHealthCheckRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.LeaderHealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_LeaderHealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq LeaderHealthCheckRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq LeaderHealthCheckRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.LeaderHealthCheck(ctx, &protoReq)
return msg, metadata, err
}
@@ -100,135 +106,139 @@ func local_request_ServerService_LeaderHealthCheck_0(ctx context.Context, marsha
var filter_ServerService_CheckUpdates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_ServerService_CheckUpdates_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CheckUpdatesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq CheckUpdatesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServerService_CheckUpdates_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.CheckUpdates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_CheckUpdates_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq CheckUpdatesRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq CheckUpdatesRequest
+ metadata runtime.ServerMetadata
+ )
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ServerService_CheckUpdates_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.CheckUpdates(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_ListChangeLogs_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListChangeLogsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListChangeLogsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListChangeLogs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_ListChangeLogs_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListChangeLogsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListChangeLogsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListChangeLogs(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_StartUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartUpdateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.StartUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_StartUpdate_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StartUpdateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StartUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.StartUpdate(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_UpdateStatus_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateStatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UpdateStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UpdateStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_UpdateStatus_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateStatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UpdateStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UpdateStatus(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_GetSettings_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetSettingsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetSettingsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.GetSettings(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_GetSettings_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetSettingsRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetSettingsRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.GetSettings(ctx, &protoReq)
return msg, metadata, err
}
func request_ServerService_ChangeSettings_0(ctx context.Context, marshaler runtime.Marshaler, client ServerServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeSettingsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeSettingsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.ChangeSettings(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ServerService_ChangeSettings_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ChangeSettingsRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq ChangeSettingsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.ChangeSettings(ctx, &protoReq)
return msg, metadata, err
}
@@ -239,15 +249,13 @@ func local_request_ServerService_ChangeSettings_0(ctx context.Context, marshaler
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterServerServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ServerServiceServer) error {
- mux.Handle("GET", pattern_ServerService_Version_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_Version_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/Version", runtime.WithHTTPPathPattern("/v1/server/version"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/Version", runtime.WithHTTPPathPattern("/v1/server/version"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -259,19 +267,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_Version_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_Readiness_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_Readiness_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/Readiness", runtime.WithHTTPPathPattern("/v1/server/readyz"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/Readiness", runtime.WithHTTPPathPattern("/v1/server/readyz"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -283,19 +287,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_Readiness_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_LeaderHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_LeaderHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/LeaderHealthCheck", runtime.WithHTTPPathPattern("/v1/server/leaderHealthCheck"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/LeaderHealthCheck", runtime.WithHTTPPathPattern("/v1/server/leaderHealthCheck"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -307,19 +307,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_LeaderHealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_CheckUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_CheckUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/CheckUpdates", runtime.WithHTTPPathPattern("/v1/server/updates"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/CheckUpdates", runtime.WithHTTPPathPattern("/v1/server/updates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -331,19 +327,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_CheckUpdates_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_ListChangeLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_ListChangeLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/ListChangeLogs", runtime.WithHTTPPathPattern("/v1/server/updates/changelogs"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/ListChangeLogs", runtime.WithHTTPPathPattern("/v1/server/updates/changelogs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -355,19 +347,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_ListChangeLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServerService_StartUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServerService_StartUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/StartUpdate", runtime.WithHTTPPathPattern("/v1/server/updates:start"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/StartUpdate", runtime.WithHTTPPathPattern("/v1/server/updates:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -379,19 +367,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_StartUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServerService_UpdateStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServerService_UpdateStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/UpdateStatus", runtime.WithHTTPPathPattern("/v1/server/updates:getStatus"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/UpdateStatus", runtime.WithHTTPPathPattern("/v1/server/updates:getStatus"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -403,19 +387,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_UpdateStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_GetSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_GetSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/GetSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/GetSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -427,19 +407,15 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_GetSettings_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_ServerService_ChangeSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_ServerService_ChangeSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/ChangeSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/server.v1.ServerService/ChangeSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -451,7 +427,6 @@ func RegisterServerServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_ChangeSettings_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -479,7 +454,6 @@ func RegisterServerServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.
}
}()
}()
-
return RegisterServerServiceHandler(ctx, mux, conn)
}
@@ -495,13 +469,11 @@ func RegisterServerServiceHandler(ctx context.Context, mux *runtime.ServeMux, co
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ServerServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ServerServiceClient) error {
- mux.Handle("GET", pattern_ServerService_Version_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_Version_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/Version", runtime.WithHTTPPathPattern("/v1/server/version"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/Version", runtime.WithHTTPPathPattern("/v1/server/version"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -512,17 +484,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_Version_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_Readiness_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_Readiness_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/Readiness", runtime.WithHTTPPathPattern("/v1/server/readyz"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/Readiness", runtime.WithHTTPPathPattern("/v1/server/readyz"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -533,17 +501,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_Readiness_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_LeaderHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_LeaderHealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/LeaderHealthCheck", runtime.WithHTTPPathPattern("/v1/server/leaderHealthCheck"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/LeaderHealthCheck", runtime.WithHTTPPathPattern("/v1/server/leaderHealthCheck"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -554,17 +518,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_LeaderHealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_CheckUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_CheckUpdates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/CheckUpdates", runtime.WithHTTPPathPattern("/v1/server/updates"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/CheckUpdates", runtime.WithHTTPPathPattern("/v1/server/updates"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -575,17 +535,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_CheckUpdates_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_ListChangeLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_ListChangeLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/ListChangeLogs", runtime.WithHTTPPathPattern("/v1/server/updates/changelogs"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/ListChangeLogs", runtime.WithHTTPPathPattern("/v1/server/updates/changelogs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -596,17 +552,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_ListChangeLogs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServerService_StartUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServerService_StartUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/StartUpdate", runtime.WithHTTPPathPattern("/v1/server/updates:start"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/StartUpdate", runtime.WithHTTPPathPattern("/v1/server/updates:start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -617,17 +569,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_StartUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("POST", pattern_ServerService_UpdateStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_ServerService_UpdateStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/UpdateStatus", runtime.WithHTTPPathPattern("/v1/server/updates:getStatus"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/UpdateStatus", runtime.WithHTTPPathPattern("/v1/server/updates:getStatus"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -638,17 +586,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_UpdateStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_ServerService_GetSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_ServerService_GetSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/GetSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/GetSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -659,17 +603,13 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_GetSettings_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_ServerService_ChangeSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_ServerService_ChangeSettings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/ChangeSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/server.v1.ServerService/ChangeSettings", runtime.WithHTTPPathPattern("/v1/server/settings"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -680,49 +620,31 @@ func RegisterServerServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_ServerService_ChangeSettings_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_ServerService_Version_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "version"}, ""))
-
- pattern_ServerService_Readiness_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "readyz"}, ""))
-
+ pattern_ServerService_Version_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "version"}, ""))
+ pattern_ServerService_Readiness_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "readyz"}, ""))
pattern_ServerService_LeaderHealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "leaderHealthCheck"}, ""))
-
- pattern_ServerService_CheckUpdates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, ""))
-
- pattern_ServerService_ListChangeLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "server", "updates", "changelogs"}, ""))
-
- pattern_ServerService_StartUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, "start"))
-
- pattern_ServerService_UpdateStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, "getStatus"))
-
- pattern_ServerService_GetSettings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "settings"}, ""))
-
- pattern_ServerService_ChangeSettings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "settings"}, ""))
+ pattern_ServerService_CheckUpdates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, ""))
+ pattern_ServerService_ListChangeLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "server", "updates", "changelogs"}, ""))
+ pattern_ServerService_StartUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, "start"))
+ pattern_ServerService_UpdateStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "updates"}, "getStatus"))
+ pattern_ServerService_GetSettings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "settings"}, ""))
+ pattern_ServerService_ChangeSettings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "server", "settings"}, ""))
)
var (
- forward_ServerService_Version_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_Readiness_0 = runtime.ForwardResponseMessage
-
+ forward_ServerService_Version_0 = runtime.ForwardResponseMessage
+ forward_ServerService_Readiness_0 = runtime.ForwardResponseMessage
forward_ServerService_LeaderHealthCheck_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_CheckUpdates_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_ListChangeLogs_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_StartUpdate_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_UpdateStatus_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_GetSettings_0 = runtime.ForwardResponseMessage
-
- forward_ServerService_ChangeSettings_0 = runtime.ForwardResponseMessage
+ forward_ServerService_CheckUpdates_0 = runtime.ForwardResponseMessage
+ forward_ServerService_ListChangeLogs_0 = runtime.ForwardResponseMessage
+ forward_ServerService_StartUpdate_0 = runtime.ForwardResponseMessage
+ forward_ServerService_UpdateStatus_0 = runtime.ForwardResponseMessage
+ forward_ServerService_GetSettings_0 = runtime.ForwardResponseMessage
+ forward_ServerService_ChangeSettings_0 = runtime.ForwardResponseMessage
)
diff --git a/api/server/v1/server.pb.validate.go b/api/server/v1/server.pb.validate.go
index 48dad7e546..6f8e7a3248 100644
--- a/api/server/v1/server.pb.validate.go
+++ b/api/server/v1/server.pb.validate.go
@@ -103,7 +103,7 @@ type VersionInfoMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m VersionInfoMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -205,7 +205,7 @@ type VersionRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m VersionRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -367,7 +367,7 @@ type VersionResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m VersionResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -467,7 +467,7 @@ type ReadinessRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ReadinessRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -567,7 +567,7 @@ type ReadinessResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ReadinessResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -669,7 +669,7 @@ type LeaderHealthCheckRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LeaderHealthCheckRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -771,7 +771,7 @@ type LeaderHealthCheckResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m LeaderHealthCheckResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -877,7 +877,7 @@ type CheckUpdatesRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckUpdatesRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1016,7 +1016,7 @@ type DockerVersionInfoMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DockerVersionInfoMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1209,7 +1209,7 @@ type CheckUpdatesResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m CheckUpdatesResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1311,7 +1311,7 @@ type ListChangeLogsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListChangeLogsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1476,7 +1476,7 @@ type ListChangeLogsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListChangeLogsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1580,7 +1580,7 @@ type StartUpdateRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartUpdateRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1686,7 +1686,7 @@ type StartUpdateResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StartUpdateResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1792,7 +1792,7 @@ type UpdateStatusRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateStatusRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -1898,7 +1898,7 @@ type UpdateStatusResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateStatusResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2087,7 +2087,7 @@ type MetricsResolutionsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m MetricsResolutionsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2276,7 +2276,7 @@ type AdvisorRunIntervalsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m AdvisorRunIntervalsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2488,7 +2488,7 @@ type SettingsMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m SettingsMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2588,7 +2588,7 @@ type GetSettingsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetSettingsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2719,7 +2719,7 @@ type GetSettingsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetSettingsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -2975,7 +2975,7 @@ type ChangeSettingsRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeSettingsRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -3106,7 +3106,7 @@ type ChangeSettingsResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ChangeSettingsResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/uievents/v1/server.pb.go b/api/uievents/v1/server.pb.go
index 22a36775af..be4a90459b 100644
--- a/api/uievents/v1/server.pb.go
+++ b/api/uievents/v1/server.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: uievents/v1/server.proto
@@ -9,6 +9,7 @@ package uieventsv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -24,14 +25,13 @@ const (
)
type NotificationEvent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
- Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
- Location string `protobuf:"bytes,3,opt,name=location,proto3" json:"location,omitempty"`
- LocationParams string `protobuf:"bytes,4,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ Text string `protobuf:"bytes,2,opt,name=text,proto3" json:"text,omitempty"`
+ Location string `protobuf:"bytes,3,opt,name=location,proto3" json:"location,omitempty"`
+ LocationParams string `protobuf:"bytes,4,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *NotificationEvent) Reset() {
@@ -93,14 +93,13 @@ func (x *NotificationEvent) GetLocationParams() string {
}
type FetchingEvent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Component string `protobuf:"bytes,1,opt,name=component,proto3" json:"component,omitempty"`
- LoadTime int32 `protobuf:"varint,2,opt,name=load_time,json=loadTime,proto3" json:"load_time,omitempty"`
- Location string `protobuf:"bytes,3,opt,name=location,proto3" json:"location,omitempty"`
- LocationParams string `protobuf:"bytes,4,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Component string `protobuf:"bytes,1,opt,name=component,proto3" json:"component,omitempty"`
+ LoadTime int32 `protobuf:"varint,2,opt,name=load_time,json=loadTime,proto3" json:"load_time,omitempty"`
+ Location string `protobuf:"bytes,3,opt,name=location,proto3" json:"location,omitempty"`
+ LocationParams string `protobuf:"bytes,4,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FetchingEvent) Reset() {
@@ -162,16 +161,15 @@ func (x *FetchingEvent) GetLocationParams() string {
}
type DashboardUsageEvent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Uid string `protobuf:"bytes,1,opt,name=uid,proto3" json:"uid,omitempty"`
- Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
- Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"`
- LoadTime int32 `protobuf:"varint,4,opt,name=load_time,json=loadTime,proto3" json:"load_time,omitempty"`
- Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"`
- LocationParams string `protobuf:"bytes,6,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Uid string `protobuf:"bytes,1,opt,name=uid,proto3" json:"uid,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"`
+ LoadTime int32 `protobuf:"varint,4,opt,name=load_time,json=loadTime,proto3" json:"load_time,omitempty"`
+ Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"`
+ LocationParams string `protobuf:"bytes,6,opt,name=location_params,json=locationParams,proto3" json:"location_params,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DashboardUsageEvent) Reset() {
@@ -247,14 +245,13 @@ func (x *DashboardUsageEvent) GetLocationParams() string {
}
type UserFlowEvent struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FlowId string `protobuf:"bytes,1,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+ StoryId string `protobuf:"bytes,2,opt,name=story_id,json=storyId,proto3" json:"story_id,omitempty"`
+ Event string `protobuf:"bytes,3,opt,name=event,proto3" json:"event,omitempty"`
+ Params map[string]string `protobuf:"bytes,4,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
-
- FlowId string `protobuf:"bytes,1,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
- StoryId string `protobuf:"bytes,2,opt,name=story_id,json=storyId,proto3" json:"story_id,omitempty"`
- Event string `protobuf:"bytes,3,opt,name=event,proto3" json:"event,omitempty"`
- Params map[string]string `protobuf:"bytes,4,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ sizeCache protoimpl.SizeCache
}
func (x *UserFlowEvent) Reset() {
@@ -316,14 +313,13 @@ func (x *UserFlowEvent) GetParams() map[string]string {
}
type StoreRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Notifications []*NotificationEvent `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"`
Fetching []*FetchingEvent `protobuf:"bytes,2,rep,name=fetching,proto3" json:"fetching,omitempty"`
DashboardUsage []*DashboardUsageEvent `protobuf:"bytes,3,rep,name=dashboard_usage,json=dashboardUsage,proto3" json:"dashboard_usage,omitempty"`
UserFlowEvents []*UserFlowEvent `protobuf:"bytes,4,rep,name=user_flow_events,json=userFlowEvents,proto3" json:"user_flow_events,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StoreRequest) Reset() {
@@ -385,9 +381,9 @@ func (x *StoreRequest) GetUserFlowEvents() []*UserFlowEvent {
}
type StoreResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *StoreResponse) Reset() {
@@ -422,7 +418,7 @@ func (*StoreResponse) Descriptor() ([]byte, []int) {
var File_uievents_v1_server_proto protoreflect.FileDescriptor
-var file_uievents_v1_server_proto_rawDesc = []byte{
+var file_uievents_v1_server_proto_rawDesc = string([]byte{
0x0a, 0x18, 0x75, 0x69, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65,
0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x75, 0x69, 0x65, 0x76,
0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
@@ -515,16 +511,16 @@ var file_uievents_v1_server_proto_rawDesc = []byte{
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0c, 0x55,
0x69, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
-}
+})
var (
file_uievents_v1_server_proto_rawDescOnce sync.Once
- file_uievents_v1_server_proto_rawDescData = file_uievents_v1_server_proto_rawDesc
+ file_uievents_v1_server_proto_rawDescData []byte
)
func file_uievents_v1_server_proto_rawDescGZIP() []byte {
file_uievents_v1_server_proto_rawDescOnce.Do(func() {
- file_uievents_v1_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_uievents_v1_server_proto_rawDescData)
+ file_uievents_v1_server_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_uievents_v1_server_proto_rawDesc), len(file_uievents_v1_server_proto_rawDesc)))
})
return file_uievents_v1_server_proto_rawDescData
}
@@ -566,7 +562,7 @@ func file_uievents_v1_server_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_uievents_v1_server_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_uievents_v1_server_proto_rawDesc), len(file_uievents_v1_server_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@@ -577,7 +573,6 @@ func file_uievents_v1_server_proto_init() {
MessageInfos: file_uievents_v1_server_proto_msgTypes,
}.Build()
File_uievents_v1_server_proto = out.File
- file_uievents_v1_server_proto_rawDesc = nil
file_uievents_v1_server_proto_goTypes = nil
file_uievents_v1_server_proto_depIdxs = nil
}
diff --git a/api/uievents/v1/server.pb.gw.go b/api/uievents/v1/server.pb.gw.go
index 784ea0403f..2bf5076313 100644
--- a/api/uievents/v1/server.pb.gw.go
+++ b/api/uievents/v1/server.pb.gw.go
@@ -10,6 +10,7 @@ package uieventsv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,31 +29,32 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_UIEventsService_Store_0(ctx context.Context, marshaler runtime.Marshaler, client UIEventsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StoreRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StoreRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.Store(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_UIEventsService_Store_0(ctx context.Context, marshaler runtime.Marshaler, server UIEventsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq StoreRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq StoreRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.Store(ctx, &protoReq)
return msg, metadata, err
}
@@ -63,15 +65,13 @@ func local_request_UIEventsService_Store_0(ctx context.Context, marshaler runtim
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterUIEventsServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterUIEventsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server UIEventsServiceServer) error {
- mux.Handle("POST", pattern_UIEventsService_Store_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_UIEventsService_Store_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/uievents.v1.UIEventsService/Store", runtime.WithHTTPPathPattern("/v1/ui-events/Store"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/uievents.v1.UIEventsService/Store", runtime.WithHTTPPathPattern("/v1/ui-events/Store"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -83,7 +83,6 @@ func RegisterUIEventsServiceHandlerServer(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UIEventsService_Store_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -111,7 +110,6 @@ func RegisterUIEventsServiceHandlerFromEndpoint(ctx context.Context, mux *runtim
}
}()
}()
-
return RegisterUIEventsServiceHandler(ctx, mux, conn)
}
@@ -127,13 +125,11 @@ func RegisterUIEventsServiceHandler(ctx context.Context, mux *runtime.ServeMux,
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "UIEventsServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterUIEventsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client UIEventsServiceClient) error {
- mux.Handle("POST", pattern_UIEventsService_Store_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPost, pattern_UIEventsService_Store_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/uievents.v1.UIEventsService/Store", runtime.WithHTTPPathPattern("/v1/ui-events/Store"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/uievents.v1.UIEventsService/Store", runtime.WithHTTPPathPattern("/v1/ui-events/Store"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -144,10 +140,8 @@ func RegisterUIEventsServiceHandlerClient(ctx context.Context, mux *runtime.Serv
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UIEventsService_Store_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
diff --git a/api/uievents/v1/server.pb.validate.go b/api/uievents/v1/server.pb.validate.go
index cc2b54ce1a..5996e04b71 100644
--- a/api/uievents/v1/server.pb.validate.go
+++ b/api/uievents/v1/server.pb.validate.go
@@ -79,7 +79,7 @@ type NotificationEventMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m NotificationEventMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -189,7 +189,7 @@ type FetchingEventMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m FetchingEventMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -299,7 +299,7 @@ type DashboardUsageEventMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m DashboardUsageEventMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -409,7 +409,7 @@ type UserFlowEventMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UserFlowEventMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -644,7 +644,7 @@ type StoreRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StoreRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -744,7 +744,7 @@ type StoreResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m StoreResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
diff --git a/api/user/v1/user.pb.go b/api/user/v1/user.pb.go
index 42b9584246..d70b36bc0b 100644
--- a/api/user/v1/user.pb.go
+++ b/api/user/v1/user.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.35.1
+// protoc-gen-go v1.36.4
// protoc (unknown)
// source: user/v1/user.proto
@@ -9,6 +9,7 @@ package userv1
import (
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
@@ -24,9 +25,9 @@ const (
)
type GetUserRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetUserRequest) Reset() {
@@ -60,10 +61,7 @@ func (*GetUserRequest) Descriptor() ([]byte, []int) {
}
type GetUserResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// User ID
UserId uint32 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
// Product Tour
@@ -74,6 +72,8 @@ type GetUserResponse struct {
SnoozedPmmVersion string `protobuf:"bytes,4,opt,name=snoozed_pmm_version,json=snoozedPmmVersion,proto3" json:"snoozed_pmm_version,omitempty"`
// Snoozed warning about API keys migration
SnoozedApiKeysMigration bool `protobuf:"varint,5,opt,name=snoozed_api_keys_migration,json=snoozedApiKeysMigration,proto3" json:"snoozed_api_keys_migration,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GetUserResponse) Reset() {
@@ -142,10 +142,7 @@ func (x *GetUserResponse) GetSnoozedApiKeysMigration() bool {
}
type UpdateUserRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Product Tour
ProductTourCompleted *bool `protobuf:"varint,2,opt,name=product_tour_completed,json=productTourCompleted,proto3,oneof" json:"product_tour_completed,omitempty"`
// Alerting Tour
@@ -154,6 +151,8 @@ type UpdateUserRequest struct {
SnoozedPmmVersion *string `protobuf:"bytes,4,opt,name=snoozed_pmm_version,json=snoozedPmmVersion,proto3,oneof" json:"snoozed_pmm_version,omitempty"`
// Snoozed warning about API keys migration
SnoozedApiKeysMigration *bool `protobuf:"varint,5,opt,name=snoozed_api_keys_migration,json=snoozedApiKeysMigration,proto3,oneof" json:"snoozed_api_keys_migration,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateUserRequest) Reset() {
@@ -215,10 +214,7 @@ func (x *UpdateUserRequest) GetSnoozedApiKeysMigration() bool {
}
type UpdateUserResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// User ID
UserId uint32 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
// Product Tour
@@ -229,6 +225,8 @@ type UpdateUserResponse struct {
SnoozedPmmVersion string `protobuf:"bytes,4,opt,name=snoozed_pmm_version,json=snoozedPmmVersion,proto3" json:"snoozed_pmm_version,omitempty"`
// Snoozed warning about API keys migration
SnoozedApiKeysMigration bool `protobuf:"varint,5,opt,name=snoozed_api_keys_migration,json=snoozedApiKeysMigration,proto3" json:"snoozed_api_keys_migration,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UpdateUserResponse) Reset() {
@@ -297,9 +295,9 @@ func (x *UpdateUserResponse) GetSnoozedApiKeysMigration() bool {
}
type ListUsersRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListUsersRequest) Reset() {
@@ -333,11 +331,10 @@ func (*ListUsersRequest) Descriptor() ([]byte, []int) {
}
type ListUsersResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Users []*ListUsersResponse_UserDetail `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Users []*ListUsersResponse_UserDetail `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ListUsersResponse) Reset() {
@@ -378,13 +375,12 @@ func (x *ListUsersResponse) GetUsers() []*ListUsersResponse_UserDetail {
}
type ListUsersResponse_UserDetail struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- UserId uint32 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ UserId uint32 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
// List of role IDs assigned to the user.
- RoleIds []uint32 `protobuf:"varint,2,rep,packed,name=role_ids,json=roleIds,proto3" json:"role_ids,omitempty"`
+ RoleIds []uint32 `protobuf:"varint,2,rep,packed,name=role_ids,json=roleIds,proto3" json:"role_ids,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ListUsersResponse_UserDetail) Reset() {
@@ -433,7 +429,7 @@ func (x *ListUsersResponse_UserDetail) GetRoleIds() []uint32 {
var File_user_v1_user_proto protoreflect.FileDescriptor
-var file_user_v1_user_proto_rawDesc = []byte{
+var file_user_v1_user_proto_rawDesc = string([]byte{
0x0a, 0x12, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
@@ -548,16 +544,16 @@ var file_user_v1_user_proto_rawDesc = []byte{
0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x13, 0x55, 0x73, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47,
0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x08, 0x55, 0x73, 0x65,
0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_user_v1_user_proto_rawDescOnce sync.Once
- file_user_v1_user_proto_rawDescData = file_user_v1_user_proto_rawDesc
+ file_user_v1_user_proto_rawDescData []byte
)
func file_user_v1_user_proto_rawDescGZIP() []byte {
file_user_v1_user_proto_rawDescOnce.Do(func() {
- file_user_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(file_user_v1_user_proto_rawDescData)
+ file_user_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc)))
})
return file_user_v1_user_proto_rawDescData
}
@@ -600,7 +596,7 @@ func file_user_v1_user_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_user_v1_user_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@@ -611,7 +607,6 @@ func file_user_v1_user_proto_init() {
MessageInfos: file_user_v1_user_proto_msgTypes,
}.Build()
File_user_v1_user_proto = out.File
- file_user_v1_user_proto_rawDesc = nil
file_user_v1_user_proto_goTypes = nil
file_user_v1_user_proto_depIdxs = nil
}
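The regenerated `user.pb.go` above now keeps the raw file descriptor as a string and converts it back to bytes on demand with `unsafe.Slice(unsafe.StringData(...))`, avoiding a copy. A minimal sketch of that zero-copy pattern (Go 1.20+), using a hypothetical descriptor value rather than the generated one, is:

```go
package main

import (
	"fmt"
	"unsafe"
)

// rawDesc stands in for a generated *_rawDesc value: an immutable string
// holding serialized descriptor bytes.
const rawDesc = "\x0a\x12user/v1/user.proto"

// bytesView returns a []byte that aliases the string's backing storage
// without copying. The caller must treat the slice as read-only, since
// mutating it would break Go's string immutability guarantee.
func bytesView(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := bytesView(rawDesc)
	fmt.Println(len(b), b[0]) // same length and bytes as rawDesc, no allocation
}
```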
diff --git a/api/user/v1/user.pb.gw.go b/api/user/v1/user.pb.gw.go
index 890714f191..0ee0fd526c 100644
--- a/api/user/v1/user.pb.gw.go
+++ b/api/user/v1/user.pb.gw.go
@@ -10,6 +10,7 @@ package userv1
import (
"context"
+ "errors"
"io"
"net/http"
@@ -28,63 +29,68 @@ var (
_ codes.Code
_ io.Reader
_ status.Status
+ _ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
func request_UserService_GetUser_0(ctx context.Context, marshaler runtime.Marshaler, client UserServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetUserRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetUserRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.GetUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_UserService_GetUser_0(ctx context.Context, marshaler runtime.Marshaler, server UserServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq GetUserRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq GetUserRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.GetUser(ctx, &protoReq)
return msg, metadata, err
}
func request_UserService_UpdateUser_0(ctx context.Context, marshaler runtime.Marshaler, client UserServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateUserRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UpdateUserRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := client.UpdateUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_UserService_UpdateUser_0(ctx context.Context, marshaler runtime.Marshaler, server UserServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq UpdateUserRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ var (
+ protoReq UpdateUserRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
-
msg, err := server.UpdateUser(ctx, &protoReq)
return msg, metadata, err
}
func request_UserService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, client UserServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListUsersRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListUsersRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := client.ListUsers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_UserService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, server UserServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq ListUsersRequest
- var metadata runtime.ServerMetadata
-
+ var (
+ protoReq ListUsersRequest
+ metadata runtime.ServerMetadata
+ )
msg, err := server.ListUsers(ctx, &protoReq)
return msg, metadata, err
}
@@ -95,15 +101,13 @@ func local_request_UserService_ListUsers_0(ctx context.Context, marshaler runtim
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterUserServiceHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterUserServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server UserServiceServer) error {
- mux.Handle("GET", pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/v1/users/me"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/v1/users/me"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -115,19 +119,15 @@ func RegisterUserServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_GetUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_UserService_UpdateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_UserService_UpdateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/UpdateUser", runtime.WithHTTPPathPattern("/v1/users/me"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/UpdateUser", runtime.WithHTTPPathPattern("/v1/users/me"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -139,19 +139,15 @@ func RegisterUserServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_UpdateUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/v1/users"))
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/v1/users"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -163,7 +159,6 @@ func RegisterUserServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@@ -191,7 +186,6 @@ func RegisterUserServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se
}
}()
}()
-
return RegisterUserServiceHandler(ctx, mux, conn)
}
@@ -207,13 +201,11 @@ func RegisterUserServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "UserServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
func RegisterUserServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client UserServiceClient) error {
- mux.Handle("GET", pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/v1/users/me"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/v1/users/me"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -224,17 +216,13 @@ func RegisterUserServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_GetUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("PUT", pattern_UserService_UpdateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodPut, pattern_UserService_UpdateUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/UpdateUser", runtime.WithHTTPPathPattern("/v1/users/me"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/UpdateUser", runtime.WithHTTPPathPattern("/v1/users/me"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -245,17 +233,13 @@ func RegisterUserServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_UpdateUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
- mux.Handle("GET", pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ mux.Handle(http.MethodGet, pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- var err error
- var annotatedContext context.Context
- annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/v1/users"))
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/v1/users"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -266,25 +250,19 @@ func RegisterUserServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
-
forward_UserService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
-
return nil
}
var (
- pattern_UserService_GetUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "users", "me"}, ""))
-
+ pattern_UserService_GetUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "users", "me"}, ""))
pattern_UserService_UpdateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "users", "me"}, ""))
-
- pattern_UserService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "users"}, ""))
+ pattern_UserService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "users"}, ""))
)
var (
- forward_UserService_GetUser_0 = runtime.ForwardResponseMessage
-
+ forward_UserService_GetUser_0 = runtime.ForwardResponseMessage
forward_UserService_UpdateUser_0 = runtime.ForwardResponseMessage
-
- forward_UserService_ListUsers_0 = runtime.ForwardResponseMessage
+ forward_UserService_ListUsers_0 = runtime.ForwardResponseMessage
)
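The gateway changes above also switch the empty-body check from `err != io.EOF` to `!errors.Is(err, io.EOF)`, so wrapped EOF errors are tolerated as well. A standalone sketch of the same decode-and-ignore-EOF flow, assuming `encoding/json` in place of the gateway's runtime marshaler and a hypothetical request type, is:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

type updateUserBody struct {
	ProductTourCompleted *bool `json:"product_tour_completed,omitempty"`
}

// decodeBody fills msg from r, treating an empty body (io.EOF) as "no fields
// set" rather than as an error, mirroring the generated handler's behaviour.
func decodeBody(r io.Reader, msg *updateUserBody) error {
	if err := json.NewDecoder(r).Decode(msg); err != nil && !errors.Is(err, io.EOF) {
		return fmt.Errorf("invalid request body: %w", err)
	}
	return nil
}

func main() {
	var empty updateUserBody
	fmt.Println(decodeBody(strings.NewReader(""), &empty)) // <nil>: empty body is allowed

	var set updateUserBody
	fmt.Println(decodeBody(strings.NewReader(`{"product_tour_completed":true}`), &set), *set.ProductTourCompleted)
}
```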
diff --git a/api/user/v1/user.pb.validate.go b/api/user/v1/user.pb.validate.go
index 6998eefd33..cc432998b9 100644
--- a/api/user/v1/user.pb.validate.go
+++ b/api/user/v1/user.pb.validate.go
@@ -71,7 +71,7 @@ type GetUserRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetUserRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -181,7 +181,7 @@ type GetUserResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m GetUserResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -297,7 +297,7 @@ type UpdateUserRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateUserRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -409,7 +409,7 @@ type UpdateUserResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UpdateUserResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -511,7 +511,7 @@ type ListUsersRequestMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListUsersRequestMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -645,7 +645,7 @@ type ListUsersResponseMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListUsersResponseMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
@@ -749,7 +749,7 @@ type ListUsersResponse_UserDetailMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m ListUsersResponse_UserDetailMultiError) Error() string {
- var msgs []string
+ msgs := make([]string, 0, len(m))
for _, err := range m {
msgs = append(msgs, err.Error())
}
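The `*.pb.validate.go` hunks above replace `var msgs []string` with a capacity-preallocated slice in every `MultiError.Error` method, so `append` never has to grow the backing array. A self-contained sketch of the same method shape, with a simplified stand-in for the generated MultiError types, is:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// multiError mirrors the generated *MultiError types: a slice of errors whose
// Error method joins the wrapped messages.
type multiError []error

func (m multiError) Error() string {
	// Preallocating with cap == len(m) avoids reallocation while appending.
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

func main() {
	errs := multiError{
		errors.New("user_id: value is required"),
		errors.New("role_ids: too many items"),
	}
	fmt.Println(errs.Error())
}
```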
diff --git a/build/Makefile b/build/Makefile
index 667eb33d7d..548e288da0 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -13,13 +13,22 @@ fetch:
-o ${PACKER_CACHE_DIR}/id_rsa_vagrant
chmod 600 ${PACKER_CACHE_DIR}/id_rsa_vagrant
- # Add the box using Vagrant
+ test -f ${PACKER_CACHE_DIR}/box/oracle9.ova \
+ || curl -fL https://pmm-build-cache.s3.us-east-2.amazonaws.com/VBOXES/oracle9-202407.23.0.box -o ${PACKER_CACHE_DIR}/box/oracle9.ova
+
test -f ${PACKER_CACHE_DIR}/box/box.ovf \
- || VAGRANT_HOME=${PACKER_CACHE_DIR}/box vagrant box add bento/oraclelinux-9 --box-version ${BOX_VERSION} --provider virtualbox
+ || tar -C ${PACKER_CACHE_DIR}/box -xvf ${PACKER_CACHE_DIR}/box/oracle9.ova
test -f ${PACKER_CACHE_DIR}/box/box.ovf \
|| cp -rp ${PACKER_CACHE_DIR}/box/boxes/bento-VAGRANTSLASH-oraclelinux-9/${BOX_VERSION}/amd64/virtualbox/* ${PACKER_CACHE_DIR}/box
+ # # Add the box using Vagrant
+ # test -f ${PACKER_CACHE_DIR}/box/box.ovf \
+ # || VAGRANT_HOME=${PACKER_CACHE_DIR}/box vagrant box add bento/oraclelinux-9 --box-version ${BOX_VERSION} --provider virtualbox
+
+ # test -f ${PACKER_CACHE_DIR}/box/box.ovf \
+ # || cp -rp ${PACKER_CACHE_DIR}/box/boxes/bento-VAGRANTSLASH-oraclelinux-9/${BOX_VERSION}/amd64/virtualbox/* ${PACKER_CACHE_DIR}/box
+
deps:
mkdir -p ${PACKER_CACHE_DIR} ~/bin || :
curl -fL https://releases.hashicorp.com/packer/${PACKER_VERSION}/packer_${PACKER_VERSION}_linux_amd64.zip -o ${PACKER_CACHE_DIR}/packer.zip
diff --git a/build/docker/server/README.md b/build/docker/server/README.md
index be8c83bd4a..08eef709dd 100644
--- a/build/docker/server/README.md
+++ b/build/docker/server/README.md
@@ -43,4 +43,4 @@ You can use these environment variables (-e VAR) when running the Docker image.
[Percona Monitoring and Management](https://docs.percona.com/percona-monitoring-and-management)
-[Setting up PMM Server with Docker](https://docs.percona.com/percona-monitoring-and-management/setting-up/server/docker.html)
+[Setting up PMM Server with Docker](https://docs.percona.com/percona-monitoring-and-management/3/install-pmm/install-pmm-server/index.html)
diff --git a/build/packer/ansible/roles/ami-ovf/tasks/main.yml b/build/packer/ansible/roles/ami-ovf/tasks/main.yml
index 35b0644261..2fa57d22f5 100644
--- a/build/packer/ansible/roles/ami-ovf/tasks/main.yml
+++ b/build/packer/ansible/roles/ami-ovf/tasks/main.yml
@@ -10,3 +10,37 @@
- name: PMM | Delete Azure user
shell: cd /tmp; nohup sh -c "trap '/usr/sbin/waagent -force -deprovision+user && sync' EXIT; sleep 600" /dev/null 2>&1 &
+
+- name: Lock vagrant user
+ ansible.builtin.user:
+ name: vagrant
+ password_lock: true
+ when: ansible_virtualization_type == "virtualbox"
+
+- name: Configure systemd service to remove vagrant user
+ block:
+ - name: Create systemd service file
+ copy:
+ dest: /etc/systemd/system/remove-vagrant.service
+ content: |
+ [Unit]
+ Description=Remove vagrant user on first boot
+ After=multi-user.target
+
+ [Service]
+ Type=oneshot
+ ExecStart=/usr/sbin/userdel -r vagrant
+ ExecStartPost=/usr/bin/touch /etc/remove-vagrant-done
+
+ [Install]
+ WantedBy=multi-user.target
+ ConditionPathExists=!/etc/remove-vagrant-done
+
+ - name: Reload systemd to recognize the new service
+ command: systemctl daemon-reload
+
+ - name: Enable the remove-vagrant service
+ systemd:
+ name: remove-vagrant
+ enabled: true
+ when: ansible_virtualization_type == "virtualbox"
diff --git a/build/scripts/build-client-srpm b/build/scripts/build-client-srpm
index c0e276f353..8d92cfeabe 100755
--- a/build/scripts/build-client-srpm
+++ b/build/scripts/build-client-srpm
@@ -19,6 +19,7 @@ main() {
if [ -e /usr/bin/sudo ]; then
export sudo_path=\$(ls /usr/bin/sudo)
fi
+ [[ ${IMAGE} = ${rpmbuild_docker_image} ]] || \$sudo_path yum -y install git rpm-build
mkdir -p /tmp/pmm
pushd /home/builder/results
diff --git a/cli-tests/package-lock.json b/cli-tests/package-lock.json
index cb462a60c9..fc179f0045 100644
--- a/cli-tests/package-lock.json
+++ b/cli-tests/package-lock.json
@@ -8,25 +8,25 @@
"name": "cli",
"version": "1.0.0",
"dependencies": {
- "@playwright/test": "^1.34.2",
+ "@playwright/test": "^1.50.1",
"@types/luxon": "^3.4.2",
"dotenv": "^16.4.0",
"luxon": "^3.5.0",
- "playwright": "^1.41.2",
+ "playwright": "^1.50.1",
"promise-retry": "^2.0.1",
"shelljs": "^0.8.5",
- "typescript": "^5.6.2"
+ "typescript": "^5.7.2"
},
"devDependencies": {
"@types/promise-retry": "^1.1.3",
"@types/shelljs": "^0.8.12",
- "@typescript-eslint/eslint-plugin": "^8.14.0",
- "@typescript-eslint/parser": "^7.18.0",
- "eslint": "8.57",
+ "@typescript-eslint/eslint-plugin": "^8.24.0",
+ "@typescript-eslint/parser": "^8.24.0",
+ "eslint": "9.20",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-airbnb-typescript": "^18.0.0",
"eslint-plugin-import": "^2.31.0",
- "eslint-plugin-playwright": "^2.0.0"
+ "eslint-plugin-playwright": "^2.2.0"
}
},
"node_modules/@aashutoshrathi/word-wrap": {
@@ -54,24 +54,54 @@
}
},
"node_modules/@eslint-community/regexpp": {
- "version": "4.10.0",
- "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz",
- "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==",
+ "version": "4.12.1",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
+ "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^12.0.0 || ^14.0.0 || >=16.0.0"
}
},
+ "node_modules/@eslint/config-array": {
+ "version": "0.19.2",
+ "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.2.tgz",
+ "integrity": "sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@eslint/object-schema": "^2.1.6",
+ "debug": "^4.3.1",
+ "minimatch": "^3.1.2"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/core": {
+ "version": "0.10.0",
+ "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.10.0.tgz",
+ "integrity": "sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/json-schema": "^7.0.15"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
"node_modules/@eslint/eslintrc": {
- "version": "2.1.4",
- "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz",
- "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==",
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.2.0.tgz",
+ "integrity": "sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"ajv": "^6.12.4",
"debug": "^4.3.2",
- "espree": "^9.6.0",
- "globals": "^13.19.0",
+ "espree": "^10.0.1",
+ "globals": "^14.0.0",
"ignore": "^5.2.0",
"import-fresh": "^3.2.1",
"js-yaml": "^4.1.0",
@@ -79,34 +109,95 @@
"strip-json-comments": "^3.1.1"
},
"engines": {
- "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
+ "node_modules/@eslint/eslintrc/node_modules/globals": {
+ "version": "14.0.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz",
+ "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/@eslint/js": {
- "version": "8.57.0",
- "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz",
- "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==",
+ "version": "9.20.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.20.0.tgz",
+ "integrity": "sha512-iZA07H9io9Wn836aVTytRaNqh00Sad+EamwOVJT12GTLw1VGMFV/4JaME+JjLtr9fiGaoWgYnS54wrfWsSs4oQ==",
"dev": true,
+ "license": "MIT",
"engines": {
- "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@eslint/object-schema": {
+ "version": "2.1.6",
+ "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz",
+ "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
- "node_modules/@humanwhocodes/config-array": {
- "version": "0.11.14",
- "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
- "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==",
- "deprecated": "Use @eslint/config-array instead",
+ "node_modules/@eslint/plugin-kit": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.5.tgz",
+ "integrity": "sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A==",
"dev": true,
+ "license": "Apache-2.0",
"dependencies": {
- "@humanwhocodes/object-schema": "^2.0.2",
- "debug": "^4.3.1",
- "minimatch": "^3.0.5"
+ "@eslint/core": "^0.10.0",
+ "levn": "^0.4.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ }
+ },
+ "node_modules/@humanfs/core": {
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
+ "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanfs/node": {
+ "version": "0.16.6",
+ "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.6.tgz",
+ "integrity": "sha512-YuI2ZHQL78Q5HbhDiBA1X4LmYdXCKCMQIfw0pw7piHJwyREFebJUvrQN4cMssyES6x+vfUbx1CIpaQUKYdQZOw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@humanfs/core": "^0.19.1",
+ "@humanwhocodes/retry": "^0.3.0"
},
"engines": {
- "node": ">=10.10.0"
+ "node": ">=18.18.0"
+ }
+ },
+ "node_modules/@humanfs/node/node_modules/@humanwhocodes/retry": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.3.1.tgz",
+ "integrity": "sha512-JBxkERygn7Bv/GbN5Rv8Ul6LVknS+5Bp6RgDC/O8gEBU/yeH5Ui5C/OlWrTb6qct7LjjfT6Re2NxB0ln0yYybA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18.18"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
}
},
"node_modules/@humanwhocodes/module-importer": {
@@ -122,12 +213,19 @@
"url": "https://github.com/sponsors/nzakas"
}
},
- "node_modules/@humanwhocodes/object-schema": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz",
- "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==",
- "deprecated": "Use @eslint/object-schema instead",
- "dev": true
+ "node_modules/@humanwhocodes/retry": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.1.tgz",
+ "integrity": "sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18.18"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
@@ -165,32 +263,18 @@
}
},
"node_modules/@playwright/test": {
- "version": "1.34.2",
- "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.34.2.tgz",
- "integrity": "sha512-v/LBnwzD0225q8xEv3t9DmNPX61yvNnEbiA8PoNk1fbkxApJFCWYLPpQbdVWzHaARdZD9g1PYBoOvnffortfKw==",
+ "version": "1.50.1",
+ "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.50.1.tgz",
+ "integrity": "sha512-Jii3aBg+CEDpgnuDxEp/h7BimHcUTDlpEtce89xEumlJ5ef2hqepZ+PWp1DDpYC/VO9fmWVI1IlEaoI5fK9FXQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@types/node": "*",
- "playwright-core": "1.34.2"
- },
- "bin": {
- "playwright": "cli.js"
- },
- "engines": {
- "node": ">=14"
+ "playwright": "1.50.1"
},
- "optionalDependencies": {
- "fsevents": "2.3.2"
- }
- },
- "node_modules/@playwright/test/node_modules/playwright-core": {
- "version": "1.34.2",
- "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.34.2.tgz",
- "integrity": "sha512-MrkgGWLANc5qthXduvIY1a/tuafROyTORVd86fwKwgoYrmnBooN/GgeZSBm7ljTLV2FCWNSXV3se7qeScKn83g==",
"bin": {
"playwright": "cli.js"
},
"engines": {
- "node": ">=14"
+ "node": ">=18"
}
},
"node_modules/@rtsao/scc": {
@@ -199,6 +283,13 @@
"integrity": "sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==",
"dev": true
},
+ "node_modules/@types/estree": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz",
+ "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@types/glob": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.2.0.tgz",
@@ -209,6 +300,13 @@
"@types/node": "*"
}
},
+ "node_modules/@types/json-schema": {
+ "version": "7.0.15",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+ "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@types/json5": {
"version": "0.0.29",
"resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
@@ -229,7 +327,8 @@
"node_modules/@types/node": {
"version": "18.11.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
- "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg=="
+ "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==",
+ "dev": true
},
"node_modules/@types/promise-retry": {
"version": "1.1.3",
@@ -257,20 +356,21 @@
}
},
"node_modules/@typescript-eslint/eslint-plugin": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.14.0.tgz",
- "integrity": "sha512-tqp8H7UWFaZj0yNO6bycd5YjMwxa6wIHOLZvWPkidwbgLCsBMetQoGj7DPuAlWa2yGO3H48xmPwjhsSPPCGU5w==",
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.24.0.tgz",
+ "integrity": "sha512-aFcXEJJCI4gUdXgoo/j9udUYIHgF23MFkg09LFz2dzEmU0+1Plk4rQWv/IYKvPHAtlkkGoB3m5e6oUp+JPsNaQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/regexpp": "^4.10.0",
- "@typescript-eslint/scope-manager": "8.14.0",
- "@typescript-eslint/type-utils": "8.14.0",
- "@typescript-eslint/utils": "8.14.0",
- "@typescript-eslint/visitor-keys": "8.14.0",
+ "@typescript-eslint/scope-manager": "8.24.0",
+ "@typescript-eslint/type-utils": "8.24.0",
+ "@typescript-eslint/utils": "8.24.0",
+ "@typescript-eslint/visitor-keys": "8.24.0",
"graphemer": "^1.4.0",
"ignore": "^5.3.1",
"natural-compare": "^1.4.0",
- "ts-api-utils": "^1.3.0"
+ "ts-api-utils": "^2.0.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -281,100 +381,47 @@
},
"peerDependencies": {
"@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0",
- "eslint": "^8.57.0 || ^9.0.0"
- },
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
- }
- },
- "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.14.0.tgz",
- "integrity": "sha512-aBbBrnW9ARIDn92Zbo7rguLnqQ/pOrUguVpbUwzOhkFg2npFDwTgPGqFqE0H5feXcOoJOfX3SxlJaKEVtq54dw==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "@typescript-eslint/visitor-keys": "8.14.0"
- },
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.14.0.tgz",
- "integrity": "sha512-yjeB9fnO/opvLJFAsPNYlKPnEM8+z4og09Pk504dkqonT02AyL5Z9SSqlE0XqezS93v6CXn49VHvB2G7XSsl0g==",
- "dev": true,
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.14.0.tgz",
- "integrity": "sha512-vG0XZo8AdTH9OE6VFRwAZldNc7qtJ/6NLGWak+BtENuEUXGZgFpihILPiBvKXvJ2nFu27XNGC6rKiwuaoMbYzQ==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "eslint-visitor-keys": "^3.4.3"
- },
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
+ "eslint": "^8.57.0 || ^9.0.0",
+ "typescript": ">=4.8.4 <5.8.0"
}
},
"node_modules/@typescript-eslint/parser": {
- "version": "7.18.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz",
- "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==",
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.24.0.tgz",
+ "integrity": "sha512-MFDaO9CYiard9j9VepMNa9MTcqVvSny2N4hkY6roquzj8pdCBRENhErrteaQuu7Yjn1ppk0v1/ZF9CG3KIlrTA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/scope-manager": "7.18.0",
- "@typescript-eslint/types": "7.18.0",
- "@typescript-eslint/typescript-estree": "7.18.0",
- "@typescript-eslint/visitor-keys": "7.18.0",
+ "@typescript-eslint/scope-manager": "8.24.0",
+ "@typescript-eslint/types": "8.24.0",
+ "@typescript-eslint/typescript-estree": "8.24.0",
+ "@typescript-eslint/visitor-keys": "8.24.0",
"debug": "^4.3.4"
},
"engines": {
- "node": "^18.18.0 || >=20.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
- "eslint": "^8.56.0"
- },
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
+ "eslint": "^8.57.0 || ^9.0.0",
+ "typescript": ">=4.8.4 <5.8.0"
}
},
"node_modules/@typescript-eslint/scope-manager": {
- "version": "7.18.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz",
- "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==",
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.24.0.tgz",
+ "integrity": "sha512-HZIX0UByphEtdVBKaQBgTDdn9z16l4aTUz8e8zPQnyxwHBtf5vtl1L+OhH+m1FGV9DrRmoDuYKqzVrvWDcDozw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "7.18.0",
- "@typescript-eslint/visitor-keys": "7.18.0"
+ "@typescript-eslint/types": "8.24.0",
+ "@typescript-eslint/visitor-keys": "8.24.0"
},
"engines": {
- "node": "^18.18.0 || >=20.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
"type": "opencollective",
@@ -382,15 +429,16 @@
}
},
"node_modules/@typescript-eslint/type-utils": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.14.0.tgz",
- "integrity": "sha512-Xcz9qOtZuGusVOH5Uk07NGs39wrKkf3AxlkK79RBK6aJC1l03CobXjJbwBPSidetAOV+5rEVuiT1VSBUOAsanQ==",
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.24.0.tgz",
+ "integrity": "sha512-8fitJudrnY8aq0F1wMiPM1UUgiXQRJ5i8tFjq9kGfRajU+dbPyOuHbl0qRopLEidy0MwqgTHDt6CnSeXanNIwA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/typescript-estree": "8.14.0",
- "@typescript-eslint/utils": "8.14.0",
+ "@typescript-eslint/typescript-estree": "8.24.0",
+ "@typescript-eslint/utils": "8.24.0",
"debug": "^4.3.4",
- "ts-api-utils": "^1.3.0"
+ "ts-api-utils": "^2.0.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -399,17 +447,17 @@
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
},
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
+ "peerDependencies": {
+ "eslint": "^8.57.0 || ^9.0.0",
+ "typescript": ">=4.8.4 <5.8.0"
}
},
- "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.14.0.tgz",
- "integrity": "sha512-yjeB9fnO/opvLJFAsPNYlKPnEM8+z4og09Pk504dkqonT02AyL5Z9SSqlE0XqezS93v6CXn49VHvB2G7XSsl0g==",
+ "node_modules/@typescript-eslint/types": {
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.24.0.tgz",
+ "integrity": "sha512-VacJCBTyje7HGAw7xp11q439A+zeGG0p0/p2zsZwpnMzjPB5WteaWqt4g2iysgGFafrqvyLWqq6ZPZAOCoefCw==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -418,114 +466,31 @@
"url": "https://opencollective.com/typescript-eslint"
}
},
- "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.14.0.tgz",
- "integrity": "sha512-OPXPLYKGZi9XS/49rdaCbR5j/S14HazviBlUQFvSKz3npr3NikF+mrgK7CFVur6XEt95DZp/cmke9d5i3vtVnQ==",
+ "node_modules/@typescript-eslint/typescript-estree": {
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.24.0.tgz",
+ "integrity": "sha512-ITjYcP0+8kbsvT9bysygfIfb+hBj6koDsu37JZG7xrCiy3fPJyNmfVtaGsgTUSEuTzcvME5YI5uyL5LD1EV5ZQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "@typescript-eslint/visitor-keys": "8.14.0",
+ "@typescript-eslint/types": "8.24.0",
+ "@typescript-eslint/visitor-keys": "8.24.0",
"debug": "^4.3.4",
"fast-glob": "^3.3.2",
"is-glob": "^4.0.3",
"minimatch": "^9.0.4",
"semver": "^7.6.0",
- "ts-api-utils": "^1.3.0"
- },
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- },
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
- }
- },
- "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.14.0.tgz",
- "integrity": "sha512-vG0XZo8AdTH9OE6VFRwAZldNc7qtJ/6NLGWak+BtENuEUXGZgFpihILPiBvKXvJ2nFu27XNGC6rKiwuaoMbYzQ==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "eslint-visitor-keys": "^3.4.3"
+ "ts-api-utils": "^2.0.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
- "dev": true,
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
- "node_modules/@typescript-eslint/type-utils/node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
- "dev": true,
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/@typescript-eslint/types": {
- "version": "7.18.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz",
- "integrity": "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==",
- "dev": true,
- "engines": {
- "node": "^18.18.0 || >=20.0.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/typescript-estree": {
- "version": "7.18.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz",
- "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "7.18.0",
- "@typescript-eslint/visitor-keys": "7.18.0",
- "debug": "^4.3.4",
- "globby": "^11.1.0",
- "is-glob": "^4.0.3",
- "minimatch": "^9.0.4",
- "semver": "^7.6.0",
- "ts-api-utils": "^1.3.0"
- },
- "engines": {
- "node": "^18.18.0 || >=20.0.0"
- },
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
},
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
+ "peerDependencies": {
+ "typescript": ">=4.8.4 <5.8.0"
}
},
"node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": {
@@ -533,6 +498,7 @@
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0"
}
@@ -542,6 +508,7 @@
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
"integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
"dev": true,
+ "license": "ISC",
"dependencies": {
"brace-expansion": "^2.0.1"
},
@@ -553,15 +520,16 @@
}
},
"node_modules/@typescript-eslint/utils": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.14.0.tgz",
- "integrity": "sha512-OGqj6uB8THhrHj0Fk27DcHPojW7zKwKkPmHXHvQ58pLYp4hy8CSUdTKykKeh+5vFqTTVmjz0zCOOPKRovdsgHA==",
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.24.0.tgz",
+ "integrity": "sha512-07rLuUBElvvEb1ICnafYWr4hk8/U7X9RDCOqd9JcAMtjh/9oRmcfN4yGzbPVirgMR0+HLVHehmu19CWeh7fsmQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.4.0",
- "@typescript-eslint/scope-manager": "8.14.0",
- "@typescript-eslint/types": "8.14.0",
- "@typescript-eslint/typescript-estree": "8.14.0"
+ "@typescript-eslint/scope-manager": "8.24.0",
+ "@typescript-eslint/types": "8.24.0",
+ "@typescript-eslint/typescript-estree": "8.24.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -571,53 +539,19 @@
"url": "https://opencollective.com/typescript-eslint"
},
"peerDependencies": {
- "eslint": "^8.57.0 || ^9.0.0"
- }
- },
- "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.14.0.tgz",
- "integrity": "sha512-aBbBrnW9ARIDn92Zbo7rguLnqQ/pOrUguVpbUwzOhkFg2npFDwTgPGqFqE0H5feXcOoJOfX3SxlJaKEVtq54dw==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "@typescript-eslint/visitor-keys": "8.14.0"
- },
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
+ "eslint": "^8.57.0 || ^9.0.0",
+ "typescript": ">=4.8.4 <5.8.0"
}
},
- "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.14.0.tgz",
- "integrity": "sha512-yjeB9fnO/opvLJFAsPNYlKPnEM8+z4og09Pk504dkqonT02AyL5Z9SSqlE0XqezS93v6CXn49VHvB2G7XSsl0g==",
- "dev": true,
- "engines": {
- "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.14.0.tgz",
- "integrity": "sha512-OPXPLYKGZi9XS/49rdaCbR5j/S14HazviBlUQFvSKz3npr3NikF+mrgK7CFVur6XEt95DZp/cmke9d5i3vtVnQ==",
+ "node_modules/@typescript-eslint/visitor-keys": {
+ "version": "8.24.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.24.0.tgz",
+ "integrity": "sha512-kArLq83QxGLbuHrTMoOEWO+l2MwsNS2TGISEdx8xgqpkbytB07XmlQyQdNDrCc1ecSqx0cnmhGvpX+VBwqqSkg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "@typescript-eslint/visitor-keys": "8.14.0",
- "debug": "^4.3.4",
- "fast-glob": "^3.3.2",
- "is-glob": "^4.0.3",
- "minimatch": "^9.0.4",
- "semver": "^7.6.0",
- "ts-api-utils": "^1.3.0"
+ "@typescript-eslint/types": "8.24.0",
+ "eslint-visitor-keys": "^4.2.0"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -625,82 +559,27 @@
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/typescript-eslint"
- },
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
}
},
- "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.14.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.14.0.tgz",
- "integrity": "sha512-vG0XZo8AdTH9OE6VFRwAZldNc7qtJ/6NLGWak+BtENuEUXGZgFpihILPiBvKXvJ2nFu27XNGC6rKiwuaoMbYzQ==",
+ "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
+ "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
"dev": true,
- "dependencies": {
- "@typescript-eslint/types": "8.14.0",
- "eslint-visitor-keys": "^3.4.3"
- },
+ "license": "Apache-2.0",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
- }
- },
- "node_modules/@typescript-eslint/utils/node_modules/brace-expansion": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
- "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
- "dev": true,
- "dependencies": {
- "balanced-match": "^1.0.0"
- }
- },
- "node_modules/@typescript-eslint/utils/node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
- "dev": true,
- "dependencies": {
- "brace-expansion": "^2.0.1"
- },
- "engines": {
- "node": ">=16 || 14 >=14.17"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/@typescript-eslint/visitor-keys": {
- "version": "7.18.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz",
- "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==",
- "dev": true,
- "dependencies": {
- "@typescript-eslint/types": "7.18.0",
- "eslint-visitor-keys": "^3.4.3"
- },
- "engines": {
- "node": "^18.18.0 || >=20.0.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/typescript-eslint"
+ "url": "https://opencollective.com/eslint"
}
},
- "node_modules/@ungap/structured-clone": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
- "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
- "dev": true
- },
"node_modules/acorn": {
- "version": "8.11.2",
- "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz",
- "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==",
+ "version": "8.14.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz",
+ "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==",
"dev": true,
+ "license": "MIT",
"bin": {
"acorn": "bin/acorn"
},
@@ -713,6 +592,7 @@
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
"integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
"dev": true,
+ "license": "MIT",
"peerDependencies": {
"acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
}
@@ -722,6 +602,7 @@
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
@@ -733,15 +614,6 @@
"url": "https://github.com/sponsors/epoberezkin"
}
},
- "node_modules/ansi-regex": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
- "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
@@ -761,7 +633,8 @@
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
- "dev": true
+ "dev": true,
+ "license": "Python-2.0"
},
"node_modules/array-buffer-byte-length": {
"version": "1.0.1",
@@ -799,15 +672,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/array-union": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
- "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/array.prototype.findlastindex": {
"version": "1.2.5",
"resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz",
@@ -951,6 +815,7 @@
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=6"
}
@@ -998,13 +863,15 @@
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz",
"integrity": "sha512-JsPKdmh8ZkmnHxDk55FZ1TqVLvEQTvoByJZRN9jzI0UjxK/QgAmsphz7PGtqgPieQZ/CQcHWXCR7ATDNhGe+YA==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/cross-spawn": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
- "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
@@ -1122,30 +989,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/dir-glob": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
- "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
- "dev": true,
- "dependencies": {
- "path-type": "^4.0.0"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/doctrine": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
- "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
- "dev": true,
- "dependencies": {
- "esutils": "^2.0.2"
- },
- "engines": {
- "node": ">=6.0.0"
- }
- },
"node_modules/dotenv": {
"version": "16.4.0",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.0.tgz",
@@ -1308,58 +1151,63 @@
}
},
"node_modules/eslint": {
- "version": "8.57.0",
- "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz",
- "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==",
+ "version": "9.20.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.20.0.tgz",
+ "integrity": "sha512-aL4F8167Hg4IvsW89ejnpTwx+B/UQRzJPGgbIOl+4XqffWsahVVsLEWoZvnrVuwpWmnRd7XeXmQI1zlKcFDteA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
- "@eslint-community/regexpp": "^4.6.1",
- "@eslint/eslintrc": "^2.1.4",
- "@eslint/js": "8.57.0",
- "@humanwhocodes/config-array": "^0.11.14",
+ "@eslint-community/regexpp": "^4.12.1",
+ "@eslint/config-array": "^0.19.0",
+ "@eslint/core": "^0.11.0",
+ "@eslint/eslintrc": "^3.2.0",
+ "@eslint/js": "9.20.0",
+ "@eslint/plugin-kit": "^0.2.5",
+ "@humanfs/node": "^0.16.6",
"@humanwhocodes/module-importer": "^1.0.1",
- "@nodelib/fs.walk": "^1.2.8",
- "@ungap/structured-clone": "^1.2.0",
+ "@humanwhocodes/retry": "^0.4.1",
+ "@types/estree": "^1.0.6",
+ "@types/json-schema": "^7.0.15",
"ajv": "^6.12.4",
"chalk": "^4.0.0",
- "cross-spawn": "^7.0.2",
+ "cross-spawn": "^7.0.6",
"debug": "^4.3.2",
- "doctrine": "^3.0.0",
"escape-string-regexp": "^4.0.0",
- "eslint-scope": "^7.2.2",
- "eslint-visitor-keys": "^3.4.3",
- "espree": "^9.6.1",
- "esquery": "^1.4.2",
+ "eslint-scope": "^8.2.0",
+ "eslint-visitor-keys": "^4.2.0",
+ "espree": "^10.3.0",
+ "esquery": "^1.5.0",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
- "file-entry-cache": "^6.0.1",
+ "file-entry-cache": "^8.0.0",
"find-up": "^5.0.0",
"glob-parent": "^6.0.2",
- "globals": "^13.19.0",
- "graphemer": "^1.4.0",
"ignore": "^5.2.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
- "is-path-inside": "^3.0.3",
- "js-yaml": "^4.1.0",
"json-stable-stringify-without-jsonify": "^1.0.1",
- "levn": "^0.4.1",
"lodash.merge": "^4.6.2",
"minimatch": "^3.1.2",
"natural-compare": "^1.4.0",
- "optionator": "^0.9.3",
- "strip-ansi": "^6.0.1",
- "text-table": "^0.2.0"
+ "optionator": "^0.9.3"
},
"bin": {
"eslint": "bin/eslint.js"
},
"engines": {
- "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
- "url": "https://opencollective.com/eslint"
+ "url": "https://eslint.org/donate"
+ },
+ "peerDependencies": {
+ "jiti": "*"
+ },
+ "peerDependenciesMeta": {
+ "jiti": {
+ "optional": true
+ }
}
},
"node_modules/eslint-config-airbnb-base": {
@@ -1367,6 +1215,7 @@
"resolved": "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-15.0.0.tgz",
"integrity": "sha512-xaX3z4ZZIcFLvh2oUNvcX5oEofXda7giYmuplVxoOg5A7EXJMrUyqRgR+mhDhPK8LZ4PttFOBvCYDbX3sUoUig==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"confusing-browser-globals": "^1.0.10",
"object.assign": "^4.1.2",
@@ -1382,10 +1231,11 @@
}
},
"node_modules/eslint-config-airbnb-base/node_modules/semver": {
- "version": "6.3.0",
- "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
- "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"dev": true,
+ "license": "ISC",
"bin": {
"semver": "bin/semver.js"
}
@@ -1514,10 +1364,14 @@
}
},
"node_modules/eslint-plugin-playwright": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/eslint-plugin-playwright/-/eslint-plugin-playwright-2.0.0.tgz",
- "integrity": "sha512-nPa44nSp48mp/U+GSneabrhlyIyGvrcv+Z14u6sgno+jX8N0bH+ooSLEC1L6dvMDSHs7tj+kMIbls3l8gCJJSg==",
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-playwright/-/eslint-plugin-playwright-2.2.0.tgz",
+ "integrity": "sha512-qSQpAw7RcSzE3zPp8FMGkthaCWovHZ/BsXtpmnGax9vQLIovlh1bsZHEa2+j2lv9DWhnyeLM/qZmp7ffQZfQvg==",
"dev": true,
+ "license": "MIT",
+ "workspaces": [
+ "examples"
+ ],
"dependencies": {
"globals": "^13.23.0"
},
@@ -1528,6 +1382,23 @@
"eslint": ">=8.40.0"
}
},
+ "node_modules/eslint-scope": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz",
+ "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
"node_modules/eslint-visitor-keys": {
"version": "3.4.3",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
@@ -1540,53 +1411,69 @@
"url": "https://opencollective.com/eslint"
}
},
- "node_modules/eslint/node_modules/eslint-scope": {
- "version": "7.2.2",
- "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz",
- "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==",
+ "node_modules/eslint/node_modules/@eslint/core": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.11.0.tgz",
+ "integrity": "sha512-DWUB2pksgNEb6Bz2fggIy1wh6fGgZP4Xyy/Mt0QZPiloKKXerbqq9D3SBQTlCRYOrcRPu4vuz+CGjwdfqxnoWA==",
"dev": true,
+ "license": "Apache-2.0",
"dependencies": {
- "esrecurse": "^4.3.0",
- "estraverse": "^5.2.0"
+ "@types/json-schema": "^7.0.15"
},
"engines": {
- "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
- },
- "funding": {
- "url": "https://opencollective.com/eslint"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
}
},
- "node_modules/eslint/node_modules/estraverse": {
- "version": "5.3.0",
- "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
- "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "node_modules/eslint/node_modules/eslint-visitor-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
+ "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
"dev": true,
+ "license": "Apache-2.0",
"engines": {
- "node": ">=4.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
}
},
"node_modules/espree": {
- "version": "9.6.1",
- "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz",
- "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==",
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz",
+ "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==",
"dev": true,
+ "license": "BSD-2-Clause",
"dependencies": {
- "acorn": "^8.9.0",
+ "acorn": "^8.14.0",
"acorn-jsx": "^5.3.2",
- "eslint-visitor-keys": "^3.4.1"
+ "eslint-visitor-keys": "^4.2.0"
},
"engines": {
- "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/espree/node_modules/eslint-visitor-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
+ "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
"url": "https://opencollective.com/eslint"
}
},
"node_modules/esquery": {
- "version": "1.4.2",
- "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.2.tgz",
- "integrity": "sha512-JVSoLdTlTDkmjFmab7H/9SL9qGSyjElT3myyKp7krqjVFQCDLmj1QFaCLRFBszBKI0XVZaiiXvuPIX3ZwHe1Ng==",
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz",
+ "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==",
"dev": true,
+ "license": "BSD-3-Clause",
"dependencies": {
"estraverse": "^5.1.0"
},
@@ -1594,20 +1481,12 @@
"node": ">=0.10"
}
},
- "node_modules/esquery/node_modules/estraverse": {
- "version": "5.3.0",
- "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
- "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
- "dev": true,
- "engines": {
- "node": ">=4.0"
- }
- },
"node_modules/esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dev": true,
+ "license": "BSD-2-Clause",
"dependencies": {
"estraverse": "^5.2.0"
},
@@ -1615,11 +1494,12 @@
"node": ">=4.0"
}
},
- "node_modules/esrecurse/node_modules/estraverse": {
+ "node_modules/estraverse": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
"integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
"dev": true,
+ "license": "BSD-2-Clause",
"engines": {
"node": ">=4.0"
}
@@ -1637,7 +1517,8 @@
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/fast-glob": {
"version": "3.3.2",
@@ -1671,7 +1552,8 @@
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/fast-levenshtein": {
"version": "2.0.6",
@@ -1689,15 +1571,16 @@
}
},
"node_modules/file-entry-cache": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
- "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz",
+ "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "flat-cache": "^3.0.4"
+ "flat-cache": "^4.0.0"
},
"engines": {
- "node": "^10.12.0 || >=12.0.0"
+ "node": ">=16.0.0"
}
},
"node_modules/fill-range": {
@@ -1729,23 +1612,25 @@
}
},
"node_modules/flat-cache": {
- "version": "3.0.4",
- "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
- "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz",
+ "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "flatted": "^3.1.0",
- "rimraf": "^3.0.2"
+ "flatted": "^3.2.9",
+ "keyv": "^4.5.4"
},
"engines": {
- "node": "^10.12.0 || >=12.0.0"
+ "node": ">=16"
}
},
"node_modules/flatted": {
- "version": "3.2.7",
- "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz",
- "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==",
- "dev": true
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.2.tgz",
+ "integrity": "sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA==",
+ "dev": true,
+ "license": "ISC"
},
"node_modules/for-each": {
"version": "0.3.3",
@@ -1906,26 +1791,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/globby": {
- "version": "11.1.0",
- "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
- "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
- "dev": true,
- "dependencies": {
- "array-union": "^2.1.0",
- "dir-glob": "^3.0.1",
- "fast-glob": "^3.2.9",
- "ignore": "^5.2.0",
- "merge2": "^1.4.1",
- "slash": "^3.0.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/gopd": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
@@ -2034,10 +1899,11 @@
}
},
"node_modules/import-fresh": {
- "version": "3.3.0",
- "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
- "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz",
+ "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
@@ -2251,15 +2117,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/is-path-inside": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
- "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/is-regex": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
@@ -2358,13 +2215,15 @@
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
- "dev": true
+ "dev": true,
+ "license": "ISC"
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
},
@@ -2372,11 +2231,19 @@
"js-yaml": "bin/js-yaml.js"
}
},
+ "node_modules/json-buffer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
+ "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/json-stable-stringify-without-jsonify": {
"version": "1.0.1",
@@ -2396,6 +2263,16 @@
"json5": "lib/cli.js"
}
},
+ "node_modules/keyv": {
+ "version": "4.5.4",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
+ "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "json-buffer": "3.0.1"
+ }
+ },
"node_modules/levn": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
@@ -2544,14 +2421,15 @@
}
},
"node_modules/object.entries": {
- "version": "1.1.6",
- "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz",
- "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==",
+ "version": "1.1.8",
+ "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz",
+ "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "call-bind": "^1.0.2",
- "define-properties": "^1.1.4",
- "es-abstract": "^1.20.4"
+ "call-bind": "^1.0.7",
+ "define-properties": "^1.2.1",
+ "es-object-atoms": "^1.0.0"
},
"engines": {
"node": ">= 0.4"
@@ -2666,6 +2544,7 @@
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"callsites": "^3.0.0"
},
@@ -2695,6 +2574,7 @@
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=8"
}
@@ -2704,15 +2584,6 @@
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
},
- "node_modules/path-type": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
- "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
@@ -2726,31 +2597,33 @@
}
},
"node_modules/playwright": {
- "version": "1.43.0",
- "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.43.0.tgz",
- "integrity": "sha512-SiOKHbVjTSf6wHuGCbqrEyzlm6qvXcv7mENP+OZon1I07brfZLGdfWV0l/efAzVx7TF3Z45ov1gPEkku9q25YQ==",
+ "version": "1.50.1",
+ "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.50.1.tgz",
+ "integrity": "sha512-G8rwsOQJ63XG6BbKj2w5rHeavFjy5zynBA9zsJMMtBoe/Uf757oG12NXz6e6OirF7RCrTVAKFXbLmn1RbL7Qaw==",
+ "license": "Apache-2.0",
"dependencies": {
- "playwright-core": "1.43.0"
+ "playwright-core": "1.50.1"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
- "node": ">=16"
+ "node": ">=18"
},
"optionalDependencies": {
"fsevents": "2.3.2"
}
},
"node_modules/playwright-core": {
- "version": "1.43.0",
- "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.43.0.tgz",
- "integrity": "sha512-iWFjyBUH97+pUFiyTqSLd8cDMMOS0r2ZYz2qEsPjH8/bX++sbIJT35MSwKnp1r/OQBAqC5XO99xFbJ9XClhf4w==",
+ "version": "1.50.1",
+ "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.50.1.tgz",
+ "integrity": "sha512-ra9fsNWayuYumt+NiM069M6OkcRb1FZSK8bgi66AtpFoWkg2+y0bJSNmkFrWhMbEBbVKC/EruAHH3g0zmtwGmQ==",
+ "license": "Apache-2.0",
"bin": {
"playwright-core": "cli.js"
},
"engines": {
- "node": ">=16"
+ "node": ">=18"
}
},
"node_modules/possible-typed-array-names": {
@@ -2788,6 +2661,7 @@
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
"integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=6"
}
@@ -2862,6 +2736,7 @@
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=4"
}
@@ -2884,21 +2759,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/rimraf": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
- "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
- "dev": true,
- "dependencies": {
- "glob": "^7.1.3"
- },
- "bin": {
- "rimraf": "bin.js"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@@ -3009,6 +2869,7 @@
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"shebang-regex": "^3.0.0"
},
@@ -3021,6 +2882,7 @@
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=8"
}
@@ -3059,15 +2921,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/slash": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
- "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/string.prototype.trim": {
"version": "1.2.9",
"resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz",
@@ -3117,18 +2970,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/strip-ansi": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
@@ -3143,6 +2984,7 @@
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=8"
},
@@ -3173,12 +3015,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/text-table": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
- "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==",
- "dev": true
- },
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -3192,15 +3028,16 @@
}
},
"node_modules/ts-api-utils": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz",
- "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==",
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.1.tgz",
+ "integrity": "sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w==",
"dev": true,
+ "license": "MIT",
"engines": {
- "node": ">=16"
+ "node": ">=18.12"
},
"peerDependencies": {
- "typescript": ">=4.2.0"
+ "typescript": ">=4.8.4"
}
},
"node_modules/tsconfig-paths": {
@@ -3313,9 +3150,9 @@
}
},
"node_modules/typescript": {
- "version": "5.6.2",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz",
- "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==",
+ "version": "5.7.2",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz",
+ "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==",
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -3344,6 +3181,7 @@
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dev": true,
+ "license": "BSD-2-Clause",
"dependencies": {
"punycode": "^2.1.0"
}
@@ -3353,6 +3191,7 @@
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
+ "license": "ISC",
"dependencies": {
"isexe": "^2.0.0"
},
diff --git a/cli-tests/package.json b/cli-tests/package.json
index 5ebafb563b..7875037add 100644
--- a/cli-tests/package.json
+++ b/cli-tests/package.json
@@ -12,24 +12,24 @@
"pmm": "pmm"
},
"dependencies": {
- "@playwright/test": "^1.34.2",
+ "@playwright/test": "^1.50.1",
"@types/luxon": "^3.4.2",
"dotenv": "^16.4.0",
"luxon": "^3.5.0",
- "playwright": "^1.41.2",
+ "playwright": "^1.50.1",
"promise-retry": "^2.0.1",
"shelljs": "^0.8.5",
- "typescript": "^5.6.2"
+ "typescript": "^5.7.2"
},
"devDependencies": {
"@types/promise-retry": "^1.1.3",
"@types/shelljs": "^0.8.12",
- "@typescript-eslint/eslint-plugin": "^8.14.0",
- "@typescript-eslint/parser": "^7.18.0",
- "eslint": "8.57",
+ "@typescript-eslint/eslint-plugin": "^8.24.0",
+ "@typescript-eslint/parser": "^8.24.0",
+ "eslint": "9.20",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-airbnb-typescript": "^18.0.0",
"eslint-plugin-import": "^2.31.0",
- "eslint-plugin-playwright": "^2.0.0"
+ "eslint-plugin-playwright": "^2.2.0"
}
}
diff --git a/docs/api/release-notes/3.0.0.Beta.md b/docs/api/release-notes/3.0.0.md
similarity index 80%
rename from docs/api/release-notes/3.0.0.Beta.md
rename to docs/api/release-notes/3.0.0.md
index 458bc863ba..853ca200ca 100644
--- a/docs/api/release-notes/3.0.0.Beta.md
+++ b/docs/api/release-notes/3.0.0.md
@@ -1,14 +1,20 @@
---
title: PMM v3 API release notes
-slug: release-notes-3-0-0-beta
+slug: release-notes-3-0-0
categorySlug: release-notes
hidden: 0
---
-## Breaking API changes
+
+Percona Monitoring and Management (PMM) version 3 introduces major API enhancements designed to modernize the interface, improve the developer experience, and ensure consistent behavior across all endpoints.
+
+Here are the key breaking changes and new features in the PMM v3 API.
+
+## Breaking changes
### Removed database ID prefixes
We have removed prefixes from database record identifiers (IDs) in PMM to improve API compatibility with REST and to simplify ID handling.
+
For example, an ID that was previously `/agent_id/7cae8a44-8210-4f00-a679-764fa8303ee8` is now simply `7cae8a44-8210-4f00-a679-764fa8303ee8`.
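+
+If existing scripts still store prefixed IDs, a small shell sketch like the following (a hypothetical helper, not part of PMM itself) can strip the legacy prefix:
+
+```sh
+# Hypothetical example: normalize a legacy prefixed ID to a plain UUID
+old_id="/agent_id/7cae8a44-8210-4f00-a679-764fa8303ee8"
+new_id="${old_id##*/}"   # -> 7cae8a44-8210-4f00-a679-764fa8303ee8
+echo "${new_id}"
+```
+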
This change affects various components, including nodes, agents, services, and backup-related entities. As a result:
@@ -158,4 +164,41 @@ The Agents API follows the same pattern, with a single endpoint for all agent ty
#### Impact
-Update your API calls to use the new consolidated endpoints. Review the updated API documentation in the Swagger UI of your PMM instance for the new request formats.
\ No newline at end of file
+Update your API calls to use the new consolidated endpoints. Review the updated API documentation in the Swagger UI of your PMM instance for the new request formats.
+
+## New features
+
+### PMM update changelog API
+
+To improve the PMM update experience, we have added a new endpoint (`GET /v1/server/updates/changelogs`) that lists available PMM updates and their release notes.
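+
+The endpoint can be called like any other PMM v3 API endpoint. The following `curl` sketch is illustrative only; adjust the host, token, and TLS options to match your deployment:
+
+```sh
+# Hypothetical example: list available PMM updates and their release notes
+# (-k skips TLS verification for the default self-signed certificate)
+curl -k -H "Authorization: Bearer <YOUR_API_TOKEN>" \
+  https://127.0.0.1/v1/server/updates/changelogs
+```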
+
+#### Example response
+
+```json
+{
+ "updates": [
+ {
+ "version": "3.0.0",
+ "tag": "percona/pmm-server:3.0.0",
+ "timestamp": "2024-11-20T00:00:00Z",
+ "release_notes_url": "https://per.co.na/pmm/0.0.0",
+ "release_notes_text": "..."
+ }
+ ],
+ "last_check": "2025-01-21T11:35:48.847965336Z"
+}
+```
+
+#### Impact
+This is a new endpoint with no impact on existing PMM installations.
+
+
+## Migration to PMM v3 API
+To ensure a smooth migration from PMM 2, follow these steps in a staging environment before production deployment:
+
+1. Review existing integrations and custom scripts.
+2. Update API endpoint references to use new unified endpoints.
+3. Modify ID handling to work with plain UUIDs.
+4. Update feature toggle implementations.
+5. Adjust response parsing to handle comprehensive field sets.
+6. Test thoroughly in a non-production environment before upgrading.
\ No newline at end of file
diff --git a/docs/process/tech_stack.md b/docs/process/tech_stack.md
index a5778b127d..6bce210875 100644
--- a/docs/process/tech_stack.md
+++ b/docs/process/tech_stack.md
@@ -15,7 +15,7 @@ Currently, our development team has fewer people than components/repositories. I
- [prometheus client](https://github.com/prometheus/client_golang) is used for exposing internal metrics of application and gRPC library.
- [testify](https://github.com/stretchr/testify) or stdlib `testing` package should be used for writing tests. Testify should be used only for `assert` and `require` packages – suites here have some problems with logging and parallel tests. Common setups and teardowns should be implemented with `testing` [subtests](https://golang.org/pkg/testing/#hdr-Subtests_and_Sub_benchmarks).
- [golangci-lint](https://github.com/golangci/golangci-lint) is used for static code checks.
-- [gocov.io](http://gocov.io/) and [gocoverutil](https://github.com/AlekSi/gocoverutil) gather code coverage metrics.
+- [gocoverutil](https://github.com/AlekSi/gocoverutil) gathers code coverage metrics.
- [Docker Compose](https://docs.docker.com/compose/) is used for a local development environment and in CI.
- [Kong](https://github.com/alecthomas/kong) for PMM CLI and [kingpin.v2](http://gopkg.in/alecthomas/kingpin.v2) for exporters and some other code. Use [Kong](https://github.com/alecthomas/kong) if you want to contribute a brand new CLI or need to make significant changes to the old `kingpin.v2`-based CLI.
- [go modules](https://go.dev/ref/mod#introduction) for vendoring.
diff --git a/documentation/WRITERS-NOTES.md b/documentation/WRITERS-NOTES.md
index 11f2cc0846..6e8ddd5706 100644
--- a/documentation/WRITERS-NOTES.md
+++ b/documentation/WRITERS-NOTES.md
@@ -133,7 +133,7 @@ Currently, we use three different sets of icons:
The preference should be given to Mkdocs Material icons, since they were initially designed for `mkdocs` project and work very well with it. In rare cases, when an icon cannot be found in Mkdocs Material, you can find a suitable one on Iconscout. FontAwesome icons do not play well with Mkdocs, so use them with caution and don't forget to apply additional CSS styles in case they don't render well in PDF.
-Ideally, we should migrate to using just the two first sets.
+Ideally, we should migrate to using just the first set, which offers 10,000+ icons, more than sufficient for all our documentation needs.
Use HTML for icons:
@@ -141,6 +141,8 @@ Use HTML for icons:
- For PMM UI (Grafana) icons (`uil-` prefix), go to , find an icon, select 'Font' and copy the code here.
- For Font Awesome (`fa-` prefix), go to , find an icon, copy the code.
+Note: The following list is a work in progress and will be updated as we go along.
+
| Unicons icon code | Description | Used where |
| -------------------------------------------- | ---------------------------------- | ------------------------------------ |
| | Down chevron | PMM UI |
@@ -151,7 +153,13 @@ Use HTML for icons:
| | Lightening flash/bolt | PMM UI - Nodes compare |
| | Right caret | General |
| | Clock (at nine) | PMM UI - Time range selector |
-| | Cog wheel | PMM UI Configuration |
+| :material-cog: | Cog wheel | PMM UI Configuration |
+| :material-cog-outline: | Cog wheel | PMM UI Configuration->Settings |
+| :material-clipboard-list-outline: | Clipboard list | PMM UI - Inventory |
+| :material-dots-horizontal: | Triple dots, aligned horizontally | PMM UI - Backup in progress |
+| :material-magnify-expand: | Advisors | PMM UI - Advisors |
+| :material-view-dashboard: | Abstract blocks assembly | PMM UI - Dashboards |
+| :material-dots-circle: | A circle surrounded by smaller ones| PMM UI - Node dashboards |
| | Share comment symbol | PMM UI - Share dashboard image |
| | Compass | PMM UI - Explore |
| | Copy | PMM UI - Copy (e.g. backup schedule) |
@@ -171,7 +179,6 @@ Use HTML for icons:
| | Question mark in circle | PMM UI - Help |
| | Minus in magnifying glass | PMM UI - Time range zoom out |
| | Magnifying glass | PMM UI - Search |
-| | Cog wheel | PMM UI Configuration->Settings |
| | Share symbol | PMM UI - Share dashboard |
| | Shield | PMM UI - Server admin |
| | Star | PMM UI - Dashboard favourites |
@@ -183,17 +190,6 @@ Use HTML for icons:
| | Toggle (on) | PMM UI - Toggle switch |
| | Trash can | PMM UI - Various 'Delete' operation |
-Custom (in-house design) icons are defined as SVG code in `variables.yml`.
-
-| Usage | Description | Used where |
-| ---------------------- | ------------------------------------ | -------------------------------- |
-| `{{icon.addinstance}}` | Thin + in circle | PMM Add Instance |
-| `{{icon.checks}}` | Checkbox list items | Inventory list |
-| `{{icon.inventory}}` | Checkbox list items | PMM Inventory & Inventory List |
-| `{{icon.dashboards}}` | Abstract blocks assembly | PMM Dashboards |
-| `{{icon.node}}` | Circle surrounded by smaller circles | Node dashboards |
-| `{{icon.percona}}` | Percona logo | Wherever PMM home icon is needed |
-
## Symbols
While MkDocs will automatically replace certain strings with symbols, it's preferable where possible to use unicode symbols for other icons, so that they appear when the raw Markdown is exported as HTML and imported into Google Docs.
diff --git a/documentation/docs/pmm-admin/_images/PMM-manage-users.png b/documentation/docs/admin/_images/PMM-manage-users.png
similarity index 100%
rename from documentation/docs/pmm-admin/_images/PMM-manage-users.png
rename to documentation/docs/admin/_images/PMM-manage-users.png
diff --git a/documentation/docs/pmm-admin/index.md b/documentation/docs/admin/index.md
similarity index 100%
rename from documentation/docs/pmm-admin/index.md
rename to documentation/docs/admin/index.md
diff --git a/documentation/docs/pmm-admin/manage-orgs/create_org.md b/documentation/docs/admin/manage-orgs/create_org.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-orgs/create_org.md
rename to documentation/docs/admin/manage-orgs/create_org.md
diff --git a/documentation/docs/pmm-admin/manage-orgs/del_org.md b/documentation/docs/admin/manage-orgs/del_org.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-orgs/del_org.md
rename to documentation/docs/admin/manage-orgs/del_org.md
diff --git a/documentation/docs/pmm-admin/manage-orgs/edit_org.md b/documentation/docs/admin/manage-orgs/edit_org.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-orgs/edit_org.md
rename to documentation/docs/admin/manage-orgs/edit_org.md
diff --git a/documentation/docs/pmm-admin/manage-orgs/index.md b/documentation/docs/admin/manage-orgs/index.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-orgs/index.md
rename to documentation/docs/admin/manage-orgs/index.md
diff --git a/documentation/docs/pmm-admin/manage-users/add_users.md b/documentation/docs/admin/manage-users/add_users.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-users/add_users.md
rename to documentation/docs/admin/manage-users/add_users.md
diff --git a/documentation/docs/pmm-admin/manage-users/delete_users.md b/documentation/docs/admin/manage-users/delete_users.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-users/delete_users.md
rename to documentation/docs/admin/manage-users/delete_users.md
diff --git a/documentation/docs/pmm-admin/manage-users/edit_users.md b/documentation/docs/admin/manage-users/edit_users.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-users/edit_users.md
rename to documentation/docs/admin/manage-users/edit_users.md
diff --git a/documentation/docs/pmm-admin/manage-users/index.md b/documentation/docs/admin/manage-users/index.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-users/index.md
rename to documentation/docs/admin/manage-users/index.md
diff --git a/documentation/docs/pmm-admin/manage-users/manage_users_in_org.md b/documentation/docs/admin/manage-users/manage_users_in_org.md
similarity index 100%
rename from documentation/docs/pmm-admin/manage-users/manage_users_in_org.md
rename to documentation/docs/admin/manage-users/manage_users_in_org.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/assign_roles.md b/documentation/docs/admin/roles/access-control/assign_roles.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/assign_roles.md
rename to documentation/docs/admin/roles/access-control/assign_roles.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/config_access_cntrl.md b/documentation/docs/admin/roles/access-control/config_access_cntrl.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/config_access_cntrl.md
rename to documentation/docs/admin/roles/access-control/config_access_cntrl.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/create_roles.md b/documentation/docs/admin/roles/access-control/create_roles.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/create_roles.md
rename to documentation/docs/admin/roles/access-control/create_roles.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/intro.md b/documentation/docs/admin/roles/access-control/intro.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/intro.md
rename to documentation/docs/admin/roles/access-control/intro.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/labels.md b/documentation/docs/admin/roles/access-control/labels.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/labels.md
rename to documentation/docs/admin/roles/access-control/labels.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/manage_roles.md b/documentation/docs/admin/roles/access-control/manage_roles.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/manage_roles.md
rename to documentation/docs/admin/roles/access-control/manage_roles.md
diff --git a/documentation/docs/pmm-admin/roles/access-control/usecase.md b/documentation/docs/admin/roles/access-control/usecase.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/access-control/usecase.md
rename to documentation/docs/admin/roles/access-control/usecase.md
diff --git a/documentation/docs/pmm-admin/roles/delete_users.md b/documentation/docs/admin/roles/delete_users.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/delete_users.md
rename to documentation/docs/admin/roles/delete_users.md
diff --git a/documentation/docs/pmm-admin/roles/edit_users.md b/documentation/docs/admin/roles/edit_users.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/edit_users.md
rename to documentation/docs/admin/roles/edit_users.md
diff --git a/documentation/docs/pmm-admin/roles/index.md b/documentation/docs/admin/roles/index.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/index.md
rename to documentation/docs/admin/roles/index.md
diff --git a/documentation/docs/pmm-admin/roles/manage_users_in_org.md b/documentation/docs/admin/roles/manage_users_in_org.md
similarity index 100%
rename from documentation/docs/pmm-admin/roles/manage_users_in_org.md
rename to documentation/docs/admin/roles/manage_users_in_org.md
diff --git a/documentation/docs/pmm-admin/security/data_encryption.md b/documentation/docs/admin/security/data_encryption.md
similarity index 100%
rename from documentation/docs/pmm-admin/security/data_encryption.md
rename to documentation/docs/admin/security/data_encryption.md
diff --git a/documentation/docs/pmm-admin/security/grafana_cookies.md b/documentation/docs/admin/security/grafana_cookies.md
similarity index 100%
rename from documentation/docs/pmm-admin/security/grafana_cookies.md
rename to documentation/docs/admin/security/grafana_cookies.md
diff --git a/documentation/docs/admin/security/index.md b/documentation/docs/admin/security/index.md
new file mode 100644
index 0000000000..3da2e157de
--- /dev/null
+++ b/documentation/docs/admin/security/index.md
@@ -0,0 +1,10 @@
+# About security in PMM
+
+
+By default, PMM ships with a self-signed certificate to enable usage out of the box. While this enables encrypted connections between clients (database clients and web/API clients) and the PMM Server, it should not be considered a properly secured connection.
+
+To properly secure your PMM deployment, take the following precautions:
+
+- Use [SSL encryption with trusted certificates](../../admin/security/ssl_encryption.md) to secure traffic between clients and the server.
+
+- Enable [Grafana HTTPS secure cookies](../../admin/security/grafana_cookies.md).
diff --git a/documentation/docs/pmm-admin/security/ssl_encryption.md b/documentation/docs/admin/security/ssl_encryption.md
similarity index 98%
rename from documentation/docs/pmm-admin/security/ssl_encryption.md
rename to documentation/docs/admin/security/ssl_encryption.md
index 54de5ebda7..f6c3f43b5a 100644
--- a/documentation/docs/pmm-admin/security/ssl_encryption.md
+++ b/documentation/docs/admin/security/ssl_encryption.md
@@ -20,7 +20,7 @@ For container-based installation, if your certificates are in a directory called
```sh
docker run -d -p 443:443 --volumes-from pmm-data \
--name pmm-server -v /etc/pmm-certs:/srv/nginx \
- --restart always perconalab/pmm-server:3.0.0-beta
+ --restart always percona/pmm-server:3
```
!!! note alert alert-primary ""
@@ -66,7 +66,7 @@ docker run \
-e PMM_AGENT_CONFIG_FILE=config/pmm-agent.yaml \
-v /your_directory_with/certs:/etc/pki/tls/certs \
--volumes-from pmm-client-data \
-percona/pmm-client:2
+percona/pmm-client:3
```
diff --git a/documentation/docs/pmm-admin/uninstall_docker.md b/documentation/docs/admin/uninstall_docker.md
similarity index 100%
rename from documentation/docs/pmm-admin/uninstall_docker.md
rename to documentation/docs/admin/uninstall_docker.md
diff --git a/documentation/docs/pmm-admin/uninstall_package_manager.md b/documentation/docs/admin/uninstall_package_manager.md
similarity index 100%
rename from documentation/docs/pmm-admin/uninstall_package_manager.md
rename to documentation/docs/admin/uninstall_package_manager.md
diff --git a/documentation/docs/pmm-admin/unregister_client.md b/documentation/docs/admin/unregister_client.md
similarity index 100%
rename from documentation/docs/pmm-admin/unregister_client.md
rename to documentation/docs/admin/unregister_client.md
diff --git a/documentation/docs/advisors/advisors-details.md b/documentation/docs/advisors/advisor-details.md
similarity index 100%
rename from documentation/docs/advisors/advisors-details.md
rename to documentation/docs/advisors/advisor-details.md
diff --git a/documentation/docs/advisors/advisors.md b/documentation/docs/advisors/advisors.md
index d8cc01fdab..68bd746555 100644
--- a/documentation/docs/advisors/advisors.md
+++ b/documentation/docs/advisors/advisors.md
@@ -21,7 +21,7 @@ If you are a Percona customer with a Percona Customer Portal account, you also g
To see the complete list of available checks, see the [Advisor Checks for PMM](https://docs.percona.com/percona-platform/advisors.html) topic in the Percona Platform documentation.
## Enable/Disable
-To download the checks available for your Percona Account, the Advisors and Telemetry options have to be enabled under **Configuration > Settings > Advanced Settings**.
+To download the checks available for your Percona Account, enable the Advisors and Telemetry options under :material-cog: **Configuration >** :material-cog-outline: **Settings > Advanced Settings**.
These options are enabled by default so that PMM can run automatic Advisor checks in the background. However, you can disable them at any time if you do not need to check the health and performance of your connected databases.
@@ -39,13 +39,13 @@ You can change the standard 24-hour interval to a custom frequency for each Advi
To change the frequency of an automatic check:
{.power-number}
-1. Click **{{icon.checks}} Advisors**.
+1. Click :material-magnify-expand: **Advisors**.
2. Select the Advisor tab that contains the check for which you want to change the frequency.
3. Expand the relevant Advisor and scroll through the list to find your check. Alternatively, use the **Filter** section at the top of the table to search checks by Name, Description, Status, or Interval.
!!! hint alert alert-success "Tip"
If you need to share filtered Advisor results with your team members, send them the PMM URL. This saves your search criteria and results.
-4. Click the  **Interval** icon in the **Actions** column, next to the check you want to update.
+4. Click the :material-pencil-box-outline: **Interval** icon in the **Actions** column, next to the check you want to update.
5. Chose an interval and click **Save**.
## Manual checks
@@ -54,7 +54,7 @@ In addition to the automatic checks that run every 24 hours, you can also run ch
To run checks manually:
{.power-number}
-1. Click **{{icon.checks}} Advisors** on the main menu.
+1. Click :material-magnify-expand: **Advisors** on the main menu.
2. Select the Advisor tab that contains the checks which you want to run manually.
3. Click **Run checks** to run all the available checks for this Advisor group, or expand an Advisor and click **Run** next to each check that you want to run individually.

@@ -69,12 +69,12 @@ The results are sent to PMM Server where you can review any failed checks on the

-To see more details about the available checks and any checks that failed, click the *{{icon.checks}} Advisors* icon on the main menu.
+To see more details about the available checks and any checks that failed, click the :material-magnify-expand: **Advisors** icon on the main menu.
-**Check results data *always* remains on the PMM Server.** This is not related to anonymous data sent for Telemetry purposes.
+Note: Check results *always* remain on the PMM Server. They are never sent as part of Telemetry.
## Create your own Advisors
PMM Advisors offer a set of checks that can detect common security threats, performance degradation, data loss and data corruption.
-Developers can create custom checks to cover additional use cases, relevant to specific database infrastructure. For more information, see [Develop Advisor checks](../details/develop-checks/index.html).
+Developers can create custom checks to cover additional use cases relevant to specific database infrastructure. For more information, see [Develop Advisor checks](develop-advisor-checks.md).
diff --git a/documentation/docs/advisors/develop_advisor_checks.md b/documentation/docs/advisors/develop-advisor-checks.md
similarity index 91%
rename from documentation/docs/advisors/develop_advisor_checks.md
rename to documentation/docs/advisors/develop-advisor-checks.md
index 58dfa92e96..a8784473b9 100644
--- a/documentation/docs/advisors/develop_advisor_checks.md
+++ b/documentation/docs/advisors/develop-advisor-checks.md
@@ -39,7 +39,7 @@ PMM uses Alertmanager API to get information about failed checks and show them o
## Format for checks
Advisor checks use the following format:
-??? note alert alert-info "Checks Format"
+??? note alert alert-info "Checks format"
{% raw %}
```yaml
@@ -184,7 +184,7 @@ Checks can include the following fields:
Expand the table below for the list of checks types that you can use to define your query type and the PMM Service type for which the check will run.
-??? note alert alert-info "Check Types table"
+??? note alert alert-info "Check types"
| Check type | Description | "query" required (must be empty if "No") |
|---|---|---|
@@ -199,7 +199,7 @@ Expand the table below for the list of checks types that you can use to define y
| MONGODB_GETDIAGNOSTICDATA |Executes db.adminCommand( { getDiagnosticData: 1 } ) against MongoDB's "admin" database. For more information, see [MongoDB Performance](https://docs.mongodb.com/manual/administration/analyzing-mongodb-performance/#full-time-diagnostic-data-capture)| No|
| METRICS_INSTANT |Executes instant [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query. Query can use placeholders in query string {% raw %} **{{.NodeName**}} and **{{.ServiceName}}** {% endraw %}. Both match target service/node names. To read more about instant queries, check out the [Prometheus docs](https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries).|Yes|
| METRICS_RANGE |Executes range [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) query. Query can use placeholders in query string {% raw %} **{{.NodeName**}} and **{{.ServiceName}}** {% endraw %}. Both match target service/node names. To read more about range queries, check out the [Prometheus docs](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).|Yes|
- | CLICKHOUSE_SELECT |Executes 'SELECT ...' statements against PMM's [Query Analytics](https://docs.percona.com/percona-monitoring-and-management/get-started/query-analytics.html) Clickhouse database. Queries can use the {% raw %} **{{.ServiceName**}} and **{{.ServiceID}}** {% endraw %} placeholders in query string. They match the target service name and service ID respectively.|Yes|
+ | CLICKHOUSE_SELECT |Executes 'SELECT ...' statements against PMM's [Query Analytics](../use/qan/index.html) Clickhouse database. Queries can use the {% raw %} **{{.ServiceName**}} and **{{.ServiceID}}** {% endraw %} placeholders in query string. They match the target service name and service ID respectively.|Yes|
## Query parameters
- `METRICS_INSTANT`
@@ -213,8 +213,8 @@ Expand the table below for the list of checks types that you can use to define y
## Develop checks
-!!! note alert alert-primary "Development / Debugging Only"
-Note that check development in PMM is currently for **debugging only** and **NOT for production use!** Future releases plan to include the option to run custom local checks in addition to hosted Percona Platform checks.
+!!! note alert alert-primary "Development/debugging only"
+ Note that check development in PMM is currently for **debugging only** and **NOT for production use!** Future releases plan to include the option to run custom local checks in addition to hosted Percona Platform checks.
To develop custom checks for PMM:
{.power-number}
@@ -227,28 +227,28 @@ To develop custom checks for PMM:
- `PERCONA_TEST_CHECKS_RESEND_INTERVAL=2s` to define the frequency for sending the SA-based alerts to Alertmanager.
```sh
- docker run -p 80:80 -p 443:443 --name pmm-server \
+ docker run -p 443:8443 --name pmm-server \
-e PERCONA_TEST_CHECKS_FILE=/srv/custom-checks.yml \
-e PERCONA_TEST_CHECKS_DISABLE_START_DELAY=true \
-e PERCONA_TEST_CHECKS_RESEND_INTERVAL=2s \
perconalab/pmm-server:3-dev-container
```
-3. Log into Grafana with credentials **admin/admin**.
+3. Log into Grafana with credentials **admin/admin**.
4. Go to **PMM Configuration > Settings > Advanced Settings** and make sure the **Advisors** option is enabled.
-5. Create `/srv/custom-checks.yml` inside the `pmm-server` container with the content of your check. Specify **dev** advisor in your check.
+5. Create `/srv/custom-checks.yml` inside the `pmm-server` container with the content of your check. Specify **dev** advisor in your check.
-6. The checks will run according to the time interval defined on the UI. You can see the result of running the check on the home dashboard:
+6. The checks will run according to the time interval defined on the UI. You can see the result of running the check on the home dashboard:

-7. Click on the number of failed checks to open the Failed Checks dashboard:
+7. Click on the number of failed checks to open the Failed Checks dashboard:

-8. Check out pmm-managed logs:
+8. Check out pmm-managed logs:
```sh
docker exec -it pmm-server supervisorctl tail -f pmm-managed
```
@@ -266,4 +266,4 @@ If debug logging is enabled, you can disable it with the following environment v
## Submit feedback
-We welcome your feedback on the current process for developing and debugging checks. Send us your comments over [Slack](https://percona.slack.com) or post a question on the [Percona Forums](https://forums.percona.com/).
+We welcome your feedback on the current process for developing and debugging checks. Send us your comments or post a question on the [Percona Forums](https://forums.percona.com/c/percona-monitoring-and-management-pmm/pmm-3/84).
diff --git a/documentation/docs/alert/contact_points.md b/documentation/docs/alert/contact_points.md
index 28b6e3e88e..0690877ca7 100644
--- a/documentation/docs/alert/contact_points.md
+++ b/documentation/docs/alert/contact_points.md
@@ -41,9 +41,10 @@ To use SMTP with a PMM Docker installation:
- `GF_SMTP_FROM_NAME`: Name to be used when sending out emails.
*NB: If you are using your Gmail’s SMTP credentials as shown above, you will have to generate an app password and fill it in as the value of your $GF_SMTP_PASSWORD variable.*
+
2. Pass in the `.env` file to Docker run using the `--env-file` flag:
```
- docker run --env-file=.env -p 443:443 -p 80:80 perconalab/pmm-server:3.0.0-beta
+ docker run --env-file=.env -p 443:8443 percona/pmm-server:3
```
This command starts a docker container and will keep running as long as the container is also running. Stopping the command (e.g with Ctrl+C) will stop the container hence, subsequent commands should be run in a new terminal.
@@ -122,7 +123,7 @@ A policy will match an alert if the alert’s labels match all the matching labe
This can be useful, for example, when you want to send notifications to a catch-all contact point as well as to one of more specific contact points handled by subsequent policies.
6. Toggle **Override grouping** if you do not want to use root policy grouping.
7. Toggle **Override general timings** to specify how often you want to wait until the initial notification is sent for a new group. When this is disabled, PMM uses root policy group timings instead.
-8. Add a mute timing if you want to mute notifications or this policy for a specific, regular interval. For example, you can create a mute to suppress trivial notifications during weekends. Mute timings are different from silences in the sense that they are recurring, while silences have a fixed start and end time.
+8. Add a mute timing if you want to mute notifications or this policy for a specific, regular interval. For example, you can create a mute to suppress trivial notifications during weekends. Mute timings are different from silences in the sense that they are recurring, while silences have a fixed start and end time.
!!! caution alert alert-warning "Important"
- Time specified in mute timing must be in UTC and military format i.e. 14:00 not 2:00 PM.
+    Time specified in mute timing must be in UTC and in 24-hour format, i.e. 14:00, not 2:00 PM.
diff --git a/documentation/docs/alert/index.md b/documentation/docs/alert/index.md
index f155b580dc..b5d91d12c2 100644
--- a/documentation/docs/alert/index.md
+++ b/documentation/docs/alert/index.md
@@ -13,5 +13,5 @@ Percona Alerting is powered by Grafana infrastructure. It leverages Grafana's ad
Depending on the datasources that you want to query, and the complexity of your required evaluation criteria, Percona Alerting enables you to create the following types of alerts:
- **Percona templated alerts**: alerts based on a set of Percona-supplied templates with common events and expressions for alerting.
-If you need custom expressions on which to base your alert rules, you can also create your own templates.
+If you want to use custom expressions in alert rules, you can create your own alert rule templates.
- **Grafana managed alerts**: alerts that handle complex conditions and can span multiple different data sources like SQL, Prometheus, InfluxDB, etc. These alerts are stored and executed by Grafana.
diff --git a/documentation/docs/alert/templates_list.md b/documentation/docs/alert/templates_list.md
index 1c3336c362..33d35abd7c 100644
--- a/documentation/docs/alert/templates_list.md
+++ b/documentation/docs/alert/templates_list.md
@@ -4,7 +4,7 @@ The table below lists all the alert templates available in Percona Monitoring an
This list includes both built-in templates (accessible to all PMM users), and customer-only templates.
-To access the customer-only templates, you must be a Percona customer and [connect PMM to Percona Platform](../how-to/integrate-platform.md) using a Percona Account.
+To access the customer-only templates, you must be a Percona customer and [connect PMM to Percona Platform](../configure-pmm/percona_platform/check_percona_platform.md) using a Percona Account.
| Template name | Description | Availability | Database technology |
| :------------ | :---------- | :----------- | :------------------ |
diff --git a/documentation/docs/annotate/index.md b/documentation/docs/annotate/index.md
deleted file mode 100644
index c82514455a..0000000000
--- a/documentation/docs/annotate/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# PMM annotation
-
-Alerting notifies of important or unusual activity in your database environments so that you can identify and resolve problems quickly. When something needs your attention, PMM automatically sends you an alert through your specified contact points.
\ No newline at end of file
diff --git a/documentation/docs/api/authentication.md b/documentation/docs/api/authentication.md
index 1ddce80a23..59f8cd973a 100644
--- a/documentation/docs/api/authentication.md
+++ b/documentation/docs/api/authentication.md
@@ -58,7 +58,7 @@ curl -H "Authorization: Bearer " https://127.0.0.1/v1/version
## Use a service token in basic authentication
-You can include the service token as a query parameter in a REST API call using the following format. Replace YOUR_SERVICE_TOKEN with the actual service token you obtained in step 12.
+You can include the service token as a query parameter in a REST API call using the following format. Replace YOUR_SERVICE_TOKEN with the actual service token you obtained in step 9.
**Example**
@@ -67,7 +67,7 @@ curl -X GET https://service_token:SERVICE_TOKEN@localhost/v1/version
```
## Use a service token in Bearer authentication (HTTP header)
-You can also include the service token in the header of an HTTP request for authentication. To do this, replace `SERVICE_TOKEN` with the actual service token you obtained in step 12.
+You can also include the service token in the header of an HTTP request for authentication. To do this, replace `SERVICE_TOKEN` with the actual service token you obtained in step 9.
**Example**
```shell
diff --git a/documentation/docs/api/index.md b/documentation/docs/api/index.md
index aaac1dc08f..9a8beb32e6 100644
--- a/documentation/docs/api/index.md
+++ b/documentation/docs/api/index.md
@@ -1,25 +1,67 @@
# About PMM API
-PMM Server lets you visually interact with API resources representing all objects within PMM. You can browse the API using the [Swagger](https://swagger.io/tools/swagger-ui/) UI, accessible at the `/swagger/` endpoint URL:
+PMM Server provides a comprehensive REST API that enables you to monitor databases, manage resources, collect metrics, and automate PMM operations programmatically. The API supports endpoints for managing nodes, services, agents, alerting, backups, and other PMM components.
-
-Clicking an object lets you examine objects and execute requests on them:
+The [complete API documentation on Readme.io](https://percona-pmm.readme.io/reference/introduction) includes detailed endpoint specifications, authentication methods, example requests, and response schemas.
+
+## Interactive API documentation
+
+You can explore and test the API using the built-in [Swagger](https://swagger.io/tools/swagger-ui/) UI, accessible at the `/swagger/` endpoint of your PMM Server. The Swagger interface allows you to:
+
+ - browse available API endpoints
+ - view detailed request and response schemas
+ - execute API requests directly from the browser
+ - test different parameters and see live responses

-The objects visible are nodes, services, and agents:
-- A **Node** represents a bare metal server, a virtual machine, a Docker container, or a more specific type such as an Amazon RDS Node. A node runs zero or more Services and Agents, and has zero or more Agents providing insights for it.
+## Core API resources
+
+The PMM API organizes resources into Nodes, Services and Agents.
+
+
+### Nodes
+
+A Node represents a hosting environment where your services run and monitoring takes place. This can include bare metal servers, virtual machines, Docker containers, or cloud instances like Amazon RDS.
+
+Each Node can:
+
+- host multiple Services and Agents
+- receive insights from zero or more Agents
+- be monitored independently of other components
+
+### Services
+
+A Service represents a monitored database or application instance in your infrastructure. These include database systems like MySQL, MongoDB, Amazon Aurora, PostgreSQL, and other supported database types.
+
+Services can:
+
+ - run on zero nodes (serverless configurations)
+ - run on a single node (standalone installations)
+ - span multiple nodes (distributed systems like Percona XtraDB Cluster)
+ - receive monitoring insights from multiple Agents
+
+### Agents
+Agents are the monitoring components that:
+
+- collect metrics, query data, and system information
+- run on specific Nodes
+- monitor multiple Services and/or Nodes
+- include both internal PMM agents and External Exporters
+
+## Resource types and management
-- A **Service** represents something useful running on the Node: Amazon Aurora MySQL, MySQL, MongoDB, etc. It runs on zero (Amazon Aurora Serverless), single (MySQL), or several (Percona XtraDB Cluster) Nodes. It also has zero or more Agents providing insights for it.
+All resources (Nodes, Services, and Agents) have specific Types that define their properties and operational logic, determining how each resource behaves within PMM.
-- An **Agent** represents something that runs on the Node which is not useful in itself, but instead provides insights (metrics, query performance data, etc.) about Nodes and/or Services. An agent always runs on the single Node (except External Exporters), and provides insights for zero or more Services and Nodes.
+The management of these resources follows specific patterns:
-Nodes, Services, and Agents have **Types** which define specific their properties, and their specific logic.
+- **Nodes and Services**: These are inherently external resources. PMM does not manage their actual creation or deletion. Instead, PMM maintains an inventory of these resources within PMM Server, allowing you to add them to or remove them from the inventory as needed (see the example request after this list).
+- **Agents**: Most Agents are initiated and halted by pmm-agent. The exception is the External Exporter Type, which is initiated externally. Agents are responsible for collecting and reporting monitoring data about Nodes and Services.
-Nodes and Services are inherently external. We don't manage their creation or deletion, but rather maintain a list of them within PMM Server by adding them to or removing them from the inventory. The majority of Agents are initiated and halted by pmm-agent, with one exception being the External Exporter Type, which is initiated externally.
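+
+As an illustration, here is a hedged sketch of querying the inventory through the REST API with a service token. The `/v1/inventory/nodes` path is indicative; confirm the exact paths for your PMM version in the Swagger UI or the API reference linked above:
+
+```sh
+# List the Nodes currently registered in the PMM inventory (sketch).
+# Replace SERVICE_TOKEN with a valid service token (see the authentication topic).
+curl -H "Authorization: Bearer SERVICE_TOKEN" https://127.0.0.1/v1/inventory/nodes
+```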
+## Authentication
-## Service accounts and authentication
+API access requires authentication using service account tokens.
-For information about controlling access to the PMM Server components and resources, see the **[Authentication with service accounts](../api/authentication.md)** topic.
\ No newline at end of file
+For details about controlling access to the PMM Server components and resources, see the **[Authentication with service accounts](../api/authentication.md)** topic.
\ No newline at end of file
diff --git a/documentation/docs/backup/mongodb-backup/backup_mongo.md b/documentation/docs/backup/mongodb-backup/backup_mongo.md
index 9065749590..7b1f65f6e9 100644
--- a/documentation/docs/backup/mongodb-backup/backup_mongo.md
+++ b/documentation/docs/backup/mongodb-backup/backup_mongo.md
@@ -2,7 +2,11 @@
PMM supports the following actions for MongoDB backups:
+!!! note alert alert-primary "Docker limitations"
+ MongoDB instances running in Docker containers are not supported for backup operations.
+
## Replica set setups
+The following backup operations are supported for replica sets:
- Storing backups on Amazon S3-compatible object storage, and on mounted filesystem
- Creating and restoring Logical snapshot backups
@@ -19,4 +23,4 @@ PMM 3 supports backing up sharded clusters. However, restoring for sharded clust
- Creating Physical snapshot backups
- Creating logical PITR backups both locally and on S3-compatible object storage
-For a detailed overview of the supported setups for MongoDB, check out the [Support matrix](../backup/mongodb_limitations.md).
\ No newline at end of file
+For a detailed overview of the supported setups for MongoDB, check out the [Support matrix](mongodb_limitations.md).
diff --git a/documentation/docs/backup/mongodb-backup/create_PITR_mongo.md b/documentation/docs/backup/mongodb-backup/create_PITR_mongo.md
index 561102d301..36fb5b2a48 100644
--- a/documentation/docs/backup/mongodb-backup/create_PITR_mongo.md
+++ b/documentation/docs/backup/mongodb-backup/create_PITR_mongo.md
@@ -49,7 +49,7 @@ Before creating a backup, make sure to check the [MongoDB backup prerequisites](
Unless you are using verified custom workflows, make sure to keep the default **Folder** value coming from the cluster name. Editing this field will impact PMM-PBM integration workflows.
11. Click **Schedule** to start creating the backup artifact.
-12. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis indicator {{icon.bouncingellipsis}} shows that a backup is currently being created.
+12. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis icon :material-dots-horizontal: shows that a backup is currently being created.

@@ -69,4 +69,4 @@ Make sure to disable any other scheduled backup jobs before creating a PITR back

-This constraint applies at the service level. You can still have PITR enabled for one service while having regular scheduled backup jobs for other services.
\ No newline at end of file
+This constraint applies at the service level. You can still have PITR enabled for one service while having regular scheduled backup jobs for other services.
diff --git a/documentation/docs/backup/mongodb-backup/create_mongo_on_demand.md b/documentation/docs/backup/mongodb-backup/create_mongo_on_demand.md
index bd8c1424d3..49a2b19917 100644
--- a/documentation/docs/backup/mongodb-backup/create_mongo_on_demand.md
+++ b/documentation/docs/backup/mongodb-backup/create_mongo_on_demand.md
@@ -30,4 +30,4 @@ To schedule or create an on-demand backup, check the instructions below. If you
Unless you are using verified custom workflows, make sure to keep the default **Folder** value coming from the cluster name. Editing this field will impact PMM-PBM integration workflows.
11. To start creating the backup artifact, click **Backup** or **Schedule** at the top of the window, depending on whether you are creating a scheduled or an on-demand backup.
-12. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis indicator {{icon.bouncingellipsis}} shows that a backup is currently being created.
+12. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis icon :material-dots-horizontal: shows that a backup is currently being created.
diff --git a/documentation/docs/backup/mongodb-backup/mongo_prerequisites.md b/documentation/docs/backup/mongodb-backup/mongo_prerequisites.md
index d6c69b5185..8990f21b66 100644
--- a/documentation/docs/backup/mongodb-backup/mongo_prerequisites.md
+++ b/documentation/docs/backup/mongodb-backup/mongo_prerequisites.md
@@ -3,7 +3,7 @@
Before creating MongoDB backups, make sure to:
{.power-number}
-1. Check that **Backup Management** is enabled and the Backup option is available on the side menu. If Backup Management has been disabled on your instance, go to **Configuration > PMM Settings > Advanced Settings**, re-enable **Backup Management** then click **Apply changes**.
+1. Check that **Backup Management** is enabled and the Backup option is available on the side menu. If Backup Management has been disabled on your instance, go to :material-cog: **Configuration > PMM Settings > Advanced Settings**, re-enable **Backup Management** then click **Apply changes**.
2. [Prepare and create a storage location for your backups](../prepare_storage_location.md).
3. Check that [PMM Client](../../install-pmm/install-pmm-client/index.md) is installed and running on all MongoDB nodes in the cluster.
4. Check that [Percona Backup for MongoDB](https://docs.percona.com/percona-backup-mongodb/index.html) (PBM) is installed and `pbm-agent` is running on all MongoDB nodes in the replica set. Make sure to [configure the MongoDB connection URI for pbm-agent](https://docs.percona.com/percona-backup-mongodb/install/initial-setup.html#set-the-mongodb-connection-uri-for-pbm-agent) on all nodes.
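+
+To verify step 4, PBM's own status command is a quick check. This is a sketch; run it on any node in the replica set, assuming the `pbm` CLI is installed and its MongoDB connection URI is configured as described above:
+
+```sh
+# Shows the connected pbm-agents and the configured backup storage.
+pbm status
+```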
diff --git a/documentation/docs/backup/mongodb-backup/restore_MongoDB_backups.md b/documentation/docs/backup/mongodb-backup/restore_MongoDB_backups.md
index d27909f0d8..02771a026f 100644
--- a/documentation/docs/backup/mongodb-backup/restore_MongoDB_backups.md
+++ b/documentation/docs/backup/mongodb-backup/restore_MongoDB_backups.md
@@ -6,7 +6,7 @@ To restore a backup:
{.power-number}
1. Go to **Backup > All backups** and find the backup that you want to restore.
-2. Click the arrow in the **Actions** column to check all the information for the backup, then click  **Restore from backup**.
+2. Click the arrow in the **Actions** column to check all the information for the backup, then click **Restore from backup**.
This opens the **Restore from backup** dialog, with the **Same service** option automatically preselected. This is because, currently, MongoDB backups can only be restored to a service with identical properties.
3. If you are restoring a PITR backup, select the point for the date and time that you want to restore the database to.
4. Click **Restore** then go to the **Restores** tab to check the status of the restored backup.
@@ -110,7 +110,7 @@ To restore to a new cluster manually:
    `pbm restore --time="2022-11-23T19:40:26"`
- For more information, see the [Point-in-time Recovery topic in the PBM documentation](https://docs.percona.com/percona-backup-mongodb/usage/point-in-time-recovery.html).
+ For more information, see the [Point-in-time Recovery topic in the PBM documentation](https://docs.percona.com/percona-backup-mongodb/features/point-in-time-recovery.html?h=point).
6. Check the progress of the restore operation, using one of the commands below:
diff --git a/documentation/docs/backup/mysql-backup/create_mysql_backup.md b/documentation/docs/backup/mysql-backup/create_mysql_backup.md
index 7bad8a9ed5..4f8a58910b 100644
--- a/documentation/docs/backup/mysql-backup/create_mysql_backup.md
+++ b/documentation/docs/backup/mysql-backup/create_mysql_backup.md
@@ -20,4 +20,4 @@ To create a backup:
8. Leave the **Folder** field as is. This field is relevant for MongoDB backups to ensure compatibility with PBM workflows and comes prefilled with the cluster label.
9. Expand **Advanced Settings** to specify the settings for retrying the backup in case of any issues. You can either let PMM retry the backup again (**Auto**), or do it again yourself (**Manual**). Auto retry mode enables you to select up to ten retries and an interval of up to eight hours between retries.
10. To start creating the backup artifact, click **Backup** or **Schedule** at the top of the window, depending on whether you are creating a scheduled or an on-demand backup.
-11. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis indicator {{icon.bouncingellipsis}} shows that a backup is currently being created.
+11. Go to the **All Backups** tab, and check the **Status** column. An animated ellipsis icon :material-dots-horizontal: shows that a backup is currently being created.
diff --git a/documentation/docs/backup/mysql-backup/mysql_prerequisites.md b/documentation/docs/backup/mysql-backup/mysql_prerequisites.md
index 71df9815f9..57f7eb1879 100644
--- a/documentation/docs/backup/mysql-backup/mysql_prerequisites.md
+++ b/documentation/docs/backup/mysql-backup/mysql_prerequisites.md
@@ -3,7 +3,7 @@
Before creating MySQL backups, make sure to:
{.power-number}
-1. Check that **Backup Management** is enabled and the Backup option is available on the side menu. If Backup Managemt has been disabled on your instance, go to **Configuration > PMM Settings > Advanced Settings**, re-enable **Backup Management** then click **Apply changes**.
+1. Check that **Backup Management** is enabled and the Backup option is available on the side menu. If Backup Management has been disabled on your instance, go to :material-cog: **Configuration > PMM Settings > Advanced Settings**, re-enable **Backup Management**, then click **Apply changes**.
!!! caution alert alert-warning "Important"
If PMM Server runs as a Docker container, enable backup features at container creation time by adding `-e ENABLE_BACKUP_MANAGEMENT=1` to your `docker run` command.
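+
+For example, a minimal sketch of such a `docker run` command, assuming the `percona/pmm-server:3` image and the HTTPS port mapping used elsewhere in this guide (adapt names, volumes, and ports to your environment):
+
+```sh
+# Enable Backup Management at container creation time (sketch).
+docker run -d -p 443:8443 -e ENABLE_BACKUP_MANAGEMENT=1 --name pmm-server percona/pmm-server:3
+```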
diff --git a/documentation/docs/backup/mysql-backup/restore_mysql_backup.md b/documentation/docs/backup/mysql-backup/restore_mysql_backup.md
index 2a68e7f4be..5ad17d9c83 100644
--- a/documentation/docs/backup/mysql-backup/restore_mysql_backup.md
+++ b/documentation/docs/backup/mysql-backup/restore_mysql_backup.md
@@ -8,7 +8,7 @@ To restore a backup:
{.power-number}
1. Go to **Backup > All backups** and find the backup that you want to restore.
-2. Click the three dots  in the **Actions** column to check all the information for the backup, then click  **Restore from backup**.
+2. Click the three dots in the **Actions** column to check all the information for the backup, then click **Restore from backup**.
3. In the **Restore from backup** dialog, select **Same service** to restore to a service with identical properties or **Compatible services** to restore to a compatible service.
4. Select one of the available service names from the drop-down menu.
5. Check the values, then click **Restore**.
diff --git a/documentation/docs/backup/prepare_storage_location.md b/documentation/docs/backup/prepare_storage_location.md
index 6d3f2a8d27..2c918378f7 100644
--- a/documentation/docs/backup/prepare_storage_location.md
+++ b/documentation/docs/backup/prepare_storage_location.md
@@ -46,7 +46,7 @@ A sample [IAM](https://aws.amazon.com/iam/) policy is:
}
```
-## [Create the storage location](#create-a-storage-location)
+## Create the storage location
1. Go to **Backup > Storage Locations**:
diff --git a/documentation/docs/configure-pmm/advanced_settings.md b/documentation/docs/configure-pmm/advanced_settings.md
index e0ec60696f..a5ae53aa85 100644
--- a/documentation/docs/configure-pmm/advanced_settings.md
+++ b/documentation/docs/configure-pmm/advanced_settings.md
@@ -16,7 +16,7 @@ PMM Telemetry is based on data collected by various PMM components and stored in
-!!! note alert alert-primary ""
When PMM is installed, telemetry is not sent immediately. Before the first telemetry report is generated, PMM provides users with a 24-hour grace period to disable telemetry.
-To see the metrics being collected by telemetry, from the [main menu](../reference/ui/ui_components.md#main-menu) navigate to **PMM Configuration** > **Settings** > **Advanced Settings** > **Telemetry** and hover over the exclamation mark.
+To see the metrics being collected by telemetry, from the [main menu](../reference/ui/ui_components.md#main-menu) navigate to **PMM Configuration** > :material-cog-outline: **Settings** > **Advanced Settings** > **Telemetry** and hover over the exclamation mark.

diff --git a/documentation/docs/configure-pmm/alertmanager.md b/documentation/docs/configure-pmm/alertmanager.md
deleted file mode 100644
index 7c88ae7b3d..0000000000
--- a/documentation/docs/configure-pmm/alertmanager.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Alertmanager integration
-
-Alertmanager manages alerts, de-duplicating, grouping, and routing them to the appropriate receiver or display component.
-
-This section lets you configure how VictoriaMetrics integrates with an external Alertmanager.
-
-!!! hint alert alert-success "Tip"
- If possible, use [Integrated Alerting](../alert/index.md) instead of Alertmanager.
-
-- The **Alertmanager URL** field should contain the URL of the Alertmanager which would serve your PMM alerts.
-- The **Prometheus Alerting rules** field is used to specify alerting rules in the YAML configuration format.
-
-Fill in both fields and click the **Apply Alertmanager settings** button to proceed.
-
diff --git a/documentation/docs/configure-pmm/configure.md b/documentation/docs/configure-pmm/configure.md
index 13134e460a..af9abe6322 100644
--- a/documentation/docs/configure-pmm/configure.md
+++ b/documentation/docs/configure-pmm/configure.md
@@ -6,20 +6,16 @@ The **PMM Configuration** page gives you access to PMM setup's settings and inve
* [Metrics resolution](metrics_res.md)
* [Advanced Settings](advanced_settings.md)
- * [Data Retention](advanced_settings.md#data-retention)
+ * [Data retention](advanced_settings.md#data-retention)
* [Telemetry](advanced_settings.md#telemetry)
* [Check for updates](advanced_settings.md#check-for-updates)
* [Advisors](advanced_settings.md#advisors)
* [Public address](public-address.md)
* [Alerting](public-address.md#alerting)
* [Microsoft Azure Monitoring](public-address.md#microsoft-azure-monitoring)
- * [Public Address {: #public-address-1 }](public-address.md#public-address--public-address-1-)
* [SSH Key](ssh.md)
-* [Alertmanager integration](alertmanager.md)
-* [Percona Platform](percona_platform.md)
- * [Connect PMM to Percona Platform](percona_platform.md#connect-pmm-to-percona-platform)
- * [Password Reset](percona_platform.md#password-reset)
- * [Password Forgotten](percona_platform.md#password-forgotten)
- * [Change Password after Login](percona_platform.md#change-password-after-login)
+* [Percona Portal](../configure-pmm/percona_platform/integrate_with_percona_platform.md)
+ * [Check Percona Portal account information](../configure-pmm/percona_platform/account-info.md)
+ * [Connect PMM to Percona Platform](../configure-pmm/percona_platform/check_percona_platform.md)
-You can also use the [**Administration** page](../pmm-admin/index.md) to manage Grafana-related configurations and account settings.
+You can also use the [**Administration** page](../admin/index.md) to manage Grafana-related configurations and account settings.
diff --git a/documentation/docs/how-to/account-info.md b/documentation/docs/configure-pmm/percona_platform/account-info.md
similarity index 89%
rename from documentation/docs/how-to/account-info.md
rename to documentation/docs/configure-pmm/percona_platform/account-info.md
index 34cf59fc60..fdd22ffd9d 100644
--- a/documentation/docs/how-to/account-info.md
+++ b/documentation/docs/configure-pmm/percona_platform/account-info.md
@@ -12,6 +12,6 @@ You can check the list of available Paid Advisor checks on the [Advisors details
When you connect with a customer account, PMM reveals two new tabs on the main menu, where you can check all the information available for your customer accounts: **Entitlements** and **Support tickets**:
-
+
-
+
diff --git a/documentation/docs/configure-pmm/percona_platform/check_percona_platform.md b/documentation/docs/configure-pmm/percona_platform/check_percona_platform.md
index f1c279d4d5..c16318e09a 100644
--- a/documentation/docs/configure-pmm/percona_platform/check_percona_platform.md
+++ b/documentation/docs/configure-pmm/percona_platform/check_percona_platform.md
@@ -19,16 +19,14 @@ To confirm that you have successfully connected the server and check the list of
## Check Percona Portal entitlements
-After connecting to the Percona Platform, PMM has access to [additional alert templates](/docs/alert/templates_list.md), [Advisor checks](/docs/advisors/advisors-details.md), and account information. See [Check Percona Portal account information](../how-to/account-info.md).
+After connecting to the Percona Platform, PMM has access to [additional alert templates](../../alert/templates_list.md), [Advisor checks](../../advisors/advisors-details.md), and account information. See [Check Percona Portal account information](account-info.md).
-After connecting to the Percona Platform, PMM has access to additional alert templates, Advisor checks, and account information. See [Check Percona Portal account information](../how-to/account-info.md).
-
### Disconnect a PMM instance
Disconnect a PMM instance when you want to unlink it from your Percona Platform organization or stop monitoring it there.
-To disconnect a PMM Server, go to **Configuration > Settings > Percona Platform** and click **Disconnect**.
+To disconnect a PMM Server, go to :material-cog: **Configuration > Settings > Percona Platform** and click **Disconnect**.
#### Disconnecting instances as an Admin
diff --git a/documentation/docs/configure-pmm/public-address.md b/documentation/docs/configure-pmm/public-address.md
index e300756881..cf765314e9 100644
--- a/documentation/docs/configure-pmm/public-address.md
+++ b/documentation/docs/configure-pmm/public-address.md
@@ -4,7 +4,7 @@ The address or hostname PMM Server will be accessible at. Click **Get from brows
## Alerting
-Enables [Percona Alerting](../get-started/alerting.md) and reveals the **Percona templated alerts** option on the Alerting page.
+Enables [Percona Alerting](../alert/index.md) and reveals the **Percona templated alerts** option on the Alerting page.
## Microsoft Azure Monitoring
diff --git a/documentation/docs/configure-pmm/ssh.md b/documentation/docs/configure-pmm/ssh.md
index 17eeb67234..48f2a94b52 100644
--- a/documentation/docs/configure-pmm/ssh.md
+++ b/documentation/docs/configure-pmm/ssh.md
@@ -1,6 +1,6 @@
# SSH Key
-This section enables you to upload your public SSH key for SSH access to the PMM Server, such as when accessing it as a [virtual appliance](../install-pmm/install-pmm-server/baremetal/virtual/index.md).
+This section enables you to upload your public SSH key for SSH access to the PMM Server, such as when accessing it as a [virtual appliance](../install-pmm/install-pmm-server/deployment-options/virtual/index.md).

diff --git a/documentation/docs/css/design.css b/documentation/docs/css/design.css
index a5271fb756..e99de139a8 100644
--- a/documentation/docs/css/design.css
+++ b/documentation/docs/css/design.css
@@ -90,6 +90,7 @@
/* Defaults */
--md-default-bg-color: var(--white);
+ --md-default-fg-color: var(--stone900);
--md-default-fg-color--light: rgba(44, 50, 62, 0.72);
--md-default-fg-color--lighter: rgba(44, 50, 62, 0.40);
--md-default-fg-color--lightest: rgba(44, 50, 62, 0.25);
@@ -124,6 +125,7 @@
/* Defaults */
--md-default-bg-color: var(--stone900);
+ --md-default-fg-color: var(--white);
--md-default-fg-color--light: rgba(251, 251, 251, 0.72);
--md-default-fg-color--lighter: rgba(251, 251, 251, 0.4);
--md-default-fg-color--lightest: rgba(209, 213, 222, 0.25);
@@ -171,7 +173,7 @@
margin: 0 0 0.75em;
}
-.md-header {
+.md-header :not(.md-search__suggest) {
font-family: var(--fHeading);
font-weight: bold;
}
@@ -854,4 +856,4 @@ i[warning] [class*="moji"] {
}
}
-/**/
\ No newline at end of file
+/**/
diff --git a/documentation/docs/css/extra.css b/documentation/docs/css/extra.css
index d1dc1c36bd..225345f3eb 100644
--- a/documentation/docs/css/extra.css
+++ b/documentation/docs/css/extra.css
@@ -4,12 +4,6 @@
top: 0.6rem;
left: 0.6rem;
}
-
- /* Do not render html tags with this class in PDF */
- .no-pdf {
- display: none !important;
- }
-
/* Modify rendering of numbered lists in PDF */
.power-number+ol>li::before,
.power-number+ol ol>li::before {
@@ -20,9 +14,15 @@
margin-left: -1.7em;
opacity: 1;
}
-
+ /* Do not render html tags with this class in PDF */
+ .no-pdf {
+ display: none !important;
+ }
.tabbed-content {
display: contents;
}
-}
-
\ No newline at end of file
+}
+.md-sidebar__inner {
+ font-size: 0.65rem; /* Font size */
+ line-height: 1.6;
+}
\ No newline at end of file
diff --git a/documentation/docs/get-help.md b/documentation/docs/get-help.md
new file mode 100644
index 0000000000..70e7a51e2a
--- /dev/null
+++ b/documentation/docs/get-help.md
@@ -0,0 +1,25 @@
+# Get help from Percona
+
+Our documentation guides are packed with information, but they can’t cover everything you need to know about Percona Monitoring and Management (PMM). They also won’t cover every scenario you might come across. Don’t be afraid to try things out and ask questions when you get stuck.
+
+## Percona's Community Forum
+
+Be a part of a space where you can tap into a wealth of knowledge from other database enthusiasts and experts who work with Percona’s software every day. While our service is entirely free, keep in mind that response times can vary depending on the complexity of the question. You are engaging with people who genuinely love solving database challenges.
+
+
+Visit the [PMM Community Forum](https://forums.percona.com/c/percona-monitoring-and-management-pmm/30/all){:target="_blank"}. It’s an excellent place for discussions, technical insights, and support around Percona database software. If you’re new and feeling a bit unsure, our [FAQ](https://forums.percona.com/faq){:target="_blank"} and [Guide for new users](https://forums.percona.com/t/faq-guide-for-new-users/8562){:target="_blank"} can ease you in.
+
+If you have thoughts, feedback, or ideas, the community team would like to hear from you at [Any ideas on how to make the forum better?](https://forums.percona.com/t/any-ideas-on-how-to-make-the-forum-better/11522){:target="_blank"}. We’re always excited to connect and improve everyone’s experience.
+
+## Percona Experts
+
+[Percona Experts](https://www.percona.com/services/consulting){:target="_blank"} bring years of experience in tackling tough database performance issues and design challenges. We understand the challenges of managing complex database environments. That's why we offer various services to help you simplify your operations and achieve your goals.
+
+| Service | Description |
+|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| 24/7 Expert support | Our dedicated team of database experts is available 24/7 to assist you with any database issues. We provide flexible support plans tailored to your specific needs. |
+| Hands-on database management | Our managed services team can take over the day-to-day management of your database infrastructure, freeing up your time to focus on other priorities. |
+| Expert consulting | Our experienced consultants provide guidance on database topics like architecture design, migration planning, performance optimization, and security best practices. |
+| Comprehensive training | Our training programs help your team develop skills to manage databases effectively, offering virtual and in-person courses. |
+
+We're here to help you every step of the way. Whether you need a quick fix or a long-term partnership, we're ready to provide the expertise and support you need.
\ No newline at end of file
diff --git a/documentation/docs/how-to/extend-metrics.md b/documentation/docs/how-to/extend-metrics.md
deleted file mode 100644
index e9daabd684..0000000000
--- a/documentation/docs/how-to/extend-metrics.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Extend Metrics
-
-When you need a metric that’s not present in the default list of `node_exporter` metrics you may be able to use the `textfile` collector.
-The textfile collector allows exporting of statistics from batch jobs. It can also be used to export static metrics, such as what role a machine has.
-
-## Enable the textfile collector
-
-The collector is enabled by default. The following folders are used for different resolutions:
-
-| Resolution | Folder |
-|------------|---------------------------------------------------------------------------|
-| High | `/usr/local/percona/pmm/collectors/textfile-collector/high-resolution` |
-| Medium | `/usr/local/percona/pmm/collectors/textfile-collector/medium-resolution` |
-| Low | `/usr/local/percona/pmm/collectors/textfile-collector/low-resolution` |
-
-
-
-The exporter parses all files in these directories that match the filename wildcard expression `*.prom` using a simple text-based [exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format).
-Metrics are stored on the PMM Server-side with additional labels related to this Node.
-
-## Examples of shell commands for custom metrics
-
-To statically set roles for a machine using labels:
-
-```sh
-echo 'node_role{role="my_monitored_server_1"} 1' > /usr/local/percona/pmm/collectors/textfile-collector/low-resolution/node_role.prom
-```
-
-Here's an example of a `cron` job that automatically pushes logged-in users:
-
-```sh
-$ cat /etc/cron.d/loggedin_users
-*/1 * * * * root /usr/bin/who | /usr/bin/wc -l | sed -ne 's/^/node_loggedin_users /p' > /usr/local/percona/pmm/collectors/textfile-collector/high-resolution/node_users.prom
-```
-
-
-
diff --git a/documentation/docs/how-to/index.md b/documentation/docs/how-to/index.md
deleted file mode 100644
index 3e0056a948..0000000000
--- a/documentation/docs/how-to/index.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# How to
-
-- [Configure](configure.md) via the PMM Settings page.
-- [Manage users](manage-users.md) via the PMM Users page.
-- [Upgrade](upgrade.md) PMM Server via the user interface.
-- [Secure](secure.md) your PMM installation.
-- [Optimize](optimize.md) the performance of your PMM installation.
-- [Annotate](annotate.md) charts to mark significant events.
-- [Share dashboards and panels](share-dashboard.md) to save or share.
-- [Extend Metrics](extend-metrics.md) with textfile collector.
-- [Troubleshoot](troubleshoot.md)
diff --git a/documentation/docs/how-to/integrate-platform.md b/documentation/docs/how-to/integrate-platform.md
deleted file mode 100644
index 1a880fe454..0000000000
--- a/documentation/docs/how-to/integrate-platform.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Integrate PMM with Percona Platform
-Percona Platform brings together database distributions, support expertise, services, management, and automated insights.
-
-Connect your PMM Servers to Percona Platform to boost the monitoring capabilities of your PMM installations and manage database deployments easier. In addition, you get access to PMM updates, automated insights, advanced advisor checks and more alert rule templates.
-
-### Connect PMM to Percona Platform
-You can connect to Percona Platform with a Percona Account or via Google or GitHub authentication. If [Percona Support](https://www.percona.com/about-percona/contact) has enabled a custom identity provider for your account, you can also log in using your company's credentials.
-
-We recommend that you connect with a Percona Account, as this gives you access to other Percona services, including Percona Platform, Percona Customer Portal, and Community Forum. If you don’t have a Percona Account, you can create one on the [Percona Platform homepage](https://portal.percona.com/login) using the **Don't have an account? Create one?** link.
-
-#### Prerequisites
-To ensure that PMM can establish a connection to Percona Platform:
-
-### Check that you are a member of an existing Platform organization
-
-1. Log in to [Percona Platform](https://portal.percona.com) using your Percona Account. If you are connecting via GitHub, make sure you set your email address as **public** in your GitHub account. If your email address is private instead, Percona Platform cannot access it to authenticate you.
-
-2. On the **Getting Started** page, check that the **Create organization** step shows an option to view your organization.
-
-Contact your account administrator or create a new organization for your Percona Account if this is the case.
-
-### Set the public address of your PMM Server
-PMM automatically detects and populates the public address of the PMM Server when this is not set up.
-If you need to set it differently, go to **Settings > Advanced Settings** and edit the
-**Public Address** field.
-
-## Connect PMM to Percona Platform
-To connect your PMM Server to Percona Platform, copy your personal access token from Platform Portal and paste it into PMM. You will find your access token in Platform Portal as part of your user profile page.
-#### Token validity
-For security reasons, access tokens expire after 30 minutes. Make sure to paste the code before that, or generate a new one if it expires.
-
-To connect your PMM Server to Percona Platform:
-1. In PMM, go to **Settings > Percona Platform** tab to fill in the **Connect PMM to Percona Portal** form: 
-
-2. The **PMM Server ID** field is automatically populated with the ID identified for your PMM instance. Enter the name of your PMM instance and click **Get token** to go to Percona Platform Portal and generate your access token.
-3. Log into Percona Platform using your Percona Account (if you don't have an active current session).
-4. On the **Profile Settings page**, copy the code from the **Percona Platform Access Token** field.
-5. Back into PMM, paste the Access Token into the **Percona Platform Access Token** field, and click **Connect**.
-
-To confirm that you have successfully connected the server and check the list of all servers currently connected to an organization, go to [Percona Platform](https://portal.percona.com) > **Dashboard** tab and click **View Instances** next to the **Connect your PMM** step.
-
-## Check Percona Portal entitlements
-After connecting to the Percona Platform, PMM has access to additional alert templates, Advisors checks, and account information. See (../how-to/account-info.md)
-
-### Disconnect a PMM instance
-Disconnect a PMM instance when you want to unlink it from your Percona Platform organization or stop monitoring it there.
-
-To disconnect a PMM Server, go to > **Configuration > Settings > Percona Platform** and click **Disconnect**.
-
-#### Disconnecting instances as an Admin
-
-In situations where you are not able to disconnect servers yourself, ask your PMM Admin to disconnect the server for you. For example, you may not be able to disconnect servers when PMM is moved to a network segment without outbound connections to public networks.
-
-If you cannot disconnect servers yourself, ask your PMM Admin to disconnect the server for you. For example, you may not be able to disconnect servers when PMM is moved to a network segment without outbound connections to public networks.
-
-If you are a PMM Admin, you can terminate any connections to Percona Platform, even if you are not logged into PMM with a Percona Account. However, we recommend logging in with a Percona Account before disconnecting servers, as this will automatically remove the disconnected servers from Percona Platform as well.
-
-If you do disconnect servers without being connected with a Percona Account, you'll have to manually remove the unavailable servers from Percona Platform. This ensures that your list of connected PMM instances stays up-to-date in Percona Platform.
-
-To do this, go to [PMM instances](https://portal.percona.com/login), and remove any servers that you have already disconnected from PMM.
-
-## Sign into PMM with your Percona Account
-
-Once you've successfully connected your PMM instance to the Percona Platform, you can also sign into PMM using your Percona Account:
-
-1. Log out of your existing PMM session.
-2. On the PMM login screen, click *Sign in with Percona Account*.
- If you have an active Percona Account session on the same browser, PMM will log you in automatically. Otherwise, enter your Percona Account credentials to start a new session.
\ No newline at end of file
diff --git a/documentation/docs/how-to/manage-users.md b/documentation/docs/how-to/manage-users.md
deleted file mode 100644
index c676ec6f8b..0000000000
--- a/documentation/docs/how-to/manage-users.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# Manage users
-
-This topic explains user management in PMM.
-
-You can manage users from the [main menu](../details/interface.md#main-menu) by navigating to *Server Admin → Users* page.
-
-
-
-
-
-
-## Add users
-
-You can add a user in PMM from *User → New user* tab.
-
-
-
-To add a new user in PMM:
-
-1. On the *Users* tab, click *New user*.
-2. On the *Add new user* dialog box, enter the following:
- - Name
- - email address or username (if this is an existing grafana user)
- - Username
- - Password
-
-3. Click *create user*.
-
-
-## Edit users
-
-You can edit users by changing the information or settings for an individual user account.
-
-!!! caution alert alert-warning "Important"
- After changing the default admin password for the PMM Server, register the pmm-agent using the same credentials and add the services again. Otherwise, PMM will cease to monitor the service/nodes.
-
-### Grant or Revoke admin privileges
-
-You can grant or revoke admin access to a user as follows:
-
-1. On the *Users* tab, click the user account you want to edit.
-
-2. To grant or revoke the privileges, click the user. User information dialog box opens.
-
-3. In the *Permissions* section, click *Change* and then select *Yes/No*, depending on whether you want to provide admin access or not.
-
-4. Click *Change*.
-
-!!! caution alert alert-warning "Important"
- After connecting your PMM instance to the Percona Platform, when you log in using your Percona account, you will be granted the *Viewer* access. For *Admin* access, log in to PMM as an admin, and change the permissions for this user.
-
-### Change organization role
-
-You can change the organization role assigned to your user account.
-
-
-
-
-To change the role:
-
-1. On the *Users* tab, click the user for whom you want to change the role.
-
-2. In the *Organisations* section, click *Change role*.
-
-3. Select the role from the drop-down and click *save*.
-
-The following are the privileges for the various roles:
-
-- Admin - Managing data sources, teams, and users within an organization.
-
-- Editor - Creating and editing dashboards.
-
-- Viewer - Viewing dashboards.
-
-For detailed information on the privileges for these roles and the different tasks that they can perform, refer to: [Grafana organization roles](https://grafana.com/docs/grafana/latest/permissions/organization_roles/).
-
-
-
-## Delete Users
-
-You can delete a user in PMM as follows:
-
-1. On the *User* tab, click the user you want to delete.
-
-2. Click *Delete user*.
-
-
diff --git a/documentation/docs/how-to/optimize.md b/documentation/docs/how-to/optimize.md
deleted file mode 100644
index d488e81255..0000000000
--- a/documentation/docs/how-to/optimize.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Optimize
-
-## Improving PMM Performance with Table Statistics Options
-
-If a MySQL instance has a lot of schemas or tables, there are two options to help improve the performance of PMM when adding instances with `pmm-admin add`:
-
-- `--disable-tablestats`, or,
-- `--disable-tablestats-limit`.
-
-!!! caution alert alert-warning "Important"
- - These settings are only for adding an instance. To change them, you must remove and re-add the instances.
- - Only one of these options can be used when adding an instance.
-
-## Disable per-table statistics for an instance
-
-When adding an instance with `pmm-admin add`, the `--disable-tablestats` option disables table statistics collection when there are more than the default number (1000) of tables in the instance.
-
-### USAGE
-
-```sh
-pmm-admin add mysql --disable-tablestats
-```
-
-## Change the number of tables beyond which per-table statistics is disabled
-
-When adding an instance with `pmm-admin add`, the `--disable-tablestats-limit` option changes the number of tables (from the default of 1000) beyond which per-table statistics collection is disabled.
-
-### USAGE
-
-```sh
-pmm-admin add mysql --disable-tablestats-limit=
-```
-
-### EXAMPLE
-
-Add a MySQL instance, disabling per-table statistics collection when the number of tables in the instance reaches 2000.
-
-```sh
-pmm-admin add mysql --disable-tablestats-limit=2000
-```
diff --git a/documentation/docs/how-to/secure.md b/documentation/docs/how-to/secure.md
deleted file mode 100644
index 7210c3a0cd..0000000000
--- a/documentation/docs/how-to/secure.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Secure
-
-You can improve the security of your PMM installation with:
-
-- [SSL encryption](#ssl-encryption) to secure traffic between client and server;
-
-- [Grafana HTTPS secure cookies](#grafana-https-secure-cookies)
-
-To see which security features are enabled:
-
-```sh
-pmm-admin status
-```
-
-!!! hint alert alert-success "Tip"
- You can gain an extra level of security by keeping PMM Server isolated from the internet, if possible.
-
-## SSL encryption
-
-You need valid SSL certificates to encrypt traffic between client and server.
-
-With our Docker, OVF and AMI images, self-signed certificates are in `/srv/nginx`.
-
-To use your own, you can either:
-
-- mount the local certificate directory to the same location, or,
-
-- copy your certificates to a running PMM Server container.
-
-### Mounting certificates
-
-For example, if your own certificates are in `/etc/pmm-certs`:
-
-```sh
-docker run -d -p 443:443 --volumes-from pmm-data \
- --name pmm-server -v /etc/pmm-certs:/srv/nginx \
- --restart always perconalab/pmm-server:3.0.0-beta
-```
-
-!!! note alert alert-primary ""
- - The certificates must be owned by root. You can do this with: `chown 0:0 /etc/pmm-certs/*`
- - The mounted certificate directory (`/etc/pmm-certs` in this example) must contain the files `certificate.crt`, `certificate.key`, `ca-certs.pem` and `dhparam.pem`.
- - For SSL encryption, the container must publish on port 443 instead of 80.
-
-### Copying certificates
-
-If PMM Server is running as a Docker image, use `docker cp` to copy certificates. This example copies certificate files from the current working directory to a running PMM Server docker container.
-
-```sh
-docker cp certificate.crt pmm-server:/srv/nginx/certificate.crt
-docker cp certificate.key pmm-server:/srv/nginx/certificate.key
-docker cp ca-certs.pem pmm-server:/srv/nginx/ca-certs.pem
-docker cp dhparam.pem pmm-server:/srv/nginx/dhparam.pem
-```
-
-### Enabling SSL when connecting PMM Client to PMM Server
-
-```sh
-pmm-admin config --server-url=https://:@
-```
-
-## Grafana HTTPS secure cookies
-
-To enable:
-
-1. Start a shell within the Docker container.
-
- ```sh
- docker exec -it pmm-server bash
- ```
-
-2. Edit `/etc/grafana/grafana.ini`.
-
-3. Enable `cookie_secure` and set the value to `true`.
-
-4. Restart Grafana.
-
- ```sh
- supervisorctl restart grafana
- ```
diff --git a/documentation/docs/images/MongoDB_Router_Summary.png b/documentation/docs/images/MongoDB_Router_Summary.png
new file mode 100644
index 0000000000..4ff397679a
Binary files /dev/null and b/documentation/docs/images/MongoDB_Router_Summary.png differ
diff --git a/documentation/docs/images/OplogGBperHour.png b/documentation/docs/images/OplogGBperHour.png
new file mode 100644
index 0000000000..99d8e31d97
Binary files /dev/null and b/documentation/docs/images/OplogGBperHour.png differ
diff --git a/documentation/docs/images/PMM_MongoDB_Cluster_Summary.jpg b/documentation/docs/images/PMM_MongoDB_Cluster_Summary.jpg
index 1c011657e6..bde48ea890 100644
Binary files a/documentation/docs/images/PMM_MongoDB_Cluster_Summary.jpg and b/documentation/docs/images/PMM_MongoDB_Cluster_Summary.jpg differ
diff --git a/documentation/docs/images/PMM_MongoDB_ReplSet_Summary.jpg b/documentation/docs/images/PMM_MongoDB_ReplSet_Summary.jpg
index ccc97f0cf2..2a986ac6a5 100644
Binary files a/documentation/docs/images/PMM_MongoDB_ReplSet_Summary.jpg and b/documentation/docs/images/PMM_MongoDB_ReplSet_Summary.jpg differ
diff --git a/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png b/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png
index 5598651fa6..bb946ae126 100644
Binary files a/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png and b/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png differ
diff --git a/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png).png b/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png).png
new file mode 100644
index 0000000000..bb946ae126
Binary files /dev/null and b/documentation/docs/images/PMM_click_to_add_favorite_dashboard.png).png differ
diff --git a/documentation/docs/images/Service_Accounts.png b/documentation/docs/images/Service_Accounts.png
index 11b5781eb8..1f2761ae16 100644
Binary files a/documentation/docs/images/Service_Accounts.png and b/documentation/docs/images/Service_Accounts.png differ
diff --git a/documentation/docs/images/edit.png b/documentation/docs/images/edit.png
deleted file mode 100644
index 3eef1d9a8c..0000000000
Binary files a/documentation/docs/images/edit.png and /dev/null differ
diff --git a/documentation/docs/images/haproxy-dashboard.png b/documentation/docs/images/haproxy-dashboard.png
deleted file mode 100644
index d0061e9b53..0000000000
Binary files a/documentation/docs/images/haproxy-dashboard.png and /dev/null differ
diff --git a/documentation/docs/images/mysql-dashboard.png b/documentation/docs/images/mysql-dashboard.png
deleted file mode 100644
index b80a35dcaf..0000000000
Binary files a/documentation/docs/images/mysql-dashboard.png and /dev/null differ
diff --git a/documentation/docs/images/postresql-dashboard.png b/documentation/docs/images/postresql-dashboard.png
deleted file mode 100644
index 52db692393..0000000000
Binary files a/documentation/docs/images/postresql-dashboard.png and /dev/null differ
diff --git a/documentation/docs/index.md b/documentation/docs/index.md
index 0d0efb0613..73cef1a116 100644
--- a/documentation/docs/index.md
+++ b/documentation/docs/index.md
@@ -1,22 +1,20 @@
# About PMM
-!!! info ""
- This is the documentation for the latest release, **PMM {{release}}** Beta. See the [Release Notes](release-notes/3.0.0_Beta.md) for details.
+:material-information: Info: This is the documentation for the latest PMM release. For details, see the [PMM {{release}} release notes](release-notes/3.0.0.md) and the [3.0.0-1 update](release-notes/3.0.0_1.md).
-Percona Monitoring and Management (PMM) is an open source database observability, monitoring, and management tool for use with MySQL, PostgreSQL, MongoDB, and the servers on which they run. It enables you to view node- to single-query performance metrics for all of your databases in a single place.
-- PMM is designed to facilitate seamless access to comprehensive performance metrics for all the nodes and queries associated with your databases via a user-friendly interface.
-- PMM is highly versatile and can be deployed behind a firewall, on the cloud, or across hybrid platforms.
-- It is a complete package comprising in-house and third-party components and tools for all your monitoring requirements.
-- To set up basic PMM, you just need to install a [server](reference/index.md#server) and a [client](reference/index.md#client) on each system you intend to monitor.
+Percona Monitoring and Management (PMM) is an open source database observability, monitoring, and management platform that gives you a single view of performance metrics - from entire database clusters down to individual queries.
+Key features:
-!!! quote ""
+- Complete visibility of MySQL, PostgreSQL, and MongoDB performance
+- Unified dashboard for all your database metrics and query analytics
+- Flexible deployment options: on-premises, cloud, or hybrid environments
+- Easy setup with just two components: Server and Client
- Would you like to see a preview of our Home page? Take a look at our free, live [demo](https://pmmdemo.percona.com/).
-
+
## :material-telescope: Discover { .title }
@@ -24,7 +22,6 @@ Discover how PMM can help you monitor your systems and make informed decisions.
[Discover PMM :material-arrow-right:](discover-pmm/features.md){ .md-button .md-button--primary }
-
## :material-progress-download: Install { .title }
diff --git a/documentation/docs/install-pmm/HA.md b/documentation/docs/install-pmm/HA.md
index 890d2a4ac8..c20fd81509 100644
--- a/documentation/docs/install-pmm/HA.md
+++ b/documentation/docs/install-pmm/HA.md
@@ -1,7 +1,7 @@
# Install PMM in HA mode
!!! caution alert alert-warning "Important"
- This feature is currently in [Technical Preview](https://docs.percona.com/percona-monitoring-and-management/details/glossary.html#technical-preview). Early adopters are advised to use this feature for testing purposes only as it is subject to change.
+ This feature is currently in [Technical Preview](../reference/glossary.md#technical-preview). Early adopters are advised to use this feature for testing purposes only as it is subject to change.
Set up PMM using Docker containers in a high-availability (HA) configuration following these instructions.
@@ -54,7 +54,7 @@ For all IP addresses, use the format `17.10.1.x`, and for all usernames and pass
| `PMM_PASSIVE_NODE_ID` | The unique ID for your first passive PMM Server node.Example: `pmm-server-passive`
| `PMM_PASSIVE2_IP` | The IP address of the instance where the second passive PMM Server is running or the desired IP address for your second passive PMM Server container within the Docker network, depending on your setup.Example: `17.10.1.7`
| `PMM_PASSIVE2_NODE_ID` | The unique ID for your second passive PMM Server node.Example: `pmm-server-passive2`
-| `PMM_DOCKER_IMAGE` | The specific PMM Server Docker image for this guide.Example: `perconalab/pmm-server:3.0.0-beta`
+| `PMM_DOCKER_IMAGE` | The specific PMM Server Docker image for this guide.Example: `percona/pmm-server:3`
??? example "Expected output"
@@ -73,7 +73,7 @@ For all IP addresses, use the format `17.10.1.x`, and for all usernames and pass
export PMM_PASSIVE_NODE_ID=pmm-server-passive
export PMM_PASSIVE2_IP=17.10.1.7
export PMM_PASSIVE2_NODE_ID=pmm-server-passive2
- export PMM_DOCKER_IMAGE=perconalab/pmm-server:3.0.0-beta
+ export PMM_DOCKER_IMAGE=percona/pmm-server:3
```
!!! note alert alert-primary "Note"
@@ -365,8 +365,8 @@ The PMM Server orchestrates the collection, storage, and visualization of metric
```sh
docker run -d \
--name ${PMM_ACTIVE_NODE_ID} \
- -p 80:80 \
- -p 443:443 \
+ -p 80:8080 \
+ -p 443:8443 \
-p 9094:9094 \
-p 9096:9096 \
-p 9094:9094/udp \
@@ -435,8 +435,8 @@ The PMM Server orchestrates the collection, storage, and visualization of metric
```sh
docker run -d \
--name ${PMM_PASSIVE_NODE_ID} \
- -p 80:80 \
- -p 443:443 \
+ -p 80:8080 \
+ -p 443:8443 \
-p 9094:9094 \
-p 9096:9096 \
-p 9094:9094/udp \
@@ -505,8 +505,8 @@ The PMM Server orchestrates the collection, storage, and visualization of metric
```sh
docker run -d \
--name ${PMM_PASSIVE2_NODE_ID} \
- -p 80:80 \
- -p 443:443 \
+ -p 80:8080 \
+ -p 443:8443 \
-p 9094:9094 \
-p 9096:9096 \
-p 9094:9094/udp \
@@ -545,11 +545,10 @@ The PMM Server orchestrates the collection, storage, and visualization of metric
### **Step 7: Running HAProxy**
-HAProxy provides high availability for your PMM setup by directing traffic to the current leader server via the `/v1/leaderHealthCheck` endpoint.
-
+HAProxy provides high availability for your PMM setup by directing traffic to the current leader server via the `/v1/leaderHealthCheck` endpoint:
+{.power-number}
-1. Pull the HAProxy Docker image.
-{.power-number}
+1. Pull the HAProxy Docker image:
```bash
docker pull haproxy:2.4.2-alpine
diff --git a/documentation/docs/install-pmm/index.md b/documentation/docs/install-pmm/index.md
index 03e56a7e0b..36737cba65 100644
--- a/documentation/docs/install-pmm/index.md
+++ b/documentation/docs/install-pmm/index.md
@@ -17,11 +17,11 @@ Install and run at least one PMM Server. Choose from the following options:
| Use | :material-thumb-up: **Benefits** | :material-thumb-down: **Drawbacks**|
|---|---|---
-| [Docker] | 1. Quick 2. Simple 3. Rootless | Additional network configuration required.
-| [Podman] | 1. Quick 2. Simple 3. Rootless | Podman installation required.
-| [Helm] (Technical Preview) | 1. Quick 2. Simple 3. Cloud-compatible 4. Rootless| Requires running a Kubernetes cluster.
-| [Virtual appliance] | 1. Easily import into Hypervisor of your choice 2. Rootless| More system resources compared to Docker footprint.
-| [Amazon AWS] | 1. Wizard-driven install. 2. Rootless| Paid, incurs infrastructure costs.
+| [Docker](../install-pmm/install-pmm-server/deployment-options/docker/index.md) | 1. Quick 2. Simple 3. Rootless | Additional network configuration required.
+| [Podman](../install-pmm/install-pmm-server/deployment-options/podman/index.md) | 1. Quick 2. Simple 3. Rootless | Podman installation required.
+| [Helm](../install-pmm/install-pmm-server/deployment-options/helm/index.md) (Technical Preview) | 1. Quick 2. Simple 3. Cloud-compatible 4. Rootless| Requires running a Kubernetes cluster.
+| [Virtual appliance](../install-pmm/install-pmm-server/deployment-options/virtual/index.md) | 1. Easily import into Hypervisor of your choice 2. Rootless| More system resources compared to Docker footprint.
+| [Amazon AWS](../install-pmm/install-pmm-server/deployment-options/aws/aws.md) | 1. Wizard-driven install. 2. Rootless| Paid, incurs infrastructure costs.
## Install PMM Client
@@ -31,15 +31,15 @@ The installation choices are:
=== "With Docker"
- - [Docker installation](client/index.md#docker) simplifies deployment across different architectures and automatically selects the appropriate image for your architecture (x86_64 or ARM64).
+ [Running PMM Client as a Docker container](../install-pmm/install-pmm-client/docker.md) simplifies deployment across different architectures and automatically selects the appropriate image for your architecture (x86_64 or ARM64).
=== "With package manager"
- - [Linux package](client/index.md#package-manager). Use `apt`, `apt-get`, `dnf`, `yum`. The package manager automatically selects the correct version for your architecture.
+ [Linux package](../install-pmm/install-pmm-client/package_manager.md): Use `apt`, `apt-get`, `dnf`, `yum`. The package manager automatically selects the correct version for your architecture.
=== "With binary package"
- - [Binary package](client/index.md#binary-package): Download the appropriate `.tar.gz` file for your architecture (x86_64 or ARM64).
+ [Binary package](../install-pmm/install-pmm-client/binary_package.md): Download the appropriate `.tar.gz` file for your architecture (x86_64 or ARM64).
!!! hint alert "Tips"
@@ -51,33 +51,14 @@ On each PMM Client instance, configure the nodes and services you want to monito
??? info "Which services you can monitor?"
- - [MySQL] (and variants: Percona Server for MySQL, Percona XtraDB Cluster, MariaDB);
- - [MongoDB];
- - [PostgreSQL];
- - [ProxySQL];
- - [Amazon RDS];
- - [Microsoft Azure];
- - [Google Cloud Platform] (MySQL and PostgreSQL);
- - [Linux];
- - [External services];
- - [HAProxy];
- - [Remote instances].
-
-[MySQL]: client/mysql.md
-[MongoDB]: client/mongodb.md
-[PostgreSQL]: client/postgresql.md
-[ProxySQL]: client/proxysql.md
-[Amazon RDS]: client/aws.md
-[Microsoft Azure]: client/azure.md
-[Google Cloud Platform]: client/google.md
-[Linux]: client/linux.md
-[External services]: client/external.md
-[HAProxy]: client/haproxy.md
-[Remote instances]: client/remote.md
-[dashboards]: ../details/dashboards/
-[Docker]: ../install-pmm/install-pmm-server/baremetal/docker/index.md
-[Podman]: ../install-pmm/install-pmm-server/baremetal/podman/index.md
-[Helm]: ../install-pmm/install-pmm-server/baremetal/helm/index.md
-[virtual appliance]: ../install-pmm/install-pmm-server/baremetal/virtual/index.md
-[Amazon AWS]: ../install-pmm/install-pmm-server/aws/aws.md
-[easy install]: ../install-pmm/install-pmm-server/baremetal/easy-install.md
+ - [MySQL](../install-pmm/install-pmm-client/connect-database/mysql.md) and variants: Percona Server for MySQL, Percona XtraDB Cluster, MariaDB
+ - [MongoDB](../install-pmm/install-pmm-client/connect-database/mongodb.md)
+ - [PostgreSQL](../install-pmm/install-pmm-client/connect-database/postgresql.md)
+ - [ProxySQL](../install-pmm/install-pmm-client/connect-database/proxysql.md)
+ - [Amazon RDS](../install-pmm/install-pmm-client/connect-database/aws.md)
+ - [Microsoft Azure](../install-pmm/install-pmm-client/connect-database/azure.md)
+ - [Google Cloud Platform](../install-pmm/install-pmm-client/connect-database/google.md)
+ - [Linux](../install-pmm/install-pmm-client/connect-database/linux.md)
+ - [External services](../install-pmm/install-pmm-client/connect-database/external.md)
+ - [HAProxy](../install-pmm/install-pmm-client/connect-database/haproxy.md)
+ - [Remote instances](../install-pmm/install-pmm-client/connect-database/remote.md)
diff --git a/documentation/docs/install-pmm/install-pmm-client/binary_package.md b/documentation/docs/install-pmm/install-pmm-client/binary_package.md
index a0fb011694..df706d40a1 100644
--- a/documentation/docs/install-pmm/install-pmm-client/binary_package.md
+++ b/documentation/docs/install-pmm/install-pmm-client/binary_package.md
@@ -10,24 +10,24 @@ Choose your installation instructions based on whether you have root permissions
=== "For x86_64 (AMD64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz
```
=== "For ARM64 (aarch64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz
```
2. Download the corresponding checksum file:
=== "For x86_64 (AMD64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz.sha256sum
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz.sha256sum
```
=== "For ARM64 (aarch64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz.sha256sum
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz.sha256sum
```
3. Verify the download:
@@ -99,24 +99,24 @@ Choose your installation instructions based on whether you have root permissions
=== "For x86_64 (AMD64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz
```
=== "For ARM64 (aarch64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz
```
2. Download the corresponding checksum file:
=== "For x86_64 (AMD64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz.sha256sum
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-x86_64.tar.gz.sha256sum
```
=== "For ARM64 (aarch64)"
```sh
- wget https://downloads.percona.com/downloads/pmm/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz.sha256sum
+ wget https://downloads.percona.com/downloads/pmm3/{{release}}/binary/tarball/pmm-client-{{release}}-aarch64.tar.gz.sha256sum
```
3. Verify the download:
@@ -186,4 +186,4 @@ Choose your installation instructions based on whether you have root permissions
- Extract it.
- Run `./install_tarball script `with the `-u` flag.
-The configuration file will be overwritten if you do not provide the -`u` flag while the pmm-agent is updated.
\ No newline at end of file
+The configuration file will be overwritten if you do not provide the `-u` flag when the pmm-agent is updated.
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/aws.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/aws.md
index ed119c1d76..04be4228bd 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/aws.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/aws.md
@@ -153,7 +153,7 @@ GRANT SELECT, UPDATE, DELETE, DROP ON performance_schema.* TO 'pmm'@'%';
It may take longer for PMM to discover Amazon RDS instances in the `creating` state. You must wait a bit longer until PMM discovers these instances.
-The preferred method of adding an Amazon RDS database instance to PMM is via the **Configuration** → {{icon.inventory}} **PMM Inventory** → {{icon.addinstance}} **Add Instance** menu option.
+The preferred method of adding an Amazon RDS database instance to PMM is via the :material-cog: **PMM Configuration > PMM Inventory > Add Instance** menu option.
This method supports Amazon RDS database instances that use Amazon Aurora, MySQL, or MariaDB engines, as well as any remote PostgreSQL, ProxySQL, MySQL and MongoDB instances.
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/external.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/external.md
index f4ebb44055..2c2403c037 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/external.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/external.md
@@ -29,11 +29,11 @@ Here are the differences between `external` and `external-serverless` types.
Connection schema of external exporter:
-
+
Connection schema of external serverless exporter:
-
+
## Add a service not supported by PMM
@@ -43,7 +43,7 @@ From this point, PMM will collect and store available metrics.
To browse and visualize collected metrics as a first step, we can look at the Advanced Data Exploration dashboard and select informative services and metrics.
-
+
Another way is to create a [new Grafana Dashboard to PMM as needed](https://grafana.com/docs/grafana/latest/best-practices/best-practices-for-creating-dashboards/).
@@ -80,14 +80,14 @@ To add an external service via PMM UI:
1. In the PMM web interface, go to **PMM Configuration > PMM Inventory > Add Service > External Service**.
- 
+ 
2. Fill in the form and set the external service endpoint:
- - manually OR:
+ - manually OR:
- 
+ 
-- by parsing required data from a URL string. In this case you only need to pass a valid URL:
+ - by parsing required data from a URL string. In this case you only need to pass a valid URL:
- 
+ 
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/google.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/google.md
index cb76639f85..5c87ee2a88 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/google.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/google.md
@@ -49,8 +49,6 @@ To add a PostgreSQL instance on Google Cloud:
6. Fill in the details for the remote PostgreSQL instance and make sure to **PG Stat Statements** option under **Stat tracking options**.
- 
-
7. Click **Add service**.
8. Go to **Dashboards** and check for values in the **PostgreSQL Instances Overview** and **Query Analytics**.
@@ -93,7 +91,7 @@ To add a MySQL instance:
-credential_file=/path/to/credential-file.json
```
-6. Add instance.
+6. Add instance:
```sh
pmm-admin add mysql --host=127.0.0.1 --port=3306 \
@@ -112,7 +110,7 @@ To add a PostgreSQL instance:
3. [Enable Admin API][GOOGLE_CLOUD_ADMIN_API] and download the JSON credential file.
-4. Run Cloud SQL Proxy.
+4. Run Cloud SQL Proxy:
```sh
./cloud_sql_proxy -instances=example-project-NNNN:us-central1:pg-for-pmm=tcp:5432 \
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/index.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/index.md
index 2b5877f79d..51369934ef 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/index.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/index.md
@@ -8,7 +8,7 @@ You must configure and add database/service according to the service type.
- [ProxySQL](proxysql.md)
- [Amazon RDS](aws.md)
- [Microsoft Azure](azure.md)
-- [Google Cloud Platform](google) (MySQL and PostgreSQL)
+- [Google Cloud Platform](google.md) (MySQL and PostgreSQL)
- [Linux](linux.md)
- [External services](external.md)
- [HAProxy](haproxy.md)
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/mongodb.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/mongodb.md
index 840ee009fb..de15f4bae9 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/mongodb.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/mongodb.md
@@ -88,7 +88,7 @@ Create or update a user with the minimum required privileges for monitoring by a
"roles": [
{ "db": "admin", "role": "explainRole" },
{ "db": "local", "role": "read" },
- { "db": "admin". "role": "clusterMonitor" }
+ { "db": "admin", "role": "clusterMonitor" }
]
})
```
@@ -202,7 +202,7 @@ Use `pmm-admin` to add the database server as a service using one of these examp
When successful, PMM Client will print `MongoDB Service added` with the service's ID and name. Use the `--environment` and `-custom-labels` options to set tags for the service to help identify them.
!!! hint alert alert-success "Tips"
- - When adding nodes to a sharded cluster, ensure to add each node separately using the `--cluster mycluster` option. This allows the [MongoDB Cluster Summary](../../details/dashboards/dashboard-mongodb-cluster-summary.md) dashboard to populate correctly.
+    - When adding nodes to a sharded cluster, make sure to add each node separately using the `--cluster mycluster` option (see the example below). This allows the [MongoDB Cluster Summary](../../../reference/dashboards/dashboard-mongodb-cluster-summary.md) dashboard to populate correctly.
- You can also use the `--replication-set` option to specify a replication set, altough they are automatically detected. For instance, you can use `--replication-set config` for your config servers; `--replication-set rs1` for your servers in the first replica set, `--replication-set rs2` for your servers in the second replica set, and so on.
- When running mongos routers in containers, specify the `diagnosticDataCollectionDirectoryPath` to ensure that pmm-agent can properly capture mongos metrics. For example: `mongos --setParameter diagnosticDataCollectionDirectoryPath=/var/log/mongo/mongos.diagnostic.data/`
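+
+    For the sharded-cluster tip, a minimal sketch of adding one replica set member (the service name, credentials, and address are placeholders):
+
+    ```sh
+    pmm-admin add mongodb --cluster=mycluster --replication-set=rs1 \
+      --username=pmm_mongodb --password=<password> mongo-rs1-1 127.0.0.1:27017
+    ```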
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/mysql.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/mysql.md
index f0fe7f39ac..fd32a47b07 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/mysql.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/mysql.md
@@ -27,19 +27,28 @@ Check that:
## Create a database account for PMM
-It is good practice to use a non-superuser account to connect PMM Client to the monitored database instance. This example creates a database user with name `pmm`, password `pass`, and the necessary permissions.
+For security, connect PMM Client to your database using a dedicated monitoring user with limited permissions. This example creates a `pmm` user account with just enough access to collect monitoring data, without full administrative privileges.
+
+!!! warning "Password security"
+    - Use a strong, unique password for the PMM database user:
+        - At least 12 characters long
+        - Mix of uppercase and lowercase letters
+        - Include numbers and special characters
+        - Avoid common words or patterns
+    - Never use default, test, or example passwords in production
+
=== "On MySQL 5.7"
```sql
- CREATE USER 'pmm'@'127.0.0.1' IDENTIFIED BY 'pass' WITH MAX_USER_CONNECTIONS 10;
+    CREATE USER 'pmm'@'127.0.0.1' IDENTIFIED BY '<your_secure_password>' WITH MAX_USER_CONNECTIONS 10;
GRANT SELECT, PROCESS, REPLICATION CLIENT, RELOAD ON *.* TO 'pmm'@'localhost';
```
=== "On MySQL 8.0"
```sql
- CREATE USER 'pmm'@'localhost' IDENTIFIED BY 'pass' WITH MAX_USER_CONNECTIONS 10;
+    CREATE USER 'pmm'@'localhost' IDENTIFIED BY '<your_secure_password>' WITH MAX_USER_CONNECTIONS 10;
GRANT SELECT, PROCESS, REPLICATION CLIENT, RELOAD, BACKUP_ADMIN ON *.* TO 'pmm'@'localhost';
```
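+
+    To double-check that the monitoring account has only the intended privileges, you can list its grants (a minimal sketch, assuming the `localhost` account created above):
+
+    ```sh
+    mysql -u root -p -e "SHOW GRANTS FOR 'pmm'@'localhost';"
+    ```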
@@ -93,7 +102,7 @@ The *slow query log* records the details of queries that take more than a certai
??? info "Examples"
- - Configuration file.
+ - Configuration file:
```ini
slow_query_log=ON
@@ -103,7 +112,7 @@ The *slow query log* records the details of queries that take more than a certai
log_slow_slave_statements=ON
```
- - Session.
+ - Session:
```sql
SET GLOBAL slow_query_log = 1;
@@ -137,7 +146,7 @@ Some MySQL-based database servers support extended slow query log variables.
??? info "Examples"
- - Configuration file (Percona Server for MySQL, Percona XtraDB Cluster).
+ - Configuration file (Percona Server for MySQL, Percona XtraDB Cluster):
```sh
log_slow_rate_limit=100
@@ -147,13 +156,13 @@ Some MySQL-based database servers support extended slow query log variables.
slow_query_log_use_global_control='all'
```
- - Configuration file (MariaDB).
+ - Configuration file (MariaDB):
```sh
log_slow_rate_limit=100
```
- - Session (Percona Server for MySQL, Percona XtraDB Cluster).
+ - Session (Percona Server for MySQL, Percona XtraDB Cluster):
```sh
SET GLOBAL log_slow_rate_limit = 100;
@@ -167,7 +176,7 @@ Some MySQL-based database servers support extended slow query log variables.
Slow query log files can grow quickly and must be managed.
-When adding a service with the command line use the `pmm-admin` option `--size-slow-logs` to set at what size the slow query log file is rotated. (The size is specified as a number with a suffix. See [`pmm-admin add mysql`](../../details/commands/pmm-admin.md#mysql).)
+When adding a service with the command line use the `pmm-admin` option `--size-slow-logs` to set at what size the slow query log file is rotated. (The size is specified as a number with a suffix. See [`pmm-admin add mysql`](../../../use/commands/pmm-admin.md#mysql).)
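+
+For example, a minimal sketch that rotates the slow query log once it reaches 1 GiB (the service name, credentials, and address are placeholders):
+
+```sh
+pmm-admin add mysql --query-source=slowlog --size-slow-logs=1GiB \
+  --username=pmm --password=<password> mysql-slowlog 127.0.0.1:3306
+```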
When the limit is reached, PMM Client will:
@@ -191,9 +200,9 @@ This section covers how to configure a MySQL-based database server to use *Perfo
| Percona XtraDB Cluster | 5.6, 5.7, 8.0
| MariaDB | [10.3+][mariadb_perfschema_instr_table]
-PMM's [*MySQL Performance Schema Details* dashboard](../../../use/dashboards/dashboard-mysql-performance-schema-details.md) charts the various [`performance_schema`][performance-schema-startup-configuration] metrics.
+PMM's [*MySQL Performance Schema Details* dashboard](../../../reference/dashboards/dashboard-mysql-performance-schema-details.md) charts the various [`performance_schema`][performance-schema-startup-configuration] metrics.
-To use *Performance Schema*, set these variables.
+To use **Performance Schema**, set these variables:
| Variable | Value | Description
|--------------------------------------------------------------------------------------------|--------------------|---------------------------------------------------------------------------------
@@ -204,7 +213,7 @@ To use *Performance Schema*, set these variables.
??? info "Examples"
- - Configuration file.
+ - Configuration file:
```ini
performance_schema=ON
@@ -213,7 +222,7 @@ To use *Performance Schema*, set these variables.
innodb_monitor_enable=all
```
- - Session.
+ - Session:
(`performance_schema` cannot be set in a session and must be set at server start-up.)
@@ -267,7 +276,7 @@ Set this variable to see query time distribution charts.
|-------------------------------------------------------------|-------|-----------------------------------------------------------------------------------
| [`query_response_time_stats`][ps_query_response_time_stats] | ON | Report *query response time distributions*. (Requires plugin installation. See below.)
-- Configuration file.
+- Configuration file:
```ini
query_response_time_stats=ON
@@ -276,6 +285,8 @@ Set this variable to see query time distribution charts.
You must also install the plugins.
- **Session**
+ Before installing the plugins, ensure you have the necessary plugin files and run these commands in your MySQL session:
+ {.power-number}
1. Check that `/usr/lib/mysql/plugin/query_response_time.so` exists.
2. Install the plugins and activate.
@@ -338,15 +349,17 @@ User activity, individual table and index access details are shown on the [MySQL
## Add service
There are two ways to install PMM Client for monitoring your MySQL database:
+{.power-number}
-1. [Local installation](#Install-PMM-Client locally): Installs PMM Client directly on the database node, collecting both database and OS/host metrics. This option enables more effective comparison and problem identification.
-2. [Remote instance](#Install-PMM-Client-as-a-remote-instance): Use when local installation isn't possible. This method doesn't provide OS/Node metrics in PMM.
+1. [Local installation](#install-pmm-client-locally): Installs PMM Client directly on the database node, collecting both database and OS/host metrics. This option enables more effective comparison and problem identification.
+2. [Remote instance](#install-pmm-client-as-a-remote-instance): Use when local installation isn't possible. This method doesn't provide OS/Node metrics in PMM.
### Install PMM Client locally
Add the MySQL server as a service using one of the following example commands.
Upon successful addition, PMM Client will display "MySQL Service added" along with the service's ID and name.
+{.power-number}
1. Select **PMM Configuration > PMM Inventory > Add Service > MySQL**.
@@ -354,9 +367,9 @@ Upon successful addition, PMM Client will display "MySQL Service added" along wi
3. Click **Add service**.
-
+ 
-If your MySQL instance is configured to use TLS, click on the *Use TLS for database connections* check box and fill in your TLS certificates and key.
+If your MySQL instance is configured to use TLS, select the **Use TLS for database connections** check box and fill in your TLS certificates and key:

@@ -364,17 +377,18 @@ If your MySQL instance is configured to use TLS, click on the *Use TLS for datab
Add the database server as a service using one of these example commands. If successful, PMM Client will print `MySQL Service added` with the service's ID and name. Use the `--environment` and `-custom-labels` options to set tags for the service to help identify them.
-??? info "Examples"
#### TLS connection
```sh
-pmm-admin add mysql --environment=test --custom-labels='source=slowlog' --username=root --password=password --query-source=slowlog MySQLSlowLog localhost:3306
+pmm-admin add mysql --environment=test --custom-labels='source=slowlog' --username=root --password=password --tls --tls-skip-verify --tls-ca=pathtoca.pem --tls-cert=pathtocert.pem --tls-key=pathtocertkey.pem --query-source=slowlog MySQLSlowLog localhost:3306
```
### Install PMM Client as a remote instance
+If you need to monitor a MySQL instance from a different server where PMM Client is installed, follow these steps in the PMM web interface:
+{.power-number}
-1. Select ** PMM Configuration > PMM Inventory > {{icon.addinstance}} Add Service**.
+1. Select :material-cog: **PMM Configuration > PMM Inventory > :material-plus-circle-outline: Add Service**.
2. Choose **MySQL > Add a remote instance**.
@@ -382,16 +396,17 @@ pmm-admin add mysql --environment=test --custom-labels='source=slowlog' --usern
4. Click **Add service**.
-
+ 
#### For MySQL instances using TLS
If your MySQL instance is configured to use TLS:
+{.power-number}
1. Click on the **Use TLS for database connections** check box.
2. Fill in your TLS certificates and key.
-
+ 
## Check the service
@@ -442,8 +457,8 @@ Open the [*PXC/Galera Cluster Summary* dashboard][DASH_PXCGALERACLUSTER].
- [Percona Blog -- Impact of logging on MySQL's performance][BLOG_LOGGING]
- [Percona Blog -- Running Custom MySQL Queries in Percona Monitoring and Management][BLOG_CUSTOM_QUERIES_MYSQL]
-[DASH_MYSQLUSERDETAILS]: ../../../use/dashboards/dashboard-mysql-user-details.md
-[DASH_PXCGALERACLUSTER]: ../../../use/dashboards/dashboard-pxc-galera-cluster-summary.md
+[DASH_MYSQLUSERDETAILS]: ../../../reference/dashboards/dashboard-mysql-user-details.md
+[DASH_PXCGALERACLUSTER]: ../../../reference/dashboards/dashboard-pxc-galera-cluster-summary.md
[LOGROTATE]: https://linux.die.net/man/8/logrotate
[PERCONA_SERVER_MYSQL]: https://www.percona.com/software/mysql-database/percona-server
[PERCONA_XTRADB_CLUSTER]: https://www.percona.com/software/mysql-database/percona-xtradb-cluster
@@ -455,19 +470,19 @@ Open the [*PXC/Galera Cluster Summary* dashboard][DASH_PXCGALERACLUSTER].
[BLOG_LOG_ROTATION]: https://www.percona.com/blog/2013/04/18/rotating-mysql-slow-logs-safely/
[BLOG_PS_VS_SLOW]: https://www.percona.com/blog/2014/02/11/performance_schema-vs-slow-query-log/
[PS_FEATURES_REMOVED]: https://www.percona.com/doc/percona-server/LATEST/changed_in_version.html
-[ps_slow_query_ext]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html
+[ps_slow_query_ext]: https://docs.percona.com/percona-server/latest/slow-extended.html
[ps_query_response_time_stats]: https://www.percona.com/doc/percona-server/5.7/diagnostics/response_time_distribution.html#usage
-[ps_userstats]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/user_stats.html
+[ps_userstats]: https://docs.percona.com/percona-server/latest/user-stats.html
[mariadb_slow_query_log]: https://mariadb.com/kb/en/slow-query-log-overview/
[mariadb_slow_query_ext]: https://mariadb.com/kb/en/slow-query-log-extended-statistics/
[mariadb_query_response_time]: https://mariadb.com/kb/en/query-response-time-plugin/
[mariadb_perfschema_instr_table]: https://mariadb.com/kb/en/performance-schema-setup_instruments-table/
[mariadb_userstats]: https://mariadb.com/kb/en/user-statistics/
-[log_slow_rate_limit]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html#log_slow_rate_limit
-[log_slow_rate_type]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html#log_slow_rate_type
-[log_slow_verbosity]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html#log_slow_verbosity
-[slow_query_log_always_write_time]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html#slow_query_log_always_write_time
-[slow_query_log_use_global_control]: https://www.percona.com/doc/percona-server/LATEST/diagnostics/slow_extended.html#slow_query_log_use_global_control
+[log_slow_rate_limit]: https://docs.percona.com/percona-server/latest/slow-extended.html#log_slow_rate_limit
+[log_slow_rate_type]: https://docs.percona.com/percona-server/latest/slow-extended.html#log_slow_rate_type
+[log_slow_verbosity]: https://docs.percona.com/percona-server/latest/slow-extended.html#log_slow_verbosity
+[slow_query_log_always_write_time]: https://docs.percona.com/percona-server/latest/slow-extended.html#slow_query_log_always_write_time
+[slow_query_log_use_global_control]: https://docs.percona.com/percona-server/latest/slow-extended.html#slow_query_log_use_global_control
[sysvar_innodb_monitor_enable]: https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_monitor_enable
[sysvar_log_output]: https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_log_output
[sysvar_log_slow_admin_statements]: https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_log_slow_admin_statements
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/postgresql.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/postgresql.md
index fa2f8743c2..a9c4dcc7dc 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/postgresql.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/postgresql.md
@@ -88,7 +88,7 @@ Here are the benefits and drawbacks of each.
| `pg_stat_statements` | 1. Part of official `postgresql-contrib` package. | 1. No aggregated statistics or histograms. 2. No Query Examples.
| `pg_stat_monitor` | 1. Builds on `pg_stat_monitor` features. 2. Bucket-based aggregation. |
-For a more detailed comparison of extensions, follow [pg_stat monitor User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md#usage)
+For a more detailed comparison of the extensions, see the [pg_stat_monitor User Guide](https://docs.percona.com/pg-stat-monitor/user_guide.html).
!!! note alert alert-primary "Bucket-based data aggregation"
`pg_stat_monitor` collects statistics and aggregates data in a data collection unit called a *bucket*. These are linked together to form a *bucket chain*.
@@ -126,6 +126,7 @@ For a more detailed comparison of extensions, follow [pg_stat monitor User Guide
#### Configure
To configure the extension:
+{.power-number}
1. Add these lines to your `postgresql.conf` file:
@@ -373,7 +374,7 @@ The `pmm-admin` flag limits Auto-discovery:
To check the service from the PMM UI:
{.power-number}
-1. Select **Configuration** → {{icon.inventory}} **Inventory**.
+1. Select :material-cog: **Configuration** → :material-clipboard-list-outline: **Inventory**.
2. In the **Services** tab, verify the **Service name**, **Address** and any other relevant details.
3. In the **Options** column, expand the **Details** section and check that the Agents are using the desired data source.
diff --git a/documentation/docs/install-pmm/install-pmm-client/connect-database/remove-services/index.md b/documentation/docs/install-pmm/install-pmm-client/connect-database/remove-services/index.md
index e414c53f66..ce326bfb76 100644
--- a/documentation/docs/install-pmm/install-pmm-client/connect-database/remove-services/index.md
+++ b/documentation/docs/install-pmm/install-pmm-client/connect-database/remove-services/index.md
@@ -11,4 +11,4 @@ pmm-admin remove
!!! seealso alert alert-info "See also"
- [Percona release](https://www.percona.com/doc/percona-repo-config/percona-release.html)
- - [PMM Client architecture](../../../../reference/index.md#pmm-client_1)
+ - [PMM Client architecture](../../../../reference/index.md#pmm-client1)
diff --git a/documentation/docs/install-pmm/install-pmm-client/docker.md b/documentation/docs/install-pmm/install-pmm-client/docker.md
index e571c51349..f219803f12 100644
--- a/documentation/docs/install-pmm/install-pmm-client/docker.md
+++ b/documentation/docs/install-pmm/install-pmm-client/docker.md
@@ -1,4 +1,4 @@
-# Run PMM client as a Docker container
+# Run PMM Client as a Docker container
The [PMM Client Docker image](https://hub.docker.com/r/percona/pmm-client/tags/) is a convenient way to run PMM Client as a preconfigured [Docker](https://docs.docker.com/get-docker/) container.
@@ -8,58 +8,58 @@ The PMM Client Docker image is available for both x86_64 and ARM64 architectures
1. Pull the PMM Client Docker image:
```sh
- docker pull \
- percona/pmm-client:2
+ docker pull percona/pmm-client:3
```
-2. Use the image as a template to create a persistent data store that preserves local data when the image is updated:
+2. Create a Docker volume to store persistent data:
+    ```sh
+    docker volume create pmm-client-data
+    ```
- ```sh
- docker create \
- --volume /srv \
- --name pmm-client-data \
- percona/pmm-client:2 /bin/true
- ```
-
-3. Run the container to start [pmm-agent](../../use/commands/pmm-agent.md) in setup mode. Set `X.X.X.X` to the IP address of your PMM Server. (Do not use the `docker --detach` option as PMM agent only logs to the console.)
+3. Execute the following command to start the [pmm-agent](../../use/commands/pmm-agent.md) in Setup mode. Replace `X.X.X.X` with the IP address of your PMM Server:
```sh
- PMM_SERVER=X.X.X.X:443
- docker run \
- --rm \
- --name pmm-client \
- -e PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER} \
- -e PMM_AGENT_SERVER_USERNAME=admin \
- -e PMM_AGENT_SERVER_PASSWORD=admin \
- -e PMM_AGENT_SERVER_INSECURE_TLS=1 \
- -e PMM_AGENT_SETUP=1 \
- -e PMM_AGENT_CONFIG_FILE=config/pmm-agent.yaml \
- --volumes-from pmm-client-data \
- percona/pmm-client:2
+ PMM_SERVER=X.X.X.X:443
+ docker run \
+ --rm \
+ --name pmm-client \
+ -e PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER} \
+ -e PMM_AGENT_SERVER_USERNAME=admin \
+ -e PMM_AGENT_SERVER_PASSWORD=admin \
+ -e PMM_AGENT_SERVER_INSECURE_TLS=1 \
+ -e PMM_AGENT_SETUP=1 \
+ -e PMM_AGENT_CONFIG_FILE=config/pmm-agent.yaml \
+ -v pmm-client-data:/srv \
+ percona/pmm-client:3
```
-!!! hint alert-success "Tips"
- You can find a complete list of compatible environment variables [here](../../use/commands/pmm-agent.md).
+    !!! hint alert alert-success "Important"
+        - Do not use the `--detach` Docker option, as pmm-agent logs output directly to the console and detaching the container will prevent you from seeing these logs.
+ - You can find a complete list of compatible environment variables [here](../../use/commands/pmm-agent.md).
-3. Check status.
+4. Check status:
```sh
- docker exec pmm-client \
- pmm-admin status
+ docker exec -t pmm-client pmm-admin status
```
In the PMM user interface you will also see an increase in the number of monitored nodes.
You can now add services with [`pmm-admin`](../../use/commands/pmm-admin.md) by prefixing commands with `docker exec pmm-client`.
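+
+For example, a minimal sketch of adding a MySQL service through the containerized client (the service name and credentials are placeholders, and the address must be reachable from inside the container):
+
+```sh
+docker exec pmm-client pmm-admin add mysql --query-source=slowlog \
+  --username=pmm --password=<password> mysql-prod your-mysql-host:3306
+```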
-!!! hint alert alert-success "Tips"
- - Adjust host firewall and routing rules to allow Docker communications. ([Read more](../../troubleshoot/checklist.md)
- - For help: `docker run --rm percona/pmm-client:2 --help`
+!!! hint alert alert-success "Tips for Docker configuration"
+
+    - Firewall and routing rules: Ensure your host's firewall and routing rules are configured to allow Docker communications. This is crucial for Docker containers to communicate properly. For more details, see the [troubleshooting checklist](../../troubleshoot/checklist.md).
+
+ - Help command: If you need assistance with PMM Client, you can run the following command to display help information: `docker run --rm percona/pmm-client:3 --help`.
+
+To view your monitored node in the PMM web interface:
+{.power-number}
+
+ 1. Go to the main menu and select **Operating System (OS) > Overview**.
- In the GUI:
+ 2. In the **Node Names** drop-down menu, select the node you recently registered.
- - Select {{icon.dashboards}} *PMM Dashboards* → {{icon.node}} *System (Node)* → {{icon.node}} *Node Overview*.
- - In the *Node Names* menu, select the new node.
- - Change the time range to see data.
+ 3. Modify the time range to view the relevant data for your selected node.
!!! danger alert alert-danger "Danger"
`pmm-agent.yaml` contains sensitive credentials and should not be shared.
diff --git a/documentation/docs/install-pmm/install-pmm-client/index.md b/documentation/docs/install-pmm/install-pmm-client/index.md
index 6d61e7b8a1..cf0c11dc50 100644
--- a/documentation/docs/install-pmm/install-pmm-client/index.md
+++ b/documentation/docs/install-pmm/install-pmm-client/index.md
@@ -1,4 +1,4 @@
-# About PMM client installation
+# About PMM Client installation
There are different ways to install PMM Client on a node and register it with PMM Server. Choose from:
@@ -16,7 +16,7 @@ When you have installed PMM Client, you must:
- [Register the node with PMM Server](../register-client-node/index.md).
- [Configure and add services according to type](connect-database/index.md).
-If you need to, you can [unregister](../../uninstall-pmm/unregister_client.md), [remove services](..//..//uninstall-pmm/remove_services.md) or [remove PMM Client](..//..//uninstall-pmm/uninstall_docker.md).
+If you need to, you can [unregister](../../uninstall-pmm/unregister_client.md), [remove services](../install-pmm-client/connect-database/remove-services/index.md) or [remove PMM Client](../../uninstall-pmm/uninstall_docker.md).
---
diff --git a/documentation/docs/install-pmm/install-pmm-client/package_manager.md b/documentation/docs/install-pmm/install-pmm-client/package_manager.md
index 6e8fcb1eb4..a72f9c7a09 100644
--- a/documentation/docs/install-pmm/install-pmm-client/package_manager.md
+++ b/documentation/docs/install-pmm/install-pmm-client/package_manager.md
@@ -1,102 +1,93 @@
-# Install PMM client with Percona repositories
-
+# Install PMM Client with Percona repositories
PMM Client supports both x86_64 and ARM64 architectures.
+
On Debian or Red Hat Linux, install `percona-release` and use a Linux package manager (`apt`/`dnf`) to install PMM Client.
The package manager will automatically select the appropriate version for your system architecture.
!!! hint alert alert-success "Tip"
If you have used `percona-release` before, disable and re-enable the repository:
-
```sh
percona-release disable all
- percona-release percona-release enable pmm3-client
+ percona-release enable pmm3-client
```
=== "Debian-based"
- To install PMM client:
+ To install PMM Client:
{.power-number}
- 1. Configure repositories.
-
+ 1. Configure repositories:
```sh
wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb
dpkg -i percona-release_latest.generic_all.deb
```
-
- 2. Install the PMM Client package.
-
+ 2. Enable pmm3-client repository:
+ ```sh
+ percona-release enable pmm3-client
+ ```
+ 3. Install the PMM Client package:
!!! hint "Root permissions"
```sh
apt update
apt install -y pmm-client
```
-
- 3. Check.
-
+ 4. Verify the installation by checking the PMM Client version:
```sh
pmm-admin --version
```
-
- 4. [Register the node](..//register-client-node/index.md).
+    5. [Register the node](../register-client-node/index.md).
=== "Red Hat-based"
+ To install PMM Client:
+ {.power-number}
- 1. Configure repositories.
-
+ 1. Configure repositories:
```sh
yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm
```
-
- 2. Install the PMM Client package.
-
+ 2. Enable pmm3-client repository:
+ ```sh
+ percona-release enable pmm3-client
+ ```
+ 3. Install the PMM Client package:
```sh
yum install -y pmm-client
```
-
- 3. Check.
-
+ 4. Verify the installation by checking the PMM Client version:
```sh
pmm-admin --version
```
-
- 4. [Register the node](../register-client-node/index.md).
+ 5. [Register the node](../register-client-node/index.md).
## Package manager -- manual download
To manually download package manager:
{.power-number}
-1. Visit the [Percona Monitoring and Management 3 download](https://www.percona.com/downloads//) page.
-2. Under *Version:*, select the one you want (usually the latest).
-3. Under *Software:*, select the item matching your software platform and architecture (x86_64 or ARM64).
+1. Visit the [Percona Monitoring and Management 3 download](https://www.percona.com/downloads/) page.
+2. Under **Select Product Version**, select the version you want (usually the latest).
+3. Under **Select Platform**, select the item matching your software platform and architecture (x86_64 or ARM64).
4. Click to download the package file:
-
- For Debian, Ubuntu: `.deb`
- For Red Hat, CentOS, Oracle Linux: `.rpm`
-
(Alternatively, copy the link and use `wget` to download it.)
=== "Debian-based"
-
```sh
dpkg -i *.deb
```
=== "Red Hat-based"
-
```sh
dnf localinstall *.rpm
```
-??? info "Download page links"
- Here are the download page links for each supported platform.
+??? info "Download page links"
+ Here are the download page links for each supported platform:
- - [Debian 9 (Stretch)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/stretch/)
- - [Debian 10 (Buster)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/buster/)
- - [Debian 11 (Bullseye)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/bullseye/)
- - [Red Hat/CentOS/Oracle 7](https://www.percona.com/downloads/pmm3/{{release}}/binary/redhat/7/)
- [Red Hat/CentOS/Oracle 8](https://www.percona.com/downloads/pmm3/{{release}}/binary/redhat/8/)
- - [Ubuntu 18.04 (Bionic Beaver)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/bionic/)
+ - [Red Hat/CentOS/Oracle 9](https://www.percona.com/downloads/pmm3/{{release}}/binary/redhat/9/)
+ - [Debian 11 (Bullseye)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/bullseye/)
+ - [Debian 12 (Bookworm)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/bookworm/)
- [Ubuntu 20.04 (Focal Fossa)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/focal/)
- - [Ubuntu 22.04 (Jammy Jellyfish)](https://www.percona.com/downloads//{{release}}/binary/debian/jammy/)
-
+ - [Ubuntu 22.04 (Jammy Jellyfish)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/jammy/)
+ - [Ubuntu 24.04 (Noble Numbat)](https://www.percona.com/downloads/pmm3/{{release}}/binary/debian/noble/)
diff --git a/documentation/docs/install-pmm/install-pmm-client/prerequisites.md b/documentation/docs/install-pmm/install-pmm-client/prerequisites.md
index ddff288d9c..2786d637ed 100644
--- a/documentation/docs/install-pmm/install-pmm-client/prerequisites.md
+++ b/documentation/docs/install-pmm/install-pmm-client/prerequisites.md
@@ -1,16 +1,12 @@
-# Prerequisites
+# Prerequisites for PMM Client
-The prerequisites to install PMM client are:
+Before installing PMM Client, ensure you meet the following requirements:
{.power-number}
-1. [Install PMM Server](../install-pmm-server/index.md) with a known IP address accessible from the client node.
-
-2. Check that you have superuser (root) access on the client host.
-
-3. Check that you have superuser access to any database servers that you want to monitor.
-
-4. Install the following Linux packages:
-
+1. [Install PMM Server](../install-pmm-server/index.md) and note the server's IP address - it must be accessible from the Client node.
+2. Check that you have superuser (`root`) access on the client host.
+3. Check that you have superuser access to all database servers you plan to monitor.
+4. Verify you have these Linux packages installed (a quick check is sketched after this list):
* `curl`
* `gnupg`
@@ -19,6 +15,5 @@ The prerequisites to install PMM client are:
* `wget`
-5. If you use it, install [Docker](https://docs.docker.com/get-docker/).
-
-6. Check [system requirements](../plan-pmm-installation/hardware_and_system.md#client-requirements).
+5. If you plan to run PMM Client as a Docker container, [install Docker](https://docs.docker.com/get-started/get-docker/).
+6. Check [hardware and system requirements for PMM Client](../plan-pmm-installation/hardware_and_system.md).
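+
+To quickly confirm that the packages from step 4 are available on the client host, you can run a check along these lines (a minimal sketch; the `gnupg` package provides the `gpg` binary):
+
+```sh
+for tool in curl gpg sudo wget; do
+  command -v "$tool" >/dev/null || echo "missing: $tool"
+done
+```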
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/backup_container.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/backup_container.md
deleted file mode 100644
index 1f225880bf..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/backup_container.md
+++ /dev/null
@@ -1,45 +0,0 @@
-
-# Backup container
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Stop and rename the `pmm-server` container.
- - Take a local copy of the `pmm-data` container's `/srv` directory.
-
- ---
-
-!!! caution alert alert-warning "Important"
- Grafana plugins have been moved to the data volume `/srv` since the 2.23.0 version. So if you are upgrading PMM from any version before 2.23.0 and have installed additional plugins then plugins should be installed again after the upgrade.
-
- To check used Grafana plugins:
-
- ```sh
- docker exec -it pmm-server ls /var/lib/grafana/plugins
- ```
-To backup container:
-{.power-number}
-
-1. Stop the container:
-
- ```sh
- docker stop pmm-server
- ```
-
-2. Move the image:
-
- ```sh
- docker rename pmm-server pmm-server-backup
- ```
-
-3. Create a subdirectory (e.g., `pmm-data-backup`) and move to it:
-
- ```sh
- mkdir pmm-data-backup && cd pmm-data-backup
- ```
-
-4. Back up the data:
-
- ```sh
- docker cp pmm-data:/srv .
- ```
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/env_var.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/env_var.md
deleted file mode 100644
index 145d5b02d9..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/env_var.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Environment variables in PMM
-
-Configure PMM Server by setting Docker container environment variables using the `-e var=value` syntax:
-
-```bash
-docker run -e PMM_DATA_RETENTION=720h -e PMM_DEBUG=true perconalab/pmm-server:3.0.0-beta
-```
-
-## Core configuration variables
-
-### Performance & storage
-
-| Variable | Default | Description | Example |
-|----------|---------|-------------|----------|
-| `PMM_DATA_RETENTION` | `30d` | Duration to retain metrics data. Must be in multiples of 24h. | `720h` (30 days) |
-| `PMM_METRICS_RESOLUTION` | `1s` | Base metrics collection interval | `5s` |
-| `PMM_METRICS_RESOLUTION_HR` | `5s` | High-resolution metrics interval | `10s` |
-| `PMM_METRICS_RESOLUTION_MR` | `10s` | Medium-resolution metrics interval | `30s` |
-| `PMM_METRICS_RESOLUTION_LR` | `60s` | Low-resolution metrics interval | `300s` |
-
-### Feature flags
-
-| Variable | Default | Effect when enabled |
-|----------|---------|-------------------|
-| `PMM_ENABLE_UPDATES` | `true` | Allows version checks and UI updates |
-| `PMM_ENABLE_TELEMETRY` | `true` | Enables usage data collection |
-| `PMM_ENABLE_ALERTING` | `true` | Enables Percona Alerting system |
-| `PMM_ENABLE_BACKUP_MANAGEMENT` | `true` | Enables backup features |
-| `PMM_ENABLE_AZURE_DISCOVER` | `false` | Enables Azure database discovery |
-
-### Debugging
-
-| Variable | Default | Purpose |
-|----------|---------|---------|
-| `PMM_DEBUG` | `false` | Enables verbose logging |
-| `PMM_TRACE` | `false` | Enables detailed trace logging |
-
-## Advanced configuration
-
-### Networking
-
-| Variable | Description |
-|----------|-------------|
-| `PMM_PUBLIC_ADDRESS` | External DNS/IP for PMM server |
-| `PMM_INTERFACE_TO_BIND` | Network interface binding |
-
-### Database connections
-
-| Variable | Purpose |
-|----------|----------|
-| `PMM_CLICKHOUSE_*` | ClickHouse connection settings |
-| `PMM_POSTGRES_*` | PostgreSQL connection settings |
-
-
-### Supported external variables
-
-- **Grafana**: All `GF_*` variables
-- **VictoriaMetrics**: All `VM_*` variables
-- **Kubernetes**: All `KUBERNETES_*` variables
-- **System**: Standard variables like `HOME`, `PATH`, etc.
-
-## Variables for migrating from PMM v2 to PMM v3
-
-When migrating from PMM v2 to PMM v3, you'll need to update your environment variables to match the new naming convention. This is because PMM v3 introduces several important changes to improve consistency and clarity:
-
-- environment variables now use `PMM_` prefix
-- some boolean flags reversed (e.g., `DISABLE_` → `ENABLE_`)
-- removed deprecated variables
-
-### Examples
-
-```bash
-# PMM v2
--e DISABLE_UPDATES=true -e DATA_RETENTION=720h
-
-# PMM v3 equivalent
--e PMM_ENABLE_UPDATES=false -e PMM_DATA_RETENTION=720h
-```
-
-### Migration reference table
-
-??? note "Click to expand migration reference table"
-
- #### Configuration variables
- | PMM 2 | PMM 3 | Comments |
- |---------------------------------|------------------------------------|------------------------------|
- | `DATA_RETENTION` | `PMM_DATA_RETENTION` | |
- | `DISABLE_ALERTING` | `PMM_ENABLE_ALERTING` | |
- | `DISABLE_UPDATES` | `PMM_ENABLE_UPDATES` | |
- | `DISABLE_TELEMETRY` | `PMM_ENABLE_TELEMETRY` | |
- | `DISABLE_BACKUP_MANAGEMENT` | `PMM_ENABLE_BACKUP_MANAGEMENT` | Note the reverted boolean |
- | `ENABLE_AZUREDISCOVER` | `PMM_ENABLE_AZURE_DISCOVER` | |
- | `ENABLE_RBAC` | `PMM_ENABLE_ACCESS_CONTROL` | |
- | `LESS_LOG_NOISE` | | Removed in PMM v3 |
-
- #### Metrics configuration
- | PMM 2 | PMM 3 |
- |---------------------------------|------------------------------------|
- | `METRICS_RESOLUTION` | `PMM_METRICS_RESOLUTION` |
- | `METRICS_RESOLUTION_HR` | `PMM_METRICS_RESOLUTION_HR` |
- | `METRICS_RESOLUTION_LR` | `PMM_METRICS_RESOLUTION_LR` |
- | `METRICS_RESOLUTION_MR` | `PMM_METRICS_RESOLUTION_MR` |
-
-
- #### ClickHouse configuration
- | PMM 2 | PMM 3 | Comments |
- |-------------------------------------|------------------------------------|--------------------------|
- | `PERCONA_TEST_PMM_CLICKHOUSE_ADDR` | `PMM_CLICKHOUSE_ADDR` | |
- | `PERCONA_TEST_PMM_CLICKHOUSE_DATABASE` | `PMM_CLICKHOUSE_DATABASE` | |
- | `PERCONA_TEST_PMM_CLICKHOUSE_DATASOURCE` | `PMM_CLICKHOUSE_DATASOURCE` | |
- | `PERCONA_TEST_PMM_CLICKHOUSE_HOST` | `PMM_CLICKHOUSE_HOST` | |
- | `PERCONA_TEST_PMM_CLICKHOUSE_PORT` | `PMM_CLICKHOUSE_PORT` | |
- | `PERCONA_TEST_PMM_DISABLE_BUILTIN_CLICKHOUSE` | `PMM_DISABLE_BUILTIN_CLICKHOUSE` | |
- | `PERCONA_TEST_PMM_CLICKHOUSE_BLOCK_SIZE` | | Removed in PMM v3, new version|
- | `PERCONA_TEST_PMM_CLICKHOUSE_POOL_SIZE` | | Removed in PMM v3, new version|
-
- #### PostgreSQL configuration
- | PMM 2 | PMM 3 |
- |-------------------------------------|------------------------------------|
- | `PERCONA_TEST_POSTGRES_ADDR` | `PMM_POSTGRES_ADDR` |
- | `PERCONA_TEST_POSTGRES_DBNAME` | `PMM_POSTGRES_DBNAME` |
- | `PERCONA_TEST_POSTGRES_USERNAME` | `PMM_POSTGRES_USERNAME` |
- | `PERCONA_TEST_POSTGRES_DBPASSWORD` | `PMM_POSTGRES_DBPASSWORD` |
- | `PERCONA_TEST_POSTGRES_SSL_CA_PATH` | `PMM_POSTGRES_SSL_CA_PATH` |
- | `PERCONA_TEST_POSTGRES_SSL_CERT_PATH` | `PMM_POSTGRES_SSL_CERT_PATH` |
- | `PERCONA_TEST_POSTGRES_SSL_KEY_PATH` | `PMM_POSTGRES_SSL_KEY_PATH` |
- | `PERCONA_TEST_POSTGRES_SSL_MODE` | `PMM_POSTGRES_SSL_MODE` |
- | `PERCONA_TEST_PMM_DISABLE_BUILTIN_POSTGRES` | `PMM_DISABLE_BUILTIN_POSTGRES` |
-
- #### Telemetry & development
- | PMM 2 | PMM 3 |
- |-------------------------------------|------------------------------------|
- | `PMM_TEST_TELEMETRY_DISABLE_SEND` | `PMM_DEV_TELEMETRY_DISABLE_SEND` |
- | `PERCONA_TEST_TELEMETRY_DISABLE_START_DELAY` | `PMM_DEV_TELEMETRY_DISABLE_START_DELAY` |
- | `PMM_TEST_TELEMETRY_FILE` | `PMM_DEV_TELEMETRY_FILE` |
- | `PERCONA_TEST_TELEMETRY_HOST` | `PMM_DEV_TELEMETRY_HOST` |
- | `PERCONA_TEST_TELEMETRY_INTERVAL` | `PMM_DEV_TELEMETRY_INTERVAL` |
- | `PERCONA_TEST_TELEMETRY_RETRY_BACKOFF` | `PMM_DEV_TELEMETRY_RETRY_BACKOFF` |
- | `PERCONA_TEST_VERSION_SERVICE_URL` | `PMM_DEV_VERSION_SERVICE_URL` |
- | `PERCONA_TEST_STARLARK_ALLOW_RECURSION` | `PMM_DEV_ADVISOR_STARLARK_ALLOW_RECURSION` |
-
- #### Removed variables
- | PMM 2 | PMM 3 | Comments |
- |-------------------------------------|------------------------------------|------------------------------|
- | `PERCONA_TEST_AUTH_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
- | `PERCONA_TEST_CHECKS_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
- | `PERCONA_TEST_CHECKS_INTERVAL` | | Removed, not used |
- | `PERCONA_TEST_CHECKS_PUBLIC_KEY` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_PUBLIC_KEY` |
- | `PERCONA_TEST_NICER_API` | | Removed in PMM v3 |
- | `PERCONA_TEST_SAAS_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/index.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/index.md
deleted file mode 100644
index d73990beb9..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/index.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Install PMM Server with Docker container
-
-This section provides instructions for running PMM Server with Docker based on the [PMM Docker image](https://hub.docker.com/r/percona/pmm-server).
-
-## Running PMM Server with Watchtower
-
-To enable PMM Server upgrades via the **Upgrade page** and the **Upgrade Now** button on the Home dashboard, you must configure Watchtower during the PMM Server installation. Watchtower is a container monitoring tool that helps update Docker containers to their latest version when triggered.
-
-The [Easy-install script](../easy-install.md) script includes Watchtower commands, allowing for a one-step setup of PMM alongside Watchtower.
-
-You can also install PMM 3 manually, following the instructions below.
-
-## Installing PMM Server manually
-
-Before starting the installation, review the installation prerequisites below and choose a method to run PMM Server with Docker based on your preferred data storage option:
-
-- [Running Docker with Data container](../docker/run_with_data_container.md)
-- [Running Docker with host directory](../docker/run_with_host_dir.md)
-- [Running Docker with volume](../docker/run_with_vol.md)
-
-### Manual installation prerequisites
-
-- Install [Docker](https://docs.docker.com/get-docker/) version 17.03 or higher.
-- Ensure your CPU (and any virtualization layer you may be using) supports `x86-64-v2`.
-- Install Watchtower to automatically update your containers with the following considerations:
-
- - Ensure Watchtower is only accessible from within the Docker network or local host to prevent unauthorized access and enhance container security.
- - Configure network settings to expose only the PMM Server container to the external network, keeping Watchtower isolated within the Docker network.
- - Grant Watchtower access to the Docker socket to monitor and manage containers effectively, ensuring proper security measures are in place to protect the Docker socket.
- - Verify that both Watchtower and PMM Server are on the same network, or ensure PMM Server can connect to Watchtower for communication. This network setup is essential for PMM Server to initiate updates through Watchtower.
-
-## Run Docker container
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Pull the Docker image.
- - Copy it to create a persistent data container.
- - Run the image.
- - Open the PMM UI in a browser.
-
- ---
-??? info "Key points"
-
- - To disable the Home Dashboard **PMM Upgrade** panel you can either add `-e DISABLE_UPDATES=true` to the `docker run` command (for the life of the container) or navigate to _PMM --> PMM Settings --> Advanced Settings_ and disable "Check for Updates" (can be turned back on by any admin in the UI).
-
- - Eliminate browser certificate warnings by configuring a [trusted certificate](https://docs.percona.com/percona-monitoring-and-management/how-to/secure.html#ssl-encryption).
-
- - You can optionally enable an (insecure) HTTP connection by adding `--publish 80:80` to the `docker run` command. However, running PMM insecure is not recommended. You should also note that PMM Client *requires* TLS to communicate with the server, only working on a secure port.
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/restore_container.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/restore_container.md
deleted file mode 100644
index 1ba2b6fdd8..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/restore_container.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Restore container
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Stop and remove the container.
- - Restore (rename) the backup container.
- - Restore saved data to the data container.
- - Restore permissions to the data.
-
- ---
-
-!!! caution alert alert-warning "Important"
- You must have a [backup](backup_container.md) to restore from.
-
-To restore the container:
-{.power-number}
-
-1. Stop the container.
-
- ```sh
- docker stop pmm-server
- ```
-
-2. Remove it.
-
- ```sh
- docker rm pmm-server
- ```
-
-3. Revert to the saved image.
-
- ```sh
- docker rename pmm-server-backup pmm-server
- ```
-
-4. Change directory to the backup directory (e.g. `pmm-data-backup`).
-
-5. Remove Victoria Metrics data folder.
-
- ```sh
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta rm -r /srv/victoriametrics/data
- ```
-
-6. Copy the data.
-
- ```sh
- docker cp srv pmm-data:/
- ```
-
-7. Restore permissions.
-
- ```sh
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R root:root /srv && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R pmm:pmm /srv/alertmanager && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R root:pmm /srv/clickhouse && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R grafana:grafana /srv/grafana && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R pmm:pmm /srv/logs && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R postgres:postgres /srv/postgres14 && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R pmm:pmm /srv/prometheus && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R pmm:pmm /srv/victoriametrics && \
- docker run --rm --volumes-from pmm-data -it perconalab/pmm-server:3.0.0-beta chown -R postgres:postgres /srv/logs/postgresql14.log
- ```
-
-8. Start the image.
-
- ```sh
- docker start pmm-server
- ```
-
-
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_data_container.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_data_container.md
deleted file mode 100644
index 0d75300d01..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_data_container.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-# Run Docker with data container
-
-To run Docker with data container:
-{.power-number}
-
-1. Create a persistent data container:
-
- ```sh
- docker create --volume /srv \
- --name pmm-data \
- perconalab/pmm-server:3.0.0-beta /bin/true
- ```
-
- !!! caution alert alert-warning "Important"
- PMM Server expects the data volume to be `/srv`. Using any other value will result in **data loss** when upgrading.
-
- To check server and data container mount points:
-
- ```sh
- docker inspect pmm-data | grep Destination && \
- docker inspect pmm-server | grep Destination
- ```
-
-2. Create the Docker network:
-
- ```sh
- docker network create pmm_default
- ```
-
-3. Run the image:
-
- ```sh
- docker run --detach --restart always \
- --publish 443:8443 \
- --env PMM_WATCHTOWER_HOST=your_watchtower_host \
- --env PMM_WATCHTOWER_TOKEN=your_watchtower_token \
- --volumes-from pmm-data \
- --network=pmm_default \
- --name pmm-server \
- perconalab/pmm-server:3.0.0-beta
- ```
-
-4. Change the password for the default `admin` user, replacing `your_secure_password123` with a strong, unique password:
-
- ```sh
- docker exec -t pmm-server change-admin-password your_secure_password123
- ```
-
-5. Check the [WatchTower prerequisites](../docker/index.md|#prerequisites) and pass the following command to Docker Socket to start [Watchtower](https://containrrr.dev/watchtower/):
-
- ```sh
- docker run -v /var/run/docker.sock:/var/run/docker.sock -e WATCHTOWER_HTTP_API_UPDATE=1 -e WATCHTOWER_HTTP_API_TOKEN=your_watchtower_token --hostname=your_watchtower_host --network=pmm_default docker.io/perconalab/watchtower
- ```
-
-6. Visit `https://localhost:443` to see the PMM user interface in a web browser. If you are accessing the docker host remotely, replace `localhost` with the IP or server name of the host.
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_host_dir.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_host_dir.md
deleted file mode 100644
index b27a4cfb24..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_host_dir.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-# Run Docker with the host directory
-
-To run Docker with the host directory:
-{.power-number}
-
-1. Pull the image:
-
- ```sh
- docker pull perconalab/pmm-server:3.0.0-beta
- ```
-
-2. Run the image:
-
- ```sh
- docker run --detach --restart always \
- --publish 443:8443 \
- --env PMM_WATCHTOWER_HOST=your_watchtower_host \
- --env PMM_WATCHTOWER_TOKEN=your_watchtower_token \
- --volumes-from pmm-data \
- --network=pmm_default \
- --name pmm-server \
- perconalab/pmm-server:3.0.0-beta
- ```
-
-3. Change the password for the default `admin` user:
-
- ```sh
- docker exec -t pmm-server change-admin-password
- ```
-
-4. Check the [WatchTower prerequisites](../docker/index.md|#prerequisites) and pass the following command to Docker Socket to start [Watchtower](https://containrrr.dev/watchtower/):
-
- ```sh
- docker run -v /var/run/docker.sock:/var/run/docker.sock -e WATCHTOWER_HTTP_API_UPDATE=1 -e WATCHTOWER_HTTP_API_TOKEN=your_watchtower_token --hostname=your_watchtower_host --network=pmm_default docker.io/perconalab/watchtower
- ```
-
-5. Visit `https://localhost:443` to see the PMM user interface in a web browser. (If you are accessing the docker host remotely, replace `localhost` with the IP or server name of the host.)
-
-## Migrate from data container to host directory/volume
-
-To migrate your PMM from data container to host directory or volume run the following command:
-
-```sh
-docker cp :/srv /target/host/directory
-```
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_vol.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_vol.md
deleted file mode 100644
index 90c1cdc6bf..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/run_with_vol.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Run Docker with volume
-
-To run Docker with volume:
-{.power-number}
-
-1. Pull the image:
-
- ```sh
- docker pull perconalab/pmm-server:3.0.0-beta
- ```
-
-2. Create a volume:
-
- ```sh
- docker volume create pmm-data
- ```
-
-3. Run the image:
-
- ```sh
- docker run --detach --restart always \
- --publish 443:8443 \
- --env PMM_WATCHTOWER_HOST=your_watchtower_host \
- --env PMM_WATCHTOWER_TOKEN=your_watchtower_token \
- --volumes-from pmm-data \
- --network=pmm_default \
- --name pmm-server \
- perconalab/pmm-server:3.0.0-beta
- ```
-
-4. Change the password for the default `admin` user, replacing `your_secure_password123` with a strong, unique password:
-
- ```sh
- docker exec -t pmm-server change-admin-password your_secure_password123
- ```
-
-5. Check the [WatchTower prerequisites](../docker/index.md|#prerequisites) and pass the following command to Docker Socket to start [Watchtower](https://containrrr.dev/watchtower/):
-
- ```sh
- docker run -v /var/run/docker.sock:/var/run/docker.sock -e WATCHTOWER_HTTP_API_UPDATE=1 -e WATCHTOWER_HTTP_API_TOKEN=your_watchtower_token --hostname=your_watchtower_host --network=pmm_default docker.io/perconalab/watchtower
- ```
-
-6. Visit `https://localhost:443` to see the PMM user interface in a web browser. If you are accessing the Docker host remotely, replace `localhost` with the IP or server name of the host.
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/helm/index.md b/documentation/docs/install-pmm/install-pmm-server/baremetal/helm/index.md
deleted file mode 100644
index 0c936e711a..0000000000
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/helm/index.md
+++ /dev/null
@@ -1,170 +0,0 @@
-# Install PMM Server with Helm on the Kubernetes clusters
-
-
-[Helm](https://github.com/helm/helm) is the package manager for Kubernetes. Percona Helm charts can be found in [percona/percona-helm-charts](https://github.com/percona/percona-helm-charts) repository on Github.
-
-## Before you start
-
-- Install Helm following its [official installation instructions](https://docs.helm.sh/using_helm/#installing-helm).
-- Kubernetes cluster that [Helm supports](https://helm.sh/docs/topics/kubernetes_distros/)
-
-!!! note alert alert-primary ""
- Helm v3 is needed to run the following steps.
-
-Refer to [Kubernetes Supported versions](https://kubernetes.io/releases/version-skew-policy/#supported-versions) and [Helm Version Support Policy](https://helm.sh/docs/topics/version_skew/) to find the supported versions.
-
-PMM should be platform-agnostic, but it requires escalated privileges inside a container. It is necessary to have a `root` user inside the PMM container. Thus, PMM would not work for Kubernetes Platforms such as OpenShift or others that have hardened Security Context Constraints, for example:
-
-- [Security context constraints (SCCs)
-](https://docs.openshift.com/container-platform/latest/security/container_security/security-platform.html#security-deployment-sccs_security-platform)
-- [Managing security context constraints](https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html)
-
-Kubernetes platforms offer a different set of capabilities. To use PMM in production, you would need backups and, thus storage driver that supports snapshots. Consult your provider for Kubernetes and Cloud storage capabilities.
-
-## Locality and Availability
-
-You should not run the PMM monitoring server along with the monitored database clusters and services on the same system.
-
-Please ensure proper locality either by physically separating workloads in Kubernetes clusters or running separate Kubernetes clusters for the databases and monitoring workloads.
-
-You can physically separate workloads by properly configuring Kubernetes nodes, affinity rules, label selections, etc.
-
-Also, ensure that the Kubernetes cluster has [high availability](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/) so that in case of a node failure, the monitoring service will be running and capturing the required data.
-
-## Install PMM Server
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Setup pmm-admin password
- - Install
- - Configuration parameters
- - PMM environment variables
- - PMM SSL certificates
- - Backup
- - Upgrade
- - Restore
- - Uninstall
-
- ---
-
-### Set up pmm-admin password
-
-Create Kubernetes secret with pmm-admin password:
-```sh
-cat <
-```sh
-helm uninstall pmm
-```
-
-
-!!! hint alert alert-success "Tip"
- List all releases using `helm list`.
-
-### Parameters
-
-The list of Parameters is subject to change from release to release. Check the [Parameters](https://github.com/percona/percona-helm-charts/tree/main/charts/pmm#parameters) section of the PMM Helm Chart.
-
-!!! hint alert alert-success "Tip"
- You can list the default parameters [values.yaml](https://github.com/percona/percona-helm-charts/blob/main/charts/pmm/values.yaml) or get them from chart definition: `helm show values percona/pmm`
-
-Specify each parameter using the `--set key=value[,key=value]` or `--set-string key=value[,key=value]` arguments to `helm install`. For example,
-
-```sh
-helm install pmm \
---set secret.create=false --set secret.name=pmm-secret \
---set service.type="NodePort" \
---set storage.storageClassName="linode-block-storage-retain" \
- percona/pmm
-```
-
-The above command installs PMM and sets the Service network type to `NodePort` and storage class to `linode-block-storage-retain` for persistence storage on LKE.
-
-
-```sh
-helm uninstall pmm
-```
-
-
-!!! caution alert alert-warning "Important"
- Once this chart is deployed, it is impossible to change the application's access credentials, such as password, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools (if available)
-
-Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example:
-
-```sh
-helm show values percona/pmm > values.yaml
-
-#change needed parameters in values.yaml, you need `yq` tool pre-installed
-yq -i e '.secret.create |= false' values.yaml
-
-helm install pmm -f values.yaml percona/pmm
-```
-
-### [PMM environment variables](../docker/env_var.md)
-
-In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `pmmEnv` property.
-
-```yaml
-pmmEnv:
- DISABLE_UPDATES: "1"
-```
-
-### PMM SSL certificates
-
-PMM ships with self signed SSL certificates to provide secure connection between client and server ([check here](../../../../pmm-admin/security/ssl_encryption.md)).
-
-You will see the warning when connecting to PMM. To further increase security, you should provide your certificates and add values of credentials to the fields of the `cert` section:
-
-```yaml
-certs:
- name: pmm-certs
- files:
- certificate.crt:
- certificate.key:
- ca-certs.pem:
- dhparam.pem:
-```
-
-Another approach to set up TLS certificates is to use the Ingress controller, see [TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls). PMM helm chart supports Ingress. See [PMM network configuration](https://github.com/percona/percona-helm-charts/tree/main/charts/pmm#pmm-network-configuration).
-
-
-
-
-
-
-
-
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/aws.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/aws.md
similarity index 93%
rename from documentation/docs/install-pmm/install-pmm-server/aws/aws.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/aws.md
index 451e24f7cb..819e424766 100644
--- a/documentation/docs/install-pmm/install-pmm-server/aws/aws.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/aws.md
@@ -31,7 +31,7 @@ While PMM Server itself is provided at no cost, be aware that you will incur AWS
The disk space required by PMM Server depends on the number of monitored hosts and the retention period for the data.
-As a reference, the [PMM Demo](https://pmmdemo.percona.com/) site consumes approximately 230 MB per host per day, which totals around 6.9 GB per host over a 30-day retention period.
+As a reference, the [PMM2 Demo](https://pmmdemo.percona.com/) site consumes approximately 230 MB per host per day, which totals around 6.9 GB per host over a 30-day retention period.
Tip: You can estimate your disk space needs based on the number of hosts and the desired retention period.
For more information, see our blog post [How much disk space should I allocate for Percona Monitoring and Management](https://www.percona.com/blog/2017/05/04/how-much-disk-space-should-i-allocate-for-percona-monitoring-and-management/).
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/back_pmm_server.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/back_pmm_server.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/back_pmm_server.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/back_pmm_server.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/limit_access.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/limit_access.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/limit_access.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/limit_access.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/remove_pmm_server.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/remove_pmm_server.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/remove_pmm_server.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/remove_pmm_server.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/restore_pmm_server.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/restore_pmm_server.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/restore_pmm_server.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/restore_pmm_server.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/run.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/run.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/run.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/run.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/aws/settings.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/settings.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/aws/settings.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/aws/settings.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/backup_container.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/backup_container.md
new file mode 100644
index 0000000000..95db4da927
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/backup_container.md
@@ -0,0 +1,46 @@
+
+# Backup container
+
+??? info "Summary"
+
+ !!! summary alert alert-info ""
+ - Stop and rename the `pmm-server` container.
+ - Take a local copy of the `pmm-server` container's `/srv` directory.
+
+ ---
+
+!!! caution alert alert-warning "Important"
+    Grafana plugins have been stored in the `/srv` directory since version 2.23.0. If you are upgrading PMM from a version earlier than 2.23.0 and have installed additional plugins, reinstall them after the upgrade.
+
+ To check used Grafana plugins:
+
+ ```sh
+ docker exec -t pmm-server ls -l /var/lib/grafana/plugins
+ ```
+
+To back up the container:
+{.power-number}
+
+1. Stop the container:
+
+ ```sh
+ docker stop pmm-server
+ ```
+
+2. Rename the container:
+
+ ```sh
+ docker rename pmm-server pmm-server-backup
+ ```
+
+3. Create a subdirectory (e.g., `pmm-data-backup`) and change directory to it:
+
+ ```sh
+ mkdir pmm-data-backup && cd pmm-data-backup
+ ```
+
+4. Back up the data:
+
+ ```sh
+ docker cp pmm-server-backup:/srv .
+ ```
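+
+After the copy finishes, you can optionally sanity-check the backup by listing the copied directory (run this from the backup directory created in step 3; the exact contents depend on your setup):
+
+```sh
+ls -l srv
+```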
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/easy-install.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/easy-install.md
similarity index 87%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/docker/easy-install.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/easy-install.md
index 6162acb16c..efd818bddb 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/easy-install.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/easy-install.md
@@ -10,14 +10,12 @@ Download and install PMM Server using `cURL` or `wget`:
=== "cURL"
```sh
- export PMM_REPO=perconalab/pmm-server PMM_TAG=3.0.0-beta
curl -fsSL https://raw.githubusercontent.com/percona/pmm/refs/heads/v3/get-pmm.sh | /bin/bash
```
=== "wget"
```sh
- export PMM_REPO=perconalab/pmm-server PMM_TAG=3.0.0-beta
wget -O - https://raw.githubusercontent.com/percona/pmm/refs/heads/v3/get-pmm.sh | /bin/bash
```
@@ -42,4 +40,4 @@ Download and install PMM Server using `cURL` or `wget`:
Start by installing PMM client:
-[Install PMM client :material-arrow-right:](../../../install-pmm-client/index.md){.md-button}
+[Install PMM Client :material-arrow-right:](../../../install-pmm-client/index.md){.md-button}
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/env_var.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/env_var.md
new file mode 100644
index 0000000000..a9a81c7698
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/env_var.md
@@ -0,0 +1,66 @@
+# Environment variables in PMM
+
+Configure PMM Server by setting Docker container environment variables using the `-e var=value` syntax:
+
+```bash
+docker run -e PMM_DATA_RETENTION=720h -e PMM_DEBUG=true percona/pmm-server:3
+```
+
+## Core configuration variables
+
+### Performance & storage
+
+| Variable | Default | Description | Example |
+|----------|---------|-------------|----------|
+| `PMM_DATA_RETENTION` | `30d` | Duration to retain metrics data. Must be in multiples of 24h. | `720h` (30 days) |
+| `PMM_METRICS_RESOLUTION` | `1s` | Base metrics collection interval | `5s` |
+| `PMM_METRICS_RESOLUTION_HR` | `5s` | High-resolution metrics interval | `10s` |
+| `PMM_METRICS_RESOLUTION_MR` | `10s` | Medium-resolution metrics interval | `30s` |
+| `PMM_METRICS_RESOLUTION_LR` | `60s` | Low-resolution metrics interval | `300s` |
+
+### Feature flags
+
+| Variable | Default | Effect when enabled |
+|----------|---------|-------------------|
+| `PMM_ENABLE_UPDATES` | `true` | Allows version checks and UI updates |
+| `PMM_ENABLE_TELEMETRY` | `true` | Enables usage data collection |
+| `PMM_ENABLE_ALERTING` | `true` | Enables Percona Alerting system |
+| `PMM_ENABLE_BACKUP_MANAGEMENT` | `true` | Enables backup features |
+| `PMM_ENABLE_AZURE_DISCOVER` | `false` | Enables Azure database discovery |
+
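+For example, a minimal sketch that starts a container with telemetry and the update check turned off (the image tag and any other flags you need are up to you):
+
+```bash
+docker run -d --name pmm-server \
+  -e PMM_ENABLE_TELEMETRY=false \
+  -e PMM_ENABLE_UPDATES=false \
+  percona/pmm-server:3
+```
+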
+### Debugging
+
+| Variable | Default | Purpose |
+|----------|---------|---------|
+| `PMM_DEBUG` | `false` | Enables verbose logging |
+| `PMM_TRACE` | `false` | Enables detailed trace logging |
+
+## Advanced configuration
+
+### Networking
+
+| Variable | Description |
+|----------|-------------|
+| `PMM_PUBLIC_ADDRESS` | External DNS/IP for PMM server |
+| `PMM_INTERFACE_TO_BIND` | Network interface binding |
+
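+For instance, when PMM Server is reached through a NAT or load balancer, you might pass the externally visible address at startup; the hostname below is just a placeholder:
+
+```bash
+docker run -d --name pmm-server \
+  -e PMM_PUBLIC_ADDRESS=pmm.example.com \
+  percona/pmm-server:3
+```
+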
+### Database connections
+
+| Variable | Purpose |
+|----------|----------|
+| `PMM_CLICKHOUSE_*` | ClickHouse connection settings |
+| `PMM_POSTGRES_*` | PostgreSQL connection settings |
+
+
+### Supported external variables
+
+- **Grafana**: All `GF_*` variables
+- **VictoriaMetrics**: All `VM_*` variables
+- **Kubernetes**: All `KUBERNETES_*` variables
+- **System**: Standard variables like `HOME`, `PATH`, etc.
+
+### Variables for migrating from PMM v2 to PMM v3
+
+When migrating from PMM v2 to PMM v3, you'll need to update your environment variables to match the new naming convention. This is because PMM v3 introduces several important changes to improve consistency and clarity.
+
+ To see the full lists of variable name changes between PMM v2 and PMM v3, see the [Migration guide](../../../../pmm-upgrade/migrating_from_pmm_2.md#variables-for-migrating-from-pmm-v2-to-pmm-v3).
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/index.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/index.md
new file mode 100644
index 0000000000..9ba013b03b
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/index.md
@@ -0,0 +1,89 @@
+# Install PMM Server with Docker container
+
+This section explains how to install PMM Server as a Docker container. While PMM Server runs independently, we highly recommend installing [Watchtower](https://containrrr.dev/watchtower/) alongside it to enable convenient [PMM Server upgrades](../../../../pmm-upgrade/ui_upgrade.md) through the PMM user interface.
+
+With Watchtower, you can upgrade PMM Server directly from the **Upgrade** page or by clicking the **Upgrade Now** button on the **Home** dashboard.
+
+## Prerequisites
+
+Before starting the installation:
+
+- Install Docker version 17.03 or higher
+- Ensure your CPU supports `x86-64-v2`
+- Security requirements for Watchtower:
+
+ - restrict Watchtower access to Docker network or localhost
+ - configure network to expose only PMM Server externally
+ - secure Docker socket access for Watchtower
+ - place both Watchtower and PMM Server on the same network
+
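+To verify the CPU prerequisite above, one option on hosts with glibc 2.33 or newer is to ask the dynamic loader which microarchitecture levels it supports (the loader path and output format vary by distribution, so treat this as a rough check):
+
+```sh
+/lib64/ld-linux-x86-64.so.2 --help | grep -E 'x86-64-v[0-9]'
+```
+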
+## Installation options
+
+You can install PMM Server with Watchtower in two ways:
+
+
+=== "Easy-install script"
+
+ The [Easy-install script](../docker/easy-install.md) simplifies setup by including Watchtower commands, enabling a one-step installation of PMM with Watchtower. Run the following command:
+
+ ```sh
+ curl -fsSL https://www.percona.com/get/pmm | /bin/bash
+ ```
+
+=== "Manual installation"
+
+ For a more customizable setup, follow these steps:
+ {.power-number}
+
+ 1. Create a Docker network for PMM and Watchtower:
+ ```sh
+ docker network create pmm-network
+ ```
+
+ 2. (Optional) Install Watchtower to enable PMM Server upgrades via the UI.
+ {.power-number}
+
+ 1. Create a user-defined token to secure Watchtower's HTTP API. You can use any value or generate a secure token using `openssl` or another method. Ensure the same token is used in both the Watchtower and PMM Server configurations:
+
+ ```sh
+ openssl rand -hex 16
+ # Example output:
+ e09541c81e672bf0e48dbc72d4f92790
+ ```
+
+ 2. Install Watchtower using your token:
+
+ ```sh
+ docker run --detach \
+ --restart always \
+ --network=pmm-network \
+ -e WATCHTOWER_HTTP_API_TOKEN=your_token \
+ -e WATCHTOWER_HTTP_API_UPDATE=1 \
+ --volume /var/run/docker.sock:/var/run/docker.sock \
+ --name watchtower \
+ percona/watchtower:latest
+ ```
+
+ 3. Run PMM Server with Docker based on your preferred data storage method:
+ - [Run Docker with host directory](../docker/run_with_host_dir.md)
+ - [Run Docker with volume](../docker/run_with_vol.md)
+
+
+## Run Docker container
+
+??? info "Summary"
+
+ !!! summary alert alert-info ""
+ - Pull the Docker image.
+ - Choose how you want to store data.
+ - Run the image.
+ - Open the PMM UI in a browser.
+
+ ---
+??? info "Key points"
+
+ - To disable the Home Dashboard **PMM Upgrade** panel you can either add `-e PMM_ENABLE_UPDATES=false` to the `docker run` command (for the life of the container) or navigate to **PMM Configuration > Settings > Advanced Settings** and disable **Check for Updates** (can be turned back on by any admin in the UI).
+
+    - Eliminate browser certificate warnings by configuring a [trusted certificate](../../../../how-to/secure.md#ssl-encryption).
+
+ - You can optionally enable an (insecure) HTTP connection by adding `--publish 80:8080` to the `docker run` command. However, running PMM insecure is not recommended. You should also note that PMM Client *requires* TLS to communicate with the server, only working on a secure port.
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/isolated_hosts.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/isolated_hosts.md
similarity index 53%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/docker/isolated_hosts.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/isolated_hosts.md
index 4d81fe4fc9..3a4ace3ab4 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/isolated_hosts.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/isolated_hosts.md
@@ -1,10 +1,9 @@
-
# Isolated hosts
-If the host where you will run PMM Server has no internet connection, you can download the Docker image on a separate (internet-connected) host and securely copy it.
+If the host where you will run [PMM Server][Docker image] has no internet connection, you can download the Docker image on a separate (internet-connected) host and securely copy it.
{.power-number}
-1. On an internet-connected host, download the Docker image and its checksum file.
+1. On an internet-connected host, download the [Docker][Docker] image and its checksum file:
```sh
wget https://downloads.percona.com/downloads/pmm/{{release}}/docker/pmm-server-{{release}}.docker
@@ -15,13 +14,13 @@ If the host where you will run PMM Server has no internet connection, you can do
3. Open a terminal on the PMM Server host.
-4. (Optional) Check the Docker image file integrity.
+4. (Optional) Check the Docker image file integrity:
```sh
shasum -ca 256 pmm-server-{{release}}.sha256sum
```
-5. Load the image.
+5. Load the image:
```sh
docker load -i pmm-server-{{release}}.docker
@@ -29,11 +28,18 @@ If the host where you will run PMM Server has no internet connection, you can do
6. [Run the container](index.md#run-docker-container) as if your image is already pulled using your desired method for a storage volume (you can step over any docker pull commands as the image has been pre-staged).
+For more information, see:
+
+- [Docker installation guide][Docker]
+- [Docker Compose installation][Docker compose]
+- [PMM Server Docker tags][tags]
+- [PMM Client Docker setup][PMMC_COMPOSE]
+- [Setting up trusted certificates][trusted certificate]
+- [Easy installation script][Easy-install script]
[tags]: https://hub.docker.com/r/percona/pmm-server/tags
[Docker]: https://docs.docker.com/get-docker/
[Docker image]: https://hub.docker.com/r/percona/pmm-server
[Docker compose]: https://docs.docker.com/compose/
-[PMMC_COMPOSE]: ../client/index.md#docker-compose
-[trusted certificate]: ../../how-to/secure.md#ssl-encryption
-[Easy-install script]: easy-install.md
+[PMMC_COMPOSE]: ../../../install-pmm-client/docker.md
+[trusted certificate]: ../../../../how-to/secure.md#ssl-encryption
+[Easy-install script]: easy-install.md
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/preview_env_var.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/preview_env_var.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/docker/preview_env_var.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/preview_env_var.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/remove_container.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/remove_container.md
similarity index 50%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/docker/remove_container.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/remove_container.md
index b16b1bd0ee..8812609869 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/remove_container.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/remove_container.md
@@ -1,34 +1,31 @@
# Remove container
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Stop the container.
- - Remove (delete) both the server and data containers.
- - Remove (delete) both images.
-
- ---
-
!!! caution alert alert-warning "Caution"
These steps delete the PMM Server Docker image and any accumulated PMM metrics data.
To remove the container:
{.power-number}
-1. Stop pmm-server container.
+1. Stop pmm-server container:
```sh
docker stop pmm-server
```
-2. Remove containers.
+2. Remove the container:
+
+ ```sh
+ docker rm pmm-server
+ ```
+
+3. Remove the data volume:
```sh
- docker rm pmm-server pmm-data
+ docker volume rm pmm-data
```
-3. Remove the image.
+4. Remove the image:
```sh
- docker rmi $(docker images | grep "percona/pmm-server" | awk {'print $3'})
- ```
\ No newline at end of file
+ docker rmi $(docker images | grep "percona/pmm-server" | awk '{print $3}')
+ ```
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/restore_container.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/restore_container.md
new file mode 100644
index 0000000000..70b400720a
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/restore_container.md
@@ -0,0 +1,84 @@
+# Restore Docker container
+You can restore PMM Server either from a manual backup or from an automated backup volume that was created during migration to PMM v3.
+
+Before proceeding with restoration, ensure you have either a [manual backup](backup_container.md) or an [automated backup volume](../../../../pmm-upgrade/migrating_from_pmm_2.md#step-2-migrate-pmm-2-server-to-pmm-3) to restore from.
+
+=== "Restore from manual backup"
+ To restore the container from a manual backup:
+ {.power-number}
+
+ 1. Stop the container:
+
+ ```sh
+ docker stop pmm-server
+ ```
+
+ 2. Remove the container:
+
+ ```sh
+ docker rm pmm-server
+ ```
+
+ 3. Revert to the saved image:
+
+ ```sh
+ docker rename pmm-server-backup pmm-server
+ ```
+
+ 4. Change directory to the backup directory (e.g. `pmm-data-backup`):
+
+        ```sh
+ cd pmm-data-backup
+ ```
+
+ 5. Copy the data:
+
+ ```sh
+ docker run --rm -v $(pwd)/srv:/backup -v pmm-data:/srv -t percona/pmm-server:3 cp -r /backup/* /srv
+ ```
+
+ 6. Restore permissions:
+
+ ```sh
+ docker run --rm -v pmm-data:/srv -t percona/pmm-server:3 chown -R pmm:pmm /srv
+ ```
+
+ 7. Start the image:
+
+ ```sh
+ docker start pmm-server
+ ```
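+
+    Once the container is up, you can optionally confirm the server responds, for example by querying the version endpoint (replace `admin:admin` with your actual credentials):
+
+    ```sh
+    curl -ku admin:admin https://localhost/v1/version
+    ```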
+
+=== "Restore from automated migration backup"
+
+ To restore from an automated backup volume created during [migration to PMM v3](../../../../pmm-upgrade/migrating_from_pmm_2.md#step-2-migrate-pmm-2-server-to-pmm-3):
+ {.power-number}
+
+ 1. Stop the current PMM v3 container:
+ ```sh
+ docker stop pmm-server
+ ```
+ 2. Remove the container (optional):
+ ```sh
+ docker rm pmm-server
+ ```
+ 3. Start a PMM v2 container using your backup volume, replacing `` with your PMM v2 backup volume name (e.g., `pmm-data-2025-01-16-165135`):
+
+ ```sh
+ docker run -d \
+ -p 443:443 \
+ --volume :/srv \
+ --name pmm-server \
+ --restart always \
+ percona/pmm-server:2.44.0
+ ```
+
+ 4. Verify that your PMM v2 instance is running correctly and all your data is accessible.
+
+ !!! note alert alert-primary "Finding your backup volume name"
+ - Your backup volume name was displayed during the [automated upgrade process](../../../../pmm-upgrade/migrating_from_pmm_2.md#step-2-migrate-pmm-2-server-to-pmm-3).
+ - To list all available Docker volumes, use the following command, and look for volumes with names like `pmm-data-YYYY-MM-DD-HHMMSS`:
+
+ ```sh
+ docker volume ls
+ ```
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_host_dir.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_host_dir.md
new file mode 100644
index 0000000000..823188424e
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_host_dir.md
@@ -0,0 +1,42 @@
+
+# Run Docker with the host directory
+
+To run Docker with the host directory:
+{.power-number}
+
+1. Pull the image:
+
+ ```sh
+ docker pull percona/pmm-server:3
+ ```
+
+2. Identify a directory on the host that you want to use to persist PMM data. For example, `/home/user/srv`.
+
+3. Run the image:
+
+ ```sh
+ docker run --detach --restart always \
+ --publish 443:8443 \
+ --env PMM_WATCHTOWER_HOST=your_watchtower_host \
+ --env PMM_WATCHTOWER_TOKEN=your_watchtower_token \
+ --volume /home/user/srv:/srv \
+      --network=pmm-network \
+ --name pmm-server \
+ percona/pmm-server:3
+ ```
+
+4. Change the password for the default `admin` user, replacing `your_secure_password` with a strong, unique password:
+
+ ```sh
+ docker exec -t pmm-server change-admin-password your_secure_password
+ ```
+
+5. Visit `https://localhost:443` to see the PMM user interface in a web browser. (If you are accessing the Docker host remotely, replace `localhost` with the IP or server name of the host.)
+
+## Migrate from data container to host directory
+
+To migrate your PMM from data container to host directory, run the following command:
+
+```sh
+docker cp :/srv /target/host/directory
+```
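+
+As a purely hypothetical illustration, if the old data container were named `pmm-data` and the target were `/home/user/srv`, the command might look like this (both names are placeholders for your own values):
+
+```sh
+# placeholder names: substitute your own container name and host directory
+docker cp pmm-data:/srv /home/user/srv
+```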
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_vol.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_vol.md
new file mode 100644
index 0000000000..6ba8eeefa4
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/run_with_vol.md
@@ -0,0 +1,38 @@
+
+# Run Docker with volume
+
+To run Docker with volume:
+{.power-number}
+
+1. Pull the image:
+
+ ```sh
+ docker pull percona/pmm-server:3
+ ```
+
+2. Create a volume:
+
+ ```sh
+ docker volume create pmm-data
+ ```
+
+3. Run the image:
+
+ ```sh
+ docker run --detach --restart always \
+ --publish 443:8443 \
+ --env PMM_WATCHTOWER_HOST=your_watchtower_host \
+ --env PMM_WATCHTOWER_TOKEN=your_watchtower_token \
+ --volume pmm-data:/srv \
+ --network=pmm-network \
+ --name pmm-server \
+ percona/pmm-server:3
+ ```
+
+4. Change the password for the default `admin` user, replacing `your_secure_password` with a strong, unique password:
+
+ ```sh
+ docker exec -t pmm-server change-admin-password your_secure_password
+ ```
+
+5. Visit `https://localhost:443` to see the PMM user interface in a web browser. If you are accessing the Docker host remotely, replace `localhost` with the IP or server name of the host.
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/upgrade_container.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/upgrade_container.md
similarity index 50%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/docker/upgrade_container.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/upgrade_container.md
index 8167bb96e7..522d9caa55 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/docker/upgrade_container.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/docker/upgrade_container.md
@@ -1,55 +1,43 @@
# Upgrade container
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Stop the running container.
- - Backup (rename) the container and copy data.
- - Pull the latest Docker image.
- - Run it.
-
- ---
-
!!! caution alert alert-warning "Important"
Downgrades are not possible. To go back to using a previous version you must have created a backup of it before upgrading.
!!! hint alert alert-success "Tip"
- To see what release you are running, use the *PMM Upgrade* panel on the *Home Dashboard*, or run:
+    To see what release you are running, use the **PMM Upgrade** panel on the **Home Dashboard**, or run the following command (replace `localhost` with your PMM Server's address for remote access):
```sh
docker exec -it pmm-server \
curl -ku admin:admin https://localhost/v1/version
```
- (If you are accessing the docker host remotely, replace `localhost` with the IP or server name of the host.)
-
To upgrade the container:
{.power-number}
-1. Stop the container.
+1. Stop the container:
```sh
docker stop pmm-server
```
-2. Perform a [backup](#backup).
+2. Perform a [backup](../docker/backup_container.md).
-3. Pull the latest image.
+3. Pull the latest image:
```sh
- docker pull perconalab/pmm-server:3.0.0-beta
+ docker pull percona/pmm-server:3
```
-4. Rename the original container
+4. Rename the original container:
```sh
docker rename pmm-server pmm-server-old
```
-5. Run it.
+5. Run it:
```sh
docker run \
@@ -58,7 +46,7 @@ To upgrade the container:
--publish 443:443 \
--volumes-from pmm-data \
--name pmm-server \
- perconalab/pmm-server:3.0.0-beta
+ percona/pmm-server:3
```
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/helm/backup_container_helm.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/backup_container_helm.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/helm/backup_container_helm.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/backup_container_helm.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/index.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/index.md
new file mode 100644
index 0000000000..0d5cbce2c0
--- /dev/null
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/index.md
@@ -0,0 +1,142 @@
+# Install PMM Server with Helm on Kubernetes clusters
+
+[Helm](https://github.com/helm/helm) is the package manager for Kubernetes. You can find Percona Helm charts in [our GitHub repository](https://github.com/percona/percona-helm-charts).
+
+## Prerequisites
+
+ - [Helm v3](https://docs.helm.sh/using_helm/#installing-helm)
+ - Supported cluster according to [Supported Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions) and [Supported Helm](https://helm.sh/docs/topics/version_skew/) versions
+ - Storage driver with snapshot support (for backups)
+
+## Storage requirements
+
+Different Kubernetes platforms offer varying capabilities.
+
+To use PMM in production:
+
+- ensure your platform provides storage drivers supporting snapshots for backups
+- consult your provider about Kubernetes and Cloud storage capabilities
+
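+To see what your cluster offers, you can list the available storage classes and, if the CSI snapshot CRDs are installed, the snapshot classes (output depends entirely on your provider):
+
+```sh
+kubectl get storageclass
+kubectl get volumesnapshotclass
+```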
+
+## Deployment best practices
+
+For optimal monitoring:
+{.power-number}
+
+1. Separate PMM Server from monitored systems by either:
+
+ - using separate Kubernetes clusters for monitoring and databases
+ - configuring workload separation through node configurations, affinity rules, and label selectors
+
+2. Enable [high availability](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/) to ensure continuous monitoring during node failures
+
+## Install PMM Server on your Kubernetes cluster
+
+Create the required Kubernetes secret and deploy PMM Server using Helm:
+{.power-number}
+
+1. Create Kubernetes secret to set up `pmm-admin` password:
+ ```sh
+ cat < values.yaml
+ ```
+
+#### Change credentials
+
+!!! caution alert alert-warning "Important"
+ Helm cannot modify application credentials after deployment.
+
+Credential changes after deployment require either:
+
+- redeploying PMM Server with new persistent volumes
+- using PMM's built-in administrative tools
+
+### PMM environment variables
+
+Add [environment variables](../docker/env_var.md) for advanced operations (like custom init scripts) using the `pmmEnv` property:
+
+```yaml
+pmmEnv:
+  PMM_ENABLE_UPDATES: "1"
+```
+
+### SSL certificates
+
+PMM comes with [self-signed SSL certificates](../../../../admin/security/ssl_encryption.md), ensuring a secure connection between the client and server. However, since these certificates are not issued by a trusted authority, you may encounter a security warning when connecting to PMM.
+
+To enhance security, you have two options:
+{.power-number}
+
+1. Configure custom certificates:
+
+ ```yaml
+ certs:
+ name: pmm-certs
+ files:
+ certificate.crt:
+ certificate.key:
+ ca-certs.pem:
+ dhparam.pem:
+ ```
+
+2. Use [Ingress controller with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls). See [PMM network configuration](https://github.com/percona/percona-helm-charts/tree/main/charts/pmm#pmm-network-configuration) for details.
+
+
+
+
+
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/helm/restore_container_helm.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/restore_container_helm.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/helm/restore_container_helm.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/helm/restore_container_helm.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/backup_container_podman.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/backup_container_podman.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/podman/backup_container_podman.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/backup_container_podman.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/index.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/index.md
similarity index 58%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/podman/index.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/index.md
index b0b5468ba4..d44b2968d2 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/index.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/index.md
@@ -4,7 +4,6 @@ This section provides instructions for running PMM Server with Podman based on o
## About Podman
-
!!! seealso alert alert-info "See also"
- [Docker](../docker/index.md)
- Other [tags](https://hub.docker.com/r/percona/pmm-server/tags) are available.
@@ -59,10 +58,12 @@ On the other hand, the manual method offers a simpler setup with complete contro
After=time-sync.target
[Service]
EnvironmentFile=~/.config/systemd/user/pmm-server.env
+ Environment=PMM_VOLUME_NAME=%N
Restart=on-failure
RestartSec=20
ExecStart=/usr/bin/podman run \
--volume ~/.config/systemd/user/:/home/pmm/update/ \
+            --volume=${PMM_VOLUME_NAME}:/srv \
--rm --replace=true --name %N \
--env-file=~/.config/systemd/user/pmm-server.env \
--net pmm_default \
@@ -74,12 +75,16 @@ On the other hand, the manual method offers a simpler setup with complete contro
WantedBy=default.target
```
- 2. Create the environment file at `~/.config/systemd/user/pmm-server.env`:
+    2. Create the environment file at `~/.config/systemd/user/pmm-server.env`. If the current user is `root`, also adjust the file permissions as shown below:
```sh
PMM_WATCHTOWER_HOST=http://watchtower:8080
PMM_WATCHTOWER_TOKEN=123
- PMM_IMAGE=docker.io/perconalab/pmm-server:3.0.0-beta
+ PMM_IMAGE=docker.io/percona/pmm-server:3
+ ```
+
+ ```
+ chmod 777 ~/.config/systemd/user/pmm-server.env # Only if current user is root
```
3. Create or update the Watchtower service file at `~/.config/systemd/user/watchtower.service`:
@@ -92,34 +97,41 @@ On the other hand, the manual method offers a simpler setup with complete contro
After=nss-user-lookup.target nss-lookup.target
After=time-sync.target
[Service]
+        EnvironmentFile=~/.config/systemd/user/watchtower.env
Restart=on-failure
RestartSec=20
- Environment=WATCHTOWER_HTTP_API_UPDATE=1
- Environment=WATCHTOWER_HTTP_API_TOKEN=123
- Environment=WATCHTOWER_NO_RESTART=1
- Environment=WATCHTOWER_DEBUG=1
ExecStart=/usr/bin/podman run --rm --replace=true --name %N \
-v ${XDG_RUNTIME_DIR}/podman/podman.sock:/var/run/docker.sock \
- -e WATCHTOWER_HTTP_API_UPDATE=${WATCHTOWER_HTTP_API_UPDATE} \
- -e WATCHTOWER_HTTP_API_TOKEN=${WATCHTOWER_HTTP_API_TOKEN} \
- -e WATCHTOWER_NO_RESTART=${WATCHTOWER_NO_RESTART} \
- -e WATCHTOWER_DEBUG=${WATCHTOWER_DEBUG} \
+ --env-file=~/.config/systemd/user/watchtower.env \
--net pmm_default \
--cap-add=net_admin,net_raw \
- docker.io/perconalab/watchtower:latest
+ ${WATCHTOWER_IMAGE}
ExecStop=/usr/bin/podman stop -t 10 %N
[Install]
WantedBy=default.target
```
- 4. Start services:
+    4. Create the environment file for Watchtower at `~/.config/systemd/user/watchtower.env`. If the current user is `root`, also adjust the file permissions as shown below:
+
+ ```sh
+ WATCHTOWER_HTTP_API_UPDATE=1
+ WATCHTOWER_HTTP_API_TOKEN=123
+ WATCHTOWER_NO_RESTART=1
+ WATCHTOWER_IMAGE=docker.io/percona/watchtower:latest
+ ```
+
+ ```
+ chmod 777 ~/.config/systemd/user/watchtower.env # Only if current user is root
+ ```
+
+ 5. Start services:
```sh
systemctl --user enable --now pmm-server
systemctl --user enable --now watchtower
```
- 5. Go to `https://localhost:8443` to access the PMM user interface in a web browser. If you are accessing the host remotely, replace `localhost` with the IP or server name of the host.
+ 6. Go to `https://localhost:443` to access the PMM user interface in a web browser. If you are accessing the host remotely, replace `localhost` with the IP or server name of the host.
=== "Installation with manual updates"
@@ -137,9 +149,11 @@ On the other hand, the manual method offers a simpler setup with complete contro
After=time-sync.target
[Service]
EnvironmentFile=~/.config/systemd/user/pmm-server.env
+ Environment=PMM_VOLUME_NAME=%N
Restart=on-failure
RestartSec=20
ExecStart=/usr/bin/podman run \
+            --volume=${PMM_VOLUME_NAME}:/srv \
--rm --replace=true --name %N \
--env-file=~/.config/systemd/user/pmm-server.env \
--net pmm_default \
@@ -154,7 +168,7 @@ On the other hand, the manual method offers a simpler setup with complete contro
2. Create the environment file at `~/.config/systemd/user/pmm-server.env`:
```sh
- PMM_IMAGE=docker.io/perconalab/pmm-server:3.0.0-beta
+ PMM_IMAGE=docker.io/percona/pmm-server:3
```
3. Start services:
@@ -163,126 +177,10 @@ On the other hand, the manual method offers a simpler setup with complete contro
systemctl --user enable --now pmm-server
```
- 4. Go to `https://localhost:8443` to access the PMM user interface in a web browser. If you are accessing the host remotely, replace `localhost` with the IP or server name of the host.
+ 4. Go to `https://localhost:443` to access the PMM user interface in a web browser. If you are accessing the host remotely, replace `localhost` with the IP or server name of the host.
For information on manually upgrading, see [Upgrade PMM Server using Podman](../../../../pmm-upgrade/upgrade_podman.md).
-
-## Run as non-privileged user to start PMM
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Install.
- - Configure.
- - Enable and Start.
- - Open the PMM UI in a browser.
-
- ---
-To run Podman as a non-privileged user:
-{.power-number}
-
-1. Install:
-
- Create `~/.config/systemd/user/pmm-server.service` file:
-
- ```sh
- mkdir -p ~/.config/systemd/user/
- cat << "EOF" > ~/.config/systemd/user/pmm-server.service
- [Unit]
- Description=pmm-server
- Wants=network-online.target
- After=network-online.target
- After=nss-user-lookup.target nss-lookup.target
- After=time-sync.target
-
- [Service]
- Type=simple
-
- # set environment for this unit
- Environment=PMM_PUBLIC_PORT=8443
- Environment=PMM_VOLUME_NAME=%N
- Environment=PMM_TAG=2.33.0
- Environment=PMM_IMAGE=docker.io/percona/pmm-server
- Environment=PMM_ENV_FILE=%h/.config/pmm-server/pmm-server.env
-
- # optional env file that could override previous env settings for this unit
- EnvironmentFile=-%h/.config/pmm-server/env
-
- ExecStart=/usr/bin/podman run --rm --replace=true --name=%N -p ${PMM_PUBLIC_PORT}:443/tcp --ulimit=host --volume=${PMM_VOLUME_NAME}:/srv --env-file=${PMM_ENV_FILE} --health-cmd=none --health-interval=disable ${PMM_IMAGE}:${PMM_TAG}
- ExecStop=/usr/bin/podman stop -t 10 %N
- Restart=on-failure
- RestartSec=20
-
- [Install]
- Alias=%N
- WantedBy=default.target
-
- EOF
- ```
-
- Create `~/.config/pmm-server/pmm-server.env` file:
-
- ```sh
- mkdir -p ~/.config/pmm-server/
- cat << "EOF" > ~/.config/pmm-server/pmm-server.env
- # env file passed to the container
- # full list of environment variables:
- # https://www.percona.com/doc/percona-monitoring-and-management/2.x/setting-up/server/docker.html#environment-variables
-
- # keep updates disabled
- # do image replacement instead (update the tag and restart the service)
- DISABLE_UPDATES=1
- EOF
- ```
-
-2. Configure:
-
- There are 2 configuration files:
- 1. `~/.config/pmm-server/pmm-server.env` defines environment variables for PMM Server (PMM parameters like RBAC feature and etc)
- 2. `~/.config/pmm-server/env` defines environment variables for SystemD service (image tags, repo and etc)
-
- SystemD service passes the environment parameters from the `pmm-server.env `file (in `~/.config/pmm-server/pmm-server.env`) to PMM. For more information about container environment variables, check [Docker Environment].
-
- SystemD service uses some environment variables that could be customized if needed:
-
- ```text
- Environment=PMM_PUBLIC_PORT=8443
- Environment=PMM_VOLUME_NAME=%N
- Environment=PMM_TAG=2.33.0
- Environment=PMM_IMAGE=docker.io/percona/pmm-server
- ```
-
- You can override the environment variables by defining them in the file `~/.config/pmm-server/env`. For example, to override the path to a custom registry `~/.config/pmm-server/env`:
-
- ```sh
- mkdir -p ~/.config/pmm-server/
- cat << "EOF" > ~/.config/pmm-server/env
- PMM_TAG=2.31.0
- PMM_IMAGE=docker.io/percona/pmm-server
- PMM_PUBLIC_PORT=8443
- EOF
- ```
-
- !!! caution alert alert-warning "Important"
- Ensure that you modify PMM_TAG in `~/.config/pmm-server/env` and update it regularly as Percona cannot update it. It needs to be done by you.
-
-3. Enable and start:
-
- ```sh
- systemctl --user enable --now pmm-server
- ```
-
-4. Activate the podman socket using the [Podman socket activation instructions](https://github.com/containers/podman/blob/main/docs/tutorials/socket_activation.md).
-
-5. Pass the following command to Docker Socket to start [Watchtower](https://containrrr.dev/watchtower/). Make sure to modify the command to use your Podman socket path:
-
- ```sh
- docker run -v $XDG_RUNTIME_DIR/podman/podman.sock:/var/run/docker.sock -e WATCHTOWER_HTTP_API_UPDATE=1 -e WATCHTOWER_HTTP_API_TOKEN=123 --hostname=watchtower --network=pmm_default docker.io/perconalab/watchtower
- ```
-
-6. Visit `https://localhost:8443` to see the PMM user interface in a web browser. (If you are accessing host remotely, replace `localhost` with the IP or server name of the host.)
-
```sh
#first pull can take time
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/remove_container_podman.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/remove_container_podman.md
similarity index 61%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/podman/remove_container_podman.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/remove_container_podman.md
index ad4ed93a16..be32aa7eb1 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/remove_container_podman.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/remove_container_podman.md
@@ -1,28 +1,18 @@
# Remove podman container
-
-??? info "Summary"
-
- !!! summary alert alert-info ""
- - Stop PMM Server.
- - Remove (delete) volume.
- - Remove (delete) images.
-
- ---
-
!!! caution alert alert-warning "Caution"
These steps delete the PMM Server Docker image and the associated PMM metrics data.
-To remove your contiainer:
+To remove your container:
{.power-number}
-1. Stop PMM Server.
+1. Stop PMM Server:
```sh
systemctl --user stop pmm-server
```
-2. Remove volume.
+2. Remove volume:
```sh
@@ -36,7 +26,7 @@ To remove your contiainer:
podman volume rm --force pmm-server
```
-3. Remove the PMM images.
+3. Remove the PMM images:
```sh
podman rmi $(podman images | grep "pmm-server" | awk {'print $3'})
@@ -44,8 +34,7 @@ To remove your contiainer:
[tags]: https://hub.docker.com/r/percona/pmm-server/tags
[Podman]: https://podman.io/getting-started/installation
-[Docker]: docker.md
+[Docker]: ../docker/index.md
[Docker image]: https://hub.docker.com/r/percona/pmm-server
-[Docker Environment]: docker.md#environment-variables
-[trusted certificate]: ../../how-to/secure.md#ssl-encryption
-[Set up repos]: ../client/index.md#package-manager
\ No newline at end of file
+[Docker environment variables]: ../docker/env_var.md
+[trusted certificate]: ../../../../how-to/secure.md#ssl-encryption
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/podman/restore_container_podman.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/restore_container_podman.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/podman/restore_container_podman.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/podman/restore_container_podman.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/download_ova.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/download_ova.md
similarity index 89%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/download_ova.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/download_ova.md
index 8069b70a2d..f8844f0e50 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/download_ova.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/download_ova.md
@@ -17,7 +17,7 @@ This section contains guidelines on how to download and verify the OVA file.
=== "Download from the CLI"
- Download the latest PMM Server OVA and checksum files.
+ Download the latest PMM Server OVA and checksum files:
```sh
wget https://www.percona.com/downloads/pmm/{{release}}/ova/pmm-server-{{release}}.ova
@@ -26,7 +26,7 @@ This section contains guidelines on how to download and verify the OVA file.
## Verify OVA file from CLI
-Verify the checksum of the downloaded .ova file.
+Verify the checksum of the downloaded .ova file:
```sh
shasum -ca 256 pmm-server-{{release}}.sha256sum
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/index.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/index.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/index.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/index.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/login_UI.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/login_UI.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/login_UI.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/login_UI.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/remove_virtual_machine.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/remove_virtual_machine.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/remove_virtual_machine.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/remove_virtual_machine.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/virtualbox.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/virtualbox.md
similarity index 100%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/virtualbox.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/virtualbox.md
diff --git a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/vmware.md b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/vmware.md
similarity index 98%
rename from documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/vmware.md
rename to documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/vmware.md
index 0c4796576a..34589b7654 100644
--- a/documentation/docs/install-pmm/install-pmm-server/baremetal/virtual/vmware.md
+++ b/documentation/docs/install-pmm/install-pmm-server/deployment-options/virtual/vmware.md
@@ -95,5 +95,5 @@ To start the guest and get the IP address from the CLI:
[OVA]: https://www.percona.com/downloads/pmm/{{release}}/ova
[OVF]: https://wikipedia.org/wiki/Open_Virtualization_Format
[VirtualBox]: https://www.virtualbox.org/
-[VMware]: https://www.vmware.com/products/workstation-player/
+[VMware]: https://www.vmware.com/products/
[OVFTool]: https://code.vmware.com/tool/ovf
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/install-pmm-server/index.md b/documentation/docs/install-pmm/install-pmm-server/index.md
index 3e91bf7e0d..63d2939c4b 100644
--- a/documentation/docs/install-pmm/install-pmm-server/index.md
+++ b/documentation/docs/install-pmm/install-pmm-server/index.md
@@ -4,10 +4,10 @@ Before installing PMM Server, read the [Prerequisites to install PMM Server](pre
Install and run at least one PMM Server using one of the following ways:
-- [Docker](../install-pmm-server/baremetal/docker/index.md)
-- [Podman](../install-pmm-server/baremetal/podman/index.md)
-- [Helm](../install-pmm-server/baremetal/helm/index.md)
-- [Virtual appliance](../install-pmm-server/baremetal/virtual/index.md)
-- [Amazon AWS](../install-pmm-server/aws/aws.md)
+- [Docker](../install-pmm-server/deployment-options/docker/index.md)
+- [Podman](../install-pmm-server/deployment-options/podman/index.md)
+- [Helm](../install-pmm-server/deployment-options/helm/index.md)
+- [Virtual appliance](../install-pmm-server/deployment-options/virtual/index.md)
+- [Amazon AWS](../install-pmm-server/deployment-options/aws/aws.md)
diff --git a/documentation/docs/install-pmm/install-pmm-server/prerequisites.md b/documentation/docs/install-pmm/install-pmm-server/prerequisites.md
index 33fdfbd92e..f5e82fae87 100644
--- a/documentation/docs/install-pmm/install-pmm-server/prerequisites.md
+++ b/documentation/docs/install-pmm/install-pmm-server/prerequisites.md
@@ -1,9 +1,14 @@
# Prerequisites
-1. Check your system [requirements](..//..//plan-pmm-installation/hardware_and_system.md#server-requirements).
+Before setting up PMM Server, make sure to complete the following steps:
+{.power-number}
-2. Configure your [network](..//..//plan-pmm-installation/network_and_firewall.md).
+1. Check that your system meets the [hardware and software requirements](../plan-pmm-installation/hardware_and_system.md).
-3. Authenticate using Service Accounts.
+2. Configure your [network settings](../plan-pmm-installation/network_and_firewall.md).
- While adding clients to the PMM Server, you use the `admin` user. However, if you change the password for the admin user from the PMM UI, then the clients will not be able to access PMM. Also, due to multiple unsuccessful login attempts Grafana will lock out the `admin` user. The solution is to use [Service Accounts](../../api/authentication.md) for authentication. You can use Service Accounts as a replacement for basic authentication and API keys.
\ No newline at end of file
+3. Use Grafana [Service Accounts](../../api/authentication.md) for secure and consistent authentication.
+
+PMM 3 uses Grafana service accounts instead of API keys for authentication, providing fine-grained access control and enhanced security.
+
+Service accounts prevent issues such as Clients losing access due to admin password changes or account lockouts caused by multiple failed login attempts.
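+
+As a rough sketch of what this looks like in practice (assumptions: default `admin:admin` credentials, PMM reachable on `localhost`, and the standard Grafana service-account API that PMM exposes under `/graph`; see the linked authentication guide for the supported workflow):
+
+```sh
+# Create a service account, then note the "id" field in the JSON response
+curl -k -u admin:admin -X POST https://localhost/graph/api/serviceaccounts \
+  -H "Content-Type: application/json" \
+  -d '{"name": "pmm-automation", "role": "Admin"}'
+
+# Create a token for that service account (replace <id> with the id returned above)
+curl -k -u admin:admin -X POST https://localhost/graph/api/serviceaccounts/<id>/tokens \
+  -H "Content-Type: application/json" \
+  -d '{"name": "pmm-automation-token"}'
+```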
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/plan-pmm-installation/hardware_and_system.md b/documentation/docs/install-pmm/plan-pmm-installation/hardware_and_system.md
index b9cae292b4..405f90071e 100644
--- a/documentation/docs/install-pmm/plan-pmm-installation/hardware_and_system.md
+++ b/documentation/docs/install-pmm/plan-pmm-installation/hardware_and_system.md
@@ -1,38 +1,59 @@
# Hardware and system requirements
-## Server requirements
+## PMM Server requirements
-* **Disk**
+PMM Server's resource requirements depend on your monitoring environment. Here are our recommendations for different deployment scales:
- Approximately 1 GB of storage per monitored database node with data retention set to one week. By default, [retention](../../configure-pmm/advanced_settings.md#data-retention) is 30 days.
+### Typical deployment (up to 30 nodes)
+This is the most common deployment scenario, suitable for small to medium-sized environments:
- !!! hint alert alert-success "Tip"
- [Disable table statistics](../../optimize/disable_table_stats.md) to decrease the VictoriaMetrics database size.
+- **CPU**: 4 cores
+- **Memory**: 8 GB
+- **Storage**: 100 GB
-* **Memory**
+### Medium deployment (up to 200 nodes)
+Recommended for environments monitoring MySQL, PostgreSQL, or MongoDB at scale:
- A minimum of 2 GB per monitored database node. The increase in memory usage is not proportional to the number of nodes. For example, data from 20 nodes should be easily handled with 16 GB.
+- **CPU**: 8-16 cores
+- **Memory**: 16-32 GB
+- **Storage**: 200 GB
+- **CPU usage**: Expect 20-70% utilization
-* **Architecture**
+### Large deployment (500+ nodes)
+Designed for extensive monitoring environments with high node counts:
- - Your CPU must support the [`SSE4.2`](https://wikipedia.org/wiki/SSE4#SSE4.2) instruction set, a requirement of ClickHouse, a third-party column-oriented database used by Query Analytics. If your CPU is lacking this instruction set you won't be able to use Query Analytics.
- - For ARM64 systems, ensure you're using a supported ARM64 architecture (e.g., ARMv8). Note that some features may have different performance characteristics on ARM compared to x86_64 systems.
+- **CPU**: 16+ cores
+- **Memory**: 32+ GB
+- **Storage**: 500+ GB
-## Client requirements
+## Storage calculation
+Adjust storage calculations based on your data retention period and the number of metrics collected. To estimate storage requirements:
+
+- Allow approximately 1 GB of storage per monitored node per week.
+- For the default 30-day retention period, use the formula: `number_of_nodes * 4 GB` (see the example below).
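+
+For example, monitoring 50 nodes with the default 30-day retention would need roughly 50 * 4 GB = 200 GB of storage.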
+
+### Server architecture requirements
-* **Disk**
+- **CPU**: Must support the [`SSE4.2`](https://wikipedia.org/wiki/SSE4#SSE4.2) instruction set, which is required for Query Analytics (QAN); see the check below.
+- **ARM64**: Ensure your system uses a supported ARM64 architecture (e.g., ARMv8).
+- **ARM limitations**: PMM Server is not currently available as a native ARM64 build. For ARM-based systems, use Docker or Podman to run x86_64 images via emulation.
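+
+A quick way to confirm SSE4.2 support on an existing Linux host (a minimal check, assuming `/proc/cpuinfo` is available):
+
+```sh
+# Prints "SSE4.2 supported" if the CPU advertises the sse4_2 flag
+grep -q sse4_2 /proc/cpuinfo && echo "SSE4.2 supported" || echo "SSE4.2 not supported"
+```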
+
+!!! hint alert alert-success "Tip"
+ To reduce storage usage, consider [disabling table statistics](../../optimize/disable_table_stats.md), which can significantly decrease the size of the VictoriaMetrics database.
+
+## Client requirements
- A minimum of 100 MB of storage is required for installing the PMM Client package. With a good connection to PMM Server, additional storage is not required. However, the client needs to store any collected data that it cannot dispatch immediately, so additional storage may be required if the connection is unstable or the throughput is low. VM Agent uses 1 GB of disk space for cache during a network outage. QAN, on the other hand, uses RAM to store cache.
+### Storage
-* **Operating system**
+The PMM Client package requires 100 MB of storage for installation. Under normal operation with a stable connection to PMM Server, no additional storage is needed. During network instability or low throughput periods, the Client temporarily stores collected data that cannot be immediately dispatched. The VM Agent reserves 1 GB of disk space for caching during network outages, while Query Analytics (QAN) utilizes RAM instead of disk storage for its cache.
- PMM Client runs on any modern 64-bit Linux distribution, including ARM-based systems. It is tested on supported versions of Debian, Ubuntu, CentOS, and Red Hat Enterprise Linux, on both x86_64 and ARM64 architectures. See [Percona software support life cycle](https://www.percona.com/services/policies/percona-software-support-lifecycle#pt).
+### Operating system
+PMM Client is compatible with modern 64-bit Linux distributions on both x86_64 and ARM64 architectures. Supported platforms include current versions of Debian, Ubuntu, CentOS, and Red Hat Enterprise Linux. For specific version support details, see [Percona software support life cycle](https://www.percona.com/services/policies/percona-software-support-lifecycle#pt).
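+
+To confirm that a host meets these Client requirements before installing (a minimal check using standard Linux tooling):
+
+```sh
+# x86_64 or aarch64 indicates a supported 64-bit architecture
+uname -m
+# Shows the distribution name and version
+cat /etc/os-release
+```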
### ARM-specific considerations
- **Docker**: If using Docker for PMM Client on ARM systems, ensure you're using the ARM64-compatible Docker images.
- **Performance**: Performance may vary across different ARM implementations. Conduct thorough testing to ensure optimal performance in your environment.
- **Compatibility**: Ensure you're using ARM-compatible versions of any additional software or databases you're monitoring with PMM.
-- **PMM Server**: PMM Server is not currently available as a native ARM64 build. For ARM-based systems, consider using the Docker or Podman installation methods, which can run x86_64 images via emulation on ARM platforms.
-- **Resource usage**: Monitor resource usage closely on ARM systems, as it may differ from x86_64 systems. Adjust your configuration as needed for optimal performance.
+- **Resource usage**: Monitor resource usage closely on ARM systems, as it may differ from x86_64 systems. Adjust your configuration as needed for optimal performance.
\ No newline at end of file
diff --git a/documentation/docs/install-pmm/plan-pmm-installation/network_and_firewall.md b/documentation/docs/install-pmm/plan-pmm-installation/network_and_firewall.md
index bb8bdfbfa1..49bd1c8ed8 100644
--- a/documentation/docs/install-pmm/plan-pmm-installation/network_and_firewall.md
+++ b/documentation/docs/install-pmm/plan-pmm-installation/network_and_firewall.md
@@ -28,7 +28,7 @@ Other ports:
Depending on your architecture other ports may also need to be exposed.
- For `pmm-agent`, the default listen port is 7777.
- - The default port range for `pmm-agent` is large by default to accommodate any architecture size but it can be modified using the `--ports-min` and `--ports-max` flags, or by changing the configuration file. In network constraint environments, the range can be reduced to a minimum by allocating at least one port per agent monitored. Learn more about available settings for `pmm-agent` in [Percona PMM-Agent documentation](https://docs.percona.com/percona-monitoring-and-management/3/use/commands/pmm-agent.html).
+  - The default port range for `pmm-agent` is intentionally large to accommodate deployments of any size, but it can be modified using the `--ports-min` and `--ports-max` flags, or by changing the configuration file. In network-constrained environments, the range can be reduced to a minimum by allocating at least one port per monitored agent. Learn more about available settings for `pmm-agent` in the [Percona PMM-Agent documentation](../../use/commands/pmm-agent.md).
## Network configuration for locked-down environments
For computers in a locked-down corporate environment without direct access to the Internet, make sure to enable access to Percona Platform services following the instructions in the [Percona Platform documentation](https://docs.percona.com/percona-platform/network.html).
\ No newline at end of file
diff --git a/documentation/docs/pmm-admin/security/index.md b/documentation/docs/pmm-admin/security/index.md
deleted file mode 100644
index 5216d308d2..0000000000
--- a/documentation/docs/pmm-admin/security/index.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# About security in PMM
-
-
-By Default, PMM ships with a self-signed certificate to enable usage out of the box. While this does enable users to have encrypted connections between clients (database clients and web/API clients) and the PMM Server, it shouldn't be considered a properly secured connection. Taking the following precautions will ensure that you are truly secure:
-
-- [SSL encryption with trusted certificates](#ssl-encryption) to secure traffic between clients and server;
-
-- [Grafana HTTPS secure cookies](#grafana-https-secure-cookies)
diff --git a/documentation/docs/pmm-upgrade/index.md b/documentation/docs/pmm-upgrade/index.md
index 5fb2185f8c..1186a0258f 100644
--- a/documentation/docs/pmm-upgrade/index.md
+++ b/documentation/docs/pmm-upgrade/index.md
@@ -3,12 +3,15 @@
!!! caution alert alert-warning "Upgrade PMM Server before Clients"
- When upgrading PMM, always upgrade the PMM Server before upgrading any PMM Clients.
- Make sure that the PMM Server version is higher than or equal to the PMM Client version. Mismatched versions can lead to configuration issues and failures in Client-Server communication, as the PMM Server may not recognize all parameters in the client configuration.
+    - For the UI upgrade option, Watchtower must be installed alongside PMM Server.
-Find the detailed information on how to upgrade PMM in the following sections:
+## Available upgrade methods
+
+Choose your preferred upgrade method based on your setup:
* [Upgrade PMM Server from the UI](ui_upgrade.md)
-* [Upgrade PMM Client](upgrade_agent.md)
+* [Upgrade PMM Client](upgrade_client.md)
* [Upgrade PMM Server using Docker](upgrade_docker.md)
diff --git a/documentation/docs/pmm-upgrade/migrating_from_pmm_2.md b/documentation/docs/pmm-upgrade/migrating_from_pmm_2.md
index c34688452e..a870e1edcb 100644
--- a/documentation/docs/pmm-upgrade/migrating_from_pmm_2.md
+++ b/documentation/docs/pmm-upgrade/migrating_from_pmm_2.md
@@ -1,10 +1,14 @@
# Migrate PMM 2 to PMM 3
-PMM 3 introduces significant architectural changes that require gradual transition from PMM 2:
+PMM 3 introduces significant architectural changes that require a gradual transition from PMM 2.
+
+You can migrate to PMM 3 either automatically, using the migration script (recommended), or manually, by following the step-by-step instructions.
+
+To gradually migrate to PMM 3:
## Step 1: Upgrade PMM 2 Server to the latest version
-Before upgrading to PMM 3, ensure your PMM 2 Server is running the latest version:
+Before migrating PMM 2 to PMM 3, ensure your PMM 2 Server is running the latest version:
{.power-number}
1. From the **Home** page, scroll to the **PMM Upgrade** panel and click the Refresh button to manually check for updates.
@@ -13,104 +17,259 @@ Before upgrading to PMM 3, ensure your PMM 2 Server is running the latest versio
## Step 2: Migrate PMM 2 Server to PMM 3
-=== "PMM 2 with Docker volume"
-
- Follow these manual steps to upgrade your PMM 2 Server to PMM 3:
+=== "Automated Docker migration (Recommended)"
+ Use this upgrade script for a simplified migration process:
{ .power-number}
- 1. Stop all PMM Server services:
+    1. Download and run the [automated migration script](https://www.percona.com/get/pmm) to start the migration. The `-b` flag creates a backup of your PMM 2 instance before the migration.
```sh
- docker exec -t supervisorctl stop all
+ ./get-pmm.sh -n -b
```
+ 2. Note the backup volume name displayed during the migration (e.g., `pmm-data-2025-01-16-165135`) so that you can restore this backup if needed.
- 2. Transfer `/srv` directory ownership:
-
+ 3. Check additional script options:
```sh
- docker exec -t chown -R pmm:pmm /srv
+ ./get-pmm.sh -h
```
+ !!! note alert alert-primary "Restore PMM 2 backup"
+ If you need to revert to the PMM 2 instance, restore the backup created above:
+ { .power-number}
- 3. List and note down your Docker volume:
-
- ```sh
- {% raw %}
- docker inspect -f '{{ range .Mounts }}{{ if eq .Type "volume" }}{{ .Name }}{{ "\n" }}{{ end }}{{ end }}'
- {% endraw %}
- ```
+ 1. Stop the PMM 3 container:
+ ```sh
+ docker stop pmm-server
+ ```
+ 2. Start a PMM 2 container using the backup volume, replacing `` (e.g., `pmm-data-2025-01-16-165135`) with your actual backup volume name:
- 4. Stop and remove existing container:
+ ```sh
+ docker run -d -p 443:443 --volume :/srv --name pmm-server --restart always percona/pmm-server:2.44.0
+ ```
+ 3. Verify that your PMM 2 instance is running correctly and all your data is accessible.
- ```sh
- docker stop pmm-server && docker rm pmm-server
- ```
+=== "Manual migration (Docker/Kubernetes/Podman/AMI/OVF)"
+ === "Docker with volume"
+ Follow these manual steps to migrate your PMM 2 Server to PMM 3:
+ { .power-number}
- 5. Pull PMM 3 Server image:
+ 1. Stop all PMM Server services:
- ```sh
- docker pull perconalab/pmm-server:3.0.0-beta
- ```
+ ```sh
+ docker exec -t supervisorctl stop all
+ ```
- 6. Run new container with existing volume:
-
- ```sh
- docker run -d -v pmm-server-data:/srv -p 443:8443 --name pmm-server --restart always perconalab/pmm-server:3.0.0-beta
- ```
+ 2. Transfer `/srv` directory ownership:
-=== "PMM 2 with data container"
+ ```sh
+ docker exec -t chown -R pmm:pmm /srv
+ ```
- Follow these manual steps to upgrade your PMM 2 Server to PMM 3:
- { .power-number}
+ 3. List and note down your Docker volume:
+
+ ```sh
+ {% raw %}
+ docker inspect -f '{{ range .Mounts }}{{ if eq .Type "volume" }}{{ .Name }}{{ "\n" }}{{ end }}{{ end }}'
+ {% endraw %}
+ ```
- 1. Stop all PMM Server services:
+ 4. Stop and remove existing container:
- ```sh
- docker exec -t supervisorctl stop all
- ```
+ ```sh
+ docker stop pmm-server && docker rm pmm-server
+ ```
- 2. Transfer `/srv` directory ownership:
+ 5. Pull PMM 3 Server image:
- ```sh
- docker exec -t chown -R pmm:pmm /srv
- ```
+ ```sh
+ docker pull percona/pmm-server:3
+ ```
- 3. Identify data container using either:
-
- ```sh
- docker ps -a --filter "status=created"
- ```
+        6. Run the new version of PMM Server with the existing volume:
- OR
+ ```sh
+ docker run -d -v pmm-server-data:/srv -p 443:8443 --name pmm-server --restart always percona/pmm-server:3
+ ```
- ```sh
- {% raw %}
- docker inspect -f '{{ range .Mounts }}{{ if eq .Type "volume" }}{{ .Name }}{{ "\n" }}{{ end }}{{ end }}'
- {% endraw %}
- ```
-
- 4. Stop and remove existing container:
+ === "Docker with data container"
+        Follow these manual steps to migrate your PMM 2 Server to PMM 3:
+ { .power-number}
- ```sh
- docker stop pmm-server && docker rm pmm-server
- ```
+ 1. Stop all PMM Server services:
- 5. Pull PMM 3 Server image:
-
- ```sh
- docker pull perconalab/pmm-server:3.0.0-beta
- ```
+ ```sh
+ docker exec -t supervisorctl stop all
+ ```
- 6. Run new container with existing data container:
+ 2. Transfer `/srv` directory ownership:
- ```sh
- docker run -d --volumes-from pmm-server-data -p 443:8443 --name pmm-server --restart always perconalab/pmm-server:3.0.0-beta
- ```
+ ```sh
+ docker exec -t chown -R pmm:pmm /srv
+ ```
+
+ 3. Identify the data container using either:
+
+ ```sh
+ docker ps -a --filter "status=created"
+ ```
+
+ OR
+
+ ```sh
+ {% raw %}
+ docker inspect -f '{{ range .Mounts }}{{ if eq .Type "volume" }}{{ .Name }}{{ "\n" }}{{ end }}{{ end }}'
+ {% endraw %}
+ ```
+
+ 4. Stop and remove the existing container:
+
+ ```sh
+ docker stop pmm-server && docker rm pmm-server
+ ```
+
+ 5. Pull PMM 3 Server image:
+
+ ```sh
+ docker pull percona/pmm-server:3
+ ```
+
+ 6. Run the new version of PMM Server with the existing data container:
+
+ ```sh
+ docker run -d --volumes-from pmm-server-data -p 443:8443 --name pmm-server --restart always percona/pmm-server:3
+ ```
+
+ === "Helm"
+ Follow these steps to migrate your PMM 2 Server deployed with Helm to PMM 3:
+ {.power-number}
+
+ 1. Update the Percona Helm repository:
+
+ ```sh
+ helm repo update percona
+ ```
+
+ 2. Export current values to a file:
+
+ ```sh
+ helm show values percona/pmm > values.yaml
+ ```
+
+        3. Update the `values.yaml` file to match your PMM 2 configuration.
+
+ 4. Stop all PMM Server services:
+
+ ```sh
+ kubectl exec pmm-0 -- supervisorctl stop all
+ ```
+
+ 5. Transfer `/srv` directory ownership:
+
+ ```sh
+ kubectl exec pmm-0 -- chown -R pmm:pmm /srv
+ ```
+
+ 6. Upgrade PMM using Helm:
+
+ ```sh
+ helm upgrade pmm -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
+ ```
+
+ 7. If Kubernetes did not trigger the upgrade automatically, delete the pod to force recreation:
+ ```sh
+ kubectl delete pod pmm-0
+ ```
+
+ === "Podman"
+        Follow these steps to migrate a PMM 2 Server deployed with Podman to PMM 3:
+ {.power-number}
+
+ 1. Pull the PMM 3 Server image:
+
+ ```sh
+ podman pull percona/pmm-server:3
+ ```
+
+ 2. Stop all PMM Server services:
+
+ ```sh
+ podman exec pmm-server supervisorctl stop all
+ ```
+
+ 3. Transfer `/srv` directory ownership:
+ ```sh
+ podman exec pmm-server chown -R pmm:pmm /srv
+ ```
+
+ 4. Remove the existing systemd service file:
+
+ ```sh
+ rm ~/.config/systemd/user/pmm-server.service
+ ```
+
+ 5. Follow the installation steps from the [PMM 3 Podman installation guide](../install-pmm/install-pmm-server/deployment-options/podman/index.md) to complete the upgrade.
+
+ === "AMI/OVF instance"
+ Follow these steps to migrate a PMM 2 Server deployed as an AMI/OVF instance to PMM 3:
+ {.power-number}
+
+        1. Back up your current instance and keep your PMM 2 instance running until you confirm a successful migration.
+
+ 2. Deploy a new PMM 3 AMI/OVF instance.
+
+        3. On the new instance, stop the PMM Server service:
+
+ ```sh
+ systemctl --user stop pmm-server
+ ```
+
+ 4. Clear the service volume directory:
+
+ ```sh
+ rm -rf /home/admin/volume/srv/*
+ ```
+
+ 5. On the old instance, stop all services:
+
+ ```sh
+ sudo supervisorctl stop all
+ ```
+
+ 6. Transfer data from old to new instance:
+
+ ```sh
+ sudo scp -r /srv/* admin@newhost:/home/admin/volume/srv
+ ```
+
+ 7. Set proper permissions on the new instance:
+
+ ```sh
+ chown -R admin:admin /home/admin/volume/srv/
+ ```
+
+ 8. Start the PMM service on the new instance:
+
+ ```sh
+ systemctl --user start pmm-server
+ ```
+
+ 9. Verify that PMM 3 is working correctly with the migrated data.
+
+ 10. Update PMM Client configurations by editing the `/usr/local/percona/pmm2/config/pmm-agent.yml` with the new server address, then restart the PMM Client.
+
+ !!! note alert alert-primary "Revert AMI/OVF instance to PMM 2"
+ If you need to restore to the PMM 2 instance after the migration:
+ {.power-number}
+
+ 1. Access old instance via SSH.
+ 2. Start services: `supervisorctl start all`.
+ 3. Update client configurations to point to old instance.
## Step 3: Migrate PMM 2 Clients to PMM 3
!!! caution alert alert-warning "Important"
PMM 3 Server provides limited support for PMM 2 Clients (metrics and Query Analytics only). This support will be removed in PMM 3.3.
-Depending on your initial installation method, update PMM Clients using your operating system's package manager or by updating from a tarball.
+Depending on your initial installation method, update PMM Clients using your operating system's package manager or a tarball.
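+
+For example, on a Debian-based system with the Percona repository configured, the package-manager path uses the same commands shown in the linked topic:
+
+```sh
+percona-release enable pmm3-client
+apt update
+apt install pmm-client
+```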
For detailed instructions, see the [Upgrade PMM Client topic](../pmm-upgrade/upgrade_client.md).
## Step 4: Migrate your API keys to service accounts
@@ -157,4 +316,97 @@ After you finish migrating PMM:
1. Verify that all PMM Clients are up to date by checking **PMM Configuration > Updates**.
2. Confirm all previously monitored services are reporting correctly to the new PMM 3 Server by reviewing **Configuration > PMM Inventory > Services**.
-3. Check the dashboards to make sure you're receiving the metrics information and QAN data.
+3. Check the dashboards to make sure you're receiving the metrics and QAN data.
+
+
+### Variables for migrating from PMM v2 to PMM v3
+
+When migrating from PMM v2 to PMM v3, you'll need to update your environment variables to match the new naming convention. This is because PMM v3 introduces several important changes to improve consistency and clarity:
+
+- Environment variables now use the `PMM_` prefix.
+- Some boolean flags are reversed (e.g., `DISABLE_` → `ENABLE_`).
+- Deprecated variables have been removed.
+
+### Examples
+
+```bash
+# PMM v2
+-e DISABLE_UPDATES=true -e DATA_RETENTION=720h
+
+# PMM v3 equivalent
+-e PMM_ENABLE_UPDATES=false -e PMM_DATA_RETENTION=720h
+```
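+
+To see which of these variables your existing deployment actually sets before translating them (a quick check, assuming your PMM 2 container is named `pmm-server`):
+
+```sh
+# List the environment variables currently set on the running container
+docker exec pmm-server env | grep -E '^(DISABLE_|ENABLE_|DATA_RETENTION|METRICS_|PERCONA_)'
+```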
+
+#### Migration reference table
+
+The following table lists all the environment variable changes between PMM v2 and PMM v3. Make sure to review this table when updating your deployment configurations.
+
+??? note "Click to expand migration reference table"
+
+ #### Configuration variables
+ | PMM 2 | PMM 3 | Comments |
+ |---------------------------------|------------------------------------|------------------------------|
+ | `DATA_RETENTION` | `PMM_DATA_RETENTION` | |
+ | `DISABLE_ALERTING` | `PMM_ENABLE_ALERTING` | |
+ | `DISABLE_UPDATES` | `PMM_ENABLE_UPDATES` | |
+ | `DISABLE_TELEMETRY` | `PMM_ENABLE_TELEMETRY` | |
+    | `DISABLE_BACKUP_MANAGEMENT`     | `PMM_ENABLE_BACKUP_MANAGEMENT`     | Note the reversed boolean    |
+ | `ENABLE_AZUREDISCOVER` | `PMM_ENABLE_AZURE_DISCOVER` | |
+ | `ENABLE_RBAC` | `PMM_ENABLE_ACCESS_CONTROL` | |
+ | `LESS_LOG_NOISE` | | Removed in PMM v3 |
+
+ #### Metrics configuration
+ | PMM 2 | PMM 3 |
+ |---------------------------------|------------------------------------|
+ | `METRICS_RESOLUTION` | `PMM_METRICS_RESOLUTION` |
+ | `METRICS_RESOLUTION_HR` | `PMM_METRICS_RESOLUTION_HR` |
+ | `METRICS_RESOLUTION_LR` | `PMM_METRICS_RESOLUTION_LR` |
+ | `METRICS_RESOLUTION_MR` | `PMM_METRICS_RESOLUTION_MR` |
+
+
+ #### ClickHouse configuration
+ | PMM 2 | PMM 3 | Comments |
+ |-------------------------------------|------------------------------------|--------------------------|
+ | `PERCONA_TEST_PMM_CLICKHOUSE_ADDR` | `PMM_CLICKHOUSE_ADDR` | |
+ | `PERCONA_TEST_PMM_CLICKHOUSE_DATABASE` | `PMM_CLICKHOUSE_DATABASE` | |
+ | `PERCONA_TEST_PMM_CLICKHOUSE_DATASOURCE` | `PMM_CLICKHOUSE_DATASOURCE` | |
+ | `PERCONA_TEST_PMM_CLICKHOUSE_HOST` | `PMM_CLICKHOUSE_HOST` | |
+ | `PERCONA_TEST_PMM_CLICKHOUSE_PORT` | `PMM_CLICKHOUSE_PORT` | |
+ | `PERCONA_TEST_PMM_DISABLE_BUILTIN_CLICKHOUSE` | `PMM_DISABLE_BUILTIN_CLICKHOUSE` | |
+ | `PERCONA_TEST_PMM_CLICKHOUSE_BLOCK_SIZE` | | Removed in PMM v3, new version|
+ | `PERCONA_TEST_PMM_CLICKHOUSE_POOL_SIZE` | | Removed in PMM v3, new version|
+
+ #### PostgreSQL configuration
+ | PMM 2 | PMM 3 |
+ |-------------------------------------|------------------------------------|
+ | `PERCONA_TEST_POSTGRES_ADDR` | `PMM_POSTGRES_ADDR` |
+ | `PERCONA_TEST_POSTGRES_DBNAME` | `PMM_POSTGRES_DBNAME` |
+ | `PERCONA_TEST_POSTGRES_USERNAME` | `PMM_POSTGRES_USERNAME` |
+ | `PERCONA_TEST_POSTGRES_DBPASSWORD` | `PMM_POSTGRES_DBPASSWORD` |
+ | `PERCONA_TEST_POSTGRES_SSL_CA_PATH` | `PMM_POSTGRES_SSL_CA_PATH` |
+ | `PERCONA_TEST_POSTGRES_SSL_CERT_PATH` | `PMM_POSTGRES_SSL_CERT_PATH` |
+ | `PERCONA_TEST_POSTGRES_SSL_KEY_PATH` | `PMM_POSTGRES_SSL_KEY_PATH` |
+ | `PERCONA_TEST_POSTGRES_SSL_MODE` | `PMM_POSTGRES_SSL_MODE` |
+ | `PERCONA_TEST_PMM_DISABLE_BUILTIN_POSTGRES` | `PMM_DISABLE_BUILTIN_POSTGRES` |
+
+ #### Telemetry & development
+ | PMM 2 | PMM 3 |
+ |-------------------------------------|------------------------------------|
+ | `PMM_TEST_TELEMETRY_DISABLE_SEND` | `PMM_DEV_TELEMETRY_DISABLE_SEND` |
+ | `PERCONA_TEST_TELEMETRY_DISABLE_START_DELAY` | `PMM_DEV_TELEMETRY_DISABLE_START_DELAY` |
+ | `PMM_TEST_TELEMETRY_FILE` | `PMM_DEV_TELEMETRY_FILE` |
+ | `PERCONA_TEST_TELEMETRY_HOST` | `PMM_DEV_TELEMETRY_HOST` |
+ | `PERCONA_TEST_TELEMETRY_INTERVAL` | `PMM_DEV_TELEMETRY_INTERVAL` |
+ | `PERCONA_TEST_TELEMETRY_RETRY_BACKOFF` | `PMM_DEV_TELEMETRY_RETRY_BACKOFF` |
+ | `PERCONA_TEST_VERSION_SERVICE_URL` | `PMM_DEV_VERSION_SERVICE_URL` |
+ | `PERCONA_TEST_STARLARK_ALLOW_RECURSION` | `PMM_DEV_ADVISOR_STARLARK_ALLOW_RECURSION` |
+
+ #### Removed variables
+ | PMM 2 | PMM 3 | Comments |
+ |-------------------------------------|------------------------------------|------------------------------|
+ | `PERCONA_TEST_AUTH_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
+ | `PERCONA_TEST_CHECKS_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
+ | `PERCONA_TEST_CHECKS_INTERVAL` | | Removed, not used |
+ | `PERCONA_TEST_CHECKS_PUBLIC_KEY` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_PUBLIC_KEY` |
+ | `PERCONA_TEST_NICER_API` | | Removed in PMM v3 |
+ | `PERCONA_TEST_SAAS_HOST` | | Removed, use `PMM_DEV_PERCONA_PLATFORM_ADDRESS` |
\ No newline at end of file
diff --git a/documentation/docs/pmm-upgrade/ui_upgrade.md b/documentation/docs/pmm-upgrade/ui_upgrade.md
index c8ab54a3f9..bf40b445d2 100644
--- a/documentation/docs/pmm-upgrade/ui_upgrade.md
+++ b/documentation/docs/pmm-upgrade/ui_upgrade.md
@@ -4,6 +4,12 @@ PMM Server and Client components are installed and updated separately.
PMM v3 Server can run natively, as a Docker image, a virtual appliance, or an AWS cloud instance. While each environment has its own specific installation and update steps, the UI-based upgrade method is universal and recommended for most users.
+## Prerequisites
+
+To use the UI upgrade feature, you must have Watchtower installed and properly configured with your PMM Server.
+
+If Watchtower is not installed, the UI upgrade options will not be available. See [Running PMM Server with Watchtower](../install-pmm/install-pmm-server/deployment-options/docker/index.md) for setup instructions.
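+
+If you run PMM Server with plain `docker run`, a minimal Watchtower setup looks roughly like the sketch below (assumptions: the upstream `containrrr/watchtower` image, a user-defined network named `pmm-net`, and the `PMM_WATCHTOWER_HOST`/`PMM_WATCHTOWER_TOKEN` variables; check the linked guide for the exact image and variables recommended for your PMM version):
+
+```sh
+# Shared network so PMM Server can reach Watchtower by container name
+docker network create pmm-net
+
+# Watchtower with its HTTP API enabled and protected by a token
+docker run -d --name watchtower --network pmm-net \
+  -e WATCHTOWER_HTTP_API_UPDATE=1 \
+  -e WATCHTOWER_HTTP_API_TOKEN=<your-token> \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  containrrr/watchtower
+
+# PMM Server pointed at the Watchtower API with the same token
+docker run -d --name pmm-server --network pmm-net --publish 443:8443 \
+  -e PMM_WATCHTOWER_HOST=http://watchtower:8080 \
+  -e PMM_WATCHTOWER_TOKEN=<your-token> \
+  percona/pmm-server:3
+```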
+
## Upgrade process
The preferred and simplest way to update PMM v3 Server is via the **Updates** page:
diff --git a/documentation/docs/pmm-upgrade/upgrade_aws.md b/documentation/docs/pmm-upgrade/upgrade_aws.md
index 62d2bf81bd..bc2ede2b67 100644
--- a/documentation/docs/pmm-upgrade/upgrade_aws.md
+++ b/documentation/docs/pmm-upgrade/upgrade_aws.md
@@ -7,7 +7,7 @@ To assign a public IP address for an Amazon EC2 instance, follow these steps:
1. Allocate Elastic IP address
- 
+ 
2. Associate Elastic IP address with a Network interface ID of your EC2 instance
diff --git a/documentation/docs/pmm-upgrade/upgrade_client.md b/documentation/docs/pmm-upgrade/upgrade_client.md
index 61c80f7d5e..3a7ddb59f0 100644
--- a/documentation/docs/pmm-upgrade/upgrade_client.md
+++ b/documentation/docs/pmm-upgrade/upgrade_client.md
@@ -21,14 +21,14 @@ For example, to install the latest version of the PMM Client on Red Hat or its d
=== "Debian-based"
```sh
- percona-release enable pmm-client
+ percona-release enable pmm3-client
apt update
apt install pmm-client
```
=== "Red Hat-based"
```sh
- percona-release enable pmm-client
+ percona-release enable pmm3-client
yum update pmm-client
```
diff --git a/documentation/docs/pmm-upgrade/upgrade_docker.md b/documentation/docs/pmm-upgrade/upgrade_docker.md
index 5e76d457f1..b22d354b2f 100644
--- a/documentation/docs/pmm-upgrade/upgrade_docker.md
+++ b/documentation/docs/pmm-upgrade/upgrade_docker.md
@@ -1,16 +1,16 @@
-# Upgrade PMM Server using Docker
+# Manual upgrade: Upgrade PMM Server using Docker
## Before you begin
Before starting the upgrade, complete these preparation steps to ensure you can recover your system if needed and confirm compatibility with the new version:
{.power-number}
-1. Create a backup before upgrading, as downgrades are not possible. Therefore, reverting to a previous version requires an backup made prior to the upgrade.
+1. [Create a backup](../install-pmm/install-pmm-server/deployment-options/docker/backup_container.md) before upgrading, as downgrades are not possible and reverting to a previous version requires a backup made prior to the upgrade.
2. Verify your current PMM version: Check your current PMM version by navigating to **PMM Configuration > Updates** or by running the following command:
```sh
- docker exec -it pmm-server curl -ku admin:admin https://localhost:8443/v1/version
+ docker exec -it pmm-server curl -ku admin:admin https://localhost:8443/v1/version
```
## Upgrade steps
@@ -21,33 +21,32 @@ Follow these steps to upgrade your PMM Server while preserving your monitoring d
1. Stop the current container:
```sh
- docker stop pmm-server
+ docker stop pmm-server
```
-
-2. [Back up your data](../install-pmm/install-pmm-server/baremetal/docker/backup_container.md).
-
-3. Pull the latest image:
+
+2. Pull the latest image:
```sh
- docker pull perconalab/pmm-server:3.0.0-beta
+ docker pull percona/pmm-server:3
```
-4. Rename the original container:
+3. Rename the original container:
```sh
- docker rename pmm-server pmm-server-old
+ docker rename pmm-server pmm-server-old
```
-5. Run the new container:
+4. Run the new container:
```sh
- docker run \
- --detach \
- --restart always \
- --publish 443:8443 \
- --volumes-from pmm-data \
- --name pmm-server \
- perconalab/pmm-server:3.0.0-beta
- ```
-
-6. After upgrading, verify that PMM Server is running correctly and all your data is accessible.
+ docker run \
+ --detach \
+ --restart always \
+ --publish 443:8443 \
+ --volumes-from pmm-data \
+ --name pmm-server \
+ percona/pmm-server:3
+ ```
+
+5. After upgrading, verify that PMM Server is running correctly and all your data is accessible. You can always [restore your PMM Server](../install-pmm/install-pmm-server/deployment-options/docker/restore_container.md) using the backup you created above.
+
diff --git a/documentation/docs/pmm-upgrade/upgrade_helm.md b/documentation/docs/pmm-upgrade/upgrade_helm.md
index d034be6231..42e7efe439 100644
--- a/documentation/docs/pmm-upgrade/upgrade_helm.md
+++ b/documentation/docs/pmm-upgrade/upgrade_helm.md
@@ -10,7 +10,7 @@ Percona releases new chart versions to update containers when:
The UI update feature is disabled by default and should remain so. Do not modify or add the following parameter in your custom `values.yaml` file:
```yaml
pmmEnv:
- DISABLE_UPDATES: "1"
+ PMM_ENABLE_UPDATES: 'false'
```
## Before you begin
@@ -18,13 +18,13 @@ Percona releases new chart versions to update containers when:
Before starting the upgrade, complete these preparation steps to ensure you can recover your system if needed and confirm compatibility with the new version:
{.power-number}
-1. Create a backup before upgrading, as downgrades are not possible. Therefore, reverting to a previous version requires a backup made prior to the upgrade.
+1. [Create a backup](../install-pmm/install-pmm-server/deployment-options/helm/backup_container_helm.md) before upgrading, as downgrades are not possible. Therefore, reverting to a previous version requires a backup made prior to the upgrade.
2. To reduce downtime, pre-pull the new image on the node where PMM is running:
```sh
# Replace with the latest PMM version
- docker pull perconalab/pmm-server:3.0.0-beta
+ docker pull percona/pmm-server:3
```
## Upgrade steps
@@ -41,15 +41,15 @@ Follow these steps to upgrade your PMM Server while preserving your monitoring d
2. Upgrade PMM:
```sh
- helm upgrade pmm -f values.yaml percona/pmm
+ helm upgrade pmm -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
```
-3. After the upgrade, verify that PMM Server is running correctly:
+
+3. After the upgrade, verify that PMM Server is running correctly and all your data is accessible:
```sh
kubectl get pods | grep pmm-server
```
-
4. Check the logs for any errors:
```sh
diff --git a/documentation/docs/pmm-upgrade/upgrade_podman.md b/documentation/docs/pmm-upgrade/upgrade_podman.md
index c99ad89d75..90c0b71b54 100644
--- a/documentation/docs/pmm-upgrade/upgrade_podman.md
+++ b/documentation/docs/pmm-upgrade/upgrade_podman.md
@@ -1,11 +1,11 @@
-# Upgrade PMM Server using Podman
+# Manual upgrade: Upgrade PMM Server using Podman
## Before you begin
Before starting the upgrade, complete these preparation steps to ensure you can recover your system if needed and confirm compatibility with the new version:
{.power-number}
-1. Create a backup before upgrading, as downgrades are not possible. Therefore, reverting to a previous version requires an backup made prior to the upgrade.
+1. [Create a backup](../install-pmm/install-pmm-server/deployment-options/podman/backup_container_podman.md) before upgrading, as downgrades are not possible and reverting to a previous version requires a backup made prior to the upgrade.
2. Verify your current PMM version: Check your current PMM version by navigating to **PMM Configuration > Updates** or by running the following command:
@@ -16,38 +16,37 @@ Before starting the upgrade, complete these preparation steps to ensure you can
## Upgrade steps
-Follow these steps to upgrade your PMM Server while preserving your monitoring data and settings—you can restore from your backup if needed.
+Follow these steps to upgrade your PMM Server while preserving your monitoring data and settings. In case of any issues, you can restore your system using the backup created in the preparation steps.
{.power-number}
-1. [Back up your data](../install-pmm/install-pmm-server/baremetal/podman/backup_container_podman.md).
-2. Update PMM tag by editing `~/.config/systemd/user/pmm-server.env` file and running the following command to set the latest release version:
+1. Update PMM version by editing the PMM Server environment file. Replace 3.0.0 with your target version number:
```sh
     sed -i "s|PMM_IMAGE=.*|PMM_IMAGE=docker.io/percona/pmm-server:3.0.0|g" ~/.config/systemd/user/pmm-server.env
```
-3. Pre-pull the new image to ensure a faster restart:
+2. Pre-pull the new image to ensure a faster restart:
```sh
source ~/.config/systemd/user/pmm-server.env
- podman pull ${PMM_IMAGE}:${PMM_TAG}
+ podman pull ${PMM_IMAGE}
```
-4. Restart PMM Server:
+3. Restart PMM Server:
```sh
systemctl --user restart pmm-server
```
-5. After the upgrade, verify that PMM Server is running correctly:
+4. After the upgrade, verify that PMM Server is running correctly:
```sh
podman ps | grep pmm-server
```
-
-6. Check the logs for any errors:
+
+5. Check the logs for any errors:
```sh
podman logs pmm-server
- ```
+ ```
\ No newline at end of file
diff --git a/documentation/docs/quickstart.md b/documentation/docs/quickstart.md
index ce662e7793..d201743b7e 100644
--- a/documentation/docs/quickstart.md
+++ b/documentation/docs/quickstart.md
@@ -7,11 +7,11 @@ This is the simplest and most efficient way to install PMM.
??? info "Alternative installation options"
For alternative setups, explore the additional installation options detailed in the **Setting up** chapter:
- - [Deploy on Podman](install-pmm/install-pmm-server/baremetal/podman/index.md)
- - [Deploy based on a Docker image](install-pmm/install-pmm-server/baremetal/docker/index.md)
- - [Deploy on Virtual Appliance](install-pmm/install-pmm-server/baremetal/virtual/index.md)
- - [Deploy on Kubernetes via Helm](install-pmm/install-pmm-server/baremetal/helm/index.md)
- - [Run a PMM instance hosted at AWS Marketplace](install-pmm/install-pmm-server/aws/aws.md)
+ - [Deploy on Podman](install-pmm/install-pmm-server/deployment-options/podman/index.md)
+ - [Deploy based on a Docker image](install-pmm/install-pmm-server/deployment-options/docker/index.md)
+ - [Deploy on Virtual Appliance](install-pmm/install-pmm-server/deployment-options/virtual/index.md)
+ - [Deploy on Kubernetes via Helm](install-pmm/install-pmm-server/deployment-options/helm/index.md)
+ - [Run a PMM instance hosted at AWS Marketplace](install-pmm/install-pmm-server/deployment-options/aws/aws.md)
#### Prerequisites
@@ -142,7 +142,7 @@ Once PMM is set up, choose the database or the application that you want it to m
3. Set up the `pg_stat_monitor` database extension and configure your database server accordingly.
- If you need to use the `pg_stat_statements` extension instead, see [Adding a PostgreSQL database](../setting-up/client/postgresql.md) and the [`pg_stat_monitor` online documentation](https://docs.percona.com/pg-stat-monitor/configuration.html) for details about available parameters.
+ If you need to use the `pg_stat_statements` extension instead, see [Adding a PostgreSQL database](../install-pmm/install-pmm-client/connect-database/postgresql.md) and the [`pg_stat_monitor` online documentation](https://docs.percona.com/pg-stat-monitor/configuration.html) for details about available parameters.
4. Set or change the value for `shared_preload_library` in your `postgresql.conf` file:
@@ -211,7 +211,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add postgresql --username=pmm --password=
```
- For detailed instructions and advanced installation options, see [Adding a PostgreSQL database](../setting-up/client/postgresql.md).
+ For detailed instructions and advanced installation options, see [Adding a PostgreSQL database](../install-pmm/install-pmm-client/connect-database/postgresql.md).
=== ":simple-mongodb: MongoDB"
@@ -315,7 +315,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add mongodb --username=pmm --password=
```
- For detailed instructions, see [Adding a MongoDB database for monitoring](https://docs.percona.com/percona-monitoring-and-management/setting-up/client/mongodb.html).
+ For detailed instructions, see [Adding a MongoDB database for monitoring](install-pmm/install-pmm-client/connect-database/mongodb.html).
=== ":simple-nginxproxymanager: ProxySQL"
To connect a ProxySQL service:
@@ -372,7 +372,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add proxysql --username=pmm --password=
```
- For detailed instructions, see [Enable ProxySQL performance metrics monitoring](../setting-up/client/proxysql.md).
+ For detailed instructions, see [Enable ProxySQL performance metrics monitoring](../install-pmm/install-pmm-client/connect-database/proxysql.md).
=== ":material-database: HAProxy"
To connect an HAProxy service:
diff --git a/documentation/docs/quickstart/index.md b/documentation/docs/quickstart/index.md
index 3727e91ee4..e5a62fed2f 100644
--- a/documentation/docs/quickstart/index.md
+++ b/documentation/docs/quickstart/index.md
@@ -2,16 +2,16 @@
To get up and running with Percona Monitoring and Management (PMM) in no time, install PMM on Bare Metal/Virtual using the Easy-install script for Docker.
-This is the simplest and most efficient way to install PMM.
+This is the simplest and most efficient way to install PMM with Docker.
??? info "Alternative installation options"
- For alternative setups, explore the additional installation options detailed in the **Setting up** chapter:
+ For alternative setups or if you're not using Docker, explore the additional installation options detailed in the **Setting up** chapter:
- - [Deploy on Podman](../setting-up/server/podman.md)
- - [Deploy based on a Docker image](../setting-up/server/docker.md)
- - [Deploy on Virtual Appliance](../setting-up/server/virtual-appliance.md)
- - [Deploy on Kubernetes via Helm](../setting-up/server/helm.md)
- - [Run a PMM instance hosted at AWS Marketplace](../setting-up/server/aws.md)
+ - [Deploy on Podman](../install-pmm/install-pmm-server/deployment-options/podman/index.md)
+ - [Deploy based on a Docker image](../install-pmm/install-pmm-server/deployment-options/docker/index.md)
+ - [Deploy on Virtual Appliance](../install-pmm/install-pmm-server/deployment-options/virtual/index.md)
+ - [Deploy on Kubernetes via Helm](../install-pmm/install-pmm-server/deployment-options/helm/index.md)
+ - [Run a PMM instance hosted at AWS Marketplace](../install-pmm/install-pmm-server/deployment-options/aws/aws.md)
#### Prerequisites
@@ -126,11 +126,11 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add mysql --query-source=perfschema --username=pmm --password=
```
??? info "Alternative database connection workflows"
- While the default instructions above focus on connecting a self-hosted MySQL database, PMM offers the flexibility to connect to various MySQL databases, including [AWS RDS](../setting-up/client/aws.md), [Azure MySQL](../setting-up/client/azure.md) or [Google Cloud MySQL](../setting-up/client/google.md).
+ While the default instructions above focus on connecting a self-hosted MySQL database, PMM offers the flexibility to connect to various MySQL databases, including [AWS RDS](../install-pmm/install-pmm-client/connect-database/aws.md), [Azure MySQL](../install-pmm/install-pmm-client/connect-database/azure.md) or [Google Cloud MySQL](../install-pmm/install-pmm-client/connect-database/google.md).
- The PMM Client installation also comes with options: in addition to the installation via Package Manager described above, you can also install it as a Docker container or as a binary package. Explore [alternative PMM Client installation options](../setting-up/client/index.html#binary-package) for more information.
+ The PMM Client installation also comes with options: in addition to the installation via Package Manager described above, you can also install it as a Docker container or as a binary package. Explore [alternative PMM Client installation options](../install-pmm/install-pmm-client/connect-database/index.html#binary-package) for more information.
- Additionally, if direct access to the database node isn't available, opt to [Add remote instance via User Interface](../setting-up/client/mysql.html#with-the-user-interface) instead.
+ Additionally, if direct access to the database node isn't available, opt to [Add remote instance via User Interface](../install-pmm/install-pmm-client/connect-database/mysql.html#with-the-user-interface) instead.
=== ":simple-postgresql: PostgreSQL"
@@ -152,7 +152,7 @@ Once PMM is set up, choose the database or the application that you want it to m
3. Set up the `pg_stat_monitor` database extension and configure your database server accordingly.
- If you need to use the `pg_stat_statements` extension instead, see [Adding a PostgreSQL database](../setting-up/client/postgresql.md) and the [`pg_stat_monitor` online documentation](https://docs.percona.com/pg-stat-monitor/configuration.html) for details about available parameters.
+ If you need to use the `pg_stat_statements` extension instead, see [Adding a PostgreSQL database](../install-pmm/install-pmm-client/connect-database/postgresql.md) and the [`pg_stat_monitor` online documentation](https://docs.percona.com/pg-stat-monitor/configuration.html) for details about available parameters.
4. Set or change the value for `shared_preload_library` in your `postgresql.conf` file:
@@ -231,7 +231,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add postgresql --username=pmm --password=
```
- For detailed instructions and advanced installation options, see [Adding a PostgreSQL database](../setting-up/client/postgresql.md).
+ For detailed instructions and advanced installation options, see [Adding a PostgreSQL database](../install-pmm/install-pmm-client/connect-database/postgresql.md).
=== ":simple-mongodb: MongoDB"
@@ -345,7 +345,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add mongodb --username=pmm --password=
```
- For detailed instructions, see [Adding a MongoDB database for monitoring](https://docs.percona.com/percona-monitoring-and-management/setting-up/client/mongodb.html).
+ For detailed instructions, see [Adding a MongoDB database for monitoring](../install-pmm/install-pmm-client/connect-database/mongodb.html).
=== ":simple-nginxproxymanager: ProxySQL"
To connect a ProxySQL service:
@@ -412,7 +412,7 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add proxysql --username=pmm --password=
```
- For detailed instructions, see [Enable ProxySQL performance metrics monitoring](../setting-up/client/proxysql.md).
+ For detailed instructions, see [Enable ProxySQL performance metrics monitoring](../install-pmm/install-pmm-client/connect-database/proxysql.md).
=== ":material-database: HAProxy"
To connect an HAProxy service:
@@ -478,17 +478,17 @@ Once PMM is set up, choose the database or the application that you want it to m
pmm-admin add haproxy --listen-port=8404
```
- For detailed instructions and more information on the command arguments, see the [HAProxy topic](../setting-up/client/haproxy.md).
+ For detailed instructions and more information on the command arguments, see the [HAProxy topic](../install-pmm/install-pmm-client/connect-database/haproxy.md).
## Check database monitoring results
After installing PMM and connecting the database, go to the database's Instance Summary dashboard. This shows essential information about your database performance and an overview of your environment.
-For more information, see [PMM Dashboards](../details//dashboards/index.md).
+For more information, see [PMM Dashboards](../use/dashboards-panels/index.md).
## Next steps
-- [Configure PMM via the interface](../how-to/configure.md)
-- [Manage users in PMM](../how-to/manage-users.md)
-- [Set up roles and permissions](../get-started/roles-and-permissions/index.md)
-- [Back up and restore data in PMM](../get-started/backup/index.md)
\ No newline at end of file
+- [Configure PMM via the interface](../configure-pmm/configure.md)
+- [Manage users in PMM](../admin/manage-users/index.md)
+- [Set up roles and permissions](../admin/roles/index.md)
+- [Back up and restore data in PMM](../backup/index.md)
\ No newline at end of file
diff --git a/documentation/docs/reference/dashboards/dashboard-mongodb-cluster-summary.md b/documentation/docs/reference/dashboards/dashboard-mongodb-cluster-summary.md
index 39ea20cd5b..febe581225 100644
--- a/documentation/docs/reference/dashboards/dashboard-mongodb-cluster-summary.md
+++ b/documentation/docs/reference/dashboards/dashboard-mongodb-cluster-summary.md
@@ -1,35 +1,61 @@
-# MongoDB Cluster Summary
+# MongoDB Sharded Cluster Summary

-## Current Connections Per Shard
+## Overview
-TCP connections (Incoming) in mongod processes.
+Displays essential data for individual nodes, such as their role, CPU usage, memory consumption, disk space, network traffic, uptime, and the current MongoDB version.
-## Total Connections
+## Node States
+Shows the state timeline of MongoDB replica set members during the selected time range. Each node's state (PRIMARY, SECONDARY, ARBITER, etc.) is color-coded for easy monitoring, with green indicating healthy states and red showing potential issues.
-Incoming connections to mongos nodes.
+Use this to track role changes and identify stability problems across your replica set.
-## Cursors Per Shard
+## Collection Details
-The Cursor is a MongoDB Collection of the document which is returned upon the find method execution.
+### Size of Collections in Shards
+Visualizes the storage size distribution across MongoDB collections in different shards, excluding system databases. Use this metric to monitor space utilization across collections and plan capacity based on storage growth patterns in your MongoDB cluster.
-## Mongos Cursors
+### Number of Collections in Shards
+Displays the total number of collections per database across different shards in your MongoDB cluster, excluding system databases.
-The Cursor is a MongoDB Collection of the document which is returned upon the find method execution.
+Use this to track collection growth and identify databases that may need optimization based on their collection count.
-## Operations Per Shard
+## Connections
-Ops/sec, classified by legacy wire protocol type (`query`, `insert`, `update`, `delete`, `getmore`).
+### Current Connections Per Shard
+Displays the current number of incoming TCP connections for each MongoDB shard, showing trends over time with mean, maximum, and minimum values.
-## Total Mongos Operations
+Use this to monitor connection patterns and ensure your MongoDB cluster maintains healthy connection levels across all shards.
-Ops/sec, classified by legacy wire protocol type (`query`, `insert`, `update`, `delete`, `getmore`).
+### Available Connections
+Tracks the number of available MongoDB connections across your replica sets over time, with statistical breakdowns.
-## Change Log Events
+Use this metric to monitor connection capacity and ensure your MongoDB cluster maintains sufficient connection availability for client requests.
-Count, over last 10 minutes, of all types of configuration db changelog events.
+## Chunks in Shards
-## Oplog Range by Set
+### Amount of Chunks in Shards
+Displays the number of chunks distributed across each shard in your MongoDB cluster, excluding system databases. Use this to monitor data distribution and identify potential balancing needs across your sharded cluster.
-Timespan 'window' between oldest and newest ops in the Oplog collection.
+### Dynamic of Chunks
+Shows the rate of change in chunk distribution across MongoDB shards over time, with statistical breakdowns for each shard. Use this to monitor chunk migration patterns and ensure proper data balancing across your sharded cluster.
+
+### Chunks Move Events
+Displays the frequency of chunk movement operations between shards in your MongoDB cluster over time. Use this metric to track balancing activity and identify periods of high chunk migration that might impact cluster performance.
+
+### Chunks Split Events
+Shows the rate at which chunks are being split across your MongoDB sharded cluster due to size growth. Use this metric to identify when collections grow rapidly and determine if you need to rebalance or optimize shard keys.
+
+## Replication
+
+### Replication Lag by Shard
+Tracks the maximum replication delay (in seconds) between primary and secondary nodes for each shard in your MongoDB cluster.
+
+Use this to monitor replication health and detect when secondaries fall too far behind their primary nodes.
+
+### Oplog Range by Shard
+Shows the time window between the oldest and newest operations in the MongoDB oplog for each shard. Use this to monitor oplog capacity and ensure there's enough history for replica set members to sync after maintenance or failures.
+
+### Oplog GB/Hour
+Shows the size of the MongoDB oplog generated by the Primary server. Use this to track oplog growth, plan storage needs, and detect high-write periods. Values are displayed in bytes with hourly intervals.
\ No newline at end of file
diff --git a/documentation/docs/reference/dashboards/dashboard-mongodb-replset-summary.md b/documentation/docs/reference/dashboards/dashboard-mongodb-replset-summary.md
index f95a9ec5b1..d073837e72 100644
--- a/documentation/docs/reference/dashboards/dashboard-mongodb-replset-summary.md
+++ b/documentation/docs/reference/dashboards/dashboard-mongodb-replset-summary.md
@@ -2,26 +2,86 @@

-## Replication Lag
+## Overview
+Displays essential data for individual nodes, such as their role, CPU usage, memory consumption, disk space, network traffic, uptime, and the current MongoDB version.
-MongoDB replication lag occurs when the secondary node cannot replicate data fast enough to keep up with the rate that data is being written to the primary node. It could be caused by something as simple as network latency, packet loss within your network, or a routing issue.
+## Node States
+Shows the state timeline of MongoDB replica set members during the selected time range. Each node's state (PRIMARY, SECONDARY, ARBITER, etc.) is color-coded for easy monitoring, with green indicating healthy states and red showing potential issues. Use this to track role changes and identify stability problems across your replica set.
-## Operations - by service name
+## Details
-Operations are classified by legacy wire protocol type (insert, update, and delete only).
+### Command Operations
+Shows the rate of MongoDB operations per second, including both regular and replicated operations (query, insert, update, delete, getmore), as well as document deletions by TTL indexes. Use this metric to monitor database activity patterns and identify potential performance bottlenecks.
-## Max Member Ping Time - by service name
+### Top Hottest Collections by Read
+Shows the five MongoDB collections with the highest read operations per second. Use this to identify your most frequently accessed collections and optimize their performance.
-This metric can show a correlation with the replication lag value.
+### Top Hottest Collections by Write
+Shows the five MongoDB collections with the highest write operations (inserts, updates, and deletes) per second. Use this to identify your most frequently modified collections and optimize their write performance.
-## Max Heartbeat Time
+### Query Efficiency
+Shows the ratio of documents or index entries scanned versus documents returned. A ratio of 1 indicates optimal query performance where each scanned document matches the query criteria.
-Time span between now and last heartbeat from replicaset members.
+Higher values suggest less efficient queries that scan many documents to find matches. Use this to identify queries that might need index optimization.
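+
+To investigate a specific inefficient query, you can compare scanned versus returned documents for it directly with `explain`. This is a minimal sketch, assuming `mongosh` access; the database, collection, and filter are placeholders:
+
+```sh
+# Compare documents examined vs. returned for a single query.
+# Check executionStats.totalDocsExamined, totalKeysExamined, and nReturned.
+mongosh "mongodb://<host>:27017/<db>" --eval \
+  'printjson(db.getCollection("<collection>").find({ status: "active" }).explain("executionStats").executionStats)'
+```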
-## Elections
+### Queued Operations
+Shows the number of operations waiting because the database is busy with other operations. Use this to identify when MongoDB operations are being delayed due to resource conflicts.
-Count of elections. Usually zero; 1 count by each healthy node will appear in each election. Happens when the primary role changes due to either normal maintenance or trouble events.
+### Reads & Writes
+Shows both active and queued read/write operations in your MongoDB deployment. Use this to monitor database activity and identify when operations are being delayed due to high load.
-## Oplog Recovery Window - by service name
+### Connections
+Shows the number of current and available MongoDB connections. Use this to monitor connection usage and ensure your deployment has sufficient capacity for new client connections.
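+
+These values correspond to what the server itself reports. As a quick sketch, you can read them straight from `serverStatus` in `mongosh` (the host is a placeholder):
+
+```sh
+# Current and available connection counts as reported by the server
+mongosh "mongodb://<host>:27017" --eval 'printjson(db.serverStatus().connections)'
+```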
-Timespan 'window' between newest and the oldest op in the Oplog collection.
+### Query Execution Times
+Shows the average latency in microseconds (µs) for read, write, and command operations. Use this metric to monitor query performance and identify slow operations that may need optimization.
+
+## Collection Details
+
+### Size of Collections
+Shows storage size of MongoDB collections across different databases. Use this to monitor database growth and plan storage capacity needs.
+
+### Number of Collections
+Shows the total number of collections in each MongoDB database. Use this to track database organization and growth patterns.
+
+## Replication
+
+### Replication Lag
+Shows how many seconds Secondary nodes are behind the Primary in replicating data. Higher values indicate potential issues with network latency or system resources. The red threshold line at 10 seconds helps identify when lag requires attention.
+
+### Oplog Recovery Window
+Shows the time range (in seconds) between the newest and oldest operations in the oplog. Use this to ensure sufficient history is maintained for recovery and secondary synchronization.
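+
+Both the replication lag and the oplog window shown above can be cross-checked from the shell. A minimal sketch, assuming `mongosh` access to a replica set member (the host is a placeholder):
+
+```sh
+# Per-member replication lag, as seen from this node
+mongosh "mongodb://<member-host>:27017" --eval 'rs.printSecondaryReplicationInfo()'
+
+# Oplog size and the time window between its first and last entries
+mongosh "mongodb://<member-host>:27017" --eval 'rs.printReplicationInfo()'
+```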
+
+### Oplog GB/Hour
+Shows the size of the MongoDB oplog generated by the Primary server. Use this to track oplog growth, plan storage needs, and detect high-write periods. Values are displayed in bytes with hourly intervals.
+
+## Performance
+
+### Flow Control
+Shows the frequency and duration (in microseconds) of MongoDB write throttling. Use this to understand when your deployment is slowing down writes to keep replication lag under control.
+
+### WiredTiger Concurrency Tickets Available
+Shows how many more read and write operations your MongoDB deployment can handle simultaneously. Use this to monitor database concurrency limits and potential bottlenecks.
+
+## Nodes Summary
+
+### Nodes Overview
+Shows key system metrics for each node: uptime, load average, memory usage, disk space, and more. Use this table to monitor the health and resource utilization of your infrastructure at a glance.
+
+## CPU Usage
+Shows CPU utilization as a percentage of total capacity, broken down by user and system activity. Use this to monitor CPU load and identify potential performance bottlenecks.
+
+## CPU Saturation
+
+### CPU Saturation and Max Core Usage
+Shows how heavily your CPU is loaded with waiting processes and maximum core utilization. Use this to identify when your system needs more CPU capacity or when processes are competing for CPU time.
+
+## Disk I/O and Swap Activity
+Shows disk I/O operations (reads/writes) and memory swap activity for each MongoDB node, measuring data flow between storage and RAM.
+
+Use this metric to monitor storage performance, detect memory pressure, and identify when MongoDB's working set may exceed available RAM.
+
+## Network Traffic
+Shows inbound and outbound network traffic for each MongoDB node, measuring data flow in bytes per second.
+
+Use this metric to monitor bandwidth usage, identify unusual traffic patterns, and detect potential network bottlenecks that could affect replication performance.
diff --git a/documentation/docs/reference/dashboards/dashboard-mongodb-router-summary.md b/documentation/docs/reference/dashboards/dashboard-mongodb-router-summary.md
new file mode 100644
index 0000000000..5533b2adc2
--- /dev/null
+++ b/documentation/docs/reference/dashboards/dashboard-mongodb-router-summary.md
@@ -0,0 +1,62 @@
+# MongoDB Router Summary
+
+This dashboard monitors MongoS router nodes in sharded MongoDB clusters.
+
+
+
+## Overview
+For each MongoS node in the cluster, this section shows the main monitoring metrics, such as CPU, memory, and disk usage, along with uptime and the MongoS version.
+
+### CPU Usage
+Shows CPU usage as a percentage from 0% to 100%. It updates every minute, turning from green to red when usage exceeds 80%. This helps quickly spot high CPU load, which could affect system performance, and monitor how hard the CPU is working at a glance.
+
+### Memory Used
+Displays the percentage of total system memory currently in use. It updates regularly, showing green up to 80% of usage and red beyond that threshold.
+
+Use this as a quick visual indicator of memory consumption and to confirm there is enough available memory to avoid swapping; it's an easy way to assess how close the system is to its memory limits.
+
+### Disk IO Utilization
+Shows how busy the disk is handling read/write requests. The meter turns red above 80%, warning of potential slowdowns. It updates regularly, giving administrators a quick way to check if the disk is keeping up with demand or if it's becoming a bottleneck in system performance.
+
+### Disk Space Utilization
+Shows how much of the total disk space is currently in use. The meter turns red when usage exceeds 80%, warning of low free space. It updates regularly, giving you a quick way to check if the disk is nearing capacity. This helps prevent "disk full" errors that could disrupt services or system operation.
+
+### Disk IOPS
+Shows how many read and write operations the disk performs each second. The blue color helps spot spikes in disk activity. These spikes could mean the disk is struggling to keep up, which might slow down the system. It's a quick way for you to check if the disk is working too hard.
+
+### Network Traffic
+Combines both incoming (received) and outgoing (transmitted) data, excluding local traffic. It gives you a quick view of overall network activity, helping spot unusual spikes or drops in data flow that might affect system performance.
+
+### Uptime
+Shows how long the system has been running without a restart. As uptime increases, the color changes from red to orange to green, giving a quick visual indicator of system stability. Red indicates very recent restarts (less than 5 minutes), orange shows short uptimes (5 minutes to 1 hour), and green represents longer uptimes (over 1 hour). This helps you easily spot recent system restarts or confirm continuous operation.
+
+### Version
+Displays the current version of MongoDB running on the system. This information is crucial for ensuring the system is running the intended version and for quickly identifying any nodes that might need updates.
+
+## Node States
+Shows the status of all MongoDB router (MongoS) nodes in the selected cluster over time. It uses a color-coded timeline: green bars mean a node is "UP" and working, while red bars show it's "DOWN" or unreachable. This simple view helps you quickly spot which nodes are active, see any recent status changes, and identify patterns in node availability.
+
+## Details
+This section includes additional information such as "Command Operations", "Connections", "Query Execution Times", and "Query Efficiency".
+
+### Command Operations
+Shows MongoDB command operations over time, displaying rates for inserts, updates, deletes, queries, and TTL deletions per second.
+
+Use this to monitor overall database workload, compare operation types, spot peak usage and unusual patterns, assess replication activity, and track automatic data cleanup.
+
+### Connections
+Displays MongoDB connection metrics over time, showing both current and available connections. Use this to monitor connection usage trends, identify periods of high demand, and ensure the database isn't reaching its connection limits.
+
+By comparing current to available connections, it's easy to spot potential bottlenecks or capacity issues before they impact performance.
+
+### Query Execution Times
+Shows the average execution times for MongoDB queries over time, categorized into read, write, and other command operations.
+
+Use this to identify slow queries, performance bottlenecks, and unusual spikes in execution times. Comparing latencies across operation types can also guide decisions on indexing strategies and query optimizations.
+
+### Query Efficiency
+Visualizes MongoDB query efficiency over time, displaying the ratio of scanned documents or index entries to returned documents, along with operation latencies.
+
+A ratio near 1 indicates highly efficient queries, while higher values (e.g., 100) suggest inefficiency.
+
+Compare document scans, index scans, and operation latencies to quickly identify poorly performing queries, and ensure that queries execute as efficiently as possible.
diff --git a/documentation/docs/reference/dashboards/dashboard-mysql-user-details.md b/documentation/docs/reference/dashboards/dashboard-mysql-user-details.md
index f22d1e992a..4a2f46b0e0 100644
--- a/documentation/docs/reference/dashboards/dashboard-mysql-user-details.md
+++ b/documentation/docs/reference/dashboards/dashboard-mysql-user-details.md
@@ -3,7 +3,7 @@

!!! note alert alert-primary ""
- This dashboard requires Percona Server for MySQL 5.1+ or MariaDB 10.1/10.2 with XtraDB. Also `userstat` should be enabled, for example with the `SET GLOBAL userstat=1` statement. See [Setting up MySQL](../../setting-up/client/mysql.md).
+ This dashboard requires Percona Server for MySQL 5.1+ or MariaDB 10.1/10.2 with XtraDB. Also `userstat` should be enabled, for example with the `SET GLOBAL userstat=1` statement. See [Setting up MySQL](../../install-pmm/install-pmm-client/connect-database/mysql.md).
Data is displayed for the 5 top users.
diff --git a/documentation/docs/reference/dashboards/kubernetes_monitor_db_clusters_managed.md b/documentation/docs/reference/dashboards/kubernetes_monitor_db_clusters_managed.md
index 457a05fc9d..6ebf6bfadd 100644
--- a/documentation/docs/reference/dashboards/kubernetes_monitor_db_clusters_managed.md
+++ b/documentation/docs/reference/dashboards/kubernetes_monitor_db_clusters_managed.md
@@ -2,7 +2,7 @@
!!! caution alert alert-warning "Important"
- This feature is still in [Technical Preview](https://docs.percona.com/percona-monitoring-and-management/details/glossary.html#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
+ This feature is still in [Technical Preview](../../reference/glossary.md#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
This dashboard displays the primary parameters of database clusters created by Percona Operators for various databases and helps identify the performance issues.
diff --git a/documentation/docs/reference/dashboards/kubernetes_monitor_operators.md b/documentation/docs/reference/dashboards/kubernetes_monitor_operators.md
index 7a225ae49b..27a9643b7f 100644
--- a/documentation/docs/reference/dashboards/kubernetes_monitor_operators.md
+++ b/documentation/docs/reference/dashboards/kubernetes_monitor_operators.md
@@ -1,7 +1,7 @@
# Kubernetes monitoring for Percona Operators
!!! caution alert alert-warning "Important"
- This feature is still in [Technical Preview](https://docs.percona.com/percona-monitoring-and-management/details/glossary.html#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
+ This feature is still in [Technical Preview](../../reference/glossary.md#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
Monitoring the state of the database is crucial to timely identify and react to performance issues. Percona Monitoring and Management (PMM) solution enables you to do just that.
diff --git a/documentation/docs/reference/faq.md b/documentation/docs/reference/faq.md
index b8f8147452..2c91a10a0c 100644
--- a/documentation/docs/reference/faq.md
+++ b/documentation/docs/reference/faq.md
@@ -7,16 +7,23 @@
## What are the minimum system requirements?
-- Server:
- - Disk: 1 GB per monitored database (1 week data retention)
- - Memory: 2 GB per monitored database
- - CPU: Supports [`SSE4.2`](https://wikipedia.org/wiki/SSE4#SSE4.2)
-- Client:
- - Disk: 100 MB
+See our detailed guides:
-!!! seealso alert alert-info "See also"
- - [Setting up PMM Server](setting-up/server/index.md)
- - [Setting up PMM Client](setting-up/client/index.md)
+- [PMM hardware and system requirements](../install-pmm/plan-pmm-installation/hardware_and_system.md) for complete specifications
+- [Setting up PMM Server](../install-pmm/install-pmm-server/index.md) for server installation
+- [Setting up PMM Client](../install-pmm/install-pmm-client/index.md) for client setup
+
+Quick reference for typical deployment (up to 30 nodes):
+- **Server**:
+ - CPU: 4 cores (must support SSE4.2)
+ - Memory: 8 GB
+ - Storage: 100 GB (approximately 1 GB per node per week)
+- **Client**:
+ - Storage: 100 MB
+ - Supports x86_64 and ARM64 architectures
+
+!!! note alert alert-info "Note"
+ For larger deployments (200+ nodes) or longer retention periods, see our [Hardware and system requirements](../install-pmm/plan-pmm-installation/hardware_and_system.md) for detailed sizing recommendations.
## How can I upgrade from version 2?
@@ -27,7 +34,7 @@ PMM 3 introduces significant architectural changes that require gradual transiti
Go to **PMM Configuration > Settings > Advanced Settings > Data retention** to adjust the value in days.
!!! seealso alert alert-info "See also"
- [Configure data retention](how-to/configure.md#data-retention)
+ [Configure data retention](../configure-pmm/advanced_settings.md#data-retention)
## How are PMM Server logs rotated?
@@ -40,7 +47,7 @@ SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD
```
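+
+As a sketch of how these privileges could be granted to a dedicated monitoring account (the user name, host, and password below are placeholders; see the MySQL setup page linked below for the complete procedure):
+
+```sh
+# Create a monitoring user with the privileges listed above (placeholders only)
+mysql -u root -p -e "CREATE USER 'pmm'@'localhost' IDENTIFIED BY '<password>';
+GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD ON *.* TO 'pmm'@'localhost';"
+```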
!!! seealso alert alert-info "See also"
- [Setting Up/Client/MySQL](setting-up/client/mysql.md#create-a-database-account-for-pmm).
+ [Setting Up/Client/MySQL](../install-pmm/install-pmm-client/connect-database/mysql.md#create-a-database-account-for-pmm).
## Can I monitor multiple service instances?
@@ -58,7 +65,7 @@ pmm-admin add mysql --username root --password root instance-02 127.0.0.1:3002
```
!!! seealso alert alert-info "See also"
- [`pmm-admin add mysql`](details/commands/pmm-admin.md#mysql)
+ [`pmm-admin add mysql`](../use/commands/pmm-admin.md#mysql)
## Can I rename instances?
@@ -68,7 +75,7 @@ When you remove a monitoring service, previously collected data remains availabl
## Can I add an AWS RDS MySQL or Aurora MySQL instance from a non-default AWS partition?
-By default, the RDS discovery works with the default `aws` partition. But you can switch to special regions, like the [GovCloud](https://aws.amazon.com/govcloud-us/) one, with the alternative [AWS partitions](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/#pkg-constants) (e.g. `aws-us-gov`) adding them to the *Settings* via the PMM Server [API](details/api.md).
+By default, RDS discovery works with the default `aws` partition. To use special regions such as [GovCloud](https://aws.amazon.com/govcloud-us/), add the alternative [AWS partitions](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/#pkg-constants) (e.g. `aws-us-gov`) to the *Settings* via the PMM Server [API](../api/index.md).

@@ -86,7 +93,7 @@ The default values (in seconds):
| Custom (defaults) | 60 | 10 | 5 |
!!! seealso alert alert-info "See also"
- [Metrics resolution](how-to/configure.md#metrics-resolution)
+ [Metrics resolution](../configure-pmm/metrics_res.md)
## How do I set up Alerting?
@@ -110,12 +117,12 @@ From version 2.4.0, when `pmm-managed` starts the Prometheus file generation pro
The `prometheus.yml` file can be regenerated by restarting the PMM Server container, or by using the `SetSettings` API call with an empty body.
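+
+As a quick sketch of both options (the container name is assumed to be `pmm-server`, and the exact Settings endpoint path depends on your PMM version, so check the API reference):
+
+```sh
+# Option 1: restart the PMM Server container to regenerate the file
+docker restart pmm-server
+
+# Option 2: call the Settings API with an empty body
+# (endpoint path is version-dependent; shown here as used in PMM 2.x)
+curl -k -u admin:<password> -X POST https://<pmm-server>/v1/Settings/Change \
+  -H 'Content-Type: application/json' -d '{}'
+```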
!!! seealso alert alert-info "See also"
- - [API](details/api.md)
+ - [API](../api/index.md)
- [Percona blog: Extending PMM’s Prometheus Configuration](https://www.percona.com/blog/2020/03/23/extending-pmm-prometheus-configuration/)
-## How to troubleshoot an update?
+## How do I troubleshoot an upgrade?
-See [Troubleshoot update](how-to/troubleshoot.md#update).
+See [Troubleshoot upgrade issues](../troubleshoot/upgrade_issues.md).
## What are my login credentials when I try to connect to a Prometheus Exporter?
@@ -125,7 +132,7 @@ See [Troubleshoot update](how-to/troubleshoot.md#update).
PMM protects an exporter's output from unauthorized access by adding an authorization layer. To access an exporter, you can use `pmm` as a user name and the Agent ID as a password. You can find the Agent ID corresponding to a given exporter by running `pmm-admin list`.
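+
+For example, here is a minimal sketch of querying an exporter directly (the listen port and scheme vary per exporter and PMM version, so treat them as placeholders):
+
+```sh
+# Find the Agent ID of the exporter you want to query
+pmm-admin list
+
+# Query the exporter's metrics endpoint, authenticating as user "pmm" with the
+# Agent ID as the password (port 42002 is only an example)
+curl -k -u "pmm:<agent-id>" "https://localhost:42002/metrics"
+```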
!!! seealso alert alert-info "See also"
- [`pmm-admin list`](details/commands/pmm-admin.md#information-commands)
+ [`pmm-admin list`](../use/commands/pmm-admin.md#information-commands)
## How to provision PMM Server with non-default admin password?
@@ -170,7 +177,7 @@ change-admin-password
## How does PMM handle personal and confidential data?
-Read our [Privacy Policy](https://www.percona.com/privacy-policy) to learn how PMM manages personal and confidential data. More technical details can be found in [Data handling in PMM](details/personal_data_handling.md).
+Read our [Privacy Policy](https://www.percona.com/privacy-policy) to learn how PMM manages personal and confidential data. More technical details can be found in [Data handling in PMM](personal_data_handling.md).
## Why am I getting a "User already exists" error when logging back into PMM?
diff --git a/documentation/docs/reference/index.md b/documentation/docs/reference/index.md
index 34a73ad0ee..eb0704063c 100644
--- a/documentation/docs/reference/index.md
+++ b/documentation/docs/reference/index.md
@@ -8,7 +8,7 @@ PMM is a client/server application built by Percona comprising its own and third
## PMM Server
-PMM Server is the heart of PMM. It receives data from clients, collects it, and stores it. Metrics are drawn as tables, charts and graphs within [_dashboards_](../use/dashboards-panels/index.md), each a part of the web-based [user interface](../reference/ui/index.md).
+PMM Server is the heart of PMM. It receives data from clients, collects it, and stores it. Metrics are drawn as tables, charts and graphs within [_dashboards_](../use/dashboards-panels/index.md), each a part of the web-based [user interface](../reference/ui/ui_components.md).
## PMM Client
@@ -62,7 +62,7 @@ PMM Server includes the following tools:
The PMM Client package consists of the following:
-- `pmm-admin` is a command-line tool for managing PMM Client, for example, adding and removing database instances that you want to monitor. ([Read more](../details/commands/pmm-admin.md)).
+- `pmm-admin` is a command-line tool for managing PMM Client, for example, adding and removing database instances that you want to monitor. ([Read more](../use/commands/pmm-admin.md)).
- `pmm-agent` is a client-side component of a minimal command-line interface, which is a central entry point in charge of bringing the client functionality: it carries on client’s authentication, gets the client configuration stored on the PMM Server, manages exporters and other agents.
diff --git a/documentation/docs/reference/personal_data_handling.md b/documentation/docs/reference/personal_data_handling.md
index 3d8ae69ae5..3ce08a8a6a 100644
--- a/documentation/docs/reference/personal_data_handling.md
+++ b/documentation/docs/reference/personal_data_handling.md
@@ -11,7 +11,7 @@ The following questions are being answered related to personal and confidential
| DB host to PMM | Database performance metrics SQL query examples for query analytics (optional).
| PMM to DB Host | DSN and credentials for database access. A separate DB user is used (limited access) to retrieve metrics from the database.
| DB Host to S3 compatible storage location | Database backup - optional if PMM Administrator configures it with Public Cloud (AWS, GCP, etc) as a possible storage location.
- | PMM Server to Percona Cloud | Telemetry data is collected. PMM Server collects varying amounts of data from version to version, and no personal or confidential information is collected. See [here](https://docs.percona.com/percona-monitoring-and-management/how-to/configure.html#telemetry) for details on the data being transmitted.
+ | PMM Server to Percona Cloud | Telemetry data is collected. PMM Server collects varying amounts of data from version to version, and no personal or confidential information is collected. See [Telemetry](../configure-pmm/advanced_settings.md#telemetry) for details on the data being transmitted.
2. Where is the data obtained from the DB host transmitted?
diff --git a/documentation/docs/reference/pmm_components_and_versions.md b/documentation/docs/reference/pmm_components_and_versions.md
index 06e341ae01..329ec1590a 100644
--- a/documentation/docs/reference/pmm_components_and_versions.md
+++ b/documentation/docs/reference/pmm_components_and_versions.md
@@ -6,11 +6,10 @@ The following table lists all the PMM client/server components and their version
|-----------------------------|--------|---------------|------------------|
| Grafana | 11.1.8* | [Grafana Documentation](https://grafana.com/docs/grafana/latest/)|[Github Grafana](https://github.com/percona-platform/grafana)|
| VictoriaMetrics| 1.93.4 | [VictoriaMetrics Documentation](https://docs.victoriametrics.com/)|[Github VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) |
-| Nginx | 1.20.1 | [Nginx Documentation](http://nginx.org/en/docs/)|[Github Nginx](https://github.com/nginx/nginx-releases) |
+| Nginx | 1.20.1 | [Nginx Documentation](http://nginx.org/en/docs/)|[Github Nginx](https://github.com/nginx/nginx) |
| Percona Distribution for PostgreSQL | 14.5 | [Percona Distribution for PostgreSQL 14 Documentation](https://www.percona.com/doc/postgresql/LATEST/index.html)| |
| Clickhouse| 23.8.2.7 |[ClickHouse Documentation Documentation](https://clickhouse.com/docs/en/)|[Github ClickHouse](https://github.com/ClickHouse/ClickHouse)|
| PerconaToolkit | 3.4.0 | [Percona Toolkit Documentation](https://www.percona.com/doc/percona-toolkit/3.0/index.html)|[Github Percona Toolkit](https://github.com/percona/percona-toolkit)|
-| Alertmanager | 0.22.0 | [Alertmanager Documentation](https://prometheus.io/docs/alerting/latest/alertmanager/)|[Github Alertmanager](https://github.com/prometheus/alertmanager)|
| MongoDB exporter | 0.37.0 | |[Github MongoDB Exporter](https://github.com/percona/mongodb_exporter)|
| MySQL exporter| v0.14.0* | [MySQL Server Exporter Documentation](https://grafana.com/oss/prometheus/exporters/mysql-exporter/)|[Github MySQL Server Exporter](https://github.com/percona/mysqld_exporter) |
| PostgreSQL exporter| v0.14.0* | |[Github PostgreSQL Server Exporter](https://github.com/percona/postgres_exporter) |
diff --git a/documentation/docs/reference/third-party/postgresql.md b/documentation/docs/reference/third-party/postgresql.md
index c8bc839074..530afeca32 100644
--- a/documentation/docs/reference/third-party/postgresql.md
+++ b/documentation/docs/reference/third-party/postgresql.md
@@ -6,22 +6,19 @@ You can use an external PostgreSQL database instance outside the PMM Server cont
PMM predefines certain flags that allow you to use PostgreSQL parameters as environment variables:
-!!! caution alert alert-warning "Warning"
- The `PERCONA_TEST_*` environment variables are experimental and subject to change. It is recommended that you use these variables for testing purposes only and not on production. The minimum supported PostgreSQL server version is 14.
-
To use PostgreSQL as an external database instance, use the following environment variables:
| Environment variable | Flag | Description |
| ---------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| PERCONA_TEST_POSTGRES_ADDR | [postgres-addr](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-HOST) | Hostname and port for external PostgreSQL database. |
-| PERCONA_TEST_POSTGRES_DBNAME | [postgres-name](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-DBNAME) | Database name for external or internal PostgreSQL database. |
-| PERCONA_TEST_POSTGRES_USERNAME | [postgres-username](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-USER) | PostgreSQL user name to connect as. |
-| PERCONA_TEST_POSTGRES_DBPASSWORD | [postgres-password](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-PASSWORD) | Password to be used for database authentication. |
-| PERCONA_TEST_POSTGRES_SSL_MODE | [postgres-ssl-mode](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLMODE) | This option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the database. Currently supported: `disable`, `require`, `verify-ca`, `verify-full`. |
-| PERCONA_TEST_POSTGRES_SSL_CA_PATH | [postgres-ssl-ca-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLROOTCERT) | This parameter specifies the name of a file containing SSL certificate authority (CA) certificate(s). |
-| PERCONA_TEST_POSTGRES_SSL_KEY_PATH | [postgres-ssl-key-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLKEY) | This parameter specifies the location for the secret key used for the client certificate. |
-| PERCONA_TEST_POSTGRES_SSL_CERT_PATH | [postgres-ssl-cert-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLCERT) | This parameter specifies the file name of the client SSL certificate. |
-| PERCONA_TEST_PMM_DISABLE_BUILTIN_POSTGRES | | Environment variable to disable built-in PMM Server database. Note that Grafana depends on built-in PostgreSQL. And if the value of this variable is "true", then it is necessary to pass all the parameters associated with Grafana to use external PostgreSQL. |
+| PMM_POSTGRES_ADDR | [postgres-addr](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-HOST) | Hostname and port for external PostgreSQL database. |
+| PMM_POSTGRES_DBNAME | [postgres-name](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-DBNAME) | Database name for external or internal PostgreSQL database. |
+| PMM_POSTGRES_USERNAME | [postgres-username](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-USER) | PostgreSQL user name to connect as. |
+| PMM_POSTGRES_DBPASSWORD | [postgres-password](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-PASSWORD) | Password to be used for database authentication. |
+| PMM_POSTGRES_SSL_MODE | [postgres-ssl-mode](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLMODE) | This option determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the database. Currently supported: `disable`, `require`, `verify-ca`, `verify-full`. |
+| PMM_POSTGRES_SSL_CA_PATH | [postgres-ssl-ca-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLROOTCERT) | This parameter specifies the name of a file containing SSL certificate authority (CA) certificate(s). |
+| PMM_POSTGRES_SSL_KEY_PATH | [postgres-ssl-key-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLKEY) | This parameter specifies the location for the secret key used for the client certificate. |
+| PMM_POSTGRES_SSL_CERT_PATH | [postgres-ssl-cert-path](https://www.postgresql.org/docs/14/libpq-connect.html#LIBPQ-CONNECT-SSLCERT) | This parameter specifies the file name of the client SSL certificate. |
+| PMM_DISABLE_BUILTIN_POSTGRES | | Disables the built-in PMM Server database. Note that Grafana depends on the built-in PostgreSQL, so if this variable is set to "true", you must also pass all Grafana-related parameters to use the external PostgreSQL. |
By default, communication between the PMM Server and the database is not encrypted. To secure a connection, follow the [PostgreSQL SSL instructions](https://www.postgresql.org/docs/14/ssl-tcp.html) and provide the `PMM_POSTGRES_SSL_*` variables.
@@ -63,7 +60,7 @@ To use PostgreSQL as an external database:
4. Create `user` and `database` for pmm-server to use. Set appropriate rights and access.
-5. Install `pg_stat_statements` in PostgreSQL in order to have all metrics according to [this](../setting-up/client/postgresql.md) handy document.
+5. Install the `pg_stat_statements` extension in PostgreSQL so that all metrics are collected, as described in [the PostgreSQL setup guide](../../install-pmm/install-pmm-client/connect-database/postgresql.md).
6. Run PostgreSQL server.
@@ -89,19 +86,19 @@ To use PostgreSQL as an external database:
```sh
docker run
--name pmm-server
- -e PERCONA_TEST_POSTGRES_ADDR=$ADDRESS:$PORT
- -e PERCONA_TEST_POSTGRES_DBNAME=$DBNAME
- -e PERCONA_TEST_POSTGRES_USERNAME=$USER
- -e PERCONA_TEST_POSTGRES_DBPASSWORD=$PASSWORD
- -e PERCONA_TEST_POSTGRES_SSL_MODE=$SSL_MODE
- -e PERCONA_TEST_POSTGRES_SSL_CA_PATH=$CA_PATH
- -e PERCONA_TEST_POSTGRES_SSL_KEY_PATH=$KEY_PATH
- -e PERCONA_TEST_POSTGRES_SSL_CERT_PATH=$CERT_PATH
- -e PERCONA_TEST_PMM_DISABLE_BUILTIN_POSTGRES=true
+ -e PMM_POSTGRES_ADDR=$ADDRESS:$PORT
+ -e PMM_POSTGRES_DBNAME=$DBNAME
+ -e PMM_POSTGRES_USERNAME=$USER
+ -e PMM_POSTGRES_DBPASSWORD=$PASSWORD
+ -e PMM_POSTGRES_SSL_MODE=$SSL_MODE
+ -e PMM_POSTGRES_SSL_CA_PATH=$CA_PATH
+ -e PMM_POSTGRES_SSL_KEY_PATH=$KEY_PATH
+ -e PMM_POSTGRES_SSL_CERT_PATH=$CERT_PATH
+ -e PMM_DISABLE_BUILTIN_POSTGRES=true
-e GF_DATABASE_URL=$GF_DATABASE_URL
-e GF_DATABASE_SSL_MODE=$GF_SSL_MODE
-e GF_DATABASE_CA_CERT_PATH=$GF_CA_PATH
-e GF_DATABASE_CLIENT_KEY_PATH=$GF_KEY_PATH
-e GF_DATABASE_CLIENT_CERT_PATH=$GF_CERT_PATH
- perconalab/pmm-server:3.0.0-beta
+ percona/pmm-server:3
```
diff --git a/documentation/docs/reference/third-party/victoria.md b/documentation/docs/reference/third-party/victoria.md
index 2d17e1e496..b767bded43 100644
--- a/documentation/docs/reference/third-party/victoria.md
+++ b/documentation/docs/reference/third-party/victoria.md
@@ -51,7 +51,7 @@ This instructs VictoriaMetrics to [deduplicate](https://docs.victoriametrics.com
## Using VictoriaMetrics external database instance
!!! caution alert alert-warning "Important/Caution"
- This feature is still in [Technical Preview](https://docs.percona.com/percona-monitoring-and-management/details/glossary.html#technical-preview) and is subject to change. We recommend that early adopters use this feature for evaluation purposes only.
+ This feature is still in [Technical Preview](../../reference/glossary.md#technical-preview) and is subject to change. We recommend that early adopters use this feature for evaluation purposes only.
You can use an external VictoriaMetrics database for monitoring in PMM.
diff --git a/documentation/docs/reference/ui/dashboards-panels/annotate/annotate.md b/documentation/docs/reference/ui/dashboards-panels/annotate/annotate.md
deleted file mode 100644
index ec56b87f5b..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/annotate/annotate.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Annotation in dashboards
-
-Annotations mark a moment in time. They are useful for marking system changes or other significant application events. They can be set globally or for specific nodes or services.
-
-You create them on the command line with the [`pmm-admin annotate` command.](../../../../use/commands/pmm-admin.md)
-
-Annotations show as a vertical dashed line on a dashboard graph. Reveal the annotation text by mousing over the caret indicator below the line.
-
-
-
-You turn annotations on or off with the *PMM Annotations* switch in the second row menu bar.
-
-
diff --git a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_for_org.md b/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_for_org.md
deleted file mode 100644
index 00b028aecc..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_for_org.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-# Set home dashboard for your organization
-
-Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../how-to/manage-users.md)
-
-1. Navigate to the dashboard that you want to set as the home dashboard.
-2. Click the star next to the dashboard title to mark the dashboard as a favorite.
-3. Hover your cursor over *Configuration*
-4. Click *Preferences*.
-5. In the Home Dashboard field, select the dashboard that you want to set as your home dashboard.
-6. Click *Save*.
diff --git a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_team.md b/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_team.md
deleted file mode 100644
index 17605adc61..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_custom_team.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Set home dashboard for your team
-
-Organization and team Admins can set the home dashboard for their team as follows:
-
-1. Navigate to the dashboard that you want to set as your home dashboard.
-2. Click star next to the dashboard to mark the dashboard as a favorite.
-3. On the main menu, hover your cursor over *Configuration*.
-4. Click *Teams*. Grafana displays the team list.
-5. Click on the team for whom you want to set the home dashboard and then navigate to the *Settings* tab.
-6. In the Home Dashboard field, select the dashboard that you want to use for your home dashboard.
-7. Click *Save*.
-
diff --git a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_personal_home.md b/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_personal_home.md
deleted file mode 100644
index 332c96bf5f..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/custom_dashboard/set_personal_home.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Set custom Home Dashboard
-
-
-The home dashboard you set is the dashboard all the users will see after logging in to PMM UI. You can set the home dashboard for a server, an organization, a team, or your user account.
-
-1. From the main menu, go to *Dashboards > Browse* and select the dashboard you want to set as your home dashboard.
-2. Click the star next to the dashboard title to mark it as a favorite.
-
- 
-
-
-3. From the side menu go to *Configuration > Preferences*. In the *Home Dashboard* field, select the dashboard that you want to set as your home dashboard.
-
- 
-
-4. Click *Save*.
diff --git a/documentation/docs/reference/ui/dashboards-panels/export-dashboards/export_dashboards.md b/documentation/docs/reference/ui/dashboards-panels/export-dashboards/export_dashboards.md
deleted file mode 100644
index e90751b493..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/export-dashboards/export_dashboards.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Export a dashboard
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
diff --git a/documentation/docs/reference/ui/dashboards-panels/export-dashboards/import_dashboards.md b/documentation/docs/reference/ui/dashboards-panels/export-dashboards/import_dashboards.md
deleted file mode 100644
index c41eded709..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/export-dashboards/import_dashboards.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Import a dashboard
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/dashboards-panels/index.md b/documentation/docs/reference/ui/dashboards-panels/index.md
deleted file mode 100644
index 1f0d05cf12..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/index.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# About dashboards
-
-!!! caution alert alert-warning "Important"
- The content is under development.
-
-The interface is a collection of web pages called dashboards.
-
-Dashboards are grouped into folders. You can customize these, by renaming them or creating new ones.
-
-The area inside dashboards is populated by panels. Some are in collapsible panel groups. A panel can show a value, a graph, a chart, or a visual representation of a set.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/create-folders.md b/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/create-folders.md
deleted file mode 100644
index 20879a528e..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/create-folders.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# Create dashboard folders
-
-Folders help you organize and group PMM dashboards, which is crucial when you have multiple dashboards or teams using the same PMM instance.
-
-!!! note alert alert-primary "Note"
- To create a dashboard folder, you must have PMM's *Admin* privileges.
-
-To create a dashboard folder:
-
-1. On the PMM dashboards page, from the side menu, go to *Dashboards > New folder*.
-
-2. Enter a unique name for your folder and click *Create*.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/manage-folders.md b/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/manage-folders.md
deleted file mode 100644
index cc277eb24b..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/manage-dashboards/manage-folders.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# Manage dashboard folders
-
-This section describes how to delete multiple dashboards, move dashboards from one folder to another and navigate to a folder page where you can assign folder and dashboard permissions.
-
-## Delete multiple dashboards
-
-To delete multiple dashboards at once:
-
-From the side menu, go to *Dashboards > Browse* and check the dashboards that you want to delete, and click *Delete*.
-
-
-
-
-## Move dashboards from one folder to another
-
-You can move dashboards from one folder to another in the following two ways:
-
-
-1. From the side menu, go to *Dashboards > Browse* and check the dashboards that you want to move. Click *Move*.
-
- 
-
-2. On the *Choose Dashboard Folder* dialog box select the dashboards that you want to move from the drop-down. Click *Move*.
-
-The other way of moving dashboards from one folder to another is:
-
-
-1. Open the dashboard that you want to move to another folder.
-2. Click on icon to open *Dashboard Settings*.
-3. On the *General* page, under *Folder* select the folder name that you want to move from the dropdown.
-
- 
-
-4. Click *Save Dashboard* on the the left to save the change.
-
-!!! note alert alert-primary "Note"
- You should have atleast an *Editor* role to move a dashboard.
-
-## Navigate to a dashboard folder page to assign permissions
-
-1. From the side menu, go to *Dashboards > Browse* and hover over the dashboard folder whose permissions you want to set. Click *Go to Folder*.
-2. Go to the *Permissions* tab and select the requisite permission from the drop-down for the various roles.
-
- 
-
-
-## Setting custom Home Dashboard
-
-The home dashboard you set is the dashboard all the users will see after logging in to PMM UI. You can set the home dashboard for a server, an organization, a team, or your user account.
-
-### Set home dashboard for your organization
-
-Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../../how-to/manage-users.md)
-
-1. Navigate to the dashboard that you want to set as the home dashboard.
-2. Click the star next to the dashboard title to mark the dashboard as a favorite.
-3. Hover your cursor over *Configuration*
-4. Click *Preferences*.
-5. In the Home Dashboard field, select the dashboard that you want to set as your home dashboard.
-6. Click *Save*.
-
-
-### Set home dashboard for your team
-
-Organization and team Admins can set the home dashboard for their team as follows:
-
-1. Navigate to the dashboard that you want to set as your home dashboard.
-2. Click star next to the dashboard to mark the dashboard as a favorite.
-3. On the main menu, hover your cursor over *Configuration*.
-4. Click *Teams*. Grafana displays the team list.
-5. Click on the team for whom you want to set the home dashboard and then navigate to the *Settings* tab.
-6. In the Home Dashboard field, select the dashboard that you want to use for your home dashboard.
-7. Click *Save*.
-
-
-### Set your Personal Home Dashboard
-
-1. From the main menu, go to *Dashboards > Browse* and select the dashboard you want to set as your home dashboard.
-2. Click the star next to the dashboard title to mark it as a favorite.
-
- 
-
-
-3. From the side menu go to *Configuration > Preferences*. In the *Home Dashboard* field, select the dashboard that you want to set as your home dashboard.
-
- 
-
-4. Click *Save*.
diff --git a/documentation/docs/reference/ui/dashboards-panels/share-dashboards/publish_snapshot.md b/documentation/docs/reference/ui/dashboards-panels/share-dashboards/publish_snapshot.md
deleted file mode 100644
index b8531a108f..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/share-dashboards/publish_snapshot.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Publish snapshot
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
diff --git a/documentation/docs/reference/ui/dashboards-panels/share-dashboards/share_dashboard.md b/documentation/docs/reference/ui/dashboards-panels/share-dashboards/share_dashboard.md
deleted file mode 100644
index 43ad98b1e4..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/share-dashboards/share_dashboard.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Share dashboards
-
-When you need to share a dashboard with your team members, you can either send them a direct link to the dashboard, or render and send the dashboard as a .PNG image.
-
-## Share as direct link
-1. Go to the dashboard that you want to share.
-2. Click at the top of the dashboard to display the panel menu.
-3. Select **Share** to reveal the **Share Panel** and either:
- - copy and send the full URL for the dashboard, OR
- - toggle the **Short URL** option to generate a simple link with a unique identifier
-
-!!! hint alert alert-success "Tip"
- If your current domain is different than the one specified in the Grafana .INI configuration file, PMM will ask you to correct this mismatch before you can generate a short URL:
- 
- To fix this
-
-## Share as a PNG file
-
-Rendering images requires the Image Renderer plug-in. If your PMM Admin has not installed this for your PMM instance, you will see the following error message under **Share Panel > Link**.
-
-
-To install the dependencies:
-
-1. Connect to your PMM Server Docker container.
-
- ```sh
- docker exec -it pmm-server bash
- ```
-
-2. Install Grafana plug-ins.
-
- ```sh
- grafana-cli plugins install grafana-image-renderer
- ```
-
-3. Restart Grafana.
-
- ```sh
- supervisorctl restart grafana
- ```
-
-4. Install libraries.
-
- ```sh
- yum install -y libXcomposite libXdamage libXtst cups libXScrnSaver pango \
- atk adwaita-cursor-theme adwaita-icon-theme at at-spi2-atk at-spi2-core \
- cairo-gobject colord-libs dconf desktop-file-utils ed emacs-filesystem \
- gdk-pixbuf2 glib-networking gnutls gsettings-desktop-schemas \
- gtk-update-icon-cache gtk3 hicolor-icon-theme jasper-libs json-glib \
- libappindicator-gtk3 libdbusmenu libdbusmenu-gtk3 libepoxy \
- liberation-fonts liberation-narrow-fonts liberation-sans-fonts \
- liberation-serif-fonts libgusb libindicator-gtk3 libmodman libproxy \
- libsoup libwayland-cursor libwayland-egl libxkbcommon m4 mailx nettle \
- patch psmisc redhat-lsb-core redhat-lsb-submod-security rest spax time \
- trousers xdg-utils xkeyboard-config alsa-lib
- ```
-
-To render the image:
-
-1. Go to the dashboard that you want to share.
-2. Click at the top of the dashboard to display the panel menu.
-3. Select **Share** to reveal the **Share Panel**.
-4. Click **Direct link rendered image**. This opens a new browser tab.
-5. Wait for the image to be rendered, then use your browser's Image Save function to download the image.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-feature.md b/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-feature.md
deleted file mode 100644
index d9049f921a..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-feature.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Dashboard feature overview
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
diff --git a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-settings.md b/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-settings.md
deleted file mode 100644
index 73f8e95438..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-settings.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Dashboard settings
-
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-time-range.md b/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-time-range.md
deleted file mode 100644
index fdb15eb88c..0000000000
--- a/documentation/docs/reference/ui/dashboards-panels/use-dashboards/dashboard-time-range.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Set dashboard time range
-
-## Time units and relative ranges
-
-
-!!! caution alert alert-warning "Important"
- The content for this topic is under development.
\ No newline at end of file
diff --git a/documentation/docs/reference/ui/ui_components.md b/documentation/docs/reference/ui/ui_components.md
index 8edbc1c55f..c2b8bc018c 100644
--- a/documentation/docs/reference/ui/ui_components.md
+++ b/documentation/docs/reference/ui/ui_components.md
@@ -16,16 +16,16 @@ The main menu is part of the Grafana framework and is visible on every page.
| | Dashboards | Create dashboards or [folders][Folders], manage dashboards, import dashboards, create playlists, manage snapshots.
| | Explore | Run queries with [PromQL].
| | Operating System (OS) | Operating System dashboard
-|    | Service Type dashboards | Navigate to the dashboards available for the [services added for monitoring](../../install-pmm/install-pmm-client/connect-database/index.md) (MySQL, MongoDB, PostgreSQL, HAproxy or ProxySQL).
-| | Query Analytics (QAN) | Navigate to the Query Analytics dashboard where you can analyze database queries over time, optimize database performance, and identify the source of problems.
+| :simple-mysql: :simple-mongodb: :simple-postgresql: | Service Type dashboards | Navigate to the dashboards available for the [services added for monitoring](../../install-pmm/install-pmm-client/connect-database/index.md) (MySQL, MongoDB, PostgreSQL, HAproxy or ProxySQL).
+| | Query Analytics (QAN) | Navigate to the Query Analytics dashboard where you can analyze database queries over time, optimize database performance, and identify the source of problems.
| | Alerting | [Alerting](../../alert/index.md), Create new alerts and manage your alert rules and alert templates.
-| | Advisors | Run health assessment checks against your connected databases and check any failed checks.
-| | Backup | [Backup management and storage location configuration][BACKUP]. The Backup icon appears when **Backup Management** is activated in **PMM Configuration > Settings > Advanced Settings**.
-| | Connections | Access Grafana's built-in data sources within PMM to seamlessly integrate and visualize data from various systems like Prometheus, MySQL, PostgreSQL, InfluxDB, and Elasticsearch.
-| | PMM Configuration|| Hosts all PMM-related configuration and inventory options. |
+| | Advisors | Run health assessment checks against your connected databases and review any failed checks.
+| | Backup | [Backup management and storage location configuration][BACKUP]. The Backup icon appears when **Backup Management** is activated in :material-cog: **PMM Configuration** > :material-cog-outline: **Settings** > **Advanced Settings**.
+| :material-cog: | Connections | Access Grafana's built-in data sources within PMM to seamlessly integrate and visualize data from various systems like Prometheus, MySQL, PostgreSQL, InfluxDB, and Elasticsearch.
+| :material-cog: | PMM Configuration | Hosts all PMM-related configuration and inventory options.
| | Administration |Hosts all Grafana-related configuration and inventory options.
| | Entitlements |This tab is displayed after connecting PMM to Percona Portal, and shows all your Percona Platform account information.
-| | List of tickets opened by Customer Support | Shows the list of tickets opened across your organization. This tab is only available after you connect PMM to Percona Platform.
+| | List of tickets opened by Customer Support | Shows the list of tickets opened across your organization. This tab is only available after you connect PMM to Percona Portal.
| | Environment Overview | This tab is displayed after connecting PMM to Percona Portal. Shows the name and email of the Customer Success Manager assigned to your organization, who can help with any PMM queries. This tab will soon be populated with more useful information about your PMM environment.
diff --git a/documentation/docs/release-notes/3.0.0_Beta.md b/documentation/docs/release-notes/3.0.0.md
similarity index 65%
rename from documentation/docs/release-notes/3.0.0_Beta.md
rename to documentation/docs/release-notes/3.0.0.md
index c8183f66ea..3a14afec27 100644
--- a/documentation/docs/release-notes/3.0.0_Beta.md
+++ b/documentation/docs/release-notes/3.0.0.md
@@ -1,14 +1,35 @@
-# Percona Monitoring and Management 3.0.0 Beta
+# Percona Monitoring and Management 3.0.0
-| **Release date** | November 22nd, 2024 |
-| ----------------- | ----------------------------------------------------------------------------------------------- |
+| **Release date** | January 30th, 2025 |
+| ----------------- | :---------------------------------------------------------------------------------------------- |
| **Installation** | [Installing Percona Monitoring and Management](../quickstart/index.md) |
+| **Upgrade** | [Migrate PMM 2 to PMM 3](../pmm-upgrade/migrating_from_pmm_2.md) |
+
Percona Monitoring and Management (PMM) is an open source database monitoring, management, and observability solution for MySQL, PostgreSQL, and MongoDB.
-It enables you to observe the health of your database systems, explore new patterns in their behavior, troubleshoot them and execute database management operations—regardless of whether your databases are located on-premises or in the cloud.
+PMM empowers you to:
+
+- monitor the health and performance of your database systems
+- identify patterns and trends in database behavior
+- troubleshoot issues effectively
+- execute database management tasks seamlessly, whether your databases are on-premises or in the cloud
+
+## General availability of PMM 3.0.0
+
+We're excited to announce the General Availability of PMM 3.0.0, a new major version that introduces a complete architectural overhaul of PMM.
-PMM 3.0.0 Beta introduces major security improvements with rootless deployments, encryption of sensitive data, enhanced stability through containerized architecture, and improved user experience with flexible monitoring configurations. Key changes include official ARM support, MongoDB 8.0 monitoring, and a streamlined upgrade process.
+This milestone delivers major security improvements with rootless deployments, encryption of sensitive data, enhanced stability through containerized architecture, and improved user experience with flexible monitoring configurations. Key changes include official ARM support, MongoDB 8.0 monitoring, and a streamlined upgrade process.
+
+## Release summary
+
+- [Security enhancements](#security-enhancements)
+- [Enhanced stability](#enhanced-stability)
+- [Improved user experience](#improved-user-experience)
+- [Monitoring improvements](#monitoring-improvements)
+- [Breaking API changes](#breaking-api-changes)
+- [Component upgrades](#component-upgrades)
+- [Improvements](#improvements)
+- [Fixed issues](#fixed-issues)
## Security enhancements
@@ -20,7 +41,7 @@ PMM Server now supports rootless deployment through multiple methods, including
This rootless setup enhances security by eliminating the need for root privileges to create, run, and manage containers. By running PMM Server as a non-root user, you avoid granting root permissions on the host system, providing an additional layer of protection against potential vulnerabilities and security breaches.
-For instructions on deploying rootless PMM, check the [Setting up PMM Server](https://docs.percona.com/percona-monitoring-and-management/setting-up/index.html#set-up-pmm-server) topic.
+For instructions on deploying rootless PMM, check the [Setting up PMM Server](../install-pmm/install-pmm-server/index.md) topic.
### UI-based upgrades for Podman installations
@@ -28,30 +49,42 @@ You can now upgrade PMM Server installations running under Podman directly throu
This functionality integrates Watchtower for automated container updates and requires configuration of new environment variables (`PMM_WATCHTOWER_HOST`, `PMM_WATCHTOWER_TOKEN`) as well as relevant systemd service settings.
-For detailed configuration instructions, see [Installation with UI updates](../install-pmm/install-pmm-server/baremetal/podman/index.md).
+For detailed configuration instructions, see [Installation with UI updates](../install-pmm/install-pmm-server/deployment-options/podman/index.md).
### Encryption of sensitive data
Plaintext passwords and credentials are among the top ten security risks identified by OWASP (Open Web Application Security Project).
-To address this risk, PMM now encrypts all sensitive information stored in its database. This includes usernames, passwords, AWS keys, Azure credentials, and TLS/SSL certificates, significantly enhancing the security of your monitoring environment. Even though we recommend minimal privileges for monitoring user accounts, you can rest assured that the sensitive data is protected!
+To address this risk, PMM now encrypts all sensitive information stored in its database. This includes usernames, passwords, AWS keys, Azure credentials, and TLS/SSL certificates, significantly enhancing the security of your monitoring environment. Even though we recommend minimal privileges for monitoring user accounts, you can rest assured that the sensitive data is protected!
By default, PMM generates an encryption key and stores it at `/srv/pmm-encryption.key`. Alternatively, you can define a custom path for the encryption key using the new environment variable `PMM_ENCRYPTION_KEY_PATH`.
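+
+A minimal sketch (the key path and container options are examples only, not a complete deployment command; see the data encryption page below for full details):
+
+```sh
+# start PMM Server with the encryption key stored at a custom path under /srv
+docker run -d -p 443:8443 --name pmm-server \
+  -e PMM_ENCRYPTION_KEY_PATH=/srv/custom-encryption.key \
+  percona/pmm-server:3
+```
+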
When upgrading to PMM 3, any existing unencrypted PMM 2 data will be encrypted automatically.
-For more information, see [PMM data encryption](../pmm-admin/security/data_encryption.md).
+For more information, see [PMM data encryption](../admin/security/data_encryption.md).
### Enhanced API authentication with Grafana service accounts
We've made a significant enhancement to the way API authentication is handled. PMM no longer relies on API keys as the primary method for controlling access to the PMM Server components and resources. Instead, PMM now leverages Grafana service accounts, which provide a more secure and manageable authentication mechanism compared to API keys.
-The transition to service accounts brings fine-grained access control and logging of all actions performed, providing more security, better visibility and auditing capabilities.
+Service accounts provide better security through fine-grained access control while maintaining detailed audit logs of all actions. This includes comprehensive tracking of elevated privilege operations and improved visibility into access patterns, continuing the logging capabilities previously available with API keys.
+
+#### Migration from API keys
+With this change, API keys are now deprecated and will be automatically converted to service accounts. Here's what happens when upgrading from PMM 2 to PMM 3:
-When you install PMM 3, any existing API keys will be seamlessly converted to service accounts with corresponding service tokens. For more information about using service accounts in PMM, see [Service account authentication](../api/authentication.md).
+ - your existing API keys remain functional but are moved to service accounts
+ - the conversion happens when you first log in as an Admin user or via CLI
+ - your integrations will continue working with the same API key credentials (hash values remain unchanged)
+ - the keys will appear under **Administration > Users and Access > Service Accounts**
+
+For more information, see [Migrate PMM 2 to PMM 3](../pmm-upgrade/migrating_from_pmm_2.md#step-3-migrate-pmm-2-clients-to-pmm-3) and [Service account authentication](../api/authentication.md).

+#### CVEs eliminated through architectural changes
+
+The removal of legacy components like Integrated Alerting and DBaaS, combined with the security enhancements above, eliminates multiple potential vulnerabilities and CVEs. This makes PMM 3 our most secure release yet.
+
## Enhanced stability
### Containerized PMM architecture for AMI and OVF deployments
@@ -74,13 +107,16 @@ At the same time, we're maintaining the UI upgrade option by integrating [Watcht
For easy adoption, Watchtower comes pre-configured in our [Easy-Install script](../quickstart.md), enabling one-step PMM setup.
-## Improved user experience
+### Reduced PMM container image size
+We've optimized the PMM 3 container image from 3.1GB to 2.6GB, reducing its size by 0.5GB. This reduction improves download speeds and enhances deployment reliability, particularly beneficial for environments with limited storage capacity or bandwidth constraints.
+
+## Improved user experience
### New upgrade UI
-We’ve introduced a new **Updates** page under **PMM Configuration** to support the new container-based upgrade system. This centralized interface offers clear visibility into the versions and configurations of both your PMM Server and Clients, simplifying the update process.
+We've introduced a new **Updates** page under **PMM Configuration** to support the new container-based upgrade system. This centralized interface offers clear visibility into the versions and configurations of both your PMM Server and Clients, simplifying the update process.
-With this update, you’ll receive proactive notifications whenever new versions are release to help you make informed decisions before proceeding with available upgrades.
+With this update, you'll receive proactive notifications whenever new versions are released to help you make informed decisions before proceeding with available upgrades.

@@ -104,11 +140,11 @@ We've simplified AWS installations to match our standard Docker/Podman workflow.
Use **admin** as the username and your EC2 Instance ID as the password (the default PMM password cannot be used for security reasons).
-You can find your Instance ID in the AWS Console. For detailed instructions, see [Install PMM Server on AWS Marketplace](../install-pmm/install-pmm-server/aws/aws.md).
+You can find your Instance ID in the AWS Console. For detailed instructions, see [Install PMM Server on AWS Marketplace](../install-pmm/install-pmm-server/deployment-options/aws/aws.md).
### Official ARM support for PMM Client
-PMM 3 Beta now officially supports ARM architecture, upgrading from its experimental status in PMM 2.43. This means you can reliably monitor databases on ARM platforms, taking advantage of their cost-effective infrastructure and energy efficiency in data centers and cloud environments.
+PMM 3 now officially supports ARM architecture, upgrading from its experimental status in PMM 2.43. This means you can reliably monitor databases on ARM platforms, taking advantage of their cost-effective infrastructure and energy efficiency in data centers and cloud environments.
Installation follows the standard PMM Client process, with no special requirements for ARM systems.
@@ -116,9 +152,9 @@ Try out this feature and share your experience on the [PMM forum](https://forums
### Improved UX with Grafana's latest release
-PMM now integrates Grafana 11.1.8, which delivers the following important enhancements alongside all the advancements introduced since the previous Grafana 9.2.20 integration in PMM2.
+PMM now integrates Grafana 11.1.8, which delivers the following important enhancements alongside all the advancements introduced since the previous Grafana 9.2.20 integration in PMM 2.
-For the full list of Grafana changes included with this update, see [Grafana’s 11.1.8 changelog](https://community.grafana.com/t/changelog-updates-in-grafana-11-1-8/134843) and [Grafana release highlights](https://grafana.com/docs/grafana/latest/whatsnew/).
+For the full list of Grafana changes included with this update, see [Grafana's 11.1.8 changelog](https://community.grafana.com/t/changelog-updates-in-grafana-11-1-8/134843) and [Grafana release highlights](https://grafana.com/docs/grafana/latest/whatsnew/).
#### Improved navigation
@@ -150,14 +186,30 @@ PMM now provides full monitoring support for the default `postgres` database on
This enhancement resolves a previous visibility gap where database activity was hidden when applications used the default database.
-While using the default database for applications is not recommended, PMM v3 ensures comprehensive visibility, empowering teams to identify and address this practice proactively and maintain better database management.
+While using the default database for applications is not recommended, PMM 3 ensures comprehensive visibility, empowering teams to identify and address this practice proactively and maintain better database management.
+
+### Added Oplog generation rate panel to MongoDB dashboards
+
+The **MongoDB Sharded Cluster Summary** and **MongoDB ReplSet Summary** dashboards now include an **Oplog GB/Hour** panel showing the oplog generation rate per hour in a column format.
+
+The panel is located in the **Replication** section, helping you monitor oplog generation alongside other replication metrics for better visibility into your database replication patterns:
+
+
+
+### General availability of MongoDB Router Summary dashboard
+
+The [MongoDB Router Summary](../reference/dashboards/dashboard-mongodb-router-summary.md), initially introduced as an experimental dashboard in PMM 2, is now generally available in PMM 3. This dashboard provides comprehensive monitoring for MongoS routers in sharded MongoDB clusters, offering insights into MongoS availability, version details and resource utilization.
+
+You can access this dashboard at **MongoDB > High availability > Router summary**.
+
+
### [Tech Preview] Support for PSMDB and Community MongoDB 8.0
The latest version of MongoDB, along with [Percona Server for MongoDB 8.0](https://www.percona.com/software/mongodb/percona-server-for-mongodb), brings [numerous improvements and significant performance enhancements](https://www.mongodb.com/docs/manual/release-notes/8.0/).
In this version of PMM, we are also adding support for MongoDB 8, allowing MongoDB users to monitor their new version and observe its performance impact.
-This includes updates to `mongodb_exporter` to accommodate PSMDB 8.0’s revised metrics structure and renamed metrics (e.g., `wiredTiger.concurrentTransactions` is now `queues.execution`).
+This includes updates to `mongodb_exporter` to accommodate PSMDB 8.0's revised metrics structure and renamed metrics (e.g., `wiredTiger.concurrentTransactions` is now `queues.execution`).
This enhances monitoring, particularly for sharded cluster deployments, and requires PMM Agent version 2.43.1 or later.
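+
+To confirm which agent version a monitored node is running, you can check the PMM Client status on that node:
+
+```sh
+# the output includes the pmm-agent version for this node
+pmm-admin status
+```
+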
@@ -184,20 +236,29 @@ We began this transition from CentOS 7 to EL9 with the latest PMM 2 releases, an
By moving to EL9, we ensure that PMM is built on most recent library versions and stays compatible with new technologies. Moreover, EL9 grants access to faster upstream responses to issues, particularly those concerning security, so that your PMM setup remains up-to-date and secure.
-Due to this change, PMM 3 cannot be started on host servers running EL7.
+**Due to this change, PMM 3 cannot be started on host servers running EL7**.
### Finalized DBaaS migration to Percona Everest
-In previous PMM releases, the Database as a Service (DBaaS) functionality has been gradually transferred to Percona Everest, an open source cloud-native database platform that solves the challenge of public cloud DBaaS vendor lock-in.
+In previous PMM releases, the Database as a Service (DBaaS) functionality has been gradually transferred to [Percona Everest](https://www.percona.com/resources/percona-everest), an open source cloud-native database platform that solves the challenge of public cloud DBaaS vendor lock-in.
With Percona Everest, you gain the ability to provision and oversee highly performant database clusters on the infrastructure you manage, whether it's your preferred cloud environment or on-premises. This empowerment extends to regaining control over critical aspects such as data access, database configuration, and the costs associated with cloud-based database operations.
While PMM 2.x versions continue to support existing DBaaS functionality, PMM 3 marks the complete deprecation of this feature, removing all references to DBaaS.
-If you are an existing PMM user who relies on DBaaS functionality, we encourage you to explore [Percona Everest](https://www.percona.com/resources/percona-everest) and leverage its advanced features for database deployment. Percona Everest also integrates with PMM to provide monitoring capabilities for your database infrastructure.
+If you are an existing PMM user who relies on DBaaS functionality, we encourage you to explore Percona Everest and leverage its advanced features for database deployment. Percona Everest also integrates with PMM to provide monitoring capabilities for your database infrastructure.
To learn more about integrating Percona Everest with PMM and adding monitoring endpoints, see [Add monitoring endpoints in the Everest documentation](https://docs.percona.com/everest/use/monitor_endpoints.html).
+### Finalized Integrated Alerting deprecation and API removal
+
+This release completes the deprecation of Integrated Alerting started in PMM 2.31.0 by removing its remaining components and APIs:
+
+- Removed all Integrated Alerting API endpoints, including `/v1/Settings/TestEmailAlertingSettings`
+- Removed Integrated Alerting-related fields from the PMM Settings API (`email_alerting_settings` and `slack_alerting_settings`)
+
+If you still have alert rules that haven't been migrated to Percona Alerting, use the [Integrated Alerting Migration Script](https://github.com/percona/pmm/blob/main/ia_migration.py) to migrate them. [Percona Alerting](../alert/index.md) provides enhanced capabilities through Grafana's alerting infrastructure and pre-configured Alert Rule Templates.
+
### Breaking API changes
This release introduces major breaking API changes:
@@ -208,17 +269,17 @@ This release introduces major breaking API changes:
- Service, node, and agent management has been streamlined through consolidated endpoints where the resource type is specified as a top-level property in the request payload.
- Low-level Inventory API sections have been removed from documentation in favor of the Management API for inventory-related tasks.
-For detailed information about all these API changes and new endpoints, see the [PMM API documentation](https://percona-pmm.readme.io/v3/reference/release-notes-3-0-0-beta).
+For detailed information about all these API changes and new endpoints, see the [PMM API documentation](https://percona-pmm.readme.io/v3/reference/release-notes-3-0-0).
### New upgrade environment variables
-When migrating from PMM v2 to PMM v3, you’ll need to update your environment variables to match the new naming convention. This is because PMM v3 introduces several important changes to improve consistency and clarity:
+When migrating from PMM 2 to PMM 3, you'll need to update your environment variables to match the new naming convention. This is because PMM 3 introduces several important changes to improve consistency and clarity:
- environment variables now use the `PMM_` prefix
- some boolean flags are reversed (e.g., `DISABLE_` becomes `ENABLE_`)
- deprecated variables have been removed
-To check the Migration reference table, see [Environment variables in PMM](../install-pmm/install-pmm-server/baremetal/docker/env_var.md##variables-for-migrating-from-pmm-v2-to-pmm-v3).
+To check the Migration reference table, see [Environment variables in PMM](../install-pmm/install-pmm-server/deployment-options/docker/env_var.md#variables-for-migrating-from-pmm-v2-to-pmm-v3).
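+
+An illustrative sketch of the renaming pattern (the values are placeholders; the reference table linked above is the authoritative mapping):
+
+```sh
+# PMM 2                      PMM 3
+DATA_RETENTION=720h          # becomes PMM_DATA_RETENTION=720h
+DISABLE_UPDATES=true         # becomes PMM_ENABLE_UPDATES=false (boolean flag reversed)
+```
+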
### Grafana Angular support discontinuation
@@ -230,15 +291,16 @@ For the full list of affected plugins and guidance on migration, see [Grafana's
We will provide regular updates on our migration progress in future releases to help you prepare for this change and modernize your dashboards.
-## Components upgrade
+## Component upgrades
We've upgraded the following PMM components to their latest stable versions to enhance functionality, security, and performance:
-- **Grafana 11.1.8**: Includes significant improvements over the previous version 9.2.20 integration in PMM2.
+- **Grafana 11.1.8**: Includes significant improvements over the previous version 9.2.20 integration in PMM 2.
- **Node Exporter 1.8.2**: The latest stable release enhances system metrics collection with improved security, additional metrics for custom dashboards, and critical bug fixes. This version strengthens our ability to monitor crucial system-level metrics through upstream improvements.
- **ClickHouse Datasource plugin**: Updated to address security vulnerabilities and maintain system integrity. This update ensures continued reliable operation of ClickHouse-related dashboards.
- **ClickHouse-go driver**: Upgraded QAN to use version 2 of the driver, improving database connectivity and performance.
+
## Improvements
- [PMM-13399](https://perconadev.atlassian.net/browse/PMM-13399) - PMM Client packages (DEB, RPM, and tarball) now include the Nomad binary, laying the foundation for expanded functionality in future PMM releases.
@@ -260,9 +322,20 @@ While the Nomad binary is now included and properly configured within the PMM Cl
- [PMM-12940](https://perconadev.atlassian.net/browse/PMM-12940) - We've added automated update support for AMI/OVF deployments. The new **Updates** page also enables AMI and OVF deployments to update PMM Server directly from the UI, following the integration of the Watchtower container.
- [PMM-11216](https://perconadev.atlassian.net/browse/PMM-11216) - Added ability to upgrade PMM Server between different version tags, enabling more flexible version management for Docker-based deployments.
-
## Fixed issues
- [PMM-13122](https://perconadev.atlassian.net/browse/PMM-13122) - Fixed navigation between pages to properly maintain selected service names and timeframes when switching between different dashboards and metrics views.
-- [PMM-12013](https://perconadev.atlassian.net/browse/PMM-12013) - Fixed reliability and memory usage issues with RDS monitoring in large deployments by running separate RDS exporters per AWS access key. This improves metric collection stability and reduces memory consumption when monitoring multiple RDS instances.
\ No newline at end of file
+- [PMM-12013](https://perconadev.atlassian.net/browse/PMM-12013) - Fixed reliability and memory usage issues with RDS monitoring in large deployments by running separate RDS exporters per AWS access key. This improves metric collection stability and reduces memory consumption when monitoring multiple RDS instances.
+
+- [PMM-13360](https://perconadev.atlassian.net/browse/PMM-13360) - Fixed an issue in the MongoDB ReplSet Summary dashboard where nodes in `down` state would sometimes disappear from the **States** panel and their version information would be removed from the **MongoDB Versions** panel. Nodes in `down` state now remain visible with their last known version information preserved.
+
+- [PMM-13584](https://perconadev.atlassian.net/browse/PMM-13584) - Fixed an issue in the MongoDB ReplSet Summary dashboard where the bottom graphs displayed "no data" due to incorrect metric query syntax.
+
+
+!!! seealso alert alert-info "Ready to install or migrate to PMM 3?"
+
+ We provide two installation scripts to help you get started with this new version:
+
+ - For new installations, the [Easy-Install script](../quickstart/index.md) comes with Watchtower pre-configured, enabling one-step PMM setup with automatic updates.
+ - For existing PMM 2 users, we provide a dedicated [Upgrade script](../pmm-upgrade/migrating_from_pmm_2.md) that safely migrates your installation to PMM 3 and ensures data is backed up before the upgrade.
diff --git a/documentation/docs/release-notes/3.0.0_1.md b/documentation/docs/release-notes/3.0.0_1.md
new file mode 100644
index 0000000000..0281cdf505
--- /dev/null
+++ b/documentation/docs/release-notes/3.0.0_1.md
@@ -0,0 +1,161 @@
+# Percona Monitoring and Management 3.0.0-1
+
+**Release date**: February 10th, 2025
+
+Percona Monitoring and Management (PMM) is an open-source database monitoring, management, and observability solution for MySQL, PostgreSQL, and MongoDB.
+
+It provides tools to observe database health, analyze performance trends, troubleshoot issues, and execute database management tasks—whether your databases are on-premises or in the cloud.
+
+
+## SEVERITY: CRITICAL - IMMEDIATE ACTION REQUIRED
+
+A critical vulnerability has been identified in PMM Open Virtual Appliance (OVA) installations that enables unauthorized `root` access and potential exposure of system credentials. This vulnerability is tracked as [CVE-2025-26701](https://cve.mitre.org/cgi-bin/cvename.cgi?name=2025-26701).
+
+### Immediate actions required
+Your system may be exposed to unauthorized root access and credential theft. Take the following steps immediately to secure your infrastructure:
+{ .power-number }
+
+1. UPGRADE IMMEDIATELY to PMM 3.0.0-1 (strongly recommended).
+2. CHANGE ALL CREDENTIALS for connected services and databases.
+3. AUDIT ACCESS LOGS for potential unauthorized access.
+
+### Vulnerability details
+This vulnerability stems from default service account credentials in OVA provisioning, which enable:
+
+ - unauthorized SSH access
+ - privilege escalation to `root` via `sudo` capabilities
+ - potential exposure of service credentials and configurations
+
+### Affected installations
+The table below lists currently known affected deployments. We will update it if additional products are identified:
+
+| Affected deployments | Version | Notes |
+|-------------------------------------------|---------|-------|
+| PMM Open Virtual Appliance (OVA) installations | ≥ 2.38 | |
+
+
+!!! warning "Restrict SSH access"
+ PMM OVA installations should never have port 22 exposed to the public Internet unless you have implemented additional security hardening measures. Always use firewalls, VPNs, or other secure remote access methods.
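+
+As an illustration only (the management subnet below is a placeholder; adapt the rules to your own firewall and network layout), SSH access can be restricted to trusted addresses with `firewalld`:
+
+```sh
+# remove the default open SSH rule, then allow SSH only from a trusted management subnet
+firewall-cmd --permanent --remove-service=ssh
+firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="203.0.113.0/24" service name="ssh" accept'
+firewall-cmd --reload
+```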
+
+### Mitigation options
+
+
+=== "PREFERRED: Upgrade to PMM 3.0.0-1"
+ This release enhances security in OVA deployments by automatically removing unnecessary system accounts during the initial setup.
+
+ To secure your system, follow these steps to upgrade:
+ { .power-number }
+
+ 1. Download and deploy the new OVA file from the [Percona website](https://www.percona.com/downloads).
+
+ 2. Log in to your system:
+ ```sh
+ ssh admin@your-pmm-server
+ ```
+
+ 3. Switch to `root` or use `sudo` for the following commands:
+ ```sh
+ sudo -i
+ # or use sudo before each command
+ ```
+
+ 4. Stop services on your current installation:
+ ```sh
+ supervisorctl stop all
+ ```
+
+ 5. Back up and transfer your data:
+ ```sh
+ cd /home/admin/volume/srv
+ tar -cvf srv.tar .
+ ```
+ 6. Transfer `srv.tar` to the new server via `scp`:
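+ A minimal sketch, assuming the new server is reachable as `new-pmm-server` (a placeholder; adjust the host to your environment):
+ ```sh
+ # copy the backup archive into /home/admin/volume on the new host
+ scp /home/admin/volume/srv/srv.tar admin@new-pmm-server:/home/admin/volume/
+ ```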
+
+ 7. Deploy data on the new installation:
+ ```sh
+ # Stop all services
+ supervisorctl stop all
+
+ # Clear existing data
+ cd /home/admin/volume/srv
+ rm -rf *
+
+ # Extract backup
+ tar -xvf /home/admin/volume/srv.tar
+
+ # Start all services
+ supervisorctl start all
+ ```
+
+ 8. Update DNS records or swap IP addresses to ensure uninterrupted Client connectivity.
+
+ #### Verification steps
+
+ After upgrading, verify that your system is functioning correctly:
+ { .power-number }
+
+ 1. Check service status to confirm both PMM Client and PMM Server are running:
+ ```sh
+ supervisorctl status
+ ```
+ 2. Ensure Client connectivity to validate data flow.
+
+ 3. Test system functionality by performing basic monitoring tasks.
+
+=== "TEMPORARY: If you cannot upgrade immediately"
+ If an upgrade is not immediately possible, follow these steps to mitigate the vulnerability:
+ { .power-number }
+
+ 1. Secure SSH access:
+
+ - block port 22 access at firewall level
+ - if remote access is required, restrict it to specific IP addresses
+ - consider using a VPN for remote management
+
+ 2. Log in to your system:
+ ```sh
+ ssh admin@your-pmm-server
+ ```
+
+ 3. Switch to `root` or use `sudo` for the following commands:
+ ```sh
+ sudo -i
+ # or use sudo before each command
+ ```
+ 4. Execute ONE of these commands to secure the vulnerable account:
+
+ - disable login:
+ ```sh
+ usermod -s /sbin/nologin vagrant
+ ```
+
+ - lock account:
+ ```sh
+ passwd -l vagrant
+ ```
+
+ - remove user completely:
+ ```sh
+ kill -9 $(pgrep -f vagrant)
+ userdel -r vagrant
+ ```
+
+ 5. Update service credentials:
+
+ - change monitoring user passwords in your databases (MySQL, PostgreSQL, MongoDB)
+ - update any custom service accounts you've created
+ - rotate authentication tokens for monitored services
+ - update corresponding credentials in PMM configuration
+ - configure SSH access: add public key via **PMM Configuration > Settings > SSH Key**
+
+ 6. Monitor system logs for unauthorized access.
+
+### Support & additional resources
+If you require further clarification or assistance, we are available 24/7:
+
+ - [Technical support portal for customers](https://my.percona.com)
+ - [Technical support for community](https://forums.percona.com/c/percona-monitoring-and-management-pmm)
+ - [Security advisory announcement](https://www.percona.com/blog/security-advisory-cve-affecting-percona-monitoring-and-management-pmm/)
+
+### Upgrading from PMM 2.x?
+If you are running PMM 2.38 or later, make sure to upgrade to PMM 2.44.0-1. For specific upgrade instructions, see the [PMM 2.44.0-1 Release Notes](https://docs.percona.com/percona-monitoring-and-management/2/release-notes/2.44.0.1.html).
\ No newline at end of file
diff --git a/documentation/docs/release-notes/index.md b/documentation/docs/release-notes/index.md
index 7b0478b76d..1e9a3a4b8b 100644
--- a/documentation/docs/release-notes/index.md
+++ b/documentation/docs/release-notes/index.md
@@ -1,2 +1,3 @@
# Release Notes
-- [Percona Monitoring and Management 3.0.0 Beta](3.0.0_Beta.md)
\ No newline at end of file
+- [Percona Monitoring and Management 3.0.0-1](3.0.0_1.md)
+- [Percona Monitoring and Management 3.0.0](3.0.0.md)
\ No newline at end of file
diff --git a/documentation/docs/trademark-policy.md b/documentation/docs/trademark-policy.md
deleted file mode 100644
index 431e3dcd9b..0000000000
--- a/documentation/docs/trademark-policy.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Trademark policy
-
-This [Trademark Policy](https://www.percona.com/trademark-policy) is to ensure that users of Percona-branded products or
-services know that what they receive has really been developed, approved,
-tested and maintained by Percona. Trademarks help to prevent confusion in the
-marketplace, by distinguishing one company’s or person’s products and services
-from another’s.
-
-Percona owns a number of marks, including but not limited to Percona, XtraDB,
-Percona XtraDB, XtraBackup, Percona XtraBackup, Percona Server, and Percona
-Live, plus the distinctive visual icons and logos associated with these marks.
-Both the unregistered and registered marks of Percona are protected.
-
-Use of any Percona trademark in the name, URL, or other identifying
-characteristic of any product, service, website, or other use is not permitted
-without Percona’s written permission with the following three limited
-exceptions.
-
-*First*, you may use the appropriate Percona mark when making a nominative fair
-use reference to a bona fide Percona product.
-
-*Second*, when Percona has released a product under a version of the GNU
-General Public License (“GPL”), you may use the appropriate Percona mark when
-distributing a verbatim copy of that product in accordance with the terms and
-conditions of the GPL.
-
-*Third*, you may use the appropriate Percona mark to refer to a distribution of
-GPL-released Percona software that has been modified with minor changes for
-the sole purpose of allowing the software to operate on an operating system or
-hardware platform for which Percona has not yet released the software, provided
-that those third party changes do not affect the behavior, functionality,
-features, design or performance of the software. Users who acquire this
-Percona-branded software receive substantially exact implementations of the
-Percona software.
-
-Percona reserves the right to revoke this authorization at any time in its sole
-discretion. For example, if Percona believes that your modification is beyond
-the scope of the limited license granted in this Policy or that your use of the
-Percona mark is detrimental to Percona, Percona will revoke this authorization.
-Upon revocation, you must immediately cease using the applicable Percona mark.
-If you do not immediately cease using the Percona mark upon revocation, Percona
-may take action to protect its rights and interests in the Percona mark.
-Percona does not grant any license to use any Percona mark for any other
-modified versions of Percona software; such use will require our prior written
-permission.
-
-Neither trademark law nor any of the exceptions set forth in this Trademark
-Policy permit you to truncate, modify or otherwise use any Percona mark as part
-of your own brand. For example, if XYZ creates a modified version of the
-Percona Server, XYZ may not brand that modification as “XYZ Percona Server” or
-“Percona XYZ Server”, even if that modification otherwise complies with the
-third exception noted above.
-
-In all cases, you must comply with applicable law, the underlying license, and
-this Trademark Policy, as amended from time to time. For instance, any mention
-of Percona trademarks should include the full trademarked name, with proper
-spelling and capitalization, along with attribution of ownership to Percona
-Inc. For example, the full proper name for XtraBackup is Percona XtraBackup.
-However, it is acceptable to omit the word “Percona” for brevity on the second
-and subsequent uses, where such omission does not cause confusion.
-
-In the event of doubt as to any of the conditions or exceptions outlined in
-this Trademark Policy, please contact [trademarks@percona.com](mailto:trademarks@percona.com) for assistance and
-we will do our very best to be helpful.
diff --git a/documentation/docs/troubleshoot/checklist.md b/documentation/docs/troubleshoot/checklist.md
index d1475a1188..a75ac5d94b 100644
--- a/documentation/docs/troubleshoot/checklist.md
+++ b/documentation/docs/troubleshoot/checklist.md
@@ -16,7 +16,7 @@ The following questions might help you identify the origin of the problem while
9. Are you using a Technical Preview feature? Technical Preview features are not production-ready and should only be used in testing environments. For more information, see the relevant Release Notes.
10. For installing the PMM client, are you using a package other than a binary package without root permissions?
11. Is your [PMM Server](../install-pmm/install-pmm-server/index.md) installed and running with a known IP address accessible from the client node?
-12. Is the [PMM Client](../install-pmm/install-pmm-client/index.md) installed, and is the node [registered with PMM Server](../setting-up/client/index.md#register)?
+12. Is the [PMM Client](../install-pmm/install-pmm-client/index.md) installed, and is the node [registered with PMM Server](../install-pmm/register-client-node/index.md)?
13. Is PMM Client configured correctly and has access to the config file?
14. For monitoring MongoDB, do you have adminUserAnyDatabase or superuser role privilege to any database servers you want to monitor?
15. For monitoring Amazon RDS using PMM, is there too much latency between PMM Server and the Amazon RDS instance?
diff --git a/documentation/docs/troubleshoot/qan_issues.md b/documentation/docs/troubleshoot/qan_issues.md
index 879bb5e7c6..aa9648ca15 100644
--- a/documentation/docs/troubleshoot/qan_issues.md
+++ b/documentation/docs/troubleshoot/qan_issues.md
@@ -16,4 +16,4 @@ There might be multiple places where the problem might come from:
**Why don't I see the whole query?**
-Long query examples and fingerprints can be truncated to 1024 symbols to reduce space usage. In this case, the query explains section will not work.
\ No newline at end of file
+Long query examples and fingerprints are truncated to 2048 characters by default to reduce space usage. In this case, the query Explain section will not work. The maximum query length can be configured with the `--max-query-length` flag when adding a service.
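+
+For example, a sketch only (the service name, address, and credentials are placeholders):
+
+```sh
+# store up to 4096 characters of each query example and fingerprint
+pmm-admin add mysql --username=pmm --password=<password> --max-query-length=4096 mysql-prod 127.0.0.1:3306
+```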
diff --git a/documentation/docs/use/commands/pmm-admin.md b/documentation/docs/use/commands/pmm-admin.md
index 9c89c102a5..1ea7af2396 100644
--- a/documentation/docs/use/commands/pmm-admin.md
+++ b/documentation/docs/use/commands/pmm-admin.md
@@ -209,8 +209,7 @@ DATABASE:= [[MongoDB](#mongodb) | [MySQL](#mysql) | [PostgreSQL](#postgresql) |
`--force`
: Remove service with that name or ID and all dependent services and agents.
-When you remove a service, collected data remains on PMM Server for the specified [retention period](../../reference/faq.md#retention).
-
+When you remove a service, collected data remains on PMM Server for the specified [retention period](../../reference/faq.md#how-to-control-data-retention--retention-).
#### `pmm-admin annotate`
`pmm-admin annotate [--node|--service] [--tags ] [--node-name=] [--service-name=]`
@@ -446,7 +445,7 @@ Enable all collectors and limit monitoring for `dbstats`, `indexstats`, `collsta
##### Resolutions
-PMM collects metrics in two [resolutions](../../how-to/configure.md#metrics-resolution) to decrease CPU and Memory usage: high and low resolutions.
+PMM collects metrics in two [resolutions](../../configure-pmm/metrics_res.md) to decrease CPU and Memory usage: high and low resolutions.
In high resolution we collect metrics from collectors which work fast:
- `diagnosticdata`
diff --git a/documentation/docs/use/dashboard-inventory.md b/documentation/docs/use/dashboard-inventory.md
index 167da7f01b..b70cda158b 100644
--- a/documentation/docs/use/dashboard-inventory.md
+++ b/documentation/docs/use/dashboard-inventory.md
@@ -2,7 +2,7 @@
The **Inventory** dashboard is a high-level overview of all objects registered in PMM.
-To check your inventory list, go to **Configuration** > **Inventory**.
+To check your inventory list, go to :material-cog: **Configuration** > **Inventory**.

diff --git a/documentation/docs/use/dashboards-panels/index.md b/documentation/docs/use/dashboards-panels/index.md
index ec2e8d6a55..c877426ce0 100644
--- a/documentation/docs/use/dashboards-panels/index.md
+++ b/documentation/docs/use/dashboards-panels/index.md
@@ -71,63 +71,63 @@ Performance Monitoring and Management (PMM) offers a range of dashboards you can
| High-availability | [PXC/Galera Nodes Compare] | 55
| High-availability | [HAProxy Instance Summary] | 113
-[Advanced Data Exploration]: ../../reference/dashboards/dashboard-advanced-data-exploration.md
-[Home Dashboard]: dashboard-home.md
-[DB Cluster Summary]: ../../reference/dashboard-cluster-summary.md
-[Prometheus Exporter Status]: ../../reference/dashboard-prometheus-exporter-status.md
-[Prometheus Exporters Overview]: ../../reference/dashboard-prometheus-exporters-overview.md
-[VictoriaMetrics]: ../../reference/dashboard-victoriametrics.md
-[VictoriaMetrics Agents Overview]: ../../reference/dashboard-victoriametrics-agents-overview.md
-[PMM Inventory]: dashboard-inventory.md
-[Environment Overview]: ../../reference/dashboard-env-overview.md
-[Environment Summary]: ../../reference/dashboard-environent-summary.md
-[CPU Utilization Details]: ../../reference/dashboard-cpu-utilization-details.md
-[Disk Details]: ../../reference/dashboard-disk-details.md
-[Network Details]: ../../reference/dashboard-network-details.md
-[Memory Details]: ../../reference/dashboard-memory-details.md
-[Node Temperature Details]: ../../reference/dashboard-node-temperature-details.md
-[Nodes Compare]: ../../reference/dashboard-nodes-compare.md
-[Nodes Overview]: ../../reference/dashboard-nodes-overview.md
-[Node Summary]: ../../reference/dashboard-node-summary.md
-[NUMA Details]: ../../reference/dashboard-numa-details.md
-[Processes Details]: ../../reference/dashboard-processes-details.md
-[Prometheus Exporter Status]: ../../reference/dashboard-prometheus-exporter-status.md
-[Prometheus Exporters Overview]: ../../reference/dashboard-prometheus-exporters-overview.md
-[MySQL Amazon Aurora Details]: ../../reference/dashboard-mysql-amazon-aurora-details.md
-[MySQL Command/Handler Counters Compare]: ../../reference/dashboard-mysql-command-handler-counters-compare.md
-[MySQL InnoDB Compression Details]: ../../reference/dashboard-mysql-innodb-compression-details.md
-[MySQL InnoDB Details]: ../../reference/dashboard-mysql-innodb-details.md
-[MySQL MyISAM/Aria Details]: ../../reference/dashboard-mysql-myisam-aria-details.md
-[MySQL MyRocks Details]: ../../reference/dashboard-mysql-myrocks-details.md
-[MySQL Instance Summary]: ../../reference/dashboard-mysql-instance-summary.md
-[MySQL Instances Compare]: ../../reference/dashboard-mysql-instances-compare.md
-[MySQL Instances Overview]: ../../reference/dashboard-mysql-instances-overview.md
-[MySQL Wait Event Analyses Details]: ../../reference/dashboard-mysql-wait-event-analyses-details.md
-[MySQL Performance Schema Details]: ../../reference/dashboard-mysql-performance-schema-details.md
-[MySQL Query Response Time Details]: ../../reference/dashboard-mysql-query-response-time-details.md
-[MySQL Replication Summary]: ../../reference/dashboard-mysql-replication-summary.md
-[MySQL Group Replication Summary]: ../../reference/dashboard-mysql-group-replication-summary.md
-[MySQL Table Details]: ../../reference/dashboard-mysql-table-details.md
-[MySQL User Details]: ../../reference/dashboard-mysql-user-details.md
-[MySQL TokuDB Details]: ../../reference/dashboard-mysql-tokudb-details.md
-[Experimental MongoDB Collection Overview]: ../../reference/dashboard-mongodb-experimental_collection_overview.md
-[Experimental MongoDB Collection Details]: ../../reference/dashboard-mongodb-experimental_collection_details.md
-[Experimental MongoDB Oplog Details]: ../../reference/dashboard-mongodb-experimental_oplog.md
-[MongoDB Cluster Summary]: ../../reference/dashboard-mongodb-cluster-summary.md
-[MongoDB Instance Summary]: ../../reference/dashboard-mongodb-instance-summary.md
-[MongoDB Instances Overview]: ../../reference/dashboard-mongodb-instances-overview.md
-[MongoDB Instances Compare]: ../../reference/dashboard-mongodb-instances-compare.md
-[MongoDB ReplSet Summary]: ../../reference/dashboard-mongodb-replset-summary.md
-[MongoDB InMemory Details]: ../../reference/dashboard-mongodb-inmemory-details.md
-[MongoDB MMAPv1 Details]: ../../reference/dashboard-mongodb-mmapv1-details.md
-[MongoDB WiredTiger Details]: ../../reference/dashboard-mongodb-wiredtiger-details.md
+[Advanced Data Exploration]: ../../reference/dashboards/dashboards/dashboard-advanced-data-exploration.md
+[Home Dashboard]: ../../reference/dashboards/dashboards/dashboard-home.md
+[DB Cluster Summary]: ../../reference/dashboards/dashboard-cluster-summary.md
+[Prometheus Exporter Status]: ../../reference/dashboards/dashboards/dashboard-prometheus-exporter-status.md
+[Prometheus Exporters Overview]: ../../reference/dashboards/dashboards/dashboard-prometheus-exporters-overview.md
+[VictoriaMetrics]: ../../reference/dashboards/dashboards/dashboard-victoriametrics.md
+[VictoriaMetrics Agents Overview]: ../../reference/dashboards/dashboards/dashboard-victoriametrics-agents-overview.md
+[PMM Inventory]: ../../use/dashboard-inventory.md
+[Environment Overview]: ../../reference/dashboards/dashboards/dashboard-env-overview.md
+[Environment Summary]: ../../reference/dashboards/dashboards/dashboard-environent-summary.md
+[CPU Utilization Details]: ../../reference/dashboards/dashboards/dashboard-cpu-utilization-details.md
+[Disk Details]: ../../reference/dashboards/dashboards/dashboard-disk-details.md
+[Network Details]: ../../reference/dashboards/dashboards/dashboard-network-details.md
+[Memory Details]: ../../reference/dashboards/dashboards/dashboard-memory-details.md
+[Node Temperature Details]: ../../reference/dashboards/dashboards/dashboard-node-temperature-details.md
+[Nodes Compare]: ../../reference/dashboards/dashboards/dashboard-nodes-compare.md
+[Nodes Overview]: ../../reference/dashboards/dashboards/dashboard-nodes-overview.md
+[Node Summary]: ../../reference/dashboards/dashboards/dashboard-node-summary.md
+[NUMA Details]: ../../reference/dashboards/dashboards/dashboard-numa-details.md
+[Processes Details]: ../../reference/dashboards/dashboards/dashboard-processes-details.md
+[Prometheus Exporter Status]: ../../reference/dashboards/dashboards/dashboard-prometheus-exporter-status.md
+[Prometheus Exporters Overview]: ../../reference/dashboards/dashboards/dashboard-prometheus-exporters-overview.md
+[MySQL Amazon Aurora Details]: ../../reference/dashboards/dashboards/dashboard-mysql-amazon-aurora-details.md
+[MySQL Command/Handler Counters Compare]: ../../reference/dashboards/dashboard-mysql-command-handler-counters-compare.md
+[MySQL InnoDB Compression Details]: ../../reference/dashboards/dashboard-mysql-innodb-compression-details.md
+[MySQL InnoDB Details]: ../../reference/dashboards/dashboard-mysql-innodb-details.md
+[MySQL MyISAM/Aria Details]: ../../reference/dashboards/dashboard-mysql-myisam-aria-details.md
+[MySQL MyRocks Details]: ../../reference/dashboards/dashboard-mysql-myrocks-details.md
+[MySQL Instance Summary]: ../../reference/dashboards/dashboard-mysql-instance-summary.md
+[MySQL Instances Compare]: ../../reference/dashboards/dashboard-mysql-instances-compare.md
+[MySQL Instances Overview]: ../../reference/dashboards/dashboard-mysql-instances-overview.md
+[MySQL Wait Event Analyses Details]: ../../reference/dashboards/dashboard-mysql-wait-event-analyses-details.md
+[MySQL Performance Schema Details]: ../../reference/dashboards/dashboard-mysql-performance-schema-details.md
+[MySQL Query Response Time Details]: ../../reference/dashboards/dashboard-mysql-query-response-time-details.md
+[MySQL Replication Summary]: ../../reference/dashboards/dashboard-mysql-replication-summary.md
+[MySQL Group Replication Summary]: ../../reference/dashboards/dashboard-mysql-group-replication-summary.md
+[MySQL Table Details]: ../../reference/dashboards/dashboard-mysql-table-details.md
+[MySQL User Details]: ../../reference/dashboards/dashboard-mysql-user-details.md
+[MySQL TokuDB Details]: ../../reference/dashboards/dashboard-mysql-tokudb-details.md
+[Experimental MongoDB Collection Overview]: ../../reference/dashboards/dashboard-mongodb-experimental_collection_overview.md
+[Experimental MongoDB Collection Details]: ../../reference/dashboards/dashboard-mongodb-experimental_collection_details.md
+[Experimental MongoDB Oplog Details]: ../../reference/dashboards/dashboard-mongodb-experimental_oplog.md
+[MongoDB Cluster Summary]: ../../reference/dashboards/dashboards/dashboard-mongodb-cluster-summary.md
+[MongoDB Instance Summary]: ../../reference/dashboards/dashboard-mongodb-instance-summary.md
+[MongoDB Instances Overview]: ../../reference/dashboards/dashboard-mongodb-instances-overview.md
+[MongoDB Instances Compare]: ../../reference/dashboards/dashboard-mongodb-instances-compare.md
+[MongoDB ReplSet Summary]: ../../reference/dashboards/dashboard-mongodb-replset-summary.md
+[MongoDB InMemory Details]: ../../reference/dashboards/dashboard-mongodb-inmemory-details.md
+[MongoDB MMAPv1 Details]: ../../reference/dashboards/dashboard-mongodb-mmapv1-details.md
+[MongoDB WiredTiger Details]: ../../reference/dashboards/dashboard-mongodb-wiredtiger-details.md
[Experimental PostgreSQL Vacuum Monitoring]: dashboard-postgresql-vacuum-monitoring-experimental.md
-[PostgreSQL Instances Overview]: ../../reference/dashboard-postgresql-instances-overview.md
-[PostgreSQL Instance Summary]: ../../reference/dashboard-postgresql-instance-summary.md
-[PostgreSQL Instances Compare]: ../../reference/dashboard-postgresql-instances-compare.md
-[ProxySQL Instance Summary]: ../../reference/dashboard-proxysql-instance-summary.md
-[PXC/Galera Node Summary]: ../../reference/dashboard-pxc-galera-node-summary.md
-[PXC/Galera Cluster Summary]: ../../reference/dashboard-pxc-galera-cluster-summary.md
-[Experimental PXC/Galera Cluster Summary]: ../../reference/dashboard-pxc-galera-cluster-summary-experimental.md
-[PXC/Galera Nodes Compare]: ../../reference/dashboard-pxc-galera-nodes-compare.md
-[HAProxy Instance Summary]: ../../reference/dashboard-haproxy-instance-summary.md
\ No newline at end of file
+[PostgreSQL Instances Overview]: ../../reference/dashboards/dashboard-postgresql-instances-overview.md
+[PostgreSQL Instance Summary]: ../../reference/dashboards/dashboard-postgresql-instance-summary.md
+[PostgreSQL Instances Compare]: ../../reference/dashboards/dashboard-postgresql-instances-compare.md
+[ProxySQL Instance Summary]: ../../reference/dashboards/dashboard-proxysql-instance-summary.md
+[PXC/Galera Node Summary]: ../../reference/dashboards/dashboard-pxc-galera-node-summary.md
+[PXC/Galera Cluster Summary]: ../../reference/dashboards/dashboard-pxc-galera-cluster-summary.md
+[Experimental PXC/Galera Cluster Summary]: ../../reference/dashboards/dashboard-pxc-galera-cluster-summary-experimental.md
+[PXC/Galera Nodes Compare]: ../../reference/dashboards/dashboard-pxc-galera-nodes-compare.md
+[HAProxy Instance Summary]: ../../reference/dashboards/dashboard-haproxy-instance-summary.md
\ No newline at end of file
diff --git a/documentation/docs/use/dashboards-panels/manage-dashboards/manage-folders.md b/documentation/docs/use/dashboards-panels/manage-dashboards/manage-folders.md
index 2fcfc90dd0..a215658af3 100644
--- a/documentation/docs/use/dashboards-panels/manage-dashboards/manage-folders.md
+++ b/documentation/docs/use/dashboards-panels/manage-dashboards/manage-folders.md
@@ -23,7 +23,7 @@ The other way of moving dashboards from one folder to another is:
{.power-number}
1. On the **Dashboards** page, click on the dashboard that you want to move to another folder.
-2. Click on Dashboard settings icon at the top of the page.
+2. Click on :material-cog: Dashboard settings icon at the top of the page.
3. On the **General** tab, use the **Folder** drop-down menu to select the new target folder.
4. Click **Save Dashboard** on the left to save the change.

diff --git a/documentation/docs/use/dashboards-panels/manage-dashboards/set-custom-dashboard.md b/documentation/docs/use/dashboards-panels/manage-dashboards/set-custom-dashboard.md
index e8a819979b..6f54b6d768 100644
--- a/documentation/docs/use/dashboards-panels/manage-dashboards/set-custom-dashboard.md
+++ b/documentation/docs/use/dashboards-panels/manage-dashboards/set-custom-dashboard.md
@@ -4,12 +4,13 @@ The home dashboard you set is the dashboard all the users will see after logging
### Set home dashboard for your organization
-Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../../how-to/manage-users.md).
+Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../../admin/manage-users/index.md)
+
{.power-number}
1. From the side menu, go to **Dashboards** and click on the dashboard that you want to set as the home dashboard.
2. Click the star on top of the page to mark the dashboard as a favorite.
-3. From the main menu on the left, go to **Administration > Default preferences**.
+3. From the main menu on the left, go to :material-cog: **Administration > Default preferences**.
4. In the **Home Dashboard** field, select the dashboard that you want to set as your home dashboard.
5. Click **Save**.
@@ -20,7 +21,7 @@ Organization and team Admins can set the home dashboard for their team as follow
1. Navigate to the dashboard that you want to set as your home dashboard.
2. Click star next to the dashboard title to mark the dashboard as a favorite.
-3. From the main menu on the left, go to **Administration > Users and access > Teams**.
+3. From the main menu on the left, go to :material-cog: **Administration > Users and access > Teams**.
4. Click on the team for whom you want to set the home dashboard and then navigate to the **Settings** tab.
5. In the **Home Dashboard** field, select the dashboard that you want to use for your home dashboard.
6. Click **Save**.
\ No newline at end of file
diff --git a/documentation/docs/use/dashboards-panels/share-dashboards/share_dashboard.md b/documentation/docs/use/dashboards-panels/share-dashboards/share_dashboard.md
index 82ae55bc27..6066e94caa 100644
--- a/documentation/docs/use/dashboards-panels/share-dashboards/share_dashboard.md
+++ b/documentation/docs/use/dashboards-panels/share-dashboards/share_dashboard.md
@@ -50,7 +50,7 @@ To enable image rendering:
-p 8443:443 \
-e GF_RENDERING_SERVER_URL=http://renderer:8081/render \
-e GF_RENDERING_CALLBACK_URL=https://pmm-server:8443/graph/ \
- perconalab/pmm-server:3.0.0-beta
+ percona/pmm-server:3
```
### Render panel image
diff --git a/documentation/docs/use/dashboards/dashboard-inventory.md b/documentation/docs/use/dashboards/dashboard-inventory.md
index 1247d1aadb..709972fa02 100644
--- a/documentation/docs/use/dashboards/dashboard-inventory.md
+++ b/documentation/docs/use/dashboards/dashboard-inventory.md
@@ -2,7 +2,7 @@
The **Inventory** dashboard is a high-level overview of all objects registered in PMM.
-To check your inventory list, go to **PMM Configuration** > **PMM Inventory**.
+To check your inventory list, go to :material-cog: **PMM Configuration** > **PMM Inventory**.
Inventory objects form a hierarchy with Node at the top, then Service and Agents assigned to a Node. This information is detailed in the two tabs available on this page.
diff --git a/documentation/docs/use/dashboards/dashboard-manage-dashboards.md b/documentation/docs/use/dashboards/dashboard-manage-dashboards.md
index 21b7b66481..9ef91c6413 100644
--- a/documentation/docs/use/dashboards/dashboard-manage-dashboards.md
+++ b/documentation/docs/use/dashboards/dashboard-manage-dashboards.md
@@ -88,7 +88,7 @@ The Home dashboard you set is the dashboard all the users will see after logging
### For your organization
-Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../how-to/manage-users.md)
+Organization Admins can set the home dashboard for their organization. For information on managing users in an organization, see [Manage Users](../../admin/manage-users/index.md).
1. Navigate to the dashboard that you want to set as the home dashboard.
2. Click the star next to the dashboard title to mark the dashboard as a favorite.
diff --git a/documentation/docs/use/qan/panels/details.md b/documentation/docs/use/qan/panels/details.md
index 56d866dcd1..32ea4fb07d 100644
--- a/documentation/docs/use/qan/panels/details.md
+++ b/documentation/docs/use/qan/panels/details.md
@@ -33,7 +33,7 @@ For PostgreSQL queries (when using `pg_stat_monitor`) the top query will also be

-Other useful metrics (when using **pg_stat_monitor**) to monitor PostgreSQL Server performance are [Histograms](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md#histogram).
+Other useful metrics (when using **pg_stat_monitor**) to monitor PostgreSQL Server performance are [Histograms](https://docs.percona.com/pg-stat-monitor/user_guide.html#histogram).
**Histograms** provide more explicit information about number of queries for fingerprint (`queryid`). Ranges are from 0 seconds up to 100 seconds.
Here is picture of **histogram** in graph:
diff --git a/documentation/docs/use/qan/panels/filters.md b/documentation/docs/use/qan/panels/filters.md
index cc9066cb29..9f2672e54c 100644
--- a/documentation/docs/use/qan/panels/filters.md
+++ b/documentation/docs/use/qan/panels/filters.md
@@ -14,7 +14,7 @@
## Custom filter groups
!!! caution alert alert-warning "Important/Caution"
- This feature is still in [Technical Preview](https://docs.percona.com/percona-monitoring-and-management/details/glossary.html#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
+ This feature is still in [Technical Preview](../../../reference/glossary.md#technical-preview) and is subject to change. We recommend that early adopters use this feature for testing purposes only.
Filter queries using custom key=value pairs from query comments. This feature is disabled by default.
diff --git a/documentation/docs/use/qan/share_link.md b/documentation/docs/use/qan/share_link.md
index da92333b37..10e9affd70 100644
--- a/documentation/docs/use/qan/share_link.md
+++ b/documentation/docs/use/qan/share_link.md
@@ -9,7 +9,3 @@ To share a link for Query Analytics, use **Copy Link**. It copies the link to th

-
-[SLOW_QUERY_LOG]: ../setting-up/client/mysql.md#slow-query-log
-[PERFORMANCE_SCHEMA]: ../setting-up/client/mysql.md#performance-schema
-
diff --git a/documentation/mkdocs-base.yml b/documentation/mkdocs-base.yml
index 4e6275a191..8ea40d852f 100644
--- a/documentation/mkdocs-base.yml
+++ b/documentation/mkdocs-base.yml
@@ -15,7 +15,6 @@ theme:
name: material
custom_dir: overrides
logo: assets/pmm-mark.svg
- favicon: assets/pmm-fav.svg
font:
text: Roboto
code: Roboto Mono
@@ -55,7 +54,7 @@ theme:
- content.action.view
extra_css:
- - https://unicons.iconscout.com/release/v3.0.3/css/line.css
+ - https://unicons.iconscout.com/release/v4.0.8/css/line.css
- https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.4.0/css/font-awesome.min.css
- css/nocopy.css
- css/design.css
@@ -118,7 +117,7 @@ plugins:
# https://pypi.org/project/mkdocs-exclude/
exclude:
glob:
- - "setting-up/client/docker.md"
+ # - "install-pmm/install-pmm-client/docker.md"
mike:
version_selector: true
css_dir: css
@@ -134,79 +133,91 @@ extra:
# Common navigation for percona.com, render.com and PDF
nav:
- Home: index.md
+ - Get help from Percona: get-help.md
- Discover:
- discover-pmm/why-pmm.md
- discover-pmm/features.md
- Quickstart guide: quickstart.md
- Install:
- - install-pmm/index.md
- - Plan your PMM installation:
- - install-pmm/plan-pmm-installation/hardware_and_system.md
- - install-pmm/plan-pmm-installation/network_and_firewall.md
- - Install PMM in HA mode: install-pmm/HA.md
- - Install PMM Server:
- - install-pmm/install-pmm-server/index.md
- - install-pmm/install-pmm-server/prerequisites.md
- - Bare metal:
- - Docker:
- - install-pmm/install-pmm-server/baremetal/docker/easy-install.md
- - install-pmm/install-pmm-server/baremetal/docker/index.md
- - install-pmm/install-pmm-server/baremetal/docker/run_with_vol.md
- - install-pmm/install-pmm-server/baremetal/docker/run_with_data_container.md
- - install-pmm/install-pmm-server/baremetal/docker/run_with_host_dir.md
- - install-pmm/install-pmm-server/baremetal/docker/backup_container.md
- - install-pmm/install-pmm-server/baremetal/docker/restore_container.md
- - install-pmm/install-pmm-server/baremetal/docker/remove_container.md
- - install-pmm/install-pmm-server/baremetal/docker/env_var.md
- - install-pmm/install-pmm-server/baremetal/docker/preview_env_var.md
- - install-pmm/install-pmm-server/baremetal/docker/isolated_hosts.md
- - Podman:
- - install-pmm/install-pmm-server/baremetal/podman/index.md
- - install-pmm/install-pmm-server/baremetal/podman/backup_container_podman.md
- - install-pmm/install-pmm-server/baremetal/podman/restore_container_podman.md
- - install-pmm/install-pmm-server/baremetal/podman/remove_container_podman.md
- - Virtual Applinace:
- - install-pmm/install-pmm-server/baremetal/virtual/index.md
- - install-pmm/install-pmm-server/baremetal/virtual/download_ova.md
- - install-pmm/install-pmm-server/baremetal/virtual/vmware.md
- - install-pmm/install-pmm-server/baremetal/virtual/virtualbox.md
- - install-pmm/install-pmm-server/baremetal/virtual/login_UI.md
- - install-pmm/install-pmm-server/baremetal/virtual/remove_virtual_machine.md
- - Helm:
- - install-pmm/install-pmm-server/baremetal/helm/index.md
- - install-pmm/install-pmm-server/baremetal/helm/backup_container_helm.md
- - install-pmm/install-pmm-server/baremetal/helm/restore_container_helm.md
- - AWS Marketplace:
- - install-pmm/install-pmm-server/aws/aws.md
- - install-pmm/install-pmm-server/aws/limit_access.md
- - install-pmm/install-pmm-server/aws/settings.md
- - install-pmm/install-pmm-server/aws/run.md
- - install-pmm/install-pmm-server/aws/back_pmm_server.md
- - install-pmm/install-pmm-server/aws/restore_pmm_server.md
- - install-pmm/install-pmm-server/aws/remove_pmm_server.md
- - Set up PMM Client:
+ - install-pmm/index.md
+ - Plan your PMM installation:
+ - install-pmm/plan-pmm-installation/hardware_and_system.md
+ - install-pmm/plan-pmm-installation/network_and_firewall.md
+ - Install PMM in HA mode: install-pmm/HA.md
+ - Install PMM Server:
+ - install-pmm/install-pmm-server/index.md
+ - install-pmm/install-pmm-server/prerequisites.md
+ - Deployment options:
+ - Install on Docker:
+ - install-pmm/install-pmm-server/deployment-options/docker/index.md
+ - install-pmm/install-pmm-server/deployment-options/docker/easy-install.md
+ - Setup and configuration:
+ - install-pmm/install-pmm-server/deployment-options/docker/run_with_vol.md
+ - install-pmm/install-pmm-server/deployment-options/docker/run_with_host_dir.md
+ - install-pmm/install-pmm-server/deployment-options/docker/env_var.md
+ - install-pmm/install-pmm-server/deployment-options/docker/preview_env_var.md
+ - install-pmm/install-pmm-server/deployment-options/docker/isolated_hosts.md
+ - Maintenance:
+ - install-pmm/install-pmm-server/deployment-options/docker/backup_container.md
+ - install-pmm/install-pmm-server/deployment-options/docker/restore_container.md
+ - install-pmm/install-pmm-server/deployment-options/docker/remove_container.md
+ - Install on Podman:
+ - install-pmm/install-pmm-server/deployment-options/podman/index.md
+ - Maintenance:
+ - install-pmm/install-pmm-server/deployment-options/podman/backup_container_podman.md
+ - install-pmm/install-pmm-server/deployment-options/podman/restore_container_podman.md
+ - install-pmm/install-pmm-server/deployment-options/podman/remove_container_podman.md
+ - Install on Virtual Appliance:
+ - install-pmm/install-pmm-server/deployment-options/virtual/index.md
+ - Setup:
+ - install-pmm/install-pmm-server/deployment-options/virtual/download_ova.md
+ - install-pmm/install-pmm-server/deployment-options/virtual/vmware.md
+ - install-pmm/install-pmm-server/deployment-options/virtual/virtualbox.md
+ - install-pmm/install-pmm-server/deployment-options/virtual/login_UI.md
+ - Maintenance:
+ - install-pmm/install-pmm-server/deployment-options/virtual/remove_virtual_machine.md
+        - Install with Helm:
+ - install-pmm/install-pmm-server/deployment-options/helm/index.md
+ - Maintenance:
+ - install-pmm/install-pmm-server/deployment-options/helm/backup_container_helm.md
+ - install-pmm/install-pmm-server/deployment-options/helm/restore_container_helm.md
+ - Install on AWS Marketplace:
+ - install-pmm/install-pmm-server/deployment-options/aws/aws.md
+ - Setup and configuration:
+ - install-pmm/install-pmm-server/deployment-options/aws/limit_access.md
+ - install-pmm/install-pmm-server/deployment-options/aws/settings.md
+ - install-pmm/install-pmm-server/deployment-options/aws/run.md
+ - Maintenance:
+ - install-pmm/install-pmm-server/deployment-options/aws/back_pmm_server.md
+ - install-pmm/install-pmm-server/deployment-options/aws/restore_pmm_server.md
+ - install-pmm/install-pmm-server/deployment-options/aws/remove_pmm_server.md
- Install PMM Client:
- install-pmm/install-pmm-client/index.md
- install-pmm/install-pmm-client/prerequisites.md
- - Percona repositories: install-pmm/install-pmm-client/package_manager.md
- - Images: install-pmm/install-pmm-client/docker.md
- - Manual binaries: install-pmm/install-pmm-client/binary_package.md
- - Register Client node: install-pmm/register-client-node/index.md
- - Add services to PMM:
- - install-pmm/install-pmm-client/connect-database/index.md
- - MySQL: install-pmm/install-pmm-client/connect-database/mysql.md
- - MongoDB: install-pmm/install-pmm-client/connect-database/mongodb.md
- - PostgreSQL: install-pmm/install-pmm-client/connect-database/postgresql.md
- - AWS: install-pmm/install-pmm-client/connect-database/aws.md
- - Azure: install-pmm/install-pmm-client/connect-database/azure.md
- - Google Cloud: install-pmm/install-pmm-client/connect-database/google.md
- - Linux: install-pmm/install-pmm-client/connect-database/linux.md
- - Remote: install-pmm/install-pmm-client/connect-database/remote.md
- - External:
- - install-pmm/install-pmm-client/connect-database/external.md
- - HAproxy: install-pmm/install-pmm-client/connect-database/haproxy.md
- - ProxySQL: install-pmm/install-pmm-client/connect-database/proxysql.md
- - Remove services: install-pmm/install-pmm-client/connect-database/remove-services/index.md
+ - Deployment options:
+ - Install from Percona repositories: install-pmm/install-pmm-client/package_manager.md
+ - Install on Docker: install-pmm/install-pmm-client/docker.md
+ - Install from binaries: install-pmm/install-pmm-client/binary_package.md
+ - Configuration:
+ - Register Client node: install-pmm/register-client-node/index.md
+ - Configure monitoring:
+ - install-pmm/install-pmm-client/connect-database/index.md
+ - Databases:
+ - MySQL: install-pmm/install-pmm-client/connect-database/mysql.md
+ - MongoDB: install-pmm/install-pmm-client/connect-database/mongodb.md
+ - PostgreSQL: install-pmm/install-pmm-client/connect-database/postgresql.md
+ - Cloud services:
+ - AWS: install-pmm/install-pmm-client/connect-database/aws.md
+ - Azure: install-pmm/install-pmm-client/connect-database/azure.md
+ - Google Cloud: install-pmm/install-pmm-client/connect-database/google.md
+ - System monitoring:
+ - Linux: install-pmm/install-pmm-client/connect-database/linux.md
+ - Remote: install-pmm/install-pmm-client/connect-database/remote.md
+ - Proxy services:
+          - HAProxy: install-pmm/install-pmm-client/connect-database/haproxy.md
+ - ProxySQL: install-pmm/install-pmm-client/connect-database/proxysql.md
+ - Maintenance:
+ - Remove services: install-pmm/install-pmm-client/connect-database/remove-services/index.md
- Upgrade:
- Upgrade PMM Server:
@@ -250,7 +261,7 @@ nav:
- Manage dashboards:
- use/dashboards-panels/manage-dashboards/create-folders.md
- use/dashboards-panels/manage-dashboards/manage-folders.md
- - use/dashboard-panels/set-custom-dashboard
+ - use/dashboards-panels/manage-dashboards/set-custom-dashboard.md
- Share dashboards and panels: use/dashboards-panels/share-dashboards/share_dashboard.md
- Annotate: use/dashboards-panels/annotate/annotate.md
@@ -269,9 +280,10 @@ nav:
- Percona Alerting:
- alert/index.md
- alert/alert_rules.md
- - alert/alert_notifications.md
+ - alert/contact_points.md
- alert/templates_list.md
- alert/silence_alerts.md
+ - alert/disable_alerts.md
- Backup and restore:
- backup/index.md
- backup/prepare_storage_location.md
@@ -299,30 +311,30 @@ nav:
- configure-pmm/advanced_settings.md
- configure-pmm/ssh.md
- Security in PMM:
- - pmm-admin/security/index.md
- - pmm-admin/security/ssl_encryption.md
- - pmm-admin/security/grafana_cookies.md
- - pmm-admin/security/data_encryption.md
+ - admin/security/index.md
+ - admin/security/ssl_encryption.md
+ - admin/security/grafana_cookies.md
+ - admin/security/data_encryption.md
- Percona Platform:
- configure-pmm/percona_platform/integrate_with_percona_platform.md
- configure-pmm/percona_platform/check_percona_platform.md
- Administer:
- - pmm-admin/index.md
+ - admin/index.md
- Manage users:
- - pmm-admin/manage-users/index.md
- - pmm-admin/manage-users/add_users.md
- - pmm-admin/manage-users/edit_users.md
- - pmm-admin/manage-users/delete_users.md
+ - admin/manage-users/index.md
+ - admin/manage-users/add_users.md
+ - admin/manage-users/edit_users.md
+ - admin/manage-users/delete_users.md
- Roles and permissions:
- - pmm-admin/roles/index.md
+ - admin/roles/index.md
- Access Control:
- - pmm-admin/roles/access-control/intro.md
- - pmm-admin/roles/access-control/labels.md
- - pmm-admin/roles/access-control/config_access_cntrl.md
- - pmm-admin/roles/access-control/create_roles.md
- - pmm-admin/roles/access-control/manage_roles.md
- - pmm-admin/roles/access-control/assign_roles.md
- - pmm-admin/roles/access-control/usecase.md
+ - admin/roles/access-control/intro.md
+ - admin/roles/access-control/labels.md
+ - admin/roles/access-control/config_access_cntrl.md
+ - admin/roles/access-control/create_roles.md
+ - admin/roles/access-control/manage_roles.md
+ - admin/roles/access-control/assign_roles.md
+ - admin/roles/access-control/usecase.md
- Troubleshoot:
@@ -337,7 +349,8 @@ nav:
- Release notes:
- Release notes index: release-notes/index.md
- - "PMM 3.0.0-BETA (2024-11-22)": release-notes/3.0.0_Beta.md
+ - "PMM 3.0.0-1 (2025-02-10)": release-notes/3.0.0_1.md
+ - "PMM 3.0.0 (2025-01-30)": release-notes/3.0.0.md
- Reference:
- Architecture:
@@ -360,7 +373,7 @@ nav:
- reference/dashboards/dashboard-victoriametrics-agents-overview.md
- Environments:
- reference/dashboards/dashboard-env-overview.md
- - reference/dashboards/dashboard-environent-summary.md
+ - reference/dashboards/dashboard-environment-summary.md
- Kubernetes:
- reference/dashboards/kubernetes_cluster_summary.md
- reference/dashboards/kubernetes_monitor_operators.md
@@ -406,6 +419,7 @@ nav:
- reference/dashboards/dashboard-mongodb-instances-overview.md
- reference/dashboards/dashboard-mongodb-instances-compare.md
- reference/dashboards/dashboard-mongodb-replset-summary.md
+ - reference/dashboards/dashboard-mongodb-router-summary.md
- reference/dashboards/dashboard-mongodb-inmemory-details.md
- reference/dashboards/dashboard-mongodb-mmapv1-details.md
- reference/dashboards/dashboard-mongodb-wiredtiger-details.md
diff --git a/documentation/mkdocs-pdf.yml b/documentation/mkdocs-pdf.yml
index 0ae7e55926..d0a13f2fa2 100644
--- a/documentation/mkdocs-pdf.yml
+++ b/documentation/mkdocs-pdf.yml
@@ -7,9 +7,9 @@ plugins:
version_selector: false
# https://github.com/orzih/mkdocs-with-pdf
with-pdf:
- output_path: "pdf/PerconaMonitoringAndManagement-3.0.0-beta.pdf"
+ output_path: "pdf/PerconaMonitoringAndManagement-3.0.0.pdf"
cover_title: "Percona Monitoring and Management Documentation"
- cover_subtitle: 3.0.0 (December 13, 2024)
+ cover_subtitle: 3.0.0 (January 30, 2025)
author: "Percona Technical Documentation Team"
cover_logo: docs/images/Percona_Logo_Color.png
custom_template_path: resources/templates
diff --git a/documentation/overrides/main.html b/documentation/overrides/main.html
index 14b62fd135..c7768e0c98 100644
--- a/documentation/overrides/main.html
+++ b/documentation/overrides/main.html
@@ -1,12 +1,6 @@
{# Import the theme's layout. #}
{% extends "base.html" %}
-{% block announce %}
- This is an Beta version of Percona Monitoring and Management 3.0.0 and it is
- not recommended for production environments yet. We encourage you to test it and give your feedback.
- This will help us improve the product and make it production-ready faster.
-{% endblock %}
-
{% block scripts %}
{{ super() }}
@@ -37,7 +31,7 @@
{% include "partials/nav.html" %}
@@ -54,6 +48,9 @@
diff --git a/documentation/resources/bin/make_glossary.pl b/documentation/resources/bin/make_glossary.pl
index 1b875fe09b..7b110a8703 100755
--- a/documentation/resources/bin/make_glossary.pl
+++ b/documentation/resources/bin/make_glossary.pl
@@ -4,7 +4,7 @@
# Master copy: https://docs.google.com/spreadsheets/d/1KUL-dcfBrR3bWsFUcugy5SsJcR5ot-P4Bt27z3ka0x0/edit#gid=0
# Export this sheet as tab-separated values into source/_res/glossary.tsv
# Usage:
-# sort resources/bin/glossary.tsv | resources/bin/make_glossary_md.pl > docs/details/glossary.md
+# sort resources/bin/glossary.tsv | resources/bin/make_glossary.pl > docs/reference/glossary.md
use File::Basename;
my $prog = basename($0);
diff --git a/documentation/snippets/services-banner.md b/documentation/snippets/services-banner.md
deleted file mode 100644
index 2c5d14caa1..0000000000
--- a/documentation/snippets/services-banner.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-## Get expert help { .title }
-
-If you need assistance, visit the community forum for comprehensive and free database knowledge, or contact our Percona Database Experts for professional support and services.
-
-
-
-[:material-forum-outline: Community Forum](https://forums.percona.com/c/percona-monitoring-and-management-pmm/percona-monitoring-and-management-pmm-v2/31) [:percona-logo: Get a Percona Expert](https://www.percona.com/about/contact)
-