diff --git a/.goreleaser.yml b/.goreleaser.yml
index 3f88053..9781b94 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -8,7 +8,7 @@ builds:
- CGO_ENABLED=0
- GO111MODULE=on
binary: kubectl-debug
- main: ./cmd/plugin/main.go
+ main: ./cmd/kubectl-debug/main.go
goos:
- freebsd
- windows
@@ -21,7 +21,7 @@ builds:
- goos: darwin
goarch: 386
ldflags:
- - -s -w -X 'github.com/aylei/kubectl-debug/version.gitVersion={{.Version}}'
+ - -s -w -X 'github.com/jamestgrant/kubectl-debug/version.gitVersion={{.Version}}'
checksum:
name_template: 'checksums.txt'
snapshot:
@@ -34,12 +34,12 @@ changelog:
- '^test:'
brew:
github:
- owner: aylei
+ owner: jamestgrant
name: homebrew-tap
commit_author:
- name: aylei
- email: rayingecho@gmail.com
+ name: jamestgrant
+ email: jamesrgrant@mediakind.com
install: |
bin.install "kubectl-debug"
- homepage: "https://www.github.com/aylei/kubectl-debug"
- description: "Debug your pod by a new container with every troubleshooting tools pre-installed"
+ homepage: "https://www.github.com/jamestgrant/kubectl-debug"
+ description: "Debug a troublesome container using a dubug container which contains all your favorite troubleshooting tools pre-installed and runs in the same cgroup/ipc/network namespace as your troublesome container"
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index d8097bc..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-sudo: required
-services:
- - docker
-language: go
-go:
- - "1.12.x"
-env:
- - GO111MODULE=on make build
-
-install:
- - go mod download
-
-script:
- - make check
-
-before_deploy:
- - make agent-docker
-
-deploy:
- - provider: script
- script: bash ./scripts/docker_push.sh
diff --git a/Dockerfile b/Dockerfile
index e4d9aa4..6730ad5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,8 @@
-FROM alpine:3.11.5 as build
+FROM alpine:3.15.0 as build
RUN apk add lxcfs containerd
-FROM alpine:3.11.5
+FROM alpine:3.15.0
COPY --from=build /usr/bin/lxcfs /usr/bin/lxcfs
COPY --from=build /usr/lib/*fuse* /usr/lib/
diff --git a/Makefile b/Makefile
index 7edfa22..5f6664b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: build plugin agent check
+.PHONY: build kubectl-debug-binary debug-agent-binary debug-agent-docker-image check
LDFLAGS = $(shell ./version.sh)
GOENV := GO15VENDOREXPERIMENT="1" GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64
@@ -6,16 +6,16 @@ GO := $(GOENV) go
default: build
-build: plugin agent-docker
+build: kubectl-debug-binary debug-agent-docker-image
-plugin:
- GO111MODULE=on CGO_ENABLED=0 go build -ldflags '$(LDFLAGS)' -o kubectl-debug cmd/plugin/main.go
+kubectl-debug-binary:
+ GO111MODULE=on CGO_ENABLED=0 go build -ldflags '$(LDFLAGS)' -o kubectl-debug cmd/kubectl-debug/main.go
-agent-docker: agent
- docker build . -t aylei/debug-agent:latest
+debug-agent-docker-image: debug-agent-binary
+ docker build . -t jamesgrantmediakind/debug-agent:latest
-agent:
- $(GO) build -ldflags '$(LDFLAGS)' -o debug-agent cmd/agent/main.go
+debug-agent-binary:
+ $(GO) build -ldflags '$(LDFLAGS)' -o debug-agent cmd/debug-agent/main.go
check:
find . -iname '*.go' -type f | grep -v /vendor/ | xargs gofmt -l
diff --git a/README.md b/README.md
index 5a4a1fb..aa1a2a1 100644
--- a/README.md
+++ b/README.md
@@ -1,243 +1,413 @@
# Kubectl-debug

-[](https://travis-ci.org/aylei/kubectl-debug)
-[](https://goreportcard.com/report/github.com/aylei/kubectl-debug)
-[](https://hub.docker.com/r/aylei/debug-agent)
+[](https://goreportcard.com/report/github.com/jamesTGrant/kubectl-debug)
+[](https://hub.docker.com/r/jamesgrantmediakind/debug-agent)
-[简体中文](/docs/zh-cn.md)
-
-# Overview
-
-`kubectl-debug` is an out-of-tree solution for [troubleshooting running pods](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/troubleshoot-running-pods.md), which allows you to run a new container in running pods for debugging purpose ([examples](/docs/examples.md)). The new container will join the `pid`, `network`, `user` and `ipc` namespaces of the target container, so you can use arbitrary trouble-shooting tools without pre-installing them in your production container image.
-
-- [Kubectl-debug](#kubectl-debug)
- [Overview](#overview)
-- [Screenshots](#screenshots)
-- [Quick Start](#quick-start)
- - [Install the kubectl debug plugin](#install-the-kubectl-debug-plugin)
- - [(Optional) Install the debug agent DaemonSet](#optional-install-the-debug-agent-daemonset)
- - [Debug instructions](#debug-instructions)
-- [Build from source](#build-from-source)
-- [port-forward mode And agentless mode(Default opening)](#port-forward-mode-and-agentless-modedefault-opening)
-- [Configuration](#configuration)
-- [Authorization](#authorization)
+- [Quick start](#quick-start)
+ - [Download the binary](#download-the-binary)
+ - [Usage instructions](#usage-instructions)
+ - [Build from source](#build-from-source)
+ - [Under the hood](#under-the-hood)
+- [Configuration options and overrides](#configuration-options-and-overrides)
+- [Authorization / required privileges](#authorization-required-privileges)
+- [(Optional) Create a Secret for use with Private Docker Registries](#create-a-secret-for-use-with-private-docker-registries)
- [Roadmap](#roadmap)
- [Contribute](#contribute)
- [Acknowledgement](#acknowledgement)
-# Screenshots
-
+# Overview
-# Quick Start
+This project is a fork of this fine project: https://github.com/aylei/kubectl-debug which is no longer maintained (hence this fork). The credit for this project belongs with [aylei](https://github.com/aylei). Aylei and I have chatted and we are happy that this project will live on and get maintained here.
-## Install the kubectl debug plugin
+`kubectl-debug` is an 'out-of-tree' solution for connecting to and troubleshooting an existing, running, 'target' container in an existing pod in a Kubernetes cluster.
+The target container may have a shell and busybox utilities, and hence provide some debug capability, or it may be very minimal and not even provide a shell - which makes any real-time troubleshooting/debugging very difficult. kubectl-debug is designed to overcome that difficulty.
-Homebrew:
-```shell
-brew install aylei/tap/kubectl-debug
-```
+There's a short video on YouTube: https://www.youtube.com/watch?v=jJHCxCqPn1g
-Download the binary:
+How does it work?
+
+
+- User invokes kubectl-debug like this: `kubectl-debug --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME`
+- kubectl-debug communicates with the cluster using the same interface as kubectl and instructs kubernetes to request the launch of a new 'debug-agent' container on the same node as the 'target' container
+- the debug-agent process within the debug-agent pod connects directly to containerd (or dockerd if applicable) on the host which is running the 'target' container and requests the launch of a new 'debug' container in the same `pid`, `network`, `user` and `ipc` namespaces as the target container
+- In summary: 'kubectl-debug' causes the launch of the 'debug-agent' container, and 'debug-agent' then causes the launch of the 'debug' pod/container
+- the 'debug-agent' pod redirects the terminal output of the 'debug' container to the 'kubectl-debug' executable, so you can interact directly with the shell running in the 'debug' container. You can now use the troubleshooting tools available in the debug container (BASH, cURL, tcpdump, etc.) without the need to have these utilities in the target container image.
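+
+Once the debug shell is open you can sanity-check the namespace sharing yourself; inside the shared `pid` namespace, PID 1 is the target container's main process:
+```shell
+# matching namespace inode numbers confirm that the debug container shares
+# the target container's namespaces
+ls -l /proc/1/ns/ /proc/self/ns/
+```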
+
+
+
+`kubectl-debug` is not related to `kubectl debug`.
+
+`kubectl-debug` has been largely replaced by kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers).
+The ephemeral containers feature is in beta (enabled by default) from kubernetes 1.23.
+The ephemeral containers feature is in alpha from kubernetes 1.16 to 1.22.
+In Kubernetes, alpha features are not enabled by default and must be enabled explicitly. If you are using Azure AKS (and perhaps others) you are not able, nor permitted, to configure kubernetes feature flags, so you will need a solution like the one provided by this github project.
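+
+If your cluster does let you use ephemeral containers, the native upstream alternative to a basic kubectl-debug session looks roughly like this (a sketch of the built-in `kubectl debug` command, not part of this project):
+```bash
+# native ephemeral-container equivalent (requires ephemeral containers to be
+# enabled in the cluster; beta and on by default from kubernetes 1.23)
+kubectl debug -it POD_NAME --image=nicolaka/netshoot:latest --target=TARGET_CONTAINER_NAME --namespace NAMESPACE
+```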
+
+# Quick start
+
+## Download the binary
+(I'm testing Linux only):
```bash
-export PLUGIN_VERSION=0.1.1
+export RELEASE_VERSION=1.0.0
# linux x86_64
-curl -Lo kubectl-debug.tar.gz https://github.com/aylei/kubectl-debug/releases/download/v${PLUGIN_VERSION}/kubectl-debug_${PLUGIN_VERSION}_linux_amd64.tar.gz
-# macos
-curl -Lo kubectl-debug.tar.gz https://github.com/aylei/kubectl-debug/releases/download/v${PLUGIN_VERSION}/kubectl-debug_${PLUGIN_VERSION}_darwin_amd64.tar.gz
+curl -Lo kubectl-debug https://github.com/JamesTGrant/kubectl-debug/releases/download/v${RELEASE_VERSION}/kubectl-debug
+
+# make the binary executable
+chmod +x kubectl-debug
-tar -zxvf kubectl-debug.tar.gz kubectl-debug
-sudo mv kubectl-debug /usr/local/bin/
+# run the binary pointing at whatever cluster kubectl points at
+./kubectl-debug --namespace NAMESPACE TARGET_POD_NAME -c TARGET_CONTAINER_NAME
```
+## Build from source
-For windows users, download the latest archive from the [release page](https://github.com/aylei/kubectl-debug/releases/tag/v0.1.1), decompress the package and add it to your PATH.
+Clone this repo and:
+```bash
+# to use this kubectl-debug utility, you only need to take the resultant kubectl-debug binary
+# file which is created by:
+make kubectl-debug-binary
-## (Optional) Install the debug agent DaemonSet
+# to 'install' the kubectl-debug binary, make it executable and either call it directly, put
+# it in your PATH, or move it to a location which is already in your PATH:
-`kubectl-debug` requires an agent pod to communicate with the container runtime. In the [agentless mode](#port-forward-mode-And-agentless-mode), the agent pod can be created when a debug session starts and to be cleaned up when the session ends.(Turn on agentless mode by default)
+chmod +x kubectl-debug
+mv kubectl-debug /usr/local/bin
-While convenient, creating pod before debugging can be time consuming. You can install the debug agent DaemonSet and use --agentless=false params in advance to skip this:
-```bash
-# if your kubernetes version is v1.16 or newer
-kubectl apply -f https://raw.githubusercontent.com/aylei/kubectl-debug/master/scripts/agent_daemonset.yml
-# if your kubernetes is old version(,,
+# In 'fork' mode, if you want the copied pod to retain the labels of the original pod, you can use
+# the --fork-pod-retain-labels parameter (comma separated, no spaces). If not set (default), this parameter
+# is empty and so any labels of the original pod are not retained, and the labels of the copied pods are empty.
+# Example of fork mode:
+kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --fork --fork-pod-retain-labels=,,
+
+# in order to interact with the debug-agent pod on a node which doesn't have a public IP or direct
+# access (firewall and other reasons), port-forward mode is enabled by default. If you don't want
+# port-forward mode, you can use --port-forward=false to turn it off. I don't know why you'd want to do
+# this, but you can if you want.
+kubectl-debug --port-forward=false --namespace NAMESPACE POD_NAME -c CONTAINER_NAME
-# in order to enable node without public IP or direct access (firewall and other reasons) to access, port-forward mode is enabled by default.
-# if you don't need to turn on port-forward mode, you can use --port-forward false to turn off it.
-kubectl debug POD_NAME --port-forward=false --agentless=false --daemonset-ns=kube-system --daemonset-name=debug-agent
+# you can choose a different debug container image. By default, nicolaka/netshoot:latest will be
+# used but you can specify anything you like
+kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --image nicolaka/netshoot:latest
-# old versions of kubectl cannot discover plugins, you may execute the binary directly
-kubectl-debug POD_NAME
+# you can set the debug-agent pod's resource limits/requests, for example:
+# default is not set
+kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --agent-pod-cpu-requests=250m --agent-pod-cpu-limits=500m --agent-pod-memory-requests=200Mi --agent-pod-memory-limits=500Mi
-# use primary docker registry, set registry kubernets secret to pull image
+# use primary docker registry, set registry kubernetes secret to pull image
# the default registry-secret-name is kubectl-debug-registry-secret, the default namespace is default
# please set the secret data source as {Username: , Password: }
-kubectl-debug POD_NAME --image calmkart/netshoot:latest --registry-secret-name --registry-secret-namespace
-# in default agentless mode, you can set the agent pod's resource limits/requests, for example:
-# default is not set
-kubectl-debug POD_NAME --agent-pod-cpu-requests=250m --agent-pod-cpu-limits=500m --agent-pod-memory-requests=200Mi --agent-pod-memory-limits=500Mi
+kubectl-debug --namespace NAMESPACE POD_NAME --image nicolaka/netshoot:latest --registry-secret-name --registry-secret-namespace
+
+# in addition to passing cli arguments, you can use a config file if you would like to set
+# non-default values for various things.
+kubectl-debug --configfile /PATH/FILENAME --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME
+
```
+## Debugging examples
-* You can configure the default arguments to simplify usage, refer to [Configuration](#configuration)
-* Refer to [Examples](/docs/examples.md) for practical debugging examples
+This guide shows a few typical examples of debugging a target container.
-## (Optional) Create a Secret for Use with Private Docker Registries
+### Basic
-You can use a new or existing [Kubernetes `dockerconfigjson` secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). For example:
+When you run `kubectl-debug` it causes a 'debug container' to be created on the same node as the target container, running in the same `pid`, `network`, `ipc` and `user` namespaces as the target container.
+By default, `kubectl-debug` uses [`nicolaka/netshoot`](https://github.com/nicolaka/netshoot) as the container image for the 'debug container'.
+The netshoot [project documentation](https://github.com/nicolaka/netshoot/blob/master/README.md) provides excellent guides and examples for using various tools to troubleshoot your target container.
-```bash
-# Be sure to run "docker login" beforehand.
-kubectl create secret generic kubectl-debug-registry-secret \
- --from-file=.dockerconfigjson= \
- --type=kubernetes.io/dockerconfigjson
+Here are a few examples to show `netshoot` working with `kubectl-debug`:
+
+Connect to the running container 'target-container' in pod 'target-pod' in the default namespace:
+
+```shell
+➜ ~ kubectl-debug --namespace default target-pod -c target-container
+
+Agent Pod info: [Name:debug-agent-pod-da46a000-8429-11e9-a40c-8c8590147766, Namespace:default, Image:jamesgrantmediakind/debug-agent:latest, HostPort:10027, ContainerPort:10027]
+Waiting for pod debug-agent-pod-da46a000-8429-11e9-a40c-8c8590147766 to run...
+pod target-pod pod IP: 10.233.111.78, agentPodIP 172.16.4.160
+wait for forward port to debug agent ready...
+Forwarding from 127.0.0.1:10027 -> 10027
+Forwarding from [::1]:10027 -> 10027
+Handling connection for 10027
+ pulling image nicolaka/netshoot:latest...
+latest: Pulling from nicolaka/netshoot
+Digest: sha256:5b1f5d66c4fa48a931ff54f2f34e5771eff2bc5e615fef441d5858e30e9bb921
+Status: Image is up to date for nicolaka/netshoot:latest
+starting debug container...
+container created, open tty...
+
+ [1] 🐳 → hostname
+target-container
```
+
+
+Navigating the filesystem of the target container:
-Alternatively, you can create a secret with the key `authStr` and a JSON payload containing a `Username` and `Password`. For example:
+The root filesystem of the target container is located in `/proc/{pid}/root/`, and the `pid` is typically '1'.
+You can `chroot` to the root filesystem of the target container to navigate its filesystem, or
+`cd /proc/1/root` works just as well (assuming PID '1' is the correct PID).
-```bash
-echo -n '{"Username": "calmkart", "Password": "calmkart"}' > ./authStr
-kubectl create secret generic kubectl-debug-registry-secret --from-file=./authStr
+```shell
+root @ /
+ [2] 🐳 → chroot /proc/1/root
+
+ root @ /
+ [3] 🐳 → cd /proc/1/root
+
+root @ /
+ [#] 🐳 → ls
+ bin entrypoint.sh home lib64 mnt root sbin sys tmp var
+ dev etc lib media proc run srv usr
+ (you can navigate the target container's filesystem and view/edit files)
+
+root @ /
+ [#] 🐳 → ./entrypoint.sh
+ (you can attempt to run the target container's entrypoint.sh script and perhaps see what errors are produced)
```
+
+
+Using **iftop** to inspect network traffic:
+```shell
+root @ /
+ [4] 🐳 → iftop -i eth0
+interface: eth0
+IP address is: 10.233.111.78
+MAC address is: 86:c3:ae:9d:46:2b
+(CLI graph omitted)
+```
+
+
+Using **drill** to diagnose DNS:
+```shell
+root @ /
+ [5] 🐳 → drill -V 5 demo-service
+;; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 0
+;; flags: rd ; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
+;; QUESTION SECTION:
+;; demo-service. IN A
-Refer to [the official Kubernetes documentation on Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) for more ways to create them.
+;; ANSWER SECTION:
-# Build from source
+;; AUTHORITY SECTION:
-Clone this repo and:
-```bash
-# make will build plugin binary and debug-agent image
-make
-# install plugin
-mv kubectl-debug /usr/local/bin
+;; ADDITIONAL SECTION:
+
+;; Query time: 0 msec
+;; WHEN: Sat Jun 1 05:05:39 2019
+;; MSG SIZE rcvd: 0
+;; ->>HEADER<<- opcode: QUERY, rcode: NXDOMAIN, id: 62711
+;; flags: qr rd ra ; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 0
+;; QUESTION SECTION:
+;; demo-service. IN A
-# build plugin only
-make plugin
-# build agent only
-make agent-docker
+;; ANSWER SECTION:
+
+;; AUTHORITY SECTION:
+. 30 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2019053101 1800 900 604800 86400
+
+;; ADDITIONAL SECTION:
+
+;; Query time: 58 msec
+;; SERVER: 10.233.0.10
+;; WHEN: Sat Jun 1 05:05:39 2019
+;; MSG SIZE rcvd: 121
```
-# port-forward mode And agentless mode(Default opening)
+### `proc` filesystem and FUSE
+
+It is common to use tools like `top` and `free` to inspect system metrics like CPU usage and memory. By default, these commands display the metrics of the host system, because they read from the `proc` filesystem (`/proc/*`), which is mounted from the host. This can still be extremely useful (you can inspect the pod/container metrics as part of the host metrics). You may find [this blog post](https://fabiokung.com/2014/03/13/memory-inside-linux-containers/) useful.
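+
+For example, inside the debug container the following commands report host-level figures rather than the target container's cgroup limits:
+```shell
+# both commands read /proc, which here is the host's proc filesystem
+free -m
+top -b -n 1 | head -n 5
+```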
+
+## Debug Pod in "CrashLoopBackoff"
+
+Troubleshooting kubernetes containers in the `CrashLoopBackoff` state can be tricky. Using kubectl-debug 'normally' probably won't help you, as the debug container's process will be terminated and reaped once the target container (the process with pid 1) exits. To tackle this, `kubectl-debug` provides the `--fork` flag, which borrows the idea from the `oc debug` command: copy the currently crashing pod and (hopefully) the issue will reproduce in the forked Pod, with the added ability to debug via the debug container.
+
+Under the hood, `kubectl-debug --fork` will copy the entire Pod spec and:
-- `port-foward` mode: By default, `kubectl-debug` will directly connect with the target host. When `kubectl-debug` cannot connect to `targetHost:agentPort`, you can enable `port-forward` mode. In `port-forward` mode, the local machine listens on `localhost:agentPort` and forwards data to/from `targetPod:agentPort`.
+* strip all the labels, so that no traffic will be routed from a service to this pod (see the `--fork-pod-retain-labels` flag above if you want to retain labels);
+* modify the entrypoint of the target container in order to hold the pid namespace and prevent the Pod from crashing again;
-- `agentless` mode: By default, `debug-agent` needs to be pre-deployed on each node of the cluster, which consumes cluster resources all the time. Unfortunately, debugging Pod is a low-frequency operation. To avoid loss of cluster resources, the `agentless` mode has been added in [#31](https://github.com/aylei/kubectl-debug/pull/31). In `agentless` mode, `kubectl-debug` will first start `debug-agent` on the host where the target Pod is located, and then `debug-agent` starts the debug container. After the user exits, `kubectl-debug` will delete the debug container and `kubectl-debug` will delete the `debug-agent` pod at last.
+Here's an example:
+
+```shell
+➜ ~ kubectl-debug demo-pod -c demo-container --fork
+Agent Pod info: [Name:debug-agent-pod-dea9e7c8-8439-11e9-883a-8c8590147766, Namespace:default, Image:jamesgrantmediakind/debug-agent:latest, HostPort:10027, ContainerPort:10027]
+Waiting for pod debug-agent-pod-dea9e7c8-8439-11e9-883a-8c8590147766 to run...
+Waiting for pod demo-pod-e23c1b68-8439-11e9-883a-8c8590147766-debug to run...
+pod demo-pod PodIP 10.233.111.90, agentPodIP 172.16.4.160
+wait for forward port to debug agent ready...
+Forwarding from 127.0.0.1:10027 -> 10027
+Forwarding from [::1]:10027 -> 10027
+Handling connection for 10027
+ pulling image nicolaka/netshoot:latest...
+latest: Pulling from nicolaka/netshoot
+Digest: sha256:5b1f5d66c4fa48a931ff54f2f34e5771eff2bc5e615fef441d5858e30e9bb921
+Status: Image is up to date for nicolaka/netshoot:latest
+starting debug container...
+container created, open tty...
+
+ [1] 🐳 → ps -ef
+PID USER TIME COMMAND
+ 1 root 0:00 sh -c -- while true; do sleep 30; done;
+ 6 root 0:00 sleep 30
+ 7 root 0:00 /bin/bash -l
+ 15 root 0:00 ps -ef
+```
+
+
+## Debug init container
+
+Just like debugging an ordinary container, we can debug the init container of a pod. You must specify the name of the init container:
+
+```shell
+➜ ~ kubectl-debug demo-pod -c init-container
+```
+
+
+# Under the hood
-# Configuration
+`kubectl-debug` consists of 3 components:
-`kubectl-debug` uses [nicolaka/netshoot](https://github.com/nicolaka/netshoot) as the default image to run debug container, and use `bash` as default entrypoint.
+* the 'kubectl-debug' executable serves the `kubectl-debug` command and interfaces with the kube-api-server
+* the 'debug-agent' pod is a temporary pod that is started in the cluster by kubectl-debug. The 'debug-agent' container is responsible for starting and manipulating the 'debug container'. The 'debug-agent' also acts as a websocket relay for the remote tty, joining the output of the 'debug container' to the terminal from which the kubectl-debug command was issued
+* the 'debug container', which provides the debugging utilities and the shell in which the human user performs their debugging activity. `kubectl-debug` doesn't provide this - it's an 'off-the-shelf' container image (nicolaka/netshoot:latest by default) that is invoked and configured by 'debug-agent'.
-You can override the default image and entrypoint with cli flag, or even better, with config file `~/.kube/debug-config`:
+The following occurs when the user runs the command: `kubectl-debug --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME`
+1. 'kubectl-debug' gets the target-pod info from kube-api-server and extracts the `host` information (the node hosting the target-pod in the given namespace)
+2. 'kubectl-debug' sends a 'debug-agent' pod specification to kube-api-server with a node-selector matching the `host`. By default the container image is `docker.io/jamesgrantmediakind/debug-agent:latest`
+3. kube-api-server creates the 'debug-agent' pod. The 'debug-agent' pod is created in the default namespace (it doesn't have to be the same namespace as the target pod)
+4. 'kubectl-debug' sends an HTTP request to the 'debug-agent' pod running on the `host` which includes a protocol upgrade from HTTP to SPDY
+5. 'debug-agent' checks if the target container is actively running; if not, it writes an error to the client
+6. 'debug-agent' interfaces with containerd (or dockerd if applicable) on the host to request the creation of the 'debug container' with `tty` and `stdin` opened; 'debug-agent' configures the 'debug container' to use the `pid`, `network`, `ipc` and `user` namespaces of the target container
+7. 'debug-agent' pipes the connection into the 'debug container' using `attach`
+8. The human performs debugging/troubleshooting on the target container from within the debug container, with access to the target container's process/network/ipc namespaces and root filesystem
+9. When debugging is complete, the user exits the debug-container shell, which closes the SPDY connection
+10. 'debug-agent' closes the SPDY connection, then waits for the 'debug container' to exit and does the cleanup
+11. 'debug-agent' pod is deleted
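+
+Step 6 is the key trick. As an illustration only (debug-agent talks to the container runtime API rather than the CLI, the user-namespace handling is omitted, and TARGET_ID is a placeholder), the namespace configuration it requests is roughly equivalent to:
+```bash
+# rough docker CLI equivalent of the debug container that debug-agent creates
+TARGET_ID=$(docker ps --filter "name=TARGET_CONTAINER_NAME" --format '{{.ID}}')
+docker run -it --rm \
+  --pid=container:${TARGET_ID} \
+  --network=container:${TARGET_ID} \
+  --ipc=container:${TARGET_ID} \
+  nicolaka/netshoot:latest /bin/bash -l
+```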
+
+
+# Configuration options and overrides
+
+The `debug-agent` uses [nicolaka/netshoot](https://github.com/nicolaka/netshoot) as the default image to run the debug container, and uses `bash` as the default entrypoint. You can override the default image and entrypoint, as well as a number of other useful things, by passing a config file to the kubectl-debug command like this:
+```bash
+kubectl-debug --configfile CONFIGFILE --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME
+```
+Example configfile:
```yaml
-# debug agent listening port(outside container)
-# default to 10027
+# debug agent listening port (outside container)
+# default: 10027
agentPort: 10027
-# whether using agentless mode
-# default to true
-agentless: true
-# namespace of debug-agent pod, used in agentless mode
-# default to 'default'
+# namespace of debug-agent pod (doesn't need to be the same namespace as the target container)
+# default: 'default'
agentPodNamespace: default
-# prefix of debug-agent pod, used in agentless mode
-# default to 'debug-agent-pod'
+
+# prefix of debug-agent pod
+# default: 'debug-agent-pod'
agentPodNamePrefix: debug-agent-pod
-# image of debug-agent pod, used in agentless mode
-# default to 'aylei/debug-agent:latest'
-agentImage: aylei/debug-agent:latest
-
-# daemonset name of the debug-agent, used in port-forward
-# default to 'debug-agent'
-debugAgentDaemonset: debug-agent
-# daemonset namespace of the debug-agent, used in port-forwad
-# default to 'default'
-debugAgentNamespace: kube-system
+
+# image of debug-agent pod
+# default: jamesgrantmediakind/debug-agent:latest
+agentImage: jamesgrantmediakind/debug-agent:latest
+
+# auditing can be enabled by setting 'audit' to 'true'
+# default: false
+audit: false
+
# whether using port-forward when connecting debug-agent
-# default true
+# default: true
portForward: true
-# image of the debug container
-# default as showed
+
+# the 'debug container' image
+# default: nicolaka/netshoot:latest
+# for the most reliable results, use the full image path - for example: docker.io/library/busybox:latest will
+# work but busybox:latest may not (depending on the cluster)
image: nicolaka/netshoot:latest
+
# start command of the debug container
+# `kubectl-debug` always specifies this explicitly; you cannot override it without making code changes to `kubectl-debug`. This is by design.
# default ['bash']
command:
- '/bin/bash'
- '-l'
-# private docker registry auth kuberntes secret
-# default registrySecretName is kubectl-debug-registry-secret
-# default registrySecretNamespace is default
+
+# private docker registry auth kubernetes secret
+# default registrySecretName: kubectl-debug-registry-secret
+# default registrySecretNamespace: default
registrySecretName: my-debug-secret
registrySecretNamespace: debug
-# in agentless mode, you can set the agent pod's resource limits/requests:
+
+# you can set the agent pod's resource limits/requests:
# default is not set
agentCpuRequests: ""
agentCpuLimits: ""
agentMemoryRequests: ""
agentMemoryLimits: ""
+
# in fork mode, if you want the copied pod to retain the labels of the original pod, you can set this parameter
# format is []string
# If not set, this parameter is empty by default (meaning that the labels of the original pod are not retained and the labels of the copied pod are empty)
forkPodRetainLabels: []
+
# You can disable SSL certificate check when communicating with image registry by
# setting registrySkipTLSVerify to true.
registrySkipTLSVerify: false
-# You can set the log level with the verbosity setting
+
+# You can set the debug logging output level with the verbosity setting. There are two levels of verbosity: 0 and any positive integer (i.e. 'verbosity: 1' will produce the same debug output as 'verbosity: 5')
verbosity : 0
```
-If the debug-agent is not accessible from host port, it is recommended to set `portForward: true` to using port-forawrd mode.
-
-PS: `kubectl-debug` will always override the entrypoint of the container, which is by design to avoid users running an unwanted service by mistake(of course you can always do this explicitly).
+# Authorization / required privileges
-# Authorization
+Put simply - if you can successfully issue the command `kubectl exec` to a container in your cluster then `kubectl-debug` will work for you!
+Detail: `kubectl-debug` reuses the privilege of the `pod/exec` sub-resource to do authorization, which means that it has the same privilege requirements as the `kubectl exec` command.
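+
+For illustration, a minimal RBAC Role granting that privilege might look like the following sketch (the Role name and namespace here are hypothetical):
+```yaml
+# hypothetical Role covering the pod/exec privilege that kubectl-debug reuses
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: kubectl-debug-user
+  namespace: default
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["pods/exec"]
+    verbs: ["create"]
+```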
-Currently, `kubectl-debug` reuse the privilege of the `pod/exec` sub resource to do authorization, which means that it has the same privilege requirements with the `kubectl exec` command.
+The processes in the debug-agent container run as `root` and the debug-agent container `securityContext` is configured with `privileged: true`. Some clusters, such as OpenShift, may not allow either of these practices by default.
# Auditing / Security
Some teams may want to limit what debug image users are allowed to use and to have an audit record for each command they run in the debug container.
-You can use the environment variable ```KCTLDBG_RESTRICT_IMAGE_TO``` restrict the agent to using a specific container image. For example putting the following in the container spec section of your daemonset yaml will force the agent to always use the image ```docker.io/nicolaka/netshoot:latest``` regardless of what the user specifies on the kubectl-debug command line
+You can add ```KCTLDBG_RESTRICT_IMAGE_TO``` to the config file to restrict the debug-agent to using a specific container image. For example, putting the following in the config file will force the agent to always use the image ```docker.io/nicolaka/netshoot:latest``` regardless of what the user specifies on the kubectl-debug command line. This may be helpful in restrictive environments that mandate the use of an approved debug image.
```
- env :
- - name: KCTLDBG_RESTRICT_IMAGE_TO
- value: docker.io/nicolaka/netshoot:latest
+KCTLDBG_RESTRICT_IMAGE_TO: docker.io/nicolaka/netshoot:latest
```
-If ```KCTLDBG_RESTRICT_IMAGE_TO``` is set and as a result agent is using an image that is different than what the user requested then the agent will log to standard out a message that announces what is happening. The message will include the URI's of both images.
-
-Auditing can be enabled by placing
-```audit: true```
-in the agent's config file.
+If ```KCTLDBG_RESTRICT_IMAGE_TO``` is set and, as a result, the agent is using an image that is different from what the user requested, then the agent will log a message to standard out announcing what is happening. The message will include the URIs of both images.
There are 3 settings related to auditing.
@@ -260,47 +430,44 @@ Where USERNAME is the kubernetes user as determined by the client that launched
- String array that will be placed before the command that will be run in the debug container. The default value is `{"/usr/bin/strace", "-o", "KCTLDBG-FIFO", "-f", "-e", "trace=/exec"}`. The agent will replace KCTLDBG-FIFO with the fifo path (see above). If auditing is enabled then the agent will use the concatenation of the array specified by audit_shim and the original command array it was going to use.
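+
+As a sketch, assuming the setting names described in this section, enabling auditing with the default strace shim in the kubectl-debug config file could look like this:
+```yaml
+# enable auditing (see the 'audit' key in the example configfile above)
+audit: true
+# prefix array placed before the debug command; shown here with the documented default
+audit_shim:
+  - "/usr/bin/strace"
+  - "-o"
+  - "KCTLDBG-FIFO"
+  - "-f"
+  - "-e"
+  - "trace=/exec"
+```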
-The easiest way to enable auditing is to define a config map in the yaml you use to deploy the deamonset. You can do this by place
-```
-apiVersion : v1
-kind: ConfigMap
-metadata:
- name : kubectl-debug-agent-config
-data:
- agent-config.yml: |
- audit: true
----
-```
-at the top of the file, adding a ```configmap``` volume like so
-```
- - name: config
- configMap:
- name: kubectl-debug-agent-config
-```
-and a volume mount like so
+# (Optional) Create a Secret for Use with Private Docker Registries
+
+You can use a new or existing [Kubernetes `dockerconfigjson` secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). For example:
+
+```bash
+# Be sure to run "docker login" beforehand.
+kubectl create secret generic kubectl-debug-registry-secret \
+ --from-file=.dockerconfigjson= \
+ --type=kubernetes.io/dockerconfigjson
```
- - name: config
- mountPath: "/etc/kubectl-debug/agent-config.yml"
- subPath: agent-config.yml
+
+Alternatively, you can create a secret with the key `authStr` and a JSON payload containing a `Username` and `Password`. For example:
+
+```bash
+echo -n '{"Username": "calmkart", "Password": "calmkart"}' > ./authStr
+kubectl create secret generic kubectl-debug-registry-secret --from-file=./authStr
```
-.
+Refer to [the official Kubernetes documentation on Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) for more ways to create them.
-# Roadmap
-`kubectl-debug` is supposed to be just a troubleshooting helper, and is going be replaced by the native `kubectl debug` command when [this proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/troubleshoot-running-pods.md) is implemented and merged in the future kubernetes release. But for now, there is still some works to do to improve `kubectl-debug`.
+# Roadmap
-- [ ] Security: currently, `kubectl-debug` do authorization in the client-side, which should be moved to the server-side (debug-agent)
-- [ ] More unit tests
-- [ ] More real world debugging example
-- [ ] e2e tests
+Jan '22 - plan to add support for k3s environments
+March '22 - actually add support for k3s environments and auto LXCFS detection handling
+
+`kubectl-debug` has been replaced by kubernetes [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers).
+The ephemeral containers feature is in beta (enabled by default) from kubernetes 1.23.
+The ephemeral containers feature is in alpha from kubernetes 1.16 to 1.22.
+
+In Kubernetes, alpha features are not enabled by default and must be enabled explicitly. If you are using Azure AKS (and perhaps others) you are not able, nor permitted, to configure kubernetes feature flags, so you will need a solution like the one provided by this github project.
-If you are interested in any of the above features, please file an issue to avoid potential duplication.
# Contribute
-Feel free to open issues and pull requests. Any feedback is highly appreciated!
+Feel free to open issues and pull requests. Any feedback is much appreciated!
# Acknowledgement
-This project would not be here without the effort of [our contributors](https://github.com/aylei/kubectl-debug/graphs/contributors), thanks!
+This project is a fork of [aylei/kubectl-debug](https://github.com/aylei/kubectl-debug), which is no longer maintained (hence this fork).
+This project would not be here without the effort of the [original project's contributors](https://github.com/aylei/kubectl-debug/graphs/contributors) and the [contributors to this fork](https://github.com/JamesTGrant/kubectl-debug/graphs/contributors).
diff --git a/cmd/agent/main.go b/cmd/debug-agent/main.go
similarity index 65%
rename from cmd/agent/main.go
rename to cmd/debug-agent/main.go
index 0955ac9..0956aa4 100644
--- a/cmd/agent/main.go
+++ b/cmd/debug-agent/main.go
@@ -2,9 +2,8 @@ package main
import (
"flag"
+ "github.com/jamestgrant/kubectl-debug/pkg/debug-agent"
"log"
-
- "github.com/aylei/kubectl-debug/pkg/agent"
)
func main() {
@@ -13,12 +12,12 @@ func main() {
flag.StringVar(&configFile, "config.file", "", "Config file location.")
flag.Parse()
- config, err := agent.LoadFile(configFile)
+ config, err := debugagent.LoadFile(configFile)
if err != nil {
log.Fatalf("error reading config %v", err)
}
- server, err := agent.NewServer(config)
+ server, err := debugagent.NewServer(config)
if err != nil {
log.Fatal(err)
}
@@ -27,5 +26,5 @@ func main() {
log.Fatal(err)
}
- log.Println("sever stopped, see you next time!")
+ log.Println("server stopped, see you next time!")
}
diff --git a/cmd/kubectl-debug/main.go b/cmd/kubectl-debug/main.go
new file mode 100644
index 0000000..aba2051
--- /dev/null
+++ b/cmd/kubectl-debug/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "github.com/jamestgrant/kubectl-debug/pkg/kubectl-debug"
+ "github.com/spf13/pflag"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "os"
+)
+
+func main() {
+ flags := pflag.NewFlagSet("kubectldebug", pflag.ExitOnError)
+ pflag.CommandLine = flags
+
+ // bypass to DebugCmd
+ cmd := kubectldebug.NewDebugCmd(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
+ if err := cmd.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/cmd/plugin/main.go b/cmd/plugin/main.go
deleted file mode 100644
index 7582625..0000000
--- a/cmd/plugin/main.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-import (
- "github.com/aylei/kubectl-debug/pkg/plugin"
- "github.com/spf13/pflag"
- "k8s.io/cli-runtime/pkg/genericclioptions"
- _ "k8s.io/client-go/plugin/pkg/client/auth"
- "os"
-)
-
-func main() {
- flags := pflag.NewFlagSet("kubectl-debug", pflag.ExitOnError)
- pflag.CommandLine = flags
-
- // bypass to DebugCmd
- cmd := plugin.NewDebugCmd(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
- if err := cmd.Execute(); err != nil {
- os.Exit(1)
- }
-}
diff --git a/contrib/helm/kubectl-debug/.gitignore b/contrib/helm/kubectl-debug/.gitignore
deleted file mode 100644
index 4519e33..0000000
--- a/contrib/helm/kubectl-debug/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-charts/
-kubeconfig.yaml
-kubeconfig.yml
-cdsctl
-.cds
\ No newline at end of file
diff --git a/contrib/helm/kubectl-debug/.helmignore b/contrib/helm/kubectl-debug/.helmignore
deleted file mode 100644
index b2767ae..0000000
--- a/contrib/helm/kubectl-debug/.helmignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.git
-# OWNERS file for Kubernetes
-OWNERS
diff --git a/contrib/helm/kubectl-debug/Chart.yaml b/contrib/helm/kubectl-debug/Chart.yaml
deleted file mode 100644
index ea93c5b..0000000
--- a/contrib/helm/kubectl-debug/Chart.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: kubectl-debug
-version: v0.1.1
-appVersion: v0.1.1
-description: Out-of-tree solution for troubleshooting running pods, which allows you to run a new container in running pods for debugging purpose
-keywords:
-- troubleshooting
-- debuging
-- devops
-home: https://github.com/aylei/kubectl-debug
-# No known icon yet
-# icon: https://
-sources:
-- https://github.com/aylei/kubectl-debug
-maintainers:
-- name: Joel Seguillon
- email: joel.seguillon@gmail.com
-- name: Yeh-lei Wu
- email: rayingecho@gmail.com
-engine: gotpl
diff --git a/contrib/helm/kubectl-debug/OWNERS b/contrib/helm/kubectl-debug/OWNERS
deleted file mode 100644
index e9da2a0..0000000
--- a/contrib/helm/kubectl-debug/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-approvers:
-- aylei
-- jseguillon
-reviewers:
-- aylei
-- jseguillon
-
diff --git a/contrib/helm/kubectl-debug/README.md b/contrib/helm/kubectl-debug/README.md
deleted file mode 100644
index b26727d..0000000
--- a/contrib/helm/kubectl-debug/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Kubectl-debug
-
-[Kubectl-debug](https://github.com/aylei/kubectl-debug) is an out-of-tree solution for troubleshooting running pods, which allows you to run a new container in running pods for debugging purpose
-
-Documentation is available at https://github.com/aylei/kubectl-debug
-
-## TL;DR;
-
-```console
-cd contrib/helm/kubectl-debug
-helm install .
-```
-
-## Introduction
-
-This chart bootstraps a [Kubectl-debug](https://github.com/aylei/kubectl-debug) deployment of agent on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-## Prerequisites
-
-- Kubernetes 1.4+ with Beta APIs enabled
-
-## Installing the Chart
-
-To install the chart with the release name `my-release`:
-
-```console
-# Inside of kubectl-debug/contrib/helm/kubectl-debug
-helm install --name my-release .
-```
-
-> **Tip**: List all releases using `helm list`
-
-## Uninstalling the Chart
-
-To uninstall/delete the `my-release` deployment:
-
-```console
-$ helm delete my-release --purge
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-
-## Configuration
-
-Please refer to default values.yaml and source code
-Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
-
-Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
-
-```console
-$ helm install --name my-release -f values.yaml .
-```
-
-> **Tip**: You can use the default [values.yaml](values.yaml)
-
-## Image
-
-The `image` parameter allows specifying which image will be pulled for the chart.
diff --git a/contrib/helm/kubectl-debug/templates/NOTES.txt b/contrib/helm/kubectl-debug/templates/NOTES.txt
deleted file mode 100644
index 7f55dc8..0000000
--- a/contrib/helm/kubectl-debug/templates/NOTES.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-
-********************************************************************************
-*** PLEASE BE PATIENT: debug-agent may take a few minutes to install ***
-********************************************************************************
-
-Once the agent is running on each nodes, you can install the client :
-```
-# Linux
-curl -Lo kubectl-debug https://github.com/aylei/kubectl-debug/releases/download/0.0.1/kubectl-debug_0.0.1_linux-amd64
-
-# MacOS
-curl -Lo kubectl-debug https://github.com/aylei/kubectl-debug/releases/download/0.0.1/kubectl-debug_0.0.1_macos-amd64
-
-chmod +x ./kubectl-debug
-mv kubectl-debug /usr/local/bin/
-```
\ No newline at end of file
diff --git a/contrib/helm/kubectl-debug/templates/_helpers.tpl b/contrib/helm/kubectl-debug/templates/_helpers.tpl
deleted file mode 100644
index ea11f4b..0000000
--- a/contrib/helm/kubectl-debug/templates/_helpers.tpl
+++ /dev/null
@@ -1,16 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "kubectl-debug.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-*/}}
-{{- define "kubectl-debug.fullname" -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
diff --git a/contrib/helm/kubectl-debug/templates/agent-ds.yaml b/contrib/helm/kubectl-debug/templates/agent-ds.yaml
deleted file mode 100644
index 8b16344..0000000
--- a/contrib/helm/kubectl-debug/templates/agent-ds.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-{{- if ge .Capabilities.KubeVersion.Minor "16" }}
-apiVersion: apps/v1
-{{- else }}
-apiVersion: extensions/v1beta1
-{{- end }}
-kind: DaemonSet
-metadata:
- name: {{ template "kubectl-debug.fullname" . }}-agent
- labels:
- app: {{ template "kubectl-debug.name" . }}-agent
- chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
- release: "{{ .Release.Name }}"
- heritage: "{{ .Release.Service }}"
-spec:
- selector:
- matchLabels:
- app: {{ template "kubectl-debug.name" . }}-agent
- revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
- template:
- metadata:
- labels:
- app: {{ template "kubectl-debug.name" . }}-agent
- spec:
- {{- if .Values.image.pullSecrets }}
- imagePullSecrets:
- {{- range .Values.image.pullSecrets }}
- - name: {{ . }}
- {{- end}}
- {{- end }}
- containers:
- - image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
- livenessProbe:
- httpGet:
- path: /healthz
- port: {{ .Values.livenessProbe.port }}
- scheme: HTTP
- initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
- successThreshold: {{ .Values.livenessProbe.successThreshold }}
- failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
- name: {{ template "kubectl-debug.fullname" . }}-agent
- ports:
- - containerPort: 10027
- hostPort: 10027
- name: http
- protocol: TCP
- volumeMounts:
- - name: docker
- mountPath: "/var/run/docker.sock"
- hostNetwork: true
- volumes:
- - name: docker
- hostPath:
- path: /var/run/docker.sock
- {{- if .Values.nodeSelector }}
- nodeSelector:
-{{ toYaml .Values.nodeSelector | indent 8 }}
- {{- end }}
- {{- if .Values.tolerations }}
- tolerations:
-{{ toYaml .Values.tolerations | indent 8 }}
- {{- end }}
- {{- if .Values.affinity }}
- affinity:
-{{ toYaml .Values.affinity | indent 8 }}
- {{- end }}
- updateStrategy:
- rollingUpdate:
- maxUnavailable: 5
- type: RollingUpdate
diff --git a/contrib/helm/kubectl-debug/values.yaml b/contrib/helm/kubectl-debug/values.yaml
deleted file mode 100644
index 649d64d..0000000
--- a/contrib/helm/kubectl-debug/values.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-image:
- registry: docker.io
- repository: aylei/debug-agent
- tag: v0.1.1
- ## Specify a imagePullPolicy
- ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
- ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
- ##
- pullPolicy: IfNotPresent
- ## Optionally specify an array of imagePullSecrets.
- ## Secrets must be manually created in the namespace.
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ##
- # pullSecrets:
- # - myRegistrKeySecretName
-
-# The update strategy to apply to the Deployment or DaemonSet
-##
-revisionHistoryLimit: 10
-
-## Node tolerations for server scheduling to nodes with taints
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-##
-tolerations: []
-# - key: "key"
-# operator: "Equal|Exists"
-# value: "value"
-# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
-affinity: {}
-
-## Node labels for controller pod assignment
-## Ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
-nodeSelector: {}
-
-livenessProbe:
- failureThreshold: 3
- port: 10027
- initialDelaySeconds: 10
- periodSeconds: 10
- successThreshold: 1
- timeoutSeconds: 1
\ No newline at end of file
diff --git a/docs/design.md b/docs/design.md
deleted file mode 100644
index deff6ae..0000000
--- a/docs/design.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Under the hood
-
-`kubectl-debug` consists of 2 components:
-
-* the kubectl plugin: a cli client of `node agent`, serves `kubectl debug` command,
-* the node agent: responsible for manipulating the "debug container"; node agent will also act as a websockets relay for remote tty
-
-When user run `kubectl debug target-pod -c /bin/bash`:
-
-1. The plugin gets the pod info from apiserver and extract the `hostIP`, if the target container does not exist or is not currently running, an error is raised.
-2. The plugin sends an HTTP request to the specific node agent running on the `hostIP`, which includes a protocol upgrade from HTTP to SPDY.
-3. The agent runs a container in the pod's namespaces (ipc, pid, network, etc) with the STDIN stay open (`-i` flag).
-4. The agent checks if the target container is actively running, if not, write an error to client.
-5. The agent runs a `debug container` with `tty` and `stdin` opened, the `debug container` will join the `pid`, `network`, `ipc` and `user` namespace of the target container.
-6. The agent pipes the connection into the `debug container` using `attach`
-7. **Debug in the debug container**.
-8. Job is done, user closes the SPDY connection.
-9. The node agent closes the SPDY connection, then waits for the `debug container` to exit and do the cleanup.
\ No newline at end of file
diff --git a/docs/design/centralized-auth-and-proxy.md b/docs/design/centralized-auth-and-proxy.md
deleted file mode 100644
index e350ae6..0000000
--- a/docs/design/centralized-auth-and-proxy.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Centralized auth and proxy
-
-## Problems
-
-`kubectl-debug` is relying on the `debug-agent` DaemonSet to establish debug connection, which has the following problems:
-
-* `debug-agent` runs in host network, but the node is not always accessible publicly, especially in the public cloud, e.g. GKE.
-* `debug-agent` do not authz & authn request currently, which limits the usage of `kubectl debug` in serious environment.
-* `debug-agent` is a rare operation, but `debug agent` is always consuming a few resources. We may consider agentless mode as an option.
-
-## Proposal Design
-
-The general idea is: instead of talking to `debug-agent` directly, `kubectl-debug` should talk to `apiserver` only.
-
-To achieve this goal, we introduce an `ExtendAPIServer` to handle the debug request, our `ExtendAPIServer`(`server` in short) should do the following things:
-
-* Transform the authz & authn request and delegate it to `APIServer`. In detail, `server` transform the `debug` request of a pod to a exec request of that pod, namely we inherit `debug` privilege from `exec`.
-* Send request to debug launching pod.
- * debug launching pod is either a pod in pre-installed DaemonSet or a pod created on demand.
- * launching pod is responsible to create the debug container and proxy back the terminal.
-* Proxy the terminal connection.
-* Coordinate cleanups.
-
-If we configure the `server` to create debug launching pod in target host on demand, then this is agent-less mode. Agent-less mode will leads to longer debug preparation time obviously.
-
-There's still a security issue about the debug launching pod, because it has the privilege to operate the local containers directly, which is really a super user power.
-
-Thus, we design the debug launching pod using an single purpose image: only the binary is installed like `distroless`.
-
-
-
diff --git a/docs/examples.md b/docs/examples.md
deleted file mode 100644
index d0365c4..0000000
--- a/docs/examples.md
+++ /dev/null
@@ -1,172 +0,0 @@
-# Debugging examples
-
-This guide will walk-through the typical debugging workflow of `kubectl-debug`.
-
-> **Note:** The rest of this document assumes you have installed and properly configured `kubectl-debug` according to the [Project REAMDE](/README.md).
-
-If you have any real world examples to share with `kubectl-debug`, feel free to open a pull request.
-
-Here's the config file for the following commands for you to re-produce all the command outputs:
-
-```yaml
-agent_port: 10027
-portForward: true
-agentless: true
-command:
-- '/bin/bash'
-- '-l'
-```
-
-## Basic
-
-`kubectl-debug` use [`nicolaka/netshoot`](https://github.com/nicolaka/netshoot) as the default debug image, the [project document](https://github.com/nicolaka/netshoot/blob/master/README.md) is a great guide about using various tools to troubleshoot your container network.
-
-We will take a few examples here to show how does the powerful `netshoot` work in the `kubectl-debug` context:
-
-Connect to pod:
-
-```shell
-➜ ~ kubectl debug demo-pod
-
-Agent Pod info: [Name:debug-agent-pod-da46a000-8429-11e9-a40c-8c8590147766, Namespace:default, Image:aylei/debug-agent:latest, HostPort:10027, ContainerPort:10027]
-Waiting for pod debug-agent-pod-da46a000-8429-11e9-a40c-8c8590147766 to run...
-pod demo-pod PodIP 10.233.111.78, agentPodIP 172.16.4.160
-wait for forward port to debug agent ready...
-Forwarding from 127.0.0.1:10027 -> 10027
-Forwarding from [::1]:10027 -> 10027
-Handling connection for 10027
- pulling image nicolaka/netshoot:latest...
-latest: Pulling from nicolaka/netshoot
-Digest: sha256:5b1f5d66c4fa48a931ff54f2f34e5771eff2bc5e615fef441d5858e30e9bb921
-Status: Image is up to date for nicolaka/netshoot:latest
-starting debug container...
-container created, open tty...
-
- [1] 🐳 → hostname
-demo-pod
-```
-
-Using **iftop** to inspect network traffic:
-```shell
-root @ /
- [2] 🐳 → iftop -i eth0
-interface: eth0
-IP address is: 10.233.111.78
-MAC address is: 86:c3:ae:9d:46:2b
-(CLI graph omitted)
-```
-
-Using **drill** to diagnose DNS:
-```shell
-root @ /
- [3] 🐳 → drill -V 5 demo-service
-;; ->>HEADER<<- opcode: QUERY, rcode: NOERROR, id: 0
-;; flags: rd ; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
-;; QUESTION SECTION:
-;; demo-service. IN A
-
-;; ANSWER SECTION:
-
-;; AUTHORITY SECTION:
-
-;; ADDITIONAL SECTION:
-
-;; Query time: 0 msec
-;; WHEN: Sat Jun 1 05:05:39 2019
-;; MSG SIZE rcvd: 0
-;; ->>HEADER<<- opcode: QUERY, rcode: NXDOMAIN, id: 62711
-;; flags: qr rd ra ; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 0
-;; QUESTION SECTION:
-;; demo-service. IN A
-
-;; ANSWER SECTION:
-
-;; AUTHORITY SECTION:
-. 30 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2019053101 1800 900 604800 86400
-
-;; ADDITIONAL SECTION:
-
-;; Query time: 58 msec
-;; SERVER: 10.233.0.10
-;; WHEN: Sat Jun 1 05:05:39 2019
-;; MSG SIZE rcvd: 121
-```
-
-### `proc` filesystem and FUSE
-
-It is common to use tools like `top`, `free` to inspect system metrics like CPU usage and memory. Unfortunately, these commands will display the metrics from the host system by default. Because they read the metrics from the `proc` filesystem (`/proc/*`), which is mounted from the host system.
-
-While this is acceptable (you can still inspect the metrics of container process in the host metrics), this can be misleading and
-counter-intuitive. A common solution is using a [FUSE](https://en.wikipedia.org/wiki/Filesystem_in_Userspace) filesystem, which is out of the scope of `kubectl-debug` plugin.
-
-You may find [this blog post](https://fabiokung.com/2014/03/13/memory-inside-linux-containers/) useful if you want to investigate this problem in depth.
-
-## Access the root filesystem of target container
-
-The root filesystem of target container is located in `/proc/{pid}/root/`, and the `pid` is 1 typically (Pod with [`sharingProcessNamespace`](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) enabled is an exception).
-
-```shell
-root @ /
- [4] 🐳 → tail /proc/1/root/log_
-Hello, world!
-```
-
-## Debug Pod in "CrashLoopBackoff"
-
-Troubleshooting `CrashLoopBackoff` of Kubernetes Pod can be tricky. The debug container process will be reaped once the target container (process with pid 1) exists. To tackle with this, `kubectl-debug` provides the `--fork` flag, which borrow the idea from the `oc debug` command: copy the currently Pod and re-produce the issue in the forked Pod.
-
-Under the hood, `kubectl debug --fork` will copy the entire Pod spec and:
-
-* strip all the labels, so that no traffic will be routed from service to this pod;
-* modify the entry-point of target container in order to hold the pid namespace and avoid the Pod crash again;
-
-Here's an example:
-
-```shell
-➜ ~ kubectl debug demo-pod --fork
-Agent Pod info: [Name:debug-agent-pod-dea9e7c8-8439-11e9-883a-8c8590147766, Namespace:default, Image:aylei/debug-agent:latest, HostPort:10027, ContainerPort:10027]
-Waiting for pod debug-agent-pod-dea9e7c8-8439-11e9-883a-8c8590147766 to run...
-Waiting for pod demo-pod-e23c1b68-8439-11e9-883a-8c8590147766-debug to run...
-pod demo-pod PodIP 10.233.111.90, agentPodIP 172.16.4.160
-wait for forward port to debug agent ready...
-Forwarding from 127.0.0.1:10027 -> 10027
-Forwarding from [::1]:10027 -> 10027
-Handling connection for 10027
- pulling image nicolaka/netshoot:latest...
-latest: Pulling from nicolaka/netshoot
-Digest: sha256:5b1f5d66c4fa48a931ff54f2f34e5771eff2bc5e615fef441d5858e30e9bb921
-Status: Image is up to date for nicolaka/netshoot:latest
-starting debug container...
-container created, open tty...
-
- [1] 🐳 → ps -ef
-PID USER TIME COMMAND
- 1 root 0:00 sh -c -- while true; do sleep 30; done;
- 6 root 0:00 sleep 30
- 7 root 0:00 /bin/bash -l
- 15 root 0:00 ps -ef
-```
-
-You can `chroot` to the root filesystem of target container to re-produce the error that causes the Pod to crash:
-
-```shell
-root @ /
- [4] 🐳 → chroot /proc/1/root
-
-root @ /
- [#] 🐳 → ls
- bin entrypoint.sh home lib64 mnt root sbin sys tmp var
- dev etc lib media proc run srv usr
-
-root @ /
- [#] 🐳 → ./entrypoint.sh
- (...errors)
-```
-
-## Debug init container
-
-Just like debugging the ordinary container, we can debug the init-container of Pod. In this case, you must specify the container name of init-container:
-
-```shell
-➜ ~ kubectl debug demo-pod --container=init-pod
-```
diff --git a/docs/kube-debug.gif b/docs/kube-debug.gif
deleted file mode 100644
index 8e31825..0000000
Binary files a/docs/kube-debug.gif and /dev/null differ
diff --git a/docs/zh-cn.md b/docs/zh-cn.md
deleted file mode 100644
index 8c38ffe..0000000
--- a/docs/zh-cn.md
+++ /dev/null
@@ -1,210 +0,0 @@
-# Kubectl debug
-
-
-[](https://travis-ci.org/aylei/kubectl-debug)
-[](https://goreportcard.com/report/github.com/aylei/kubectl-debug)
-[](https://hub.docker.com/r/aylei/debug-agent)
-
-[English](/README.md)
-
-# Overview
-
-`kubectl-debug` 是一个简单的 kubectl 插件, 能够帮助你便捷地进行 Kubernetes 上的 Pod 排障诊断. 背后做的事情很简单: 在运行中的 Pod 上额外起一个新容器, 并将新容器加入到目标容器的 `pid`, `network`, `user` 以及 `ipc` namespace 中, 这时我们就可以在新容器中直接用 `netstat`, `tcpdump` 这些熟悉的工具来解决问题了, 而旧容器可以保持最小化, 不需要预装任何额外的排障工具.
-
-更详细的介绍信息,可以参考这篇[博客文章](https://aleiwu.com/post/kubectl-debug-intro/)
-
-- [截图](#截图)
-- [快速开始](#快速开始)
-- [构建项目](#构建项目)
-- [port-forward 和 agentless 模式](#port-forward-模式和-agentless-模式)
-- [配置](#配置)
-- [权限](#权限)
-- [路线图](#路线图)
-- [贡献代码](#贡献代码)
-
-# 截图
-
-
-
-# 快速开始
-
-## 安装 kubectl debug 插件
-
-安装 kubectl 插件:
-
-使用 Homebrew:
-```shell
-brew install aylei/tap/kubectl-debug
-```
-
-直接下载预编译的压缩包:
-```bash
-export PLUGIN_VERSION=0.1.1
-# linux x86_64
-curl -Lo kubectl-debug.tar.gz https://github.com/aylei/kubectl-debug/releases/download/v${PLUGIN_VERSION}/kubectl-debug_${PLUGIN_VERSION}_linux_amd64.tar.gz
-# macos
-curl -Lo kubectl-debug.tar.gz https://github.com/aylei/kubectl-debug/releases/download/v${PLUGIN_VERSION}/kubectl-debug_${PLUGIN_VERSION}_darwin_amd64.tar.gz
-
-tar -zxvf kubectl-debug.tar.gz kubectl-debug
-sudo mv kubectl-debug /usr/local/bin/
-```
-
-Windows 用户可以从 [release page](https://github.com/aylei/kubectl-debug/releases/tag/v0.1.1) 进行下载并添加到 PATH 中
-
-## (可选) 安装 debug-agent DaemonSet
-
-`kubectl-debug` 包含两部分, 一部分是用户侧的 kubectl 插件, 另一部分是部署在所有 k8s 节点上的 agent(用于启动"新容器", 同时也作为 SPDY 连接的中继). 在 `agentless` 中, `kubectl-debug` 会在 debug 开始时创建 debug-agent Pod, 并在结束后自动清理.(默认开启agentless模式)
-
-`agentless` 虽然方便, 但会让 debug 的启动速度显著下降, 你可以通过预先安装 debug-agent 的 DaemonSet 并配合 --agentless=false 参数来使用 agent 模式, 加快启动速度:
-
-```bash
-# 如果你的kubernetes版本为v1.16或更高
-kubectl apply -f https://raw.githubusercontent.com/aylei/kubectl-debug/master/scripts/agent_daemonset.yml
-# 如果你使用的是旧版本的kubernetes(,,
-
-# 为了使 没有公网 IP 或无法直接访问(防火墙等原因)的 NODE 能够访问, 默认开启 port-forward 模式
-# 如果不需要开启port-forward模式, 可以使用 --port-forward=false 来关闭
-kubectl debug POD_NAME --port-forward=false --agentless=false --daemonset-ns=kube-system --daemonset-name=debug-agent
-
-# 老版本的 kubectl 无法自动发现插件, 需要直接调用 binary
-kubectl-debug POD_NAME
-
-# 使用私有仓库镜像,并设置私有仓库使用的kubernetes secret
-# secret data原文请设置为 {Username: , Password: }
-# 默认secret_name为kubectl-debug-registry-secret,默认namspace为default
-kubectl-debug POD_NAME --image calmkart/netshoot:latest --registry-secret-name --registry-secret-namespace
-
-# 在默认的agentless模式中,你可以设置agent pod的resource资源限制,如下示例
-# 若不设置,默认为空
-kubectl-debug POD_NAME --agent-pod-cpu-requests=250m --agent-pod-cpu-limits=500m --agent-pod-memory-requests=200Mi --agent-pod-memory-limits=500Mi
-```
-
-举例:
-```bash
-# 怎样创建一个私有仓库镜像secret
-# 以用户名'calmkart' 密码'calmkart'为例
-# 更多创建方式请参考kubernetes官方文档
-# https://kubernetes.io/docs/concepts/configuration/secret/
-echo -n '{Username: calmkart, Password: calmkart}' > ./registrySecret.txt
-kubectl create secret generic kubectl-debug-registry-secret --from-file=./registrySecret.txt
-```
-
-# 构建项目
-
-克隆仓库, 然后执行:
-```bash
-# make will build plugin binary and debug-agent image
-make
-# install plugin
-mv kubectl-debug /usr/local/bin
-
-# build plugin only
-make plugin
-# build agent only
-make agent-docker
-```
-
-# port-forward 模式和 agentless 模式(默认开启)
-
-- `port-foward`模式:默认情况下,`kubectl-debug`会直接与目标宿主机建立连接。当`kubectl-debug`无法与目标宿主机直连时,可以开启`port-forward`模式。`port-forward`模式下,本机会监听localhost:agentPort,并将数据转发至目标Pod的agentPort端口。
-
-- `agentless`模式: 默认情况下,`debug-agent`需要预先部署在集群每个节点上,会一直消耗集群资源,然而调试 Pod 是低频操作。为避免集群资源损失,在[#31](https://github.com/aylei/kubectl-debug/pull/31)增加了`agentless`模式。`agentless`模式下,`kubectl-debug`会先在目标Pod所在宿主机上启动`debug-agent`,然后再启动调试容器。用户调试结束后,`kubectl-debug`会依次删除调试容器和在目的主机启动的`debug-agent`。
-
-
-# 配置
-
-`kubectl-debug` 使用 [nicolaka/netshoot](https://github.com/nicolaka/netshoot) 作为默认镜像. 默认镜像和指令都可以通过命令行参数进行覆盖. 考虑到每次都指定有点麻烦, 也可以通过文件配置的形式进行覆盖, 编辑 `~/.kube/debug-config` 文件:
-
-```yaml
-# debug-agent 映射到宿主机的端口
-# 默认 10027
-agentPort: 10027
-
-# 是否开启ageless模式
-# 默认 true
-agentless: true
-# agentPod 的 namespace, agentless模式可用
-# 默认 default
-agentPodNamespace: default
-# agentPod 的名称前缀,后缀是目的主机名, agentless模式可用
-# 默认 debug-agent-pod
-agentPodNamePrefix: debug-agent-pod
-# agentPod 的镜像, agentless模式可用
-# 默认 aylei/debug-agent:latest
-agentImage: aylei/debug-agent:latest
-
-# debug-agent DaemonSet 的名字, port-forward 模式时会用到
-# 默认 'debug-agent'
-debugAgentDaemonset: debug-agent
-# debug-agent DaemonSet 的 namespace, port-forward 模式会用到
-# 默认 'default'
-debugAgentNamespace: kube-system
-# 是否开启 port-forward 模式
-# 默认 true
-portForward: true
-# image of the debug container
-# default as showed
-image: nicolaka/netshoot:latest
-# start command of the debug container
-# default ['bash']
-command:
-- '/bin/bash'
-- '-l'
-# private docker registry auth kuberntes secret, default is kubectl-debug-registry-secret
-# 使用私有仓库镜像,并设置私有仓库使用的kubernetes secret
-# secret data原文请设置为 {Username: , Password: }
-# 默认RegistrySecretName为kubectl-debug-registry-secret,默认RegistrySecretNamespace为default
-RegistrySecretName: my-debug-secret
-RegistrySecretNamespace: debug
-# 在默认的agentless模式下可以设置agent pod的resource资源限制
-# 若不设置,默认为空
-agentCpuRequests: ""
-agentCpuLimits: ""
-agentMemoryRequests: ""
-agentMemoryLimits: ""
-# 当使用fork mode时,如果需要复制出来的pod保留原pod的labels,可以设置需要保留的labels列表
-# 格式为[]string
-# 默认为空(既不保留任何原POD的labels,新fork出pod的labels)
-forkPodRetainLabels: []
-```
-
-> `kubectl-debug` 会将容器的 entrypoint 直接覆盖掉, 这是为了避免在 debug 时不小心启动非 shell 进程.
-
-# 权限
-
-目前, `kubectl-debug` 复用了 `pod/exec` 资源的权限来做鉴权. 也就是说, `kubectl-debug` 的权限要求是和 `kubectl exec` 一致的.
-
-# 路线图
-
-- [ ] 安全: 目前, `kubectl-debug` 是在客户端做鉴权的, 这部分应当被移动到服务端(debug-agent) 中
-- [ ] 更多的单元测试
-- [ ] 更多的故障诊断实例
-- [ ] e2e 测试
-
-# 贡献代码
-
-欢迎贡献代码或 issue!
diff --git a/go.mod b/go.mod
index 902aff2..719117b 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-module github.com/aylei/kubectl-debug
+module github.com/jamestgrant/kubectl-debug
go 1.12
@@ -64,6 +64,7 @@ require (
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common v0.0.0-20181218105931-67670fe90761
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
+ github.com/rs/xid v1.3.0
github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5
github.com/shurcooL/sanitized_anchor_name v1.0.0
github.com/sirupsen/logrus v1.4.2
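The new `github.com/rs/xid` dependency supplies short, time-ordered unique IDs (it is imported by the renamed cmd.go later in this diff). A minimal sketch of the library's basic use; the actual call site in kubectl-debug is not shown in this diff, so treat this purely as an illustration:

```go
package main

import (
	"fmt"

	"github.com/rs/xid"
)

func main() {
	// xid.New() returns a 12-byte, roughly time-sortable ID;
	// String() encodes it as a 20-character lowercase string,
	// convenient for unique pod or session names.
	id := xid.New()
	fmt.Println(id.String())
}
```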
diff --git a/go.sum b/go.sum
index d3fb9bf..2434b12 100644
--- a/go.sum
+++ b/go.sum
@@ -202,6 +202,8 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4=
+github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5 h1:+6eORf9Bt4C3Wjt91epyu6wvLW+P6+AEODb6uKgO+4g=
github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
diff --git a/pkg/agent/config.go b/pkg/debug-agent/config.go
similarity index 68%
rename from pkg/agent/config.go
rename to pkg/debug-agent/config.go
index 06e73a8..2952da7 100644
--- a/pkg/agent/config.go
+++ b/pkg/debug-agent/config.go
@@ -1,11 +1,10 @@
-package agent
+package debugagent
import (
"fmt"
+ "gopkg.in/yaml.v2"
"io/ioutil"
"time"
-
- "gopkg.in/yaml.v2"
)
var (
@@ -15,11 +14,9 @@ var (
RuntimeTimeout: 30 * time.Second,
StreamIdleTimeout: 10 * time.Minute,
StreamCreationTimeout: 15 * time.Second,
-
- ListenAddress: "0.0.0.0:10027",
-
- AuditFifo: "/var/data/kubectl-debug-audit-fifo/KCTLDBG-CONTAINER-ID",
- AuditShim: []string{"/usr/bin/strace", "-o", "KCTLDBG-FIFO", "-f", "-e", "trace=/exec"},
+ ListenAddress: "0.0.0.0:10027",
+ AuditFifo: "/var/data/kubectl-debug-audit-fifo/KCTLDBG-CONTAINER-ID",
+ AuditShim: []string{"/usr/bin/strace", "-o", "KCTLDBG-FIFO", "-f", "-e", "trace=/exec"},
}
)
@@ -29,13 +26,11 @@ type Config struct {
RuntimeTimeout time.Duration `yaml:"runtime_timeout,omitempty"`
StreamIdleTimeout time.Duration `yaml:"stream_idle_timeout,omitempty"`
StreamCreationTimeout time.Duration `yaml:"stream_creation_timeout,omitempty"`
-
- ListenAddress string `yaml:"listen_address,omitempty"`
- Verbosity int `yaml:"verbosity,omitempty"`
-
- Audit bool `yaml:"audit,omitempty"`
- AuditFifo string `yaml:"audit_fifo,omitempty"`
- AuditShim []string `yaml:"audit_shim,omitempty"`
+ ListenAddress string `yaml:"listen_address,omitempty"`
+ Verbosity int `yaml:"verbosity,omitempty"`
+ Audit bool `yaml:"audit,omitempty"`
+ AuditFifo string `yaml:"audit_fifo,omitempty"`
+ AuditShim []string `yaml:"audit_shim,omitempty"`
}
func Load(s string) (*Config, error) {
@@ -55,7 +50,7 @@ func Load(s string) (*Config, error) {
func LoadFile(filename string) (*Config, error) {
if len(filename) < 1 {
- fmt.Println("No config file provided. Using all default values.")
+ fmt.Println("No config file provided. Using default values.\r\n")
return &DefaultConfig, nil
}
fmt.Printf("Reading config file %v.\r\n", filename)
diff --git a/pkg/agent/lxcfs.go b/pkg/debug-agent/lxcfs.go
similarity index 98%
rename from pkg/agent/lxcfs.go
rename to pkg/debug-agent/lxcfs.go
index 6ebdf17..c2381f8 100644
--- a/pkg/agent/lxcfs.go
+++ b/pkg/debug-agent/lxcfs.go
@@ -1,4 +1,4 @@
-package agent
+package debugagent
import (
"bufio"
diff --git a/pkg/agent/resize.go b/pkg/debug-agent/resize.go
similarity index 98%
rename from pkg/agent/resize.go
rename to pkg/debug-agent/resize.go
index f70e6cf..50751d3 100644
--- a/pkg/agent/resize.go
+++ b/pkg/debug-agent/resize.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package agent
+package debugagent
import (
"k8s.io/apimachinery/pkg/util/runtime"
diff --git a/pkg/agent/runtime.go b/pkg/debug-agent/runtime.go
similarity index 98%
rename from pkg/agent/runtime.go
rename to pkg/debug-agent/runtime.go
index 9f84df6..c9f49e3 100644
--- a/pkg/agent/runtime.go
+++ b/pkg/debug-agent/runtime.go
@@ -1,4 +1,4 @@
-package agent
+package debugagent
import (
"bufio"
@@ -22,8 +22,6 @@ import (
"text/tabwriter"
"time"
- "github.com/aylei/kubectl-debug/pkg/nsenter"
- term "github.com/aylei/kubectl-debug/pkg/util"
containerd "github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
@@ -41,6 +39,8 @@ import (
dockerclient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/google/uuid"
+ "github.com/jamestgrant/kubectl-debug/pkg/nsenter"
+ term "github.com/jamestgrant/kubectl-debug/pkg/util"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -980,7 +980,7 @@ func (a *DebugAttacher) AttachContainer(name string, uid kubetype.UID, container
func (m *DebugAttacher) DebugContainer(cfg RunConfig) error {
if m.verbosity > 0 {
- log.Printf("Accept new debug request:\n\t target container: %s \n\t image: %s \n\t command: %v \n", m.idOfContainerToDebug, m.image, m.command)
+ log.Printf("Accept new debug request:\n\t target container: %s \n\t image: %s \n\t command: %v \n\r", m.idOfContainerToDebug, m.image, m.command)
}
// the following steps may takes much time,
@@ -1008,9 +1008,9 @@ func (m *DebugAttacher) DebugContainer(cfg RunConfig) error {
// }
// }
//} ()
- // step 0: set container procfs correct by lxcfs
+ // step 0: set container procfs to lxcfs
if cfg.verbosity > 0 {
- cfg.stdout.Write([]byte(fmt.Sprintf("set container procfs correct %t .. \n\r", m.lxcfsEnabled)))
+ cfg.stdout.Write([]byte(fmt.Sprintf("set container procfs to lxcfs: %t .. \n\r", m.lxcfsEnabled)))
}
if m.lxcfsEnabled {
if err := CheckLxcfsMount(); err != nil {
@@ -1026,9 +1026,9 @@ func (m *DebugAttacher) DebugContainer(cfg RunConfig) error {
if cfg.verbosity > 0 {
cfg.stdout.Write([]byte(fmt.Sprintf("pulling image %s, skip TLS %v... \n\r", m.image, m.registrySkipTLS)))
}
- err := m.containerRuntime.PullImage(m.context, m.image,
- m.registrySkipTLS, m.authStr, cfg)
+ err := m.containerRuntime.PullImage(m.context, m.image, m.registrySkipTLS, m.authStr, cfg)
if err != nil {
+ cfg.stdout.Write([]byte(fmt.Sprintf("failed to pull image %s \n\r", m.image)))
return err
}
diff --git a/pkg/agent/server.go b/pkg/debug-agent/server.go
similarity index 97%
rename from pkg/agent/server.go
rename to pkg/debug-agent/server.go
index d8e1113..9f4aea7 100644
--- a/pkg/agent/server.go
+++ b/pkg/debug-agent/server.go
@@ -1,4 +1,4 @@
-package agent
+package debugagent
import (
"context"
@@ -43,7 +43,7 @@ func (s *Server) Run() error {
}()
<-stop
- log.Println("shutting done server...")
+ log.Println("shutting down server...")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@@ -67,7 +67,7 @@ func maxInt(lhs, rhs int) int {
// if any error occurs above, an error status were written to the user's stderr.
func (s *Server) ServeDebug(w http.ResponseWriter, req *http.Request) {
- log.Println("receive debug request")
+ log.Println("received debug request")
containerUri := req.FormValue("container")
sverbosity := req.FormValue("verbosity")
@@ -162,5 +162,5 @@ func (s *Server) ServeDebug(w http.ResponseWriter, req *http.Request) {
}
func (s *Server) Healthz(w http.ResponseWriter, req *http.Request) {
- w.Write([]byte("I'm OK!"))
+ w.Write([]byte("I am OK"))
}
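The agent answers `/healthz` on its listen address (0.0.0.0:10027 by default, the same endpoint the old DaemonSet liveness probe used). A minimal Go sketch of probing it; `NODE_IP` is a placeholder for the node running the debug-agent:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"time"
)

func main() {
	// NODE_IP is a placeholder; 10027 is the default debug-agent port.
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Get("http://NODE_IP:10027/healthz")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect 200 and "I am OK"
}
```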
diff --git a/pkg/plugin/cmd.go b/pkg/kubectl-debug/cmd.go
similarity index 69%
rename from pkg/plugin/cmd.go
rename to pkg/kubectl-debug/cmd.go
index e20aef0..f6650c5 100644
--- a/pkg/plugin/cmd.go
+++ b/pkg/kubectl-debug/cmd.go
@@ -1,4 +1,4 @@
-package plugin
+package kubectldebug
import (
"context"
@@ -10,15 +10,11 @@ import (
"net/http"
"net/url"
"os"
- "os/user"
"path/filepath"
"strconv"
"sync"
"time"
- "github.com/aylei/kubectl-debug/version"
-
- term "github.com/aylei/kubectl-debug/pkg/util"
dockerterm "github.com/docker/docker/pkg/term"
"github.com/rs/xid"
"github.com/spf13/cobra"
@@ -42,97 +38,114 @@ import (
"k8s.io/kubernetes/pkg/client/conditions"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/interrupt"
+
+ term "github.com/jamestgrant/kubectl-debug/pkg/util"
+ "github.com/jamestgrant/kubectl-debug/version"
)
const (
example = `
- # debug a container in the running pod, the first container will be picked by default
- kubectl debug POD_NAME
-
- # specify namespace or container
- kubectl debug --namespace foo POD_NAME -c CONTAINER_NAME
-
- # override the default troubleshooting image
- kubectl debug POD_NAME --image aylei/debug-jvm
-
- # override entrypoint of debug container
- kubectl debug POD_NAME --image aylei/debug-jvm /bin/bash
-
- # override the debug config file
- kubectl debug POD_NAME --debug-config ./debug-config.yml
-
- # check version
- kubectl --version
+ # print the help
+ kubectl-debug -h
+
+ # start the debug container in the same Linux namespaces and cgroup as container 'TARGET_CONTAINER_NAME'
+ # in pod 'POD_NAME' in namespace 'NAMESPACE'
+ kubectl-debug --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME
+
+ # in case your pod is stuck in CrashLoopBackoff state and cannot be connected to,
+ # you can fork a new pod and diagnose the problem in the forked pod
+ kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --fork
+
+ # In 'fork' mode, if you want the copied pod to retain the labels of the original pod, you can
+ # use the --fork-pod-retain-labels parameter (comma separated, no spaces). If not set (the default),
+ # no labels of the original pod are retained and the forked pod's labels are empty.
+ # Example of fork mode:
+ kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --fork --fork-pod-retain-labels=,,
+
+ # port-forward mode is enabled by default so that kubectl-debug can reach the debug-agent pod even
+ # when its node has no public IP or cannot be reached directly (firewalls and other reasons).
+ # If you don't want port-forward mode, you can turn it off with --port-forward=false.
+ kubectl-debug --port-forward=false --namespace NAMESPACE POD_NAME -c CONTAINER_NAME
+
+ # you can choose a different debug container image. By default, nicolaka/netshoot:latest will be used
+ # but you can specify anything you like
+ kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --image nicolaka/netshoot:latest
+
+ # you can set the debug-agent pod's resource limits/requests, for example:
+ # default is not set
+ kubectl-debug --namespace NAMESPACE POD_NAME -c CONTAINER_NAME --agent-pod-cpu-requests=250m --agent-pod-cpu-limits=500m --agent-pod-memory-requests=200Mi --agent-pod-memory-limits=500Mi
+
+ # use a private docker registry: set the kubernetes secret used to pull the image
+ # the default registry-secret-name is kubectl-debug-registry-secret, the default namespace is default
+ # please set the secret data source as {Username: , Password: }
+ kubectl-debug --namespace NAMESPACE POD_NAME --image nicolaka/netshoot:latest --registry-secret-name --registry-secret-namespace
`
longDesc = `
-Run a container in a running pod, this container will join the namespaces of an existing container of the pod.
-
-You may set default configuration such as image and command in the config file, which locates in "~/.kube/debug-config" by default.
+ kubectl-debug is an 'out-of-tree' solution for connecting to and troubleshooting an existing,
+ running, 'target' container in an existing pod in a Kubernetes cluster.
+ The target container may have a shell and busybox utils, and hence provide some debug capability,
+ or it may be very minimal and not even provide a shell, which makes any real-time
+ troubleshooting/debugging very difficult. kubectl-debug is designed to overcome that difficulty.
`
- defaultImage = "docker.io/nicolaka/netshoot:latest"
- defaultAgentPort = 10027
- defaultConfigLocation = "/.kube/debug-config"
- defaultDaemonSetName = "debug-agent"
- defaultDaemonSetNs = "default"
-
- usageError = "expects 'debug POD_NAME' for debug command"
-
- defaultAgentImage = "aylei/debug-agent:latest"
- defaultAgentImagePullPolicy = string(corev1.PullIfNotPresent)
- defaultAgentImagePullSecretName = ""
- defaultAgentPodNamePrefix = "debug-agent-pod"
- defaultAgentPodNamespace = "default"
- defaultAgentPodCpuRequests = ""
- defaultAgentPodCpuLimits = ""
- defaultAgentPodMemoryRequests = ""
- defaultAgentPodMemoryLimits = ""
+ usageError = "run like this: kubectl-debug --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME"
+ defaultDebugContainerImage = "docker.io/nicolaka/netshoot:latest"
+
+ defaultDebugAgentPort = 10027
+ defaultDebugAgentConfigFileLocation = "/tmp/debugAgentConfigFile"
+ defaultDebugAgentImage = "jamesgrantmediakind/debug-agent:latest"
+ defaultDebugAgentImagePullPolicy = string(corev1.PullIfNotPresent)
+ defaultDebugAgentImagePullSecretName = ""
+ defaultDebugAgentPodNamePrefix = "debug-agent-pod"
+ defaultDebugAgentPodNamespace = "default"
+ defaultDebugAgentPodCpuRequests = ""
+ defaultDebugAgentPodCpuLimits = ""
+ defaultDebugAgentPodMemoryRequests = ""
+ defaultDebugAgentPodMemoryLimits = ""
+ defaultDebugAgentDaemonSetName = "debug-agent"
defaultRegistrySecretName = "kubectl-debug-registry-secret"
defaultRegistrySecretNamespace = "default"
defaultRegistrySkipTLSVerify = false
-
- defaultPortForward = true
- defaultAgentless = true
- defaultLxcfsEnable = true
- defaultVerbosity = 0
-
- enableLxcsFlag = "enable-lxcfs"
- portForwardFlag = "port-forward"
- agentlessFlag = "agentless"
+ defaultPortForward = true
+ defaultCreateDebugAgentPod = true
+ defaultLxcfsEnable = true
+ defaultVerbosity = 0
)
// DebugOptions specify how to run debug container in a running pod
type DebugOptions struct {
- // Pod select options
- Namespace string
- PodName string
+ // target pod select options
+ Namespace string
+ PodName string
+ Fork bool
+ ForkPodRetainLabels []string
- // Debug options
+ // Debug-container options
Image string
RegistrySecretName string
RegistrySecretNamespace string
RegistrySkipTLSVerify bool
-
- ContainerName string
- Command []string
- AgentPort int
- AppName string
- ConfigLocation string
- Fork bool
- ForkPodRetainLabels []string
- //used for agentless mode
- AgentLess bool
+ IsLxcfsEnabled bool
+ ContainerName string
+ Command []string
+ AppName string
+ ConfigLocation string
+
+ // Debug-agent options
+ CreateDebugAgentPod bool
AgentImage string
+ AgentPort int
AgentImagePullPolicy string
AgentImagePullSecretName string
+
// agentPodName = agentPodNamePrefix + nodeName
AgentPodName string
AgentPodNamespace string
AgentPodNode string
AgentPodResource agentPodResources
- // enable lxcfs
- IsLxcfsEnabled bool
Flags *genericclioptions.ConfigFlags
CoreClient coreclient.CoreV1Interface
@@ -184,9 +197,9 @@ func NewDebugCmd(streams genericclioptions.IOStreams) *cobra.Command {
opts := NewDebugOptions(streams)
cmd := &cobra.Command{
- Use: "debug POD [-c CONTAINER] -- COMMAND [args...]",
+ Use: "kubectl-debug --namespace NAMESPACE POD_NAME -c TARGET_CONTAINER_NAME",
DisableFlagsInUseLine: true,
- Short: "Run a container in a running pod",
+ Short: "Launch a debug container, attached to a target container in a running pod",
Long: longDesc,
Example: example,
Version: version.Version(),
@@ -197,57 +210,81 @@ func NewDebugCmd(streams genericclioptions.IOStreams) *cobra.Command {
cmdutil.CheckErr(opts.Run())
},
}
- //cmd.Flags().BoolVarP(&opts.RetainContainer, "retain", "r", defaultRetain,
- // fmt.Sprintf("Retain container after debug session closed, default to %s", defaultRetain))
+
cmd.Flags().StringVar(&opts.Image, "image", "",
- fmt.Sprintf("Container Image to run the debug container, default to %s", defaultImage))
+ fmt.Sprintf("the debug container image, default: %s", defaultDebugContainerImage))
+
cmd.Flags().StringVar(&opts.RegistrySecretName, "registry-secret-name", "",
- "private registry secret name, default is kubectl-debug-registry-secret")
+ fmt.Sprintf("private registry secret name, default: %s", defaultRegistrySecretName))
+
cmd.Flags().StringVar(&opts.RegistrySecretNamespace, "registry-secret-namespace", "",
- "private registry secret namespace, default is default")
+ fmt.Sprintf("private registry secret namespace, default: %s", defaultRegistrySecretNamespace))
+
cmd.Flags().BoolVar(&opts.RegistrySkipTLSVerify, "registry-skip-tls-verify", false,
- "If true, the registry's certificate will not be checked for validity. This will make your HTTPS connections insecure")
+ fmt.Sprintf("if true, the registry's certificate will not be checked for validity. This will make your HTTPS connections insecure, default: %s", defaultRegistrySkipTLSVerify))
+
cmd.Flags().StringSliceVar(&opts.ForkPodRetainLabels, "fork-pod-retain-labels", []string{},
- "in fork mode the pod labels retain labels name list, default is not set")
+ "list of pod labels to retain when in fork mode, default: not set")
+
cmd.Flags().StringVarP(&opts.ContainerName, "container", "c", "",
- "Target container to debug, default to the first container in pod")
+ "target container to debug, defaults to the first container in target pod spec")
+
cmd.Flags().IntVarP(&opts.AgentPort, "port", "p", 0,
- fmt.Sprintf("Agent port for debug cli to connect, default to %d", defaultAgentPort))
- cmd.Flags().StringVar(&opts.ConfigLocation, "debug-config", "",
- fmt.Sprintf("Debug config file, default to ~%s", filepath.FromSlash(defaultConfigLocation)))
+ fmt.Sprintf("debug-agent port to which kubectl-debug will connect, default: %d", defaultDebugAgentPort))
+
+ cmd.Flags().StringVar(&opts.ConfigLocation, "configfile", "",
+ fmt.Sprintf("debug-agent config file (including path), if no config file is present at the specified location then default values are used. Default: %s", filepath.FromSlash(defaultDebugAgentConfigFileLocation)))
+
cmd.Flags().BoolVar(&opts.Fork, "fork", false,
- "Fork a new pod for debugging (useful if the pod status is CrashLoopBackoff)")
- cmd.Flags().BoolVar(&opts.PortForward, portForwardFlag, true,
- fmt.Sprintf("Whether using port-forward to connect debug-agent, default to %t", defaultPortForward))
+ "fork a new pod for debugging (useful if the pod status is CrashLoopBackoff)")
+
+ cmd.Flags().BoolVar(&opts.PortForward, "port-forward", true,
+ fmt.Sprintf("use port-forward to connect from kubectl-debug to debug-agent pod, default: %t", defaultPortForward))
+
+ // it may be that someone has already deployed a daemonset containing the debug-agent pod, in which case we can use that (create-debug-agent-pod must be 'false' for this param to be used)
cmd.Flags().StringVar(&opts.DebugAgentDaemonSet, "daemonset-name", opts.DebugAgentDaemonSet,
- "Debug agent daemonset name when using port-forward")
- cmd.Flags().StringVar(&opts.DebugAgentNamespace, "daemonset-ns", opts.DebugAgentNamespace,
- "Debug agent namespace, default to 'default'")
- // flags used for agentless mode.
- cmd.Flags().BoolVarP(&opts.AgentLess, agentlessFlag, "a", true,
- fmt.Sprintf("Whether to turn on agentless mode. Agentless mode: debug target pod if there isn't an agent running on the target host, default to %t", defaultAgentless))
- cmd.Flags().StringVar(&opts.AgentImage, "agent-image", "",
- fmt.Sprintf("Agentless mode, the container Image to run the agent container , default to %s", defaultAgentImage))
+ fmt.Sprintf("debug agent daemonset name when using port-forward, default: %s", defaultDebugAgentDaemonSetName))
+
+ cmd.Flags().StringVar(&opts.DebugAgentNamespace, "debug-agent-namespace", opts.DebugAgentNamespace,
+ fmt.Sprintf("namespace in which to create the debug-agent pod, default: %s", defaultDebugAgentPodNamespace))
+
+ // flags used for daemonsetless, aka createDebugAgentPod mode.
+ cmd.Flags().BoolVarP(&opts.CreateDebugAgentPod, "create-debug-agent-pod", "a", true,
+ fmt.Sprintf("debug-agent pod will be automatically created on the target host, default: %t", defaultCreateDebugAgentPod))
+
+ cmd.Flags().StringVar(&opts.AgentImage, "debug-agent-image", "",
+ fmt.Sprintf("the image of the debug-agent container, default: %s", defaultDebugAgentImage))
+
cmd.Flags().StringVar(&opts.AgentImagePullPolicy, "agent-pull-policy", "",
- fmt.Sprintf("Agentless mode, the container Image pull policy , default to %s", defaultAgentImagePullPolicy))
+ fmt.Sprintf("the debug-agent container image pull policy, default: %s", defaultDebugAgentImagePullPolicy))
+
cmd.Flags().StringVar(&opts.AgentImagePullSecretName, "agent-pull-secret-name", "",
- fmt.Sprintf("Agentless mode, the container Image pull secret name , default to empty"))
+ fmt.Sprintf("the debug-agent container image pull secret name, default to empty"))
+
cmd.Flags().StringVar(&opts.AgentPodName, "agent-pod-name-prefix", "",
- fmt.Sprintf("Agentless mode, pod name prefix , default to %s", defaultAgentPodNamePrefix))
+ fmt.Sprintf("debug-agent pod name prefix , default to %s", defaultDebugAgentPodNamePrefix))
+
cmd.Flags().StringVar(&opts.AgentPodNamespace, "agent-pod-namespace", "",
- fmt.Sprintf("Agentless mode, agent pod namespace, default to %s", defaultAgentPodNamespace))
+ fmt.Sprintf("agent pod namespace, default: %s", defaultDebugAgentPodNamespace))
+
cmd.Flags().StringVar(&opts.AgentPodResource.CpuRequests, "agent-pod-cpu-requests", "",
- fmt.Sprintf("Agentless mode, agent pod cpu requests, default is not set"))
+ fmt.Sprintf("agent pod cpu requests, default is not set"))
+
cmd.Flags().StringVar(&opts.AgentPodResource.MemoryRequests, "agent-pod-memory-requests", "",
- fmt.Sprintf("Agentless mode, agent pod memory requests, default is not set"))
+ fmt.Sprintf("agent pod memory requests, default is not set"))
+
cmd.Flags().StringVar(&opts.AgentPodResource.CpuLimits, "agent-pod-cpu-limits", "",
- fmt.Sprintf("Agentless mode, agent pod cpu limits, default is not set"))
+ fmt.Sprintf("agent pod cpu limits, default is not set"))
+
cmd.Flags().StringVar(&opts.AgentPodResource.MemoryLimits, "agent-pod-memory-limits", "",
- fmt.Sprintf("Agentless mode, agent pod memory limits, default is not set"))
- cmd.Flags().BoolVarP(&opts.IsLxcfsEnabled, enableLxcsFlag, "", true,
- fmt.Sprintf("Enable Lxcfs, the target container can use its proc files, default to %t", defaultLxcfsEnable))
- cmd.Flags().IntVarP(&opts.Verbosity, "verbosity ", "v", 0,
- fmt.Sprintf("Set logging verbosity, default to %d", defaultVerbosity))
+ fmt.Sprintf("agent pod memory limits, default is not set"))
+
+ cmd.Flags().BoolVarP(&opts.IsLxcfsEnabled, "enable-lxcfs", "", true,
+ fmt.Sprintf("Enable Lxcfs, the target container can use its proc files, default: %t", defaultLxcfsEnable))
+
+ cmd.Flags().IntVarP(&opts.Verbosity, "verbosity", "v", 0,
+ fmt.Sprintf("Set logging verbosity, default: %d", defaultVerbosity))
+
opts.Flags.AddFlags(cmd.Flags())
return cmd
@@ -276,12 +313,11 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
o.PodName = args[0]
- // read defaults from config file
+ // read values from config file
configFile := o.ConfigLocation
if len(o.ConfigLocation) < 1 {
- usr, err := user.Current()
if err == nil {
- configFile = usr.HomeDir + filepath.FromSlash(defaultConfigLocation)
+ configFile = filepath.FromSlash(defaultDebugAgentConfigFileLocation)
}
}
config, err := LoadFile(configFile)
@@ -293,7 +329,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
config = &Config{}
}
- // combine defaults, config file and user parameters
+ // combine hardcoded defaults, config-file values, and user-specified CLI values
o.Command = args[1:]
if len(o.Command) < 1 {
if len(config.Command) > 0 {
@@ -302,13 +338,15 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
o.Command = []string{"bash"}
}
}
+
if len(o.Image) < 1 {
if len(config.Image) > 0 {
o.Image = config.Image
} else {
- o.Image = defaultImage
+ o.Image = defaultDebugContainerImage
}
}
+
if len(o.RegistrySecretName) < 1 {
if len(config.RegistrySecretName) > 0 {
o.RegistrySecretName = config.RegistrySecretName
@@ -316,6 +354,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
o.RegistrySecretName = defaultRegistrySecretName
}
}
+
if len(o.RegistrySecretNamespace) < 1 {
if len(config.RegistrySecretNamespace) > 0 {
o.RegistrySecretNamespace = config.RegistrySecretNamespace
@@ -323,6 +362,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
o.RegistrySecretNamespace = defaultRegistrySecretNamespace
}
}
+
if !o.RegistrySkipTLSVerify {
if config.RegistrySkipTLSVerify {
o.RegistrySkipTLSVerify = config.RegistrySkipTLSVerify
@@ -330,24 +370,18 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
o.RegistrySkipTLSVerify = defaultRegistrySkipTLSVerify
}
}
+
if len(o.ForkPodRetainLabels) < 1 {
if len(config.ForkPodRetainLabels) > 0 {
o.ForkPodRetainLabels = config.ForkPodRetainLabels
}
}
+
if o.AgentPort < 1 {
if config.AgentPort > 0 {
o.AgentPort = config.AgentPort
} else {
- o.AgentPort = defaultAgentPort
- }
- }
-
- if o.Verbosity < 1 {
- if config.Verbosity > 0 {
- o.Verbosity = config.Verbosity
- } else {
- o.Verbosity = defaultVerbosity
+ o.AgentPort = defaultDebugAgentPort
}
}
@@ -355,14 +389,15 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.DebugAgentNamespace) > 0 {
o.DebugAgentNamespace = config.DebugAgentNamespace
} else {
- o.DebugAgentNamespace = defaultDaemonSetNs
+ o.DebugAgentNamespace = defaultDebugAgentPodNamespace
}
}
+
if len(o.DebugAgentDaemonSet) < 1 {
if len(config.DebugAgentDaemonSet) > 0 {
o.DebugAgentDaemonSet = config.DebugAgentDaemonSet
} else {
- o.DebugAgentDaemonSet = defaultDaemonSetName
+ o.DebugAgentDaemonSet = defaultDebugAgentDaemonSetName
}
}
@@ -370,7 +405,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodNamePrefix) > 0 {
o.AgentPodName = config.AgentPodNamePrefix
} else {
- o.AgentPodName = defaultAgentPodNamePrefix
+ o.AgentPodName = defaultDebugAgentPodNamePrefix
}
}
@@ -378,7 +413,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentImage) > 0 {
o.AgentImage = config.AgentImage
} else {
- o.AgentImage = defaultAgentImage
+ o.AgentImage = defaultDebugAgentImage
}
}
@@ -386,7 +421,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentImagePullPolicy) > 0 {
o.AgentImagePullPolicy = config.AgentImagePullPolicy
} else {
- o.AgentImagePullPolicy = defaultAgentImagePullPolicy
+ o.AgentImagePullPolicy = defaultDebugAgentImagePullPolicy
}
}
@@ -394,7 +429,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentImagePullSecretName) > 0 {
o.AgentImagePullSecretName = config.AgentImagePullSecretName
} else {
- o.AgentImagePullSecretName = defaultAgentImagePullSecretName
+ o.AgentImagePullSecretName = defaultDebugAgentImagePullSecretName
}
}
@@ -402,7 +437,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodNamespace) > 0 {
o.AgentPodNamespace = config.AgentPodNamespace
} else {
- o.AgentPodNamespace = defaultAgentPodNamespace
+ o.AgentPodNamespace = defaultDebugAgentPodNamespace
}
}
@@ -410,7 +445,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodCpuRequests) > 0 {
o.AgentPodResource.CpuRequests = config.AgentPodCpuRequests
} else {
- o.AgentPodResource.CpuRequests = defaultAgentPodCpuRequests
+ o.AgentPodResource.CpuRequests = defaultDebugAgentPodCpuRequests
}
}
@@ -418,7 +453,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodMemoryRequests) > 0 {
o.AgentPodResource.MemoryRequests = config.AgentPodMemoryRequests
} else {
- o.AgentPodResource.MemoryRequests = defaultAgentPodMemoryRequests
+ o.AgentPodResource.MemoryRequests = defaultDebugAgentPodMemoryRequests
}
}
@@ -426,7 +461,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodCpuLimits) > 0 {
o.AgentPodResource.CpuLimits = config.AgentPodCpuLimits
} else {
- o.AgentPodResource.CpuLimits = defaultAgentPodCpuLimits
+ o.AgentPodResource.CpuLimits = defaultDebugAgentPodCpuLimits
}
}
@@ -434,20 +469,40 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if len(config.AgentPodMemoryLimits) > 0 {
o.AgentPodResource.MemoryLimits = config.AgentPodMemoryLimits
} else {
- o.AgentPodResource.MemoryLimits = defaultAgentPodMemoryLimits
+ o.AgentPodResource.MemoryLimits = defaultDebugAgentPodMemoryLimits
+ }
+ }
+
+ if o.Verbosity < 1 {
+ if config.Verbosity > 0 {
+ o.Verbosity = config.Verbosity
+ } else {
+ o.Verbosity = defaultVerbosity
}
}
- if !cmd.Flag(enableLxcsFlag).Changed {
- o.IsLxcfsEnabled = config.IsLxcfsEnabled
+ if !o.IsLxcfsEnabled {
+ if config.IsLxcfsEnabled {
+ o.IsLxcfsEnabled = config.IsLxcfsEnabled
+ } else {
+ o.IsLxcfsEnabled = defaultLxcfsEnable
+ }
}
- if !cmd.Flag(portForwardFlag).Changed {
- o.PortForward = config.PortForward
+ if !o.CreateDebugAgentPod {
+ if config.CreateDebugAgentPod {
+ o.CreateDebugAgentPod = config.CreateDebugAgentPod
+ } else {
+ o.CreateDebugAgentPod = defaultCreateDebugAgentPod
+ }
}
- if !cmd.Flag(agentlessFlag).Changed {
- o.AgentLess = config.Agentless
+ if !o.PortForward {
+ if config.PortForward {
+ o.PortForward = config.PortForward
+ } else {
+ o.PortForward = defaultPortForward
+ }
}
o.Ports = []string{strconv.Itoa(o.AgentPort)}
@@ -477,13 +532,13 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
if o.Flags.Context != nil && len(*o.Flags.Context) > 0 {
// --context : "The name of the kubeconfig context to use"
cfgCtxt = rwCfg.Contexts[*o.Flags.Context]
- log.Printf("Getting user name from context '%v' received from switch --context\r\n", *o.Flags.Context)
+ log.Printf("Getting user name from kubectl context '%v' received from switch --context\r\n", *o.Flags.Context)
} else {
cfgCtxt = rwCfg.Contexts[rwCfg.CurrentContext]
- log.Printf("Getting user name from default context '%v'\r\n", rwCfg.CurrentContext)
+ log.Printf("Getting user name from default kubectl context '%v'\r\n", rwCfg.CurrentContext)
}
o.UserName = cfgCtxt.AuthInfo
- log.Printf("User name '%v' received from context\r\n", o.UserName)
+ log.Printf("User name '%v' received from kubectl context\r\n", o.UserName)
}
clientset, err := kubernetes.NewForConfig(o.Config)
@@ -500,7 +555,7 @@ func (o *DebugOptions) Complete(cmd *cobra.Command, args []string, argsLenAtDash
// Validate validate
func (o *DebugOptions) Validate() error {
if len(o.PodName) == 0 {
- return fmt.Errorf("pod name must be specified")
+ return fmt.Errorf("target pod name must be specified")
}
if len(o.Command) == 0 {
return fmt.Errorf("you must specify at least one command for the container")
@@ -513,13 +568,14 @@ func (o *DebugOptions) Validate() error {
func (o *DebugOptions) Run() error {
pod, err := o.CoreClient.Pods(o.Namespace).Get(o.PodName, v1.GetOptions{})
if err != nil {
+ o.Logger.Printf("error with pod spec")
return err
}
containerName := o.ContainerName
if len(containerName) == 0 {
if len(pod.Spec.Containers) > 1 {
- usageString := fmt.Sprintf("Defaulting container name to %s.", pod.Spec.Containers[0].Name)
+ usageString := fmt.Sprintf("No container name specified, choosing container: %s.", pod.Spec.Containers[0].Name)
fmt.Fprintf(o.ErrOut, "%s\n\r", usageString)
}
containerName = pod.Spec.Containers[0].Name
@@ -528,15 +584,15 @@ func (o *DebugOptions) Run() error {
if err != nil {
return err
}
- // Launch debug launching pod in agentless mode.
+ // Launch the debug-agent pod when running in createDebugAgentPod mode.
var agentPod *corev1.Pod
- if o.AgentLess {
+ if o.CreateDebugAgentPod {
o.AgentPodNode = pod.Spec.NodeName
o.AgentPodName = fmt.Sprintf("%s-%s", o.AgentPodName, uuid.NewUUID())
agentPod = o.getAgentPod()
agentPod, err = o.launchPod(agentPod)
if err != nil {
- fmt.Fprintf(o.Out, "the agentPod is not running, you should check the reason and delete the failed agentPod and retry.\n")
+ fmt.Fprintf(o.Out, "the debug-agent pod is not running, you should check the reason, delete any failed debug-agent pod(s) and retry.\r\n")
return err
}
}
@@ -546,12 +602,13 @@ func (o *DebugOptions) Run() error {
// which keeps the container running.
if o.Fork {
// build the fork pod labels
+ fmt.Fprintf(o.Out, "Forked mode selected\n")
podLabels := o.buildForkPodLabels(pod)
// copy pod and run
pod = copyAndStripPod(pod, containerName, podLabels)
pod, err = o.launchPod(pod)
if err != nil {
- fmt.Fprintf(o.Out, "the ForkedPod is not running, you should check the reason and delete the failed ForkedPod and retry\n")
+ fmt.Fprintf(o.Out, "the ForkedPod is not running, you should check the reason and delete the failed ForkedPod and retry\r\n")
o.deleteAgent(agentPod)
return err
}
@@ -559,11 +616,12 @@ func (o *DebugOptions) Run() error {
if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
o.deleteAgent(agentPod)
- return fmt.Errorf("cannot debug in a completed pod; current phase is %s", pod.Status.Phase)
+ return fmt.Errorf("cannot debug in a completed pod; current pod phase is %s", pod.Status.Phase)
}
containerID, err := o.getContainerIDByName(pod, containerName)
if err != nil {
+ fmt.Fprintf(o.Out, "an error occured, pod is %s, container name is: %s . Will clean up and exit.\r\n", pod, containerName)
o.deleteAgent(agentPod)
return err
}
@@ -580,11 +638,10 @@ func (o *DebugOptions) Run() error {
if o.PortForward {
var agent *corev1.Pod
- if !o.AgentLess {
- // Agent is running
- if o.Verbosity > 0 {
- o.Logger.Printf("Fetching daemonset '%v' from namespace %v\r\n", o.DebugAgentDaemonSet, o.DebugAgentNamespace)
- }
+ if !o.CreateDebugAgentPod {
+ // See if there is a debug-agent pod running as a daemonset
+ o.Logger.Printf("See if there is a debug-agent pod running in a daemonset. daemonset '%v' from namespace %v\r\n", o.DebugAgentDaemonSet, o.DebugAgentNamespace)
+
daemonSet, err := o.KubeCli.AppsV1().DaemonSets(o.DebugAgentNamespace).Get(o.DebugAgentDaemonSet, v1.GetOptions{})
if err != nil {
return err
@@ -607,13 +664,14 @@ func (o *DebugOptions) Run() error {
}
if agent == nil {
- return fmt.Errorf("there is no agent pod in the same node with your specified pod %s", o.PodName)
+ return fmt.Errorf("there is no debug-agent pod running on the same node as your target pod %s\r\n", o.PodName)
}
if o.Verbosity > 0 {
- fmt.Fprintf(o.Out, "pod %s PodIP %s, agentPodIP %s\n", o.PodName, pod.Status.PodIP, agent.Status.HostIP)
+ fmt.Fprintf(o.Out, "target pod: %s target pod IP: %s, debug-agent pod IP: %s\r\n", o.PodName, pod.Status.PodIP, agent.Status.HostIP)
}
err = o.runPortForward(agent)
if err != nil {
+ fmt.Fprintf(o.Out, "an error has occured, will delete debug-agent pod and exit\r\n")
o.deleteAgent(agentPod)
return err
}
@@ -622,7 +680,7 @@ func (o *DebugOptions) Run() error {
// on specified ports in localhost, the ports can not access until receive the
// ready signal
if o.Verbosity > 0 {
- fmt.Fprintln(o.Out, "wait for forward port to debug agent ready...")
+ fmt.Fprintln(o.Out, "using port-forwarding. Waiting for port-forward connection with debug-agent...\r\n")
}
<-o.ReadyChannel
}
@@ -637,6 +695,7 @@ func (o *DebugOptions) Run() error {
}
uri, err := url.Parse(fmt.Sprintf("http://%s:%d", targetHost, o.AgentPort))
if err != nil {
+ o.Logger.Printf("error parsing url http://%s:%d", targetHost, o.AgentPort)
return err
}
uri.Path = fmt.Sprintf("/api/v1/debug")
@@ -662,7 +721,7 @@ func (o *DebugOptions) Run() error {
if err != nil {
if errors.IsNotFound(err) {
if o.Verbosity > 0 {
- o.Logger.Printf("Secret %v not found in namespace %v\r\n", o.RegistrySecretName, o.RegistrySecretNamespace)
+ o.Logger.Printf("Secret: %v not found in namespace: %v\r\n", o.RegistrySecretName, o.RegistrySecretNamespace)
}
authStr = ""
} else {
@@ -670,7 +729,7 @@ func (o *DebugOptions) Run() error {
}
} else {
if o.Verbosity > 1 {
- o.Logger.Printf("Found secret %v:%v\r\n", o.RegistrySecretNamespace, o.RegistrySecretName)
+ o.Logger.Printf("Found secret: %v:%v\r\n", o.RegistrySecretNamespace, o.RegistrySecretName)
}
authStr, _ = o.extractSecret(registrySecret.Data)
}
@@ -684,15 +743,15 @@ func (o *DebugOptions) Run() error {
return o.remoteExecute("POST", uri, o.Config, o.In, o.Out, o.ErrOut, t.Raw, sizeQueue)
}
- // ensure forked pod is deleted on cancelation
+ // ensure debug pod is deleted
withCleanUp := func() error {
return interrupt.Chain(nil, func() {
if o.Fork {
- fmt.Fprintf(o.Out, "Start deleting forked pod %s \n\r", pod.Name)
+ fmt.Fprintf(o.Out, "deleting forked pod: %s \n\r", pod.Name)
err := o.CoreClient.Pods(pod.Namespace).Delete(pod.Name, v1.NewDeleteOptions(0))
if err != nil {
- // we may leak pod here, but we have nothing to do except noticing the user
- fmt.Fprintf(o.ErrOut, "failed to delete forked pod[Name:%s, Namespace:%s], consider manual deletion.\n\r", pod.Name, pod.Namespace)
+ // we may leak pod here, but we have nothing to do except notify the user
+ fmt.Fprintf(o.ErrOut, "failed to delete forked pod: %s Namespace: %s, you may have to manually delete the pod.\n\r", pod.Name, pod.Namespace)
}
}
@@ -703,15 +762,15 @@ func (o *DebugOptions) Run() error {
}
}
// delete agent pod
- if o.AgentLess && agentPod != nil {
- fmt.Fprintf(o.Out, "Start deleting agent pod %s \n\r", pod.Name)
+ if o.CreateDebugAgentPod && agentPod != nil {
+ fmt.Fprintf(o.Out, "\n\rdeleting debug-agent pod\n\r")
o.deleteAgent(agentPod)
}
}).Run(fn)
}
if err := t.Safe(withCleanUp); err != nil {
- fmt.Fprintf(o.Out, "error execute remote, %v\n", err)
+ fmt.Fprintf(o.Out, "an error occured executing remote command(s), %v\r\n", err)
return err
}
o.wait.Wait()
@@ -828,7 +887,7 @@ func (o *DebugOptions) setupTTY() term.TTY {
t.Raw = true
if !t.IsTerminalIn() {
if o.ErrOut != nil {
- fmt.Fprintln(o.ErrOut, "Unable to use a TTY - input is not a terminal or the right kind of file")
+ fmt.Fprintln(o.ErrOut, "Unable to use a TTY - input is not a terminal or the right kind of file\r\n")
}
return t
}
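Many message strings in this diff gain explicit `\r\n` / `\n\r` endings because setupTTY() above puts the local terminal into raw mode (`t.Raw = true`), where a bare `\n` moves the cursor down without returning it to column 0. A minimal sketch of the effect, using `golang.org/x/term` for the raw-mode switch (the project itself uses Docker's term package):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// Switch the terminal into raw mode, roughly what t.Raw = true leads to.
	oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
	if err != nil {
		fmt.Println("stdin is not a terminal:", err)
		return
	}
	defer term.Restore(int(os.Stdin.Fd()), oldState)

	// In raw mode "\n" only moves down a line; "\r\n" also returns the cursor
	// to column 0, which is why the user-facing messages end with "\r\n".
	fmt.Print("first line\r\n")
	fmt.Print("second line\r\n")
}
```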
@@ -888,11 +947,13 @@ func copyAndStripPod(pod *corev1.Pod, targetContainer string, podLabels map[stri
func (o *DebugOptions) launchPod(pod *corev1.Pod) (*corev1.Pod, error) {
pod, err := o.CoreClient.Pods(pod.Namespace).Create(pod)
if err != nil {
+ o.Logger.Printf("error with launch pod")
return pod, err
}
watcher, err := o.CoreClient.Pods(pod.Namespace).Watch(v1.SingleObject(pod.ObjectMeta))
if err != nil {
+ o.Logger.Printf("error with watching pod")
return nil, err
}
// FIXME: hard code -> config
@@ -901,14 +962,14 @@ func (o *DebugOptions) launchPod(pod *corev1.Pod) (*corev1.Pod, error) {
fmt.Fprintf(o.Out, "Waiting for pod %s to run...\n", pod.Name)
event, err := watch.UntilWithoutRetry(ctx, watcher, conditions.PodRunning)
if err != nil {
- fmt.Fprintf(o.ErrOut, "Error occurred while waiting for pod to run: %v\n", err)
+ fmt.Fprintf(o.ErrOut, "Error occurred while waiting for pod to run: %v\r\n", err)
return nil, err
}
pod = event.Object.(*corev1.Pod)
return pod, nil
}
-// getAgentPod construnct agentPod from agent pod template
+// getAgentPod constructs the debug-agent pod template
func (o *DebugOptions) getAgentPod() *corev1.Pod {
prop := corev1.MountPropagationBidirectional
directoryCreate := corev1.HostPathDirectoryOrCreate
@@ -961,11 +1022,15 @@ func (o *DebugOptions) getAgentPod() *corev1.Pod {
Name: "cgroup",
MountPath: "/sys/fs/cgroup",
},
- // containerd client will need to access /var/data, /run/containerd and /run/runc
+ // containerd client needs to access /var/data, /run/containerd, /var/lib/containerd and /run/runc
{
Name: "vardata",
MountPath: "/var/data",
},
+ {
+ Name: "varlibcontainerd",
+ MountPath: "/var/lib/containerd",
+ },
{
Name: "runcontainerd",
MountPath: "/run/containerd",
@@ -1031,6 +1096,14 @@ func (o *DebugOptions) getAgentPod() *corev1.Pod {
},
},
},
+ {
+ Name: "varlibcontainerd",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/var/lib/containerd",
+ },
+ },
+ },
{
Name: "runrunc",
VolumeSource: corev1.VolumeSource{
@@ -1049,7 +1122,7 @@ func (o *DebugOptions) getAgentPod() *corev1.Pod {
func (o *DebugOptions) runPortForward(pod *corev1.Pod) error {
if pod.Status.Phase != corev1.PodRunning {
- return fmt.Errorf("unable to forward port because pod is not running. Current status=%v", pod.Status.Phase)
+ return fmt.Errorf("unable to forward port because pod is not running. Current status=%v\r\n", pod.Status.Phase)
}
o.wait.Add(1)
go func() {
@@ -1084,11 +1157,13 @@ type defaultPortForwarder struct {
func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts *DebugOptions) error {
transport, upgrader, err := spdy.RoundTripperFor(opts.Config)
if err != nil {
+ opts.Logger.Printf("error with setting up spdy forwarder")
return err
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, method, url)
fw, err := portforward.New(dialer, opts.Ports, opts.StopChannel, opts.ReadyChannel, f.Out, f.ErrOut)
if err != nil {
+ opts.Logger.Printf("error with NewDialer")
return err
}
return fw.ForwardPorts()
@@ -1111,7 +1186,7 @@ func (o *DebugOptions) auth(pod *corev1.Pod) error {
}
response, err := sarClient.SelfSubjectAccessReviews().Create(sar)
if err != nil {
- fmt.Fprintf(o.ErrOut, "Failed to create SelfSubjectAccessReview: %v \n", err)
+ fmt.Fprintf(o.ErrOut, "Failed to create SelfSubjectAccessReview: %v \r\n", err)
return err
}
if !response.Status.Allowed {
@@ -1129,17 +1204,17 @@ func (o *DebugOptions) auth(pod *corev1.Pod) error {
// delete the agent pod
func (o *DebugOptions) deleteAgent(agentPod *corev1.Pod) {
- // only with agentless flag we can delete the agent pod
- if !o.AgentLess {
+ // only if createDebugAgentPod=true should we manage the debug-agent pod
+ if !o.CreateDebugAgentPod {
return
}
err := o.CoreClient.Pods(agentPod.Namespace).Delete(agentPod.Name, v1.NewDeleteOptions(0))
if err != nil {
- fmt.Fprintf(o.ErrOut, "failed to delete agent pod[Name:%s, Namespace: %s], consider manual deletion.\nerror msg: %v", agentPod.Name, agentPod.Namespace, err)
+ fmt.Fprintf(o.ErrOut, "failed to delete agent pod[Name:%s, Namespace: %s], consider manual deletion.\r\nerror msg: %v", agentPod.Name, agentPod.Namespace, err)
}
}
-// build the agent pod Resource Requirements
+// build the debug-agent pod Resource Requirements
func (o *DebugOptions) buildAgentResourceRequirements() corev1.ResourceRequirements {
return getResourceRequirements(getResourceList(o.AgentPodResource.CpuRequests, o.AgentPodResource.MemoryRequests), getResourceList(o.AgentPodResource.CpuLimits, o.AgentPodResource.MemoryLimits))
}
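buildAgentResourceRequirements above turns the `--agent-pod-cpu-requests` / `--agent-pod-memory-limits` style strings into a `corev1.ResourceRequirements`. A minimal sketch of that conversion; the helper name here is illustrative and not the project's own getResourceList/getResourceRequirements:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// toResourceList parses non-empty quantity strings such as "250m" or "200Mi".
func toResourceList(cpu, memory string) corev1.ResourceList {
	rl := corev1.ResourceList{}
	if cpu != "" {
		rl[corev1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		rl[corev1.ResourceMemory] = resource.MustParse(memory)
	}
	return rl
}

func main() {
	// Values mirror the example flags: --agent-pod-cpu-requests=250m and friends.
	reqs := corev1.ResourceRequirements{
		Requests: toResourceList("250m", "200Mi"),
		Limits:   toResourceList("500m", "500Mi"),
	}
	fmt.Printf("%+v\n", reqs)
}
```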
diff --git a/pkg/plugin/config.go b/pkg/kubectl-debug/config.go
similarity index 85%
rename from pkg/plugin/config.go
rename to pkg/kubectl-debug/config.go
index 6cfae84..ab47bc0 100644
--- a/pkg/plugin/config.go
+++ b/pkg/kubectl-debug/config.go
@@ -1,9 +1,8 @@
-package plugin
+package kubectldebug
import (
- "io/ioutil"
-
"gopkg.in/yaml.v2"
+ "io/ioutil"
)
type Config struct {
@@ -17,7 +16,7 @@ type Config struct {
DebugAgentNamespace string `yaml:"debugAgentNamespace,omitempty"`
Command []string `yaml:"command,omitempty"`
PortForward bool `yaml:"portForward,omitempty"`
- Agentless bool `yaml:"agentless,omitempty"`
+ CreateDebugAgentPod bool `yaml:"createDebugAgentPod,omitempty"`
AgentPodNamePrefix string `yaml:"agentPodNamePrefix,omitempty"`
AgentPodNamespace string `yaml:"agentPodNamespace,omitempty"`
AgentImage string `yaml:"agentImage,omitempty"`
@@ -29,23 +28,14 @@ type Config struct {
AgentPodMemoryLimits string `yaml:"agentMemoryLimits,omitempty"`
IsLxcfsEnabled bool `yaml:"isLxcfsEnabled,omitempty"`
Verbosity int `yaml:"verbosity,omitempty"`
- // deprecated
- AgentPortOld int `yaml:"agent_port,omitempty"`
}
func Load(s string) (*Config, error) {
cfg := &Config{}
- cfg.Agentless = true
- cfg.PortForward = true
- cfg.IsLxcfsEnabled = true
err := yaml.Unmarshal([]byte(s), cfg)
if err != nil {
return nil, err
}
- // be compatible with old configuration key
- if cfg.AgentPort == 0 {
- cfg.AgentPort = cfg.AgentPortOld
- }
return cfg, nil
}
diff --git a/pkg/util/resizeevents.go b/pkg/util/resizeevents.go
index e3476f9..e361b1a 100644
--- a/pkg/util/resizeevents.go
+++ b/pkg/util/resizeevents.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
/*
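The added `//go:build` line is the constraint syntax introduced in Go 1.17; the legacy `// +build` line is kept so older toolchains still honour the constraint, and the two must state the same condition. A small self-contained illustration of the pattern (package main here is just for the sketch):

```go
//go:build !windows
// +build !windows

// Both lines express the same constraint: build this file on every platform
// except Windows. gofmt keeps the two forms in sync on Go 1.17+, while
// pre-1.17 toolchains only understand the "// +build" form.
package main

import "fmt"

func main() {
	fmt.Println("compiled on a non-windows platform")
}
```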
diff --git a/scripts/agent_daemonset.yml b/scripts/agent_daemonset.yml
deleted file mode 100755
index ff63444..0000000
--- a/scripts/agent_daemonset.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- labels:
- app: debug-agent
- name: debug-agent
-spec:
- selector:
- matchLabels:
- app: debug-agent
- template:
- metadata:
- labels:
- app: debug-agent
- spec:
- hostPID: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- containers:
- - name: debug-agent
- image: aylei/debug-agent:latest
- imagePullPolicy: Always
- securityContext:
- privileged: true
- livenessProbe:
- failureThreshold: 3
- httpGet:
- path: /healthz
- port: 10027
- scheme: HTTP
- initialDelaySeconds: 10
- periodSeconds: 10
- successThreshold: 1
- timeoutSeconds: 1
- ports:
- - containerPort: 10027
- hostPort: 10027
- name: http
- protocol: TCP
- volumeMounts:
- - name: cgroup
- mountPath: /sys/fs/cgroup
- - name: lxcfs
- mountPath: /var/lib/lxc
- mountPropagation: Bidirectional
- - name: docker
- mountPath: "/var/run/docker.sock"
- - name: runcontainerd
- mountPath: "/run/containerd"
- - name: runrunc
- mountPath: "/run/runc"
- - name: vardata
- mountPath: "/var/data"
- # hostNetwork: true
- volumes:
- - name: cgroup
- hostPath:
- path: /sys/fs/cgroup
- - name: lxcfs
- hostPath:
- path: /var/lib/lxc
- type: DirectoryOrCreate
- - name: docker
- hostPath:
- path: /var/run/docker.sock
- # containerd client will need to access /var/data, /run/containerd and /run/runc
- - name: vardata
- hostPath:
- path: /var/data
- - name: runcontainerd
- hostPath:
- path: /run/containerd
- - name: runrunc
- hostPath:
- path: /run/runc
- updateStrategy:
- rollingUpdate:
- maxUnavailable: 5
- type: RollingUpdate
diff --git a/scripts/docker_push.sh b/scripts/docker_push.sh
index 165011f..a81901f 100644
--- a/scripts/docker_push.sh
+++ b/scripts/docker_push.sh
@@ -1,3 +1,3 @@
#!/bin/bash
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
-docker push aylei/debug-agent:latest
\ No newline at end of file
+docker push jamesgrantmediakind/debug-agent:latest
\ No newline at end of file
diff --git a/version.sh b/version.sh
index 5b4e628..452ae41 100755
--- a/version.sh
+++ b/version.sh
@@ -43,4 +43,4 @@ if [[ -n ${GIT_COMMIT-} ]] || GIT_COMMIT=$(git rev-parse "HEAD^{commit}" 2>/dev/
fi
fi
-echo "-X 'github.com/aylei/kubectl-debug/version.gitVersion=${GIT_VERSION}'"
+echo "-X 'github.com/jamestgrant/kubectl-debug/version.gitVersion=${GIT_VERSION}'"
diff --git a/version/version.go b/version/version.go
index 9273f03..b6a4d19 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,7 +1,7 @@
package version
var (
- gitVersion = "v0.0.0-master+$Format:%h$"
+ gitVersion = "v1.0.0-master+$Format:%h$"
)
func Version() string {