diff --git a/.github/workflows/pr-packaging.yml b/.github/workflows/pr-packaging.yml
index 9571f988..5a31d841 100644
--- a/.github/workflows/pr-packaging.yml
+++ b/.github/workflows/pr-packaging.yml
@@ -32,7 +32,7 @@ jobs:
run: |
set -euo pipefail
ct lint --chart-dirs deploy/charts --target-branch ${{ github.base_ref }} \
- --validate-maintainers=false --check-version-increment=false
+ --validate-maintainers=false --check-version-increment=false
# FIXME
#- name: Create KinD cluster
diff --git a/.github/workflows/pr-security.yml b/.github/workflows/pr-security.yml
index e79d7794..8714c498 100644
--- a/.github/workflows/pr-security.yml
+++ b/.github/workflows/pr-security.yml
@@ -31,7 +31,7 @@ jobs:
uses: actions/checkout@v4
- name: Run Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@0.24.0
+ uses: aquasecurity/trivy-action@0.25.0
with:
scan-type: 'fs'
ignore-unfixed: true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 3068e863..03ab59d0 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -110,6 +110,7 @@ jobs:
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
+ --chart-repos joxit=https://helm.joxit.dev \
--validate-maintainers=false --check-version-increment=false
build:
@@ -219,6 +220,7 @@ jobs:
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
+ --chart-repos joxit=https://helm.joxit.dev \
--validate-maintainers=false --check-version-increment=false
- name: Run helm (install)
@@ -351,6 +353,7 @@ jobs:
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
+ --chart-repos joxit=https://helm.joxit.dev \
--validate-maintainers=false --check-version-increment=false
- name: Run helm (install latest release)
@@ -562,6 +565,7 @@ jobs:
- name: Helm repository deps
run: |
helm repo add bitnami https://charts.bitnami.com/bitnami
+ helm repo add joxit https://helm.joxit.dev
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -814,6 +818,7 @@ jobs:
header('generate chart readme')
+ run('make', '-C', '../../', 'helm-docs')
run(os.environ['HELM_DOCS_PATH'], '--dry-run')
run(os.environ['HELM_DOCS_PATH'])
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 0315e64e..8f12718d 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -28,7 +28,7 @@ jobs:
uses: webiny/action-conventional-commits@v1.3.0
- name: Run Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@0.24.0
+ uses: aquasecurity/trivy-action@0.25.0
with:
scan-type: 'fs'
ignore-unfixed: true
@@ -129,6 +129,7 @@ jobs:
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
+ --chart-repos joxit=https://helm.joxit.dev \
--validate-maintainers=false --check-version-increment=false
# Need wait for the next release with flash --skip-clean-up
@@ -156,7 +157,7 @@ jobs:
run: |
set -euo pipefail
kubectl create deploy nginx --image=nginx:stable-alpine --replicas=2
- kubectl rollout status deploy nginx
+ kubectl rollout status deploy nginx
kubectl wait deployment nginx --for condition=Available=True --timeout=30s
echo "kubectl get cachedimage"
kubectl get cachedimages
@@ -198,7 +199,7 @@ jobs:
set -euo pipefail
## Check for kuik's components metrics
for component in proxy controllers
- do
+ do
echo "Testing $component metrics endpoint"
for ip in $(kubectl get po -l "app.kubernetes.io/component=$component" -n kuik-system -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}')
do
@@ -269,6 +270,7 @@ jobs:
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
+ --chart-repos joxit=https://helm.joxit.dev \
--validate-maintainers=false --check-version-increment=false
# Need wait for the next release with flash --skip-clean-up
@@ -306,7 +308,7 @@ jobs:
run: |
set -euo pipefail
kubectl create deploy nginx --image=nginx:stable-alpine --replicas=2
- kubectl rollout status deploy nginx
+ kubectl rollout status deploy nginx
kubectl wait deployment nginx --for condition=Available=True --timeout=30s
echo "kubectl get cachedimage"
kubectl get cachedimages
@@ -348,7 +350,7 @@ jobs:
set -euo pipefail
## Check for kuik's components metrics
for component in proxy controllers
- do
+ do
echo "Testing $component metrics endpoint"
for ip in $(kubectl get po -l "app.kubernetes.io/component=$component" -n kuik-system -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}')
do
@@ -374,4 +376,3 @@ jobs:
fi
done
done
-
diff --git a/.gitignore b/.gitignore
index c17fc016..011c4516 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,6 @@ zz_generated.*
# helm dependencies
helm/**/charts/*.tgz
+
+# generated helm README template
+helm/**/README.md.gotmpl
diff --git a/README.md b/README.md
index b7c8e265..ff5ea734 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ It saves the container images used by your pods in its own local registry so tha
To follow Helm3 best pratices, we moved `cachedimage` and `repository` custom resources definition from the helm templates directory to the dedicated `crds` directory.
This will cause the `cachedimage` CRD to be deleted during the 1.7.0 upgrade.
-We advice you to uninstall your helm release, clean the remaining custom resources by removing their finalizer, then reinstall kuik in 1.7.0
+We advise you to uninstall your helm release, clean the remaining custom resources by removing their finalizer, and then reinstall kuik in 1.7.0
You may also recreate the custom resource definition right after the upgrade to 1.7.0 using
```
@@ -25,7 +25,6 @@ kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/h
kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/helm/kube-image-keeper/crds/repository-crd.yaml
```
-
## Why and when is it useful?
At [Enix](https://enix.io/), we manage production Kubernetes clusters both for our internal use and for various customers; sometimes on premises, sometimes in various clouds, public or private. We regularly run into image availability issues, for instance:
@@ -94,7 +93,7 @@ web-8667899c97-89j2h localhost:7439/nginx
web-8667899c97-fl54b localhost:7439/nginx
```
-The kuik controllers keep track of how many pods use a given image. When an image isn't used anymore, it is flagged for deletion, and removed one month later. This expiration delay can be configured. You can see kuik's view of your images by looking at the `CachedImages` custom resource:
+The kuik controllers keep track of how many pods use a given image. When an image isn't used anymore, it is flagged for deletion and removed one month later. This expiration delay can be configured. You can see kuik's view of your images by looking at the `CachedImages` custom resource:
```bash
$ kubectl get cachedimages
@@ -166,6 +165,17 @@ kubectl create namespace kuik-system
kubectl apply -f /tmp/kuik.yaml --namespace kuik-system
```
+## Uninstall kuik (whyyyy? 😢)
+
+We are very proud of kube-image-keeper and we believe that it is an awesome project that should be used as often as possible. However, we understand that it may not fit your needs, that it may contain a bug that occurs only in some very peculiar circumstances, or that you may not be sure about how and why to use it. In the first two cases, please [open an issue](https://github.com/enix/kube-image-keeper/issues/new): we will be very happy to address your issue or implement a new feature if we think it can make kuik better! If you're not sure how and why to use it, and assuming that you've already read the corresponding section of the readme, you can contact us at [contact@enix.fr](mailto:contact@enix.fr). If none of those solutions made you happy, we're sad to let you go, but here is the uninstall procedure:
+
+- Disable rewriting of the pods by deleting the kuik mutating webhook.
+- Restart pods using cached images, or manually rewrite them, in order to stop using images from the kuik cache.
+- Delete kuik custom resources (`CachedImages` and `Repositories`).
+- Uninstall kuik helm chart.
+
+It is very important to stop using images from kuik before uninstalling. Indeed, if some pods are configured with `imagePullPolicy: Always` and the `.controllers.webhook.ignorePullPolicyAlways` value of the helm chart is set to `false`, then, if a container restarts (for example in an OOM scenario), the pod will not be able to pull its image anymore and will go into the `ImagePullBackOff` error state until someone manually fixes its image.
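+
+A minimal sketch of that procedure with kubectl and helm (resource names, release name and namespace are assumptions that depend on your installation):
+
+```bash
+# 1. Disable pod rewriting by deleting the kuik mutating webhook
+#    (check the actual name with `kubectl get mutatingwebhookconfigurations`)
+kubectl delete mutatingwebhookconfiguration kube-image-keeper
+# 2. Restart workloads so they stop using images from the kuik cache
+#    (repeat for each namespace and workload kind as needed)
+kubectl rollout restart deployment --namespace default
+# 3. Delete kuik custom resources
+kubectl delete cachedimages --all
+kubectl delete repositories.kuik.enix.io --all
+# 4. Uninstall the helm release
+helm uninstall kube-image-keeper --namespace kuik-system
+```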
+
## Configuration and customization
If you want to change e.g. the expiration delay, the port number used by the proxy, enable persistence (with a PVC) for the registry cache... You can do that with standard Helm values.
@@ -242,7 +252,7 @@ No manual action is required when migrating an amd64-only cluster from v1.3.0 to
### Corporate proxy
-To configure kuik to work behind a corporate proxy, you can set the well known `http_proxy` and `https_proxy` environment variables (upper and lowercase variant both works) through helm values `proxy.env` and `controllers.env` like shown below:
+To configure kuik to work behind a corporate proxy, you can set the well-known `http_proxy` and `https_proxy` environment variables (upper and lowercase variants both work) through the helm values `proxy.env` and `controllers.env`, as shown below:
```yaml
controllers:
@@ -263,7 +273,7 @@ Be careful that both the proxy and the controllers need to access the kubernetes
### Insecure registries & self-signed certificates
-In some cases, you may want to use images from self-hosted registries that are insecure (without TLS or with an invalid certificate for instance) or using a self-signed certificate. By default, kuik will not allow to cache images from those registries for security reasons, even though you configured your container runtime (e.g. Docker, containerd) to do so. However you can choose to trust a list of insecure registries to pull from using the helm value `insecureRegistries`. If you use a self-signed certificate you can store the root certificate authority in a secret and reference it with the helm value `rootCertificateAuthorities`. Here is an example of the use of those two values:
+In some cases, you may want to use images from self-hosted registries that are insecure (without TLS or with an invalid certificate, for instance) or that use a self-signed certificate. By default, kuik will refuse to cache images from those registries for security reasons, even if you configured your container runtime (e.g. Docker, containerd) to trust them. However, you can choose to trust a list of insecure registries to pull from using the helm value `insecureRegistries`. If you use a self-signed certificate, you can store the root certificate authority in a secret and reference it with the helm value `rootCertificateAuthorities`. Here is an example using those two values:
```yaml
insecureRegistries:
@@ -280,11 +290,11 @@ You can of course use as many insecure registries or root certificate authoritie
### Registry UI
-For debugging reasons, it may be useful to be able to access the registry through an UI. This can be achieved by enabling the registry UI with the value `registryUI.enabled=true`. The UI will not be publicly available through an ingress, you will need to open a port-forward from port `80`. You can set a custom username and password with values `registryUI.auth.username` (default is `admin`) and `registryUI.auth.password` (empty by default).
+For debugging purposes, it may be useful to access the registry through a UI. This can be achieved by enabling the registry UI with the value `docker-registry-ui.enabled=true`. The UI will not be publicly exposed through an ingress; you will need to open a port-forward to port `80`. For more information about the UI and how to configure it, please see https://artifacthub.io/packages/helm/joxit/docker-registry-ui.
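+
+As a sketch, assuming the chart is installed as release `kube-image-keeper` in the `kuik-system` namespace (the UI service name follows the joxit subchart's naming and is an assumption, check it with `kubectl get svc`):
+
+```bash
+# Enable the UI subchart
+helm upgrade --install kube-image-keeper enix/kube-image-keeper \
+  --namespace kuik-system --set docker-registry-ui.enabled=true
+# Forward a local port to the UI service, then browse http://localhost:8080
+kubectl port-forward --namespace kuik-system svc/kube-image-keeper-docker-registry-ui 8080:80
+```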
## Garbage collection and limitations
-When a CachedImage expires because it is not used anymore by the cluster, the image is deleted from the registry. However, since kuik uses [Docker's registry](https://docs.docker.com/registry/), this only deletes **reference files** like tags. It doesn't delete blobs, which account for most of the used disk space. [Garbage collection](https://docs.docker.com/registry/garbage-collection/) allows removing those blobs and free up space. The garbage collecting job can be configured to run thanks to the `registry.garbageCollectionSchedule` configuration in a cron-like format. It is disabled by default, because running garbage collection without persistence would just wipe out the cache registry.
+When a CachedImage expires because it is not used anymore by the cluster, the image is deleted from the registry. However, since kuik uses [Docker's registry](https://docs.docker.com/registry/), this only deletes **reference files** like tags. It doesn't delete blobs, which account for most of the used disk space. [Garbage collection](https://docs.docker.com/registry/garbage-collection/) allows removing those blobs, freeing up space. The garbage collection job can be scheduled thanks to the `registry.garbageCollection.schedule` configuration, in a cron-like format. It is disabled by default, because running garbage collection without persistence would just wipe out the cache registry.
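+
+For instance, a sketch of enabling a nightly garbage collection run along with persistence (the persistence value name is an assumption, check it against the chart's values):
+
+```bash
+helm upgrade --install kube-image-keeper enix/kube-image-keeper \
+  --namespace kuik-system \
+  --set registry.persistence.enabled=true \
+  --set 'registry.garbageCollection.schedule=0 2 * * *'
+```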
Garbage collection can only run when the registry is read-only (or stopped), otherwise image corruption may happen. (This is described in the [registry documentation](https://docs.docker.com/registry/garbage-collection/).) Before running garbage collection, kuik stops the registry. During that time, all image pulls are automatically proxified to the source registry so that garbage collection is mostly transparent for cluster nodes.
@@ -320,13 +330,13 @@ Imagine the following scenario:
- pods A and B use a private image, `example.com/myimage:latest`
- pod A correctly references `imagePullSecrets, but pod B does not
-On a normal Kubernetes cluster (without kuik), if pods A and B are on the same node, then pod B will run correctly, even though it doesn't reference `imagePullSecrets`, because the image gets pulled when starting pod A, and once it's available on the node, any other pod can use it. However, if pods A and B are on different nodes, pod B won't start, because it won't be able to pull the private image. Some folks may use that to segregate sensitive image to specific nodes using a combination of taints, tolerations, or node selectors.
+On a normal Kubernetes cluster (without kuik), if pods A and B are on the same node, then pod B will run correctly, even though it doesn't reference `imagePullSecrets`, because the image gets pulled when starting pod A, and once it's available on the node, any other pod can use it. However, if pods A and B are on different nodes, pod B won't start, because it won't be able to pull the private image. Some folks may use that to segregate sensitive images to specific nodes using a combination of taints, tolerations, or node selectors.
-Howevever, when using kuik, once an image has been pulled and stored in kuik's registry, it becomes available for any node on the cluster. This means that using taints, tolerations, etc. to limit sensitive images to specific nodes won't work anymore.
+However, when using kuik, once an image has been pulled and stored in kuik's registry, it becomes available for any node on the cluster. This means that using taints, tolerations, etc. to limit sensitive images to specific nodes won't work anymore.
### Cluster autoscaling delays
-With kuik, all image pulls (except in the namespaces excluded from kuik) go through kuik's registry proxy, which runs on each node thanks to a DaemonSet. When a node gets added to a Kubernetes cluster (for instance, by the cluster autoscaler), a kuik registry proxy Pod gets scheduled on that node, but it will take a brief moment to start. During that time, all other image pulls will fail. Thanks to Kubernetes automatic retry mechanisms, they will eventually succeed, but on new nodes, you may see Pods in `ErrImagePull` or `ImagePullBackOff` status for a minute before everything works correctly. If you are using cluster autoscaling and try to achieve very fast scale-up times, this is something that you might want to keep in mind.
+With kuik, all image pulls (except in the namespaces excluded from kuik) go through kuik's registry proxy, which runs on each node thanks to a DaemonSet. When a node gets added to a Kubernetes cluster (for instance, by the cluster autoscaler), a kuik registry proxy Pod gets scheduled on that node, but it will take a brief moment to start. During that time, all other image pulls will fail. Thanks to Kubernetes automatic retry mechanisms, they will eventually succeed, but on new nodes, you may see Pods in `ErrImagePull` or `ImagePullBackOff` status for a minute before everything works correctly. If you are using cluster autoscaling and trying to achieve very fast scale-up times, this is something that you might want to keep in mind.
### Garbage collection issue
@@ -334,4 +344,4 @@ We use Docker Distribution in Kuik, along with the integrated garbage collection
### Images with digest
-As of today, there is no way to manage container images based on a digest. The rational behind this limitation is that a digest is an image manifest hash, and the manifest contains the registry URL associated with the image. Thus, pushing the image to another registry (our cache registry) changes its digest and as a consequence, it is not anymore referenced by its original digest. Digest validation prevent from pushing a manifest with an invalid digest. Therefore, we currently ignore all images based on a digest, those images will not be rewritten nor put in cache to prevent malfunctionning of kuik.
+As of today, there is no way to manage container images based on a digest. The rationale behind this limitation is that a digest is a hash of the image manifest, and the manifest contains the registry URL associated with the image. Thus, pushing the image to another registry (our cache registry) changes its digest and, as a consequence, it is no longer referenced by its original digest. Digest validation prevents pushing a manifest with an invalid digest. Therefore, we currently ignore all images referenced by digest; those images will not be rewritten nor put into the cache, to prevent kuik from malfunctioning.
\ No newline at end of file
diff --git a/api/core/v1/pod_webhook.go b/api/core/v1/pod_webhook.go
index 9a3ae133..4a861f6f 100644
--- a/api/core/v1/pod_webhook.go
+++ b/api/core/v1/pod_webhook.go
@@ -181,12 +181,13 @@ func (p *PodInitializer) Start(ctx context.Context) error {
}
for _, pod := range pods.Items {
- setupLog.Info("patching " + pod.Namespace + "/" + pod.Name)
+ setupLog.Info("patching", "pod", pod.Namespace+"/"+pod.Name)
err := p.Client.Patch(ctx, &pod, client.RawPatch(types.JSONPatchType, []byte("[]")))
if err != nil && !apierrors.IsNotFound(err) {
- return err
+ setupLog.Info("patching failed", "pod", pod.Namespace+"/"+pod.Name, "err", err)
}
}
+ setupLog.Info("completed")
return nil
}
diff --git a/api/kuik/v1alpha1/cachedimage_types.go b/api/kuik/v1alpha1/cachedimage_types.go
index 9ddf41ef..76ae9821 100644
--- a/api/kuik/v1alpha1/cachedimage_types.go
+++ b/api/kuik/v1alpha1/cachedimage_types.go
@@ -8,19 +8,26 @@ var RepositoryLabelName = "kuik.enix.io/repository"
// CachedImageSpec defines the desired state of CachedImage
type CachedImageSpec struct {
+ // SourceImage is the path of the image to cache
SourceImage string `json:"sourceImage"`
+ // ExpiresAt is the time when the image should be deleted from cache if not in use (unset when the image is used again)
// +optional
ExpiresAt *metav1.Time `json:"expiresAt,omitempty"`
+ // Retain defines if the image should be retained in cache even when not used (will prevent ExpiresAt from being populated)
// +optional
Retain bool `json:"retain,omitempty"`
}
type PodReference struct {
+ // NamespacedName is the namespaced name of a pod (namespace/name)
NamespacedName string `json:"namespacedName,omitempty"`
}
type UsedBy struct {
+ // Pods is a list of references to pods using this CachedImage
Pods []PodReference `json:"pods,omitempty" patchStrategy:"merge" patchMergeKey:"namespacedName"`
+ // Count is the number of pods using this image
+ //
// jsonpath function .length() is not implemented, so the count field is required to display pods count in additionalPrinterColumns
// see https://github.com/kubernetes-sigs/controller-tools/issues/447
Count int `json:"count,omitempty"`
@@ -28,18 +35,28 @@ type UsedBy struct {
// CachedImageStatus defines the observed state of CachedImage
type CachedImageStatus struct {
- IsCached bool `json:"isCached,omitempty"`
- Phase string `json:"phase,omitempty"`
- UsedBy UsedBy `json:"usedBy,omitempty"`
+ // IsCached indicates whether the image is already cached or not
+ IsCached bool `json:"isCached,omitempty"`
+ // Phase is the current phase of the image
+ Phase string `json:"phase,omitempty"`
+ // UsedBy is the list of pods using this image
+ UsedBy UsedBy `json:"usedBy,omitempty"`
- Digest string `json:"digest,omitempty"`
- UpstreamDigest string `json:"upstreamDigest,omitempty"`
- UpToDate bool `json:"upToDate,omitempty"`
- LastSync metav1.Time `json:"lastSync,omitempty"`
+ // Digest is the digest of the cached image
+ Digest string `json:"digest,omitempty"`
+ // UpstreamDigest is the upstream image digest
+ UpstreamDigest string `json:"upstreamDigest,omitempty"`
+ // UpToDate indicates whether the cached image is up to date with the upstream one or not
+ UpToDate bool `json:"upToDate,omitempty"`
+ // LastSync is the last time the remote image digest has been checked
+ LastSync metav1.Time `json:"lastSync,omitempty"`
+ // LastSuccessfulPull is the last time the upstream image has been successfully cached
LastSuccessfulPull metav1.Time `json:"lastSuccessfulPull,omitempty"`
- AvailableUpstream bool `json:"availableUpstream,omitempty"`
- LastSeenUpstream metav1.Time `json:"lastSeenUpstream,omitempty"`
+ // AvailableUpstream indicates whether the referenced image is available upstream or not
+ AvailableUpstream bool `json:"availableUpstream,omitempty"`
+ // LastSeenUpstream is the last time the referenced image has been seen upstream
+ LastSeenUpstream metav1.Time `json:"lastSeenUpstream,omitempty"`
}
//+kubebuilder:object:root=true
diff --git a/api/kuik/v1alpha1/repository_types.go b/api/kuik/v1alpha1/repository_types.go
index 7b4528a1..247af4e9 100644
--- a/api/kuik/v1alpha1/repository_types.go
+++ b/api/kuik/v1alpha1/repository_types.go
@@ -6,17 +6,25 @@ import (
// RepositorySpec defines the desired state of Repository
type RepositorySpec struct {
- Name string `json:"name"`
- PullSecretNames []string `json:"pullSecretNames,omitempty"`
- PullSecretsNamespace string `json:"pullSecretsNamespace,omitempty"`
- UpdateInterval *metav1.Duration `json:"updateInterval,omitempty"`
- UpdateFilters []string `json:"updateFilters,omitempty"`
+ // Name is the path of the repository (for instance enix/kube-image-keeper)
+ Name string `json:"name"`
+ // PullSecretNames is the names of pull secrets to use to pull CachedImages of this Repository
+ PullSecretNames []string `json:"pullSecretNames,omitempty"`
+ // PullSecretsNamespace is the namespace where pull secrets can be found for CachedImages of this Repository
+ PullSecretsNamespace string `json:"pullSecretsNamespace,omitempty"`
+ // UpdateInterval is the interval in human readable format (1m, 1h, 1d...) at which matched CachedImages from this Repository are updated (see spec.UpdateFilters)
+ UpdateInterval *metav1.Duration `json:"updateInterval,omitempty"`
+ // UpdateFilters is a list of regexps, at least one of which needs to match the .spec.SourceImage of a CachedImage from this Repository for it to be updated at a regular interval
+ UpdateFilters []string `json:"updateFilters,omitempty"`
}
// RepositoryStatus defines the observed state of Repository
type RepositoryStatus struct {
- Images int `json:"images,omitempty"`
- Phase string `json:"phase,omitempty"`
+ // Images is the count of CachedImages that come from this repository
+ Images int `json:"images,omitempty"`
+ // Phase is the current phase of this repository
+ Phase string `json:"phase,omitempty"`
+ // LastUpdate is the last time images of this repository have been updated
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
//+listType=map
//+listMapKey=type
diff --git a/config/crd/bases/kuik.enix.io_cachedimages.yaml b/config/crd/bases/kuik.enix.io_cachedimages.yaml
index efbe1f37..fb7aa973 100644
--- a/config/crd/bases/kuik.enix.io_cachedimages.yaml
+++ b/config/crd/bases/kuik.enix.io_cachedimages.yaml
@@ -61,11 +61,16 @@ spec:
description: CachedImageSpec defines the desired state of CachedImage
properties:
expiresAt:
+ description: ExpiresAt is the time when the image should be deleted
+ from cache if not in use (unset when the image is used again)
format: date-time
type: string
retain:
+ description: Retain defines if the image should be retained in cache
+ even when not used (will prevent ExpiresAt from being populated)
type: boolean
sourceImage:
+ description: SourceImage is the path of the image to cache
type: string
required:
- sourceImage
@@ -74,37 +79,59 @@ spec:
description: CachedImageStatus defines the observed state of CachedImage
properties:
availableUpstream:
+ description: AvailableUpstream indicates whether the referenced
+ image is available upstream or not
type: boolean
digest:
+ description: Digest is the digest of the cached image
type: string
isCached:
+ description: IsCached indicates whether the image is already cached
+ or not
type: boolean
lastSeenUpstream:
+ description: LastSeenUpstream is the last time the referenced image
+ has been seen upstream
format: date-time
type: string
lastSuccessfulPull:
+ description: LastSuccessfulPull is the last time the upstream image
+ has been successfully cached
format: date-time
type: string
lastSync:
+ description: LastSync is the last time the remote image digest has
+ been checked
format: date-time
type: string
phase:
+ description: Phase is the current phase of the image
type: string
upToDate:
+ description: UpToDate indicates whether the cached image is up to
+ date with the upstream one or not
type: boolean
upstreamDigest:
+ description: UpstreamDigest is the upstream image digest
type: string
usedBy:
+ description: UsedBy is the list of pods using this image
properties:
count:
description: |-
+ Count is the number of pods using this image
+
+
jsonpath function .length() is not implemented, so the count field is required to display pods count in additionalPrinterColumns
see https://github.com/kubernetes-sigs/controller-tools/issues/447
type: integer
pods:
+ description: Pods is a list of references to pods using this CachedImage
items:
properties:
namespacedName:
+ description: NamespacedName is the namespaced name of a
+ pod (namespace/name)
type: string
type: object
type: array
diff --git a/config/crd/bases/kuik.enix.io_repositories.yaml b/config/crd/bases/kuik.enix.io_repositories.yaml
index 0a72227c..ef1d8da6 100644
--- a/config/crd/bases/kuik.enix.io_repositories.yaml
+++ b/config/crd/bases/kuik.enix.io_repositories.yaml
@@ -52,18 +52,29 @@ spec:
description: RepositorySpec defines the desired state of Repository
properties:
name:
+ description: Name is the path of the repository (for instance enix/kube-image-keeper)
type: string
pullSecretNames:
+ description: PullSecretNames is the names of pull secrets to use
+ to pull CachedImages of this Repository
items:
type: string
type: array
pullSecretsNamespace:
+ description: PullSecretsNamespace is the namespace where pull secrets
+ can be found for CachedImages of this Repository
type: string
updateFilters:
+ description: UpdateFilters is a list of regexps, at least one of
+ which needs to match the .spec.SourceImage of a CachedImage from
+ this Repository for it to be updated at a regular interval
items:
type: string
type: array
updateInterval:
+ description: UpdateInterval is the interval in human readable format
+ (1m, 1h, 1d...) at which matched CachedImages from this Repository
+ are updated (see spec.UpdateFilters)
type: string
required:
- name
@@ -144,11 +155,16 @@ spec:
- type
x-kubernetes-list-type: map
images:
+ description: Images is the count of CachedImages that come from this
+ repository
type: integer
lastUpdate:
+ description: LastUpdate is the last time images of this repository
+ have been updated
format: date-time
type: string
phase:
+ description: Phase is the current phase of this repository
type: string
type: object
type: object
diff --git a/docs/mutable-tags.md b/docs/mutable-tags.md
new file mode 100644
index 00000000..726f597b
--- /dev/null
+++ b/docs/mutable-tags.md
@@ -0,0 +1,38 @@
+# How to handle mutable tags
+
+> If one deploys a statefulset for an image like postgres:15 (PostgreSQL database), kube-image-keeper will cache the postgres:15 image the moment a corresponding pod gets created. This exact image is stored inside the kuik registry.
+> Now if postgres:15 gets an update, which might be important for security reasons, and a developer tries to upgrade the pods, the cached version will be used and it won't be updated to the newer, security fixed version of postgres:15.
+> And that person has to watch the log outputs in depth to find out that there was no update.
+>
+> For mutable tags like :latest the situation can be even worse as an developer assumes imagePullPolicy: Always. But unfortunately the image never gets an update in the future while kube-image-keeper is actively caching that image. This behavior is clearly completely different from the expected default behavior of imagePullPolicy: Always.
+>
+> @BernhardGruen in [#156](https://github.com/enix/kube-image-keeper/issues/156)
+
+As described in the above issue, you may want to use mutable tags, but using kuik prevents you from getting updates for those images. We have implemented two features to tackle this issue.
+
+## Filter containers based on their pull policy
+
+Using the value `.Values.controllers.webhook.ignorePullPolicyAlways`, you can skip rewriting containers that use `imagePullPolicy: Always`, keeping them out of kuik and thus preserving the original behavior. This will also ignore images with the tag `:latest`.
+
+The caveat with this method is that the corresponding images are no longer cached, so you should enable it carefully.
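+
+A sketch of enabling this value through helm (release and chart names are assumptions):
+
+```bash
+helm upgrade --install kube-image-keeper enix/kube-image-keeper \
+  --namespace kuik-system \
+  --set controllers.webhook.ignorePullPolicyAlways=true
+```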
+
+## Periodic updates
+
+Another option is to periodically update images in the cache based on rules defined in the corresponding `Repository`, using `spec.updateInterval` and `spec.updateFilters` to update `CachedImages` at a regular interval. For instance, to update the `:latest` and `:15` tags every hour, you can configure your repository as follows:
+
+```yaml
+apiVersion: kuik.enix.io/v1alpha1
+kind: Repository
+metadata:
+ name: docker.io-library-postgres
+spec:
+ name: docker.io/library/postgres
+ updateInterval: 1h
+ updateFilters:
+ - 'latest'
+ - '15'
+```
+
+You can also use regexps to match images. For instance, if you want to match all major versions, you could use `:\d+$`, which will match `:14`, `:15`, `:16` (and so on) but not `:15.8`.
+
+It will then check every hour for updates on tags matching the `updateFilters` and pull a new version of the image if its digest has changed.
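+
+Assuming such a `Repository` exists, a sketch of how to observe the updates (exact columns may differ between versions):
+
+```bash
+# The Repository status exposes the last update time
+kubectl get repositories.kuik.enix.io docker.io-library-postgres -o yaml
+# CachedImages expose the cached and upstream digests in their status
+kubectl get cachedimages
+```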
diff --git a/go.mod b/go.mod
index 2bbe44f3..3dfab548 100644
--- a/go.mod
+++ b/go.mod
@@ -5,16 +5,16 @@ go 1.22
require (
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230519004202-7f2db5bd753e
github.com/distribution/reference v0.6.0
- github.com/docker/cli v27.1.1+incompatible
- github.com/docker/docker v27.1.1+incompatible
+ github.com/docker/cli v27.3.1+incompatible
+ github.com/docker/docker v27.3.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/gin-gonic/gin v1.10.0
github.com/go-logr/logr v1.4.2
- github.com/google/go-containerregistry v0.20.1
- github.com/onsi/ginkgo/v2 v2.19.1
- github.com/onsi/gomega v1.34.1
- github.com/prometheus/client_golang v1.19.1
- go.uber.org/automaxprocs v1.5.3
+ github.com/google/go-containerregistry v0.20.2
+ github.com/onsi/ginkgo/v2 v2.20.2
+ github.com/onsi/gomega v1.34.2
+ github.com/prometheus/client_golang v1.20.4
+ go.uber.org/automaxprocs v1.6.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
k8s.io/api v0.27.13
@@ -48,7 +48,7 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -79,13 +79,13 @@ require (
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
+ github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.16.5 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -99,9 +99,9 @@ require (
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.48.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
@@ -115,19 +115,18 @@ require (
go.opentelemetry.io/otel/trace v1.26.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.8.0 // indirect
- golang.org/x/crypto v0.25.0 // indirect
- golang.org/x/mod v0.19.0 // indirect
- golang.org/x/net v0.27.0 // indirect
- golang.org/x/oauth2 v0.16.0 // indirect
- golang.org/x/sync v0.7.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/term v0.22.0 // indirect
- golang.org/x/text v0.16.0 // indirect
+ golang.org/x/crypto v0.26.0 // indirect
+ golang.org/x/mod v0.20.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
+ golang.org/x/oauth2 v0.21.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/term v0.23.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.23.0 // indirect
+ golang.org/x/tools v0.24.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.34.1 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 5bfdda7e..b3055b39 100644
--- a/go.sum
+++ b/go.sum
@@ -62,8 +62,8 @@ github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
@@ -81,12 +81,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
-github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
-github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
@@ -145,7 +145,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -168,13 +167,13 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
-github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
@@ -193,8 +192,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
-github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -207,6 +206,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -228,10 +229,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0=
-github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA=
-github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
-github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
@@ -245,15 +246,15 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -303,8 +304,8 @@ go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2L
go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -317,8 +318,8 @@ golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -327,29 +328,28 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
+golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -361,15 +361,14 @@ golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -380,8 +379,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
-golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -390,8 +389,6 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
@@ -416,8 +413,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
diff --git a/helm/kube-image-keeper/Chart.lock b/helm/kube-image-keeper/Chart.lock
index 4a055a61..59eeee8b 100644
--- a/helm/kube-image-keeper/Chart.lock
+++ b/helm/kube-image-keeper/Chart.lock
@@ -2,5 +2,8 @@ dependencies:
- name: minio
repository: https://charts.bitnami.com/bitnami
version: 13.2.0
-digest: sha256:deb5af1d98c80ea52289c771f4cae41c7ef73fbb231c86f8eda553a9d4d53cc8
-generated: "2024-01-23T16:17:31.822508041+01:00"
+- name: docker-registry-ui
+ repository: https://helm.joxit.dev
+ version: 1.1.3
+digest: sha256:6388d25ea6877863eeb3e1ac5e85f4da8c5b1032e8eebedde87779d1f1737f55
+generated: "2024-08-07T14:48:17.51529339+02:00"
diff --git a/helm/kube-image-keeper/Chart.yaml b/helm/kube-image-keeper/Chart.yaml
index 202ddd4a..6d8f9fde 100644
--- a/helm/kube-image-keeper/Chart.yaml
+++ b/helm/kube-image-keeper/Chart.yaml
@@ -32,3 +32,7 @@ dependencies:
version: "13.2.0"
repository: "https://charts.bitnami.com/bitnami"
condition: minio.enabled
+ - name: docker-registry-ui
+ version: "1.1.3"
+ repository: "https://helm.joxit.dev"
+ condition: docker-registry-ui.enabled
diff --git a/helm/kube-image-keeper/README.md.gotmpl b/helm/kube-image-keeper/README.md.gotmpl
deleted file mode 100644
index 91c9598d..00000000
--- a/helm/kube-image-keeper/README.md.gotmpl
+++ /dev/null
@@ -1,362 +0,0 @@
-# kube-image-keeper (kuik)
-
-[](https://github.com/enix/kube-image-keeper/releases)
-[](https://goreportcard.com/report/github.com/enix/kube-image-keeper)
-[](https://opensource.org/licenses/MIT)
-[](https://enix.io)
-
-kube-image-keeper (a.k.a. *kuik*, which is pronounced /kwɪk/, like "quick") is a container image caching system for Kubernetes.
-It saves the container images used by your pods in its own local registry so that these images remain available if the original becomes unavailable.
-
-## Upgrading
-
-### From 1.6.0 to 1.7.0
-
-***ACTION REQUIRED***
-
-To follow Helm 3 best practices, we moved the `cachedimage` and `repository` custom resource definitions from the Helm templates directory to the dedicated `crds` directory.
-This will cause the `cachedimage` CRD to be deleted during the 1.7.0 upgrade.
-
-We advise you to uninstall your Helm release, clean up the remaining custom resources by removing their finalizers, then reinstall kuik in 1.7.0.
-
-You may also recreate the custom resource definitions right after the upgrade to 1.7.0 using:
-```
-kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/helm/kube-image-keeper/crds/cachedimage-crd.yaml
-kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/helm/kube-image-keeper/crds/repository-crd.yaml
-```
-
-
-## Why and when is it useful?
-
-At [Enix](https://enix.io/), we manage production Kubernetes clusters both for our internal use and for various customers; sometimes on premises, sometimes in various clouds, public or private. We regularly run into image availability issues, for instance:
-
-- the registry is unavailable or slow;
-- a critical image was deleted from the registry (by accident or because of a misconfigured retention policy);
-- the registry has pull quotas (or other rate-limiting mechanisms) and temporarily won't let us pull more images.
-
-(The last point is a well-known challenge when pulling lots of images from the Docker Hub, and becomes particularly painful when private Kubernetes nodes access the registry through a single NAT gateway!)
-
-We needed a solution that would:
-
-- work across a wide range of Kubernetes versions, container engines, and image registries,
-- preserve Kubernetes' out-of-the-box image caching behavior and [image pull policies](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy),
-- have fairly minimal requirements,
-- and be easy and quick to install.
-
-We investigated other options, and we didn't find any that would quite fit our requirements, so we wrote kuik instead.
-
-## Prerequisites
-
-- A Kubernetes cluster¹ (duh!)
-- Admin permissions²
-- cert-manager³
-- Helm⁴ >= 3.2.0
-- CNI plugin with [port-mapper⁵](https://www.cni.dev/plugins/current/meta/portmap/) enabled
-- In a production environment, we definitely recommend that you use persistent⁶ storage
-
-¹A local development cluster like minikube or KinD is fine.
-
-²In addition to its own pods, kuik needs to register a MutatingWebhookConfiguration.
-
-³kuik uses cert-manager to issue and configure its webhook certificate. You don't need to configure cert-manager in a particular way (you don't even need to create an Issuer or ClusterIssuer). It's alright to just `kubectl apply` the YAML as shown in the [cert-manager installation instructions](https://cert-manager.io/docs/installation/).
-
-⁴If you prefer to install with "plain" YAML manifests, we'll tell you how to generate these manifests.
-
-⁵Most CNI plugins these days enable port-mapper out of the box, so this shouldn't be an issue, but we're mentioning it just in case.
-
-⁶You can use kuik without persistence, but if the pod running the registry gets deleted, you will lose your cached images. They will be automatically pulled again when needed, though.
-
-## Supported Kubernetes versions
-
-kuik has been developed for, and tested with, Kubernetes 1.24 to 1.28; but the code doesn't use any deprecated (or new) feature or API, and should work with newer versions as well.
-
-## How it works
-
-When a pod is created, kuik's **mutating webhook** rewrites its images on the fly to point to the local caching registry, adding a `localhost:{port}/` prefix (the `port` is 7439 by default, and is configurable); for instance, `nginx` becomes `localhost:7439/nginx`. This means that you don't need to rewrite the registry URLs in the manifests or Helm charts that you use to deploy your workloads: kuik takes care of it.
-
-On `localhost:{port}`, there is an **image proxy** that serves images from kuik's **caching registry** (when the images have been cached) or directly from the original registry (when the images haven't been cached yet).
-
-One **controller** watches pods, and when it notices new images, it creates `CachedImage` custom resources for these images.
-
-Another **controller** watches these `CachedImage` custom resources, and copies images from source registries to kuik's caching registry accordingly. When images come from a private registry, the controller uses the `imagePullSecrets` from the `CachedImage` spec, which are set from the pod that produced the `CachedImage`.
-
-Here is what our images look like when using kuik:
-
-```bash
-$ kubectl get pods -o custom-columns=NAME:metadata.name,IMAGES:spec.containers[*].image
-NAME                   IMAGES
-debugger               localhost:7439/registrish.s3.amazonaws.com/alpine
-factori-0              localhost:7439/factoriotools/factorio:1.1
-nvidiactk-b5f7m        localhost:7439/nvcr.io/nvidia/k8s/container-toolkit:v1.12.0-ubuntu20.04
-sshd-8b8c6cfb6-l2tc9   localhost:7439/ghcr.io/jpetazzo/shpod
-web-8667899c97-2v88h   localhost:7439/nginx
-web-8667899c97-89j2h   localhost:7439/nginx
-web-8667899c97-fl54b   localhost:7439/nginx
-```
-
-The kuik controllers keep track of how many pods use a given image. When an image isn't used anymore, it is flagged for deletion, and removed one month later. This expiration delay can be configured. You can see kuik's view of your images by looking at the `CachedImages` custom resource:
-
-```bash
-$ kubectl get cachedimages
-NAME                                                       CACHED   EXPIRES AT             PODS COUNT   AGE
-docker.io-dockercoins-hasher-v0.1                          true     2023-03-07T10:50:14Z                36m
-docker.io-factoriotools-factorio-1.1                       true                            1            4m1s
-docker.io-jpetazzo-shpod-latest                            true     2023-03-07T10:53:57Z                9m18s
-docker.io-library-nginx-latest                             true                            3            36m
-ghcr.io-jpetazzo-shpod-latest                              true                            1            36m
-nvcr.io-nvidia-k8s-container-toolkit-v1.12.0-ubuntu20.04   true                            1            29m
-registrish.s3.amazonaws.com-alpine-latest                                                  1            35m
-```
-
-## Architecture and components
-
-In kuik's namespace, you will find:
-
-- a `Deployment` to run kuik's controllers,
-- a `DaemonSet` to run kuik's image proxy,
-- a `StatefulSet` to run kuik's image cache (a `Deployment` is used instead when this component runs in HA mode).
-
-The image cache will obviously require a bit of disk space to run (see [Garbage collection and limitations](#garbage-collection-and-limitations) below). Otherwise, kuik's components are fairly lightweight in terms of compute resources. This shows CPU and RAM usage with the default setup, featuring two controllers in HA mode:
-
-```bash
-$ kubectl top pods
-NAME                                             CPU(cores)   MEMORY(bytes)
-kube-image-keeper-0                              1m           86Mi
-kube-image-keeper-controllers-5b5cc9fcc6-bv6cp   1m           16Mi
-kube-image-keeper-controllers-5b5cc9fcc6-tjl7t   3m           24Mi
-kube-image-keeper-proxy-54lzk                    1m           19Mi
-```
-
-### Metrics
-
-Refer to the [dedicated documentation](https://github.com/enix/kube-image-keeper/blob/main/docs/metrics.md).
-
-## Installation
-
-1. Make sure that you have cert-manager installed. If not, check its [installation page](https://cert-manager.io/docs/installation/) (it's fine to use the `kubectl apply` one-liner, and no further configuration is required).
-1. Install kuik's Helm chart from our [charts](https://charts.enix.io) repository:
-
-```bash
-helm upgrade --install \
- --create-namespace --namespace kuik-system \
- kube-image-keeper kube-image-keeper \
- --repo https://charts.enix.io/
-```
-
-That's it!
-
-Our container images are available across multiple registries for reliability. You can find them on the [GitHub Container Registry](https://github.com/enix/kube-image-keeper/pkgs/container/kube-image-keeper), [Quay](https://quay.io/repository/enix/kube-image-keeper), and [DockerHub](https://hub.docker.com/r/enix/kube-image-keeper).
-
-CAUTION: If you use a storage backend that runs in the same cluster as kuik but in a different namespace, be sure to exclude the storage backend's pods from kuik. Failing to do so may create an interdependency, making it impossible to start kuik and its storage backend if either of them encounters an issue.
-
-{{ template "chart.valuesSection" . }}
-
-## Installation with plain YAML files
-
-You can use Helm to generate plain YAML files and then deploy these YAML files with `kubectl apply` or whatever you want:
-
-```bash
-helm template --namespace kuik-system \
- kube-image-keeper kube-image-keeper \
- --repo https://charts.enix.io/ \
- > /tmp/kuik.yaml
-kubectl create namespace kuik-system
-kubectl apply -f /tmp/kuik.yaml --namespace kuik-system
-```
-
-## Configuration and customization
-
-If you want to change e.g. the expiration delay, the port number used by the proxy, or enable persistence (with a PVC) for the registry cache, you can do that with standard Helm values.
-
-You can see the full list of parameters (along with their meaning and default values) in the chart's [values.yaml](https://github.com/enix/kube-image-keeper/blob/main/helm/kube-image-keeper/values.yaml) file, or on [kuik's page on the Artifact Hub](https://artifacthub.io/packages/helm/enix/kube-image-keeper).
-
-For instance, to extend the expiration delay to 3 months (90 days), you can deploy kuik like this:
-
-```bash
-helm upgrade --install \
- --create-namespace --namespace kuik-system \
- kube-image-keeper kube-image-keeper \
- --repo https://charts.enix.io/ \
- --set cachedImagesExpiryDelay=90
-```
-
-## Advanced usage
-
-### Pod filtering
-
-There are 3 ways to tell kuik which pods it should manage (or, conversely, which ones it should ignore).
-
-- If a pod has the label `kube-image-keeper.enix.io/image-caching-policy=ignore`, kuik will ignore the pod (it will not rewrite its image references).
-- If a pod is in an ignored Namespace, it will also be ignored. Namespaces can be ignored by setting the Helm value `controllers.webhook.ignoredNamespaces` (`kube-system` and the kuik namespace are ignored regardless of this parameter's value). (Note: this feature relies on the [NamespaceDefaultLabelName](https://kubernetes.io/docs/concepts/services-networking/network-policies/#targeting-a-namespace-by-its-name) feature gate to work.)
-- Finally, kuik will only work on pods matching a specific selector. By default, the selector is empty, which means "match all the pods". The selector can be set with the Helm value `controllers.webhook.objectSelector.matchExpressions`.
-
-This logic isn't implemented by the kuik controllers or webhook directly, but through Kubernetes' standard webhook object selectors. In other words, these parameters end up in the `MutatingWebhookConfiguration` template to filter which pods get presented to kuik's webhook. When the webhook rewrites the images for a pod, it adds a label to that pod, and the kuik controllers then rely on that label to know which `CachedImages` resources to create.
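-
-For instance, here is a minimal sketch of a pod carrying the ignore label (the pod name, container name, and image are hypothetical):
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: my-debug-pod        # hypothetical name
-  labels:
-    # tell kuik's webhook not to rewrite this pod's images
-    kube-image-keeper.enix.io/image-caching-policy: ignore
-spec:
-  containers:
-    - name: shell
-      image: alpine:3.19
-      command: ["sleep", "infinity"]
-```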
-
-Keep in mind that kuik will ignore pods scheduled in its own namespace or in the `kube-system` namespace, as recommended in the Kubernetes documentation ([Avoiding operating on the kube-system namespace](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#avoiding-operating-on-the-kube-system-namespace)).
-
-> It is recommended to exclude the namespace where your webhook is running with a namespaceSelector.
-> [...]
-> Accidentally mutating or rejecting requests in the kube-system namespace may cause the control plane components to stop functioning or introduce unknown behavior.
-
-#### Image pull policy
-
-In the case of a container configured with `imagePullPolicy: Never`, the container will always be filtered out, as it makes no sense to cache an image that will never be pulled and is always read from the node's disk.
-
-In the case of a container configured with `imagePullPolicy: Always`, with the tag `latest`, or with no tag (defaulting to `latest`), the container is filtered out by default, in order to keep the standard Kubernetes behavior of always pulling the newest version of the image (thus bypassing kuik's cache). This can be disabled by setting the value `controllers.webhook.ignorePullPolicyAlways` to `false`, as sketched below.
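-
-A minimal values sketch, assuming only the value named above:
-
-```yaml
-controllers:
-  webhook:
-    # also cache images pulled with imagePullPolicy: Always or the latest tag
-    ignorePullPolicyAlways: false
-```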
-
-### Cache persistence
-
-Persistence is disabled by default. You can enable it by setting the Helm value `registry.persistence.enabled=true`. This will create a PersistentVolumeClaim with a default size of 20 GiB. You can change that size by setting the value `registry.persistence.size`. Keep in mind that enabling persistence isn't enough to provide high availability of the registry! If you want kuik to be highly available, please refer to the [high availability guide](https://github.com/enix/kube-image-keeper/blob/main/docs/high-availability.md).
-
-Note that persistence requires your cluster to have some PersistentVolumes. If you don't have PersistentVolumes, kuik's registry Pod will remain `Pending` and your images won't be cached (but they will still be served transparently by kuik's image proxy).
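-
-For example, a values sketch enabling persistence with a larger volume (value paths as described above):
-
-```yaml
-registry:
-  persistence:
-    # create a PersistentVolumeClaim for the cache registry
-    enabled: true
-    # optional: override the default 20Gi size
-    size: 50Gi
-```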
-
-### Retain policy
-
-Sometimes, you want images to stay cached even when they are not used anymore (for instance when you run a workload for a fixed amount of time, stop it, and run it again later). You can prevent `CachedImages` from expiring by manually setting the `spec.retain` flag to `true`, as shown below:
-
-```yaml
-apiVersion: kuik.enix.io/v1alpha1
-kind: CachedImage
-metadata:
- name: docker.io-library-nginx-1.25
-spec:
- retain: true # here
- sourceImage: nginx:1.25
-```
-
-### Multi-arch cluster / Non-amd64 architectures
-
-By default, kuik only caches the `amd64` variant of an image. To cache more/other architectures, you need to set the `architectures` field in your helm values.
-
-Example:
-
-```yaml
-architectures: [amd64, arm]
-```
-
-Kuik only caches the architectures that are actually available for a given image, and will not fail when one of the requested architectures doesn't exist.
-
-No manual action is required when migrating an amd64-only cluster from v1.3.0 to v1.4.0.
-
-### Corporate proxy
-
-To configure kuik to work behind a corporate proxy, you can set the well-known `http_proxy` and `https_proxy` environment variables (both uppercase and lowercase variants work) through the Helm values `proxy.env` and `controllers.env`, as shown below:
-
-```yaml
-controllers:
- env:
- - name: http_proxy
- value: https://proxy.mycompany.org:3128
- - name: https_proxy
- value: https://proxy.mycompany.org:3128
-proxy:
- env:
- - name: http_proxy
- value: https://proxy.mycompany.org:3128
- - name: https_proxy
- value: https://proxy.mycompany.org:3128
-```
-
-Be aware that both the proxy and the controllers need to access the Kubernetes API, so you might need to define the `no_proxy` variable as well, to bypass the proxy for the Kubernetes API in case it is not reachable from your corporate proxy (which is true most of the time).
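-
-For instance, a sketch adding `no_proxy` to the controllers (the endpoint values below are hypothetical and cluster-specific):
-
-```yaml
-controllers:
-  env:
-    - name: no_proxy
-      # hypothetical: in-cluster API endpoint(s) to reach without the proxy
-      value: kubernetes.default.svc,10.96.0.1
-```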
-
-### Insecure registries & self-signed certificates
-
-In some cases, you may want to use images from self-hosted registries that are insecure (without TLS or with an invalid certificate, for instance) or that use a self-signed certificate. By default, kuik will not cache images from those registries for security reasons, even if you configured your container runtime (e.g. Docker, containerd) to trust them. However, you can choose to trust a list of insecure registries to pull from with the Helm value `insecureRegistries`. If you use a self-signed certificate, you can store the root certificate authority in a secret and reference it with the Helm value `rootCertificateAuthorities`. Here is an example using those two values:
-
-```yaml
-insecureRegistries:
- - http://some-registry.com
- - https://some-other-registry.com
-
-rootCertificateAuthorities:
- secretName: some-secret
- keys:
- - root.pem
-```
-
-You can of course use as many insecure registries or root certificate authorities as you want. In the case of a self-signed certificate, you can either use the `insecureRegistries` or the `rootCertificateAuthorities` value, but trusting the root certificate will always be more secure than allowing insecure registries.
-
-### Registry UI
-
-For debugging purposes, it may be useful to access the registry through a UI. This can be achieved by enabling the registry UI with the value `registryUI.enabled=true`. The UI is not publicly exposed through an ingress; you will need to open a port-forward to port `80`. You can set a custom username and password with the values `registryUI.auth.username` (default is `admin`) and `registryUI.auth.password` (empty by default).
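-
-For example, a port-forward sketch (assuming the release is named `kube-image-keeper` and installed in the `kuik-system` namespace):
-
-```bash
-kubectl port-forward -n kuik-system deploy/kube-image-keeper-registry-ui 8080:80
-# then browse http://localhost:8080 and log in with the configured credentials
-```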
-
-## Garbage collection and limitations
-
-When a `CachedImage` expires because it is not used anymore by the cluster, the image is deleted from the registry. However, since kuik uses [Docker's registry](https://docs.docker.com/registry/), this only deletes **reference files** like tags. It doesn't delete blobs, which account for most of the used disk space. [Garbage collection](https://docs.docker.com/registry/garbage-collection/) allows removing those blobs and freeing up space. The garbage collection job can be scheduled with the `registry.garbageCollectionSchedule` configuration, in a cron-like format. It is disabled by default, because running garbage collection without persistence would just wipe out the cache registry.
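-
-For instance, a sketch running garbage collection every Sunday at midnight (using the value named above; double-check the exact key in your chart version's values.yaml):
-
-```yaml
-registry:
-  # cron-like schedule for the garbage collection job
-  garbageCollectionSchedule: "0 0 * * 0"
-```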
-
-Garbage collection can only run when the registry is read-only (or stopped), otherwise image corruption may happen. (This is described in the [registry documentation](https://docs.docker.com/registry/garbage-collection/).) Before running garbage collection, kuik stops the registry. During that time, all image pulls are automatically proxied to the source registry, so that garbage collection is mostly transparent for cluster nodes.
-
-Reminder: since garbage collection recreates the cache registry pod, if you run garbage collection without persistence, this will wipe out the cache registry. It is not recommended for production setups!
-
-Currently, if the cache gets deleted, the `status.isCached` field of `CachedImages` isn't updated automatically, which means that `kubectl get cachedimages` will incorrectly report that images are cached. However, you can trigger a controller reconciliation with the following command, which will pull all images again:
-
-```bash
-kubectl annotate cachedimages --all --overwrite "timestamp=$(date +%s)"
-```
-
-## Known issues
-
-### Conflicts with other mutating webhooks
-
-Kuik's core functionality intercepts pod creation events to modify the definition of container images, facilitating image caching. However, some Kubernetes operators (for example cloudnative-pg) create pods autonomously and don't expect modifications to the image definitions. The unexpected rewriting of the `pod.spec.containers[].image` field can then lead to an infinite reconciliation loop, because the operator's expected target container image is endlessly rewritten by kuik's `MutatingWebhookConfiguration`. In that case, you may want to disable kuik for specific pods using the following Helm values:
-
-```yaml
-controllers:
- webhook:
- objectSelector:
- matchExpressions:
- - key: cnpg.io/podRole
- operator: NotIn
- values:
- - instance
-```
-
-### Private images are a bit less private
-
-Imagine the following scenario:
-
-- pods A and B use a private image, `example.com/myimage:latest`
-- pod A correctly references `imagePullSecrets`, but pod B does not
-
-On a normal Kubernetes cluster (without kuik), if pods A and B are on the same node, then pod B will run correctly even though it doesn't reference `imagePullSecrets`, because the image gets pulled when starting pod A, and once it's available on the node, any other pod can use it. However, if pods A and B are on different nodes, pod B won't start, because it won't be able to pull the private image. Some folks may use that behavior to segregate sensitive images to specific nodes, using a combination of taints, tolerations, or node selectors.
-
-However, when using kuik, once an image has been pulled and stored in kuik's registry, it becomes available to any node of the cluster. This means that using taints, tolerations, etc. to limit sensitive images to specific nodes won't work anymore.
-
-### Cluster autoscaling delays
-
-With kuik, all image pulls (except in the namespaces excluded from kuik) go through kuik's registry proxy, which runs on each node thanks to a DaemonSet. When a node gets added to a Kubernetes cluster (for instance, by the cluster autoscaler), a kuik registry proxy Pod gets scheduled on that node, but it takes a brief moment to start. During that time, other image pulls on that node will fail. Thanks to Kubernetes' automatic retry mechanisms, they will eventually succeed, but on new nodes you may see Pods in `ErrImagePull` or `ImagePullBackOff` status for a minute before everything works correctly. If you are using cluster autoscaling and try to achieve very fast scale-up times, this is something to keep in mind.
-
-### Garbage collection issue
-
-We use Docker Distribution in kuik, along with its integrated garbage collection tool. There is a bug that occurs when untagged images are pushed into the registry, causing it to crash: it is possible to end up in a situation where the registry is stuck in read-only mode and becomes unusable. Until a permanent solution is found, we advise keeping the value `registry.garbageCollection.deleteUntagged` set to `false`.
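-
-A minimal values sketch pinning that setting explicitly (key as named above):
-
-```yaml
-registry:
-  garbageCollection:
-    # keep false until the Docker Distribution bug is fixed
-    deleteUntagged: false
-```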
-
-### Images with digest
-
-As of today, there is no way to manage container images referenced by a digest. The rationale behind this limitation is that a digest is a hash of the image manifest, and the manifest contains the registry URL associated with the image. Thus, pushing the image to another registry (our cache registry) changes its digest, and as a consequence the image is no longer referenced by its original digest. Digest validation prevents pushing a manifest with an invalid digest. Therefore, we currently ignore all images referenced by a digest: those images will not be rewritten nor cached, to prevent kuik from malfunctioning.
-
-
-## License
-
-MIT License
-
-Copyright (c) 2020-2023 Enix SAS
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/helm/kube-image-keeper/crds/cachedimage-crd.yaml b/helm/kube-image-keeper/crds/cachedimage-crd.yaml
index cb9a882e..ae5363bf 100644
--- a/helm/kube-image-keeper/crds/cachedimage-crd.yaml
+++ b/helm/kube-image-keeper/crds/cachedimage-crd.yaml
@@ -60,11 +60,16 @@ spec:
description: CachedImageSpec defines the desired state of CachedImage
properties:
expiresAt:
+ description: ExpiresAt is the time when the image should be deleted
+ from cache if not in use (unset when the image is used again)
format: date-time
type: string
retain:
+ description: Retain defines if the image should be retained in cache
+ even when not used (prevents ExpiresAt from being populated)
type: boolean
sourceImage:
+ description: SourceImage is the path of the image to cache
type: string
required:
- sourceImage
@@ -73,37 +78,59 @@ spec:
description: CachedImageStatus defines the observed state of CachedImage
properties:
availableUpstream:
+ description: AvailableUpstream indicates whether the referenced
+ image is available upstream
type: boolean
digest:
+ description: Digest is the digest of the cached image
type: string
isCached:
+ description: IsCached indicates whether the image is already cached
type: boolean
lastSeenUpstream:
+ description: LastSeenUpstream is the last time the referenced image
+ was seen upstream
format: date-time
type: string
lastSuccessfulPull:
+ description: LastSuccessfulPull is the last time the upstream image
+ was successfully cached
format: date-time
type: string
lastSync:
+ description: LastSync is the last time the remote image digest was
+ checked
format: date-time
type: string
phase:
+ description: Phase is the current phase of the image
type: string
upToDate:
+ description: UpToDate indicates whether the cached image is up to
+ date with the upstream one
type: boolean
upstreamDigest:
+ description: UpstreamDigest is the upstream image digest
type: string
usedBy:
+ description: UsedBy is the list of pods using this image
properties:
count:
description: |-
+ Count is the number of pods using this image
+
+
jsonpath function .length() is not implemented, so the count field is required to display pods count in additionalPrinterColumns
see https://github.com/kubernetes-sigs/controller-tools/issues/447
type: integer
pods:
+ description: Pods is a list of references to pods using this CachedImage
items:
properties:
namespacedName:
+ description: NamespacedName is the namespaced name of a
+ pod (namespace/name)
type: string
type: object
type: array
diff --git a/helm/kube-image-keeper/crds/repository-crd.yaml b/helm/kube-image-keeper/crds/repository-crd.yaml
index 5bf51081..f9fbf589 100644
--- a/helm/kube-image-keeper/crds/repository-crd.yaml
+++ b/helm/kube-image-keeper/crds/repository-crd.yaml
@@ -51,18 +51,29 @@ spec:
description: RepositorySpec defines the desired state of Repository
properties:
name:
+ description: Name is the path of the repository (for instance enix/kube-image-keeper)
type: string
pullSecretNames:
+ description: PullSecretNames is the names of the pull secrets used to
+ pull CachedImages of this Repository
items:
type: string
type: array
pullSecretsNamespace:
+ description: PullSecretsNamespace is the namespace where pull secrets
+ can be found for CachedImages of this Repository
type: string
updateFilters:
+ description: UpdateFilters is a list of regexps, at least one of which
+ must match the .spec.SourceImage of a CachedImage from this Repository
+ for it to be updated at a regular interval
items:
type: string
type: array
updateInterval:
+ description: UpdateInterval is the interval in human-readable format
+ (1m, 1h, 1d...) at which matched CachedImages from this Repository
+ are updated (see spec.UpdateFilters)
type: string
required:
- name
@@ -143,11 +154,16 @@ spec:
- type
x-kubernetes-list-type: map
images:
+ description: Images is the count of CachedImages that come from this
+ repository
type: integer
lastUpdate:
+ description: LastUpdate is the last time images of this repository
+ were updated
format: date-time
type: string
phase:
+ description: Phase is the current phase of this repository
type: string
type: object
type: object
diff --git a/helm/kube-image-keeper/templates/garbage-collection-cron-job.yaml b/helm/kube-image-keeper/templates/garbage-collection-cron-job.yaml
index 10f8ba06..b0c02d2d 100644
--- a/helm/kube-image-keeper/templates/garbage-collection-cron-job.yaml
+++ b/helm/kube-image-keeper/templates/garbage-collection-cron-job.yaml
@@ -15,14 +15,20 @@ spec:
jobTemplate:
spec:
backoffLimit: 3
- activeDeadlineSeconds: 600
+ activeDeadlineSeconds: {{ .Values.registry.garbageCollection.activeDeadlineSeconds }}
template:
spec:
serviceAccountName: {{ include "kube-image-keeper.fullname" . }}-registry-restart
restartPolicy: Never
+ {{- with .Values.registry.garbageCollection.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
containers:
- name: kubectl
image: "{{ .Values.registry.garbageCollection.image.repository }}:{{ .Values.registry.garbageCollection.image.tag }}"
+ resources:
+ {{- toYaml .Values.registry.garbageCollection.resources | nindent 16 }}
imagePullPolicy: {{ .Values.registry.garbageCollection.image.pullPolicy }}
command:
- bash
diff --git a/helm/kube-image-keeper/templates/registry-ui-deployment.yaml b/helm/kube-image-keeper/templates/registry-ui-deployment.yaml
deleted file mode 100644
index b425a276..00000000
--- a/helm/kube-image-keeper/templates/registry-ui-deployment.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-{{- if .Values.registryUI.enabled -}}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "kube-image-keeper.fullname" . }}-registry-ui
- labels:
- {{- include "kube-image-keeper.registry-ui-labels" . | nindent 4 }}
-spec:
- replicas: 1
- selector:
- matchLabels:
- {{- include "kube-image-keeper.registry-ui-selectorLabels" . | nindent 6 }}
- template:
- metadata:
- {{- with .Values.registryUI.podAnnotations }}
- annotations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- labels:
- {{- include "kube-image-keeper.registry-ui-selectorLabels" . | nindent 8 }}
- spec:
- {{- with .Values.registryUI.imagePullSecrets }}
- imagePullSecrets:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- securityContext:
- {{- toYaml .Values.registryUI.podSecurityContext | nindent 8 }}
- containers:
- - name: registry-ui
- securityContext:
- {{- toYaml .Values.registryUI.securityContext | nindent 12 }}
- image: {{ .Values.registryUI.image.repository }}:{{ .Values.registryUI.image.tag }}
- imagePullPolicy: {{ .Values.registryUI.image.pullPolicy }}
- ports:
- - containerPort: 80
- resources:
- {{- toYaml .Values.registryUI.resources | nindent 12 }}
- env:
- - name: REGISTRY_HOST
- value: {{ include "kube-image-keeper.fullname" . }}-registry
- - name: REGISTRY_PORT
- value: "5000"
- - name: REGISTRY_PROTOCOL
- value: "http"
- - name: SSL_VERIFY
- value: "false"
- - name: USERNAME
- valueFrom:
- secretKeyRef:
- name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth
- key: username
- - name: PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth
- key: password
- {{- with .Values.registryUI.nodeSelector }}
- nodeSelector:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.registryUI.affinity }}
- affinity:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.registryUI.tolerations }}
- tolerations:
- {{- toYaml . | nindent 8 }}
- {{- end }}
-{{- end -}}
diff --git a/helm/kube-image-keeper/templates/registry-ui-secret.yaml b/helm/kube-image-keeper/templates/registry-ui-secret.yaml
deleted file mode 100644
index de4b3cd1..00000000
--- a/helm/kube-image-keeper/templates/registry-ui-secret.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-{{- if .Values.registryUI.enabled -}}
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth
- annotations:
- "helm.sh/resource-policy": "keep"
-type: kubernetes.io/basic-auth
-stringData:
- {{- $secretName := printf "%s-%s" (include "kube-image-keeper.fullname" .) "registry-ui-basic-auth" }}
- {{- $secretData := (get (lookup "v1" "Secret" .Release.Namespace $secretName) "data") | default dict }}
- # set $password to existing secret data or generate a random one when it does not exists
- {{- $password := (get $secretData "password" | b64dec) | default (randAlphaNum 32) }}
- username: {{ .Values.registryUI.auth.username }}
- password: {{ .Values.registryUI.auth.password | default $password }}
-{{- end }}
diff --git a/helm/kube-image-keeper/values.yaml b/helm/kube-image-keeper/values.yaml
index 939cde73..5635de55 100644
--- a/helm/kube-image-keeper/values.yaml
+++ b/helm/kube-image-keeper/values.yaml
@@ -230,6 +230,22 @@ registry:
schedule: "0 0 * * 0"
# -- If true, delete untagged manifests. Default to false since there is a known bug in **docker distribution** garbage collect job.
deleteUntagged: false
+ # -- Specify a nodeSelector for the garbage collector pod
+ nodeSelector: {}
+ # -- Deadline (in seconds) for the whole garbage collection job
+ activeDeadlineSeconds: 600
+ # -- Resources settings for the garbage collector pod
+ resources:
+ requests:
+ # -- CPU requests for the garbage collector pod
+ cpu: "10m"
+ # -- Memory requests for the garbage collector pod
+ memory: "10Mi"
+ limits:
+ # -- CPU limits for the garbage collector pod
+ cpu: "1"
+ # -- Memory limits for the garbage collector pod
+ memory: "512Mi"
image:
# -- Cronjob image repository
repository: bitnami/kubectl
@@ -303,37 +319,12 @@ registry:
# -- Annotations to add to the serviceAccount
annotations: {}
-registryUI:
+docker-registry-ui:
# -- If true, enable the registry user interface
enabled: false
- image:
- # -- Registry UI image repository
- repository: parabuzzle/craneoperator
- # -- Registry UI image pull policy
- pullPolicy: IfNotPresent
- # -- Registry UI image tag
- tag: "2.2.5"
- auth:
- # -- Registry UI username
- username: "admin"
- # -- Registry UI password
- password: ""
- # -- CPU / Memory resources requests / limits for the registry UI pod
- resources: {}
- # -- Specify secrets to be used when pulling registry UI image
- imagePullSecrets: []
- # -- Annotations to add to the registry UI pod
- podAnnotations: {}
- # -- Security context for the registry UI pod
- podSecurityContext: {}
- # -- Security context for containers of the registry UI pod
- securityContext: {}
- # -- Node selector for the registry UI pod
- nodeSelector: {}
- # -- Toleration for the registry UI pod
- tolerations: []
- # -- Affinity for the registry UI pod
- affinity: {}
+ ui:
+ proxy: true
+ dockerRegistryUrl: http://kube-image-keeper-registry:5000
minio:
# -- If true, install minio as a local storage backend for the registry
diff --git a/internal/controller/collector.go b/internal/controller/collector.go
index 345ce05a..f3b971a8 100644
--- a/internal/controller/collector.go
+++ b/internal/controller/collector.go
@@ -17,7 +17,7 @@ const subsystem = "controller"
var ProbeAddr = ""
var (
- imagePutInCache = prometheus.NewCounter(
+ ImagePutInCache = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: kuikMetrics.Namespace,
Subsystem: subsystem,
@@ -25,7 +25,7 @@ var (
Help: "Number of images put in cache successfully",
},
)
- imageRemovedFromCache = prometheus.NewCounter(
+ ImageRemovedFromCache = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: kuikMetrics.Namespace,
Subsystem: subsystem,
@@ -59,8 +59,8 @@ var (
func RegisterMetrics(client client.Client) {
// Register custom metrics with the global prometheus registry
metrics.Registry.MustRegister(
- imagePutInCache,
- imageRemovedFromCache,
+ ImagePutInCache,
+ ImageRemovedFromCache,
kuikMetrics.NewInfo(subsystem),
isLeader,
up,
diff --git a/internal/controller/kuik/cachedimage_controller.go b/internal/controller/kuik/cachedimage_controller.go
index 39767040..8aa44153 100644
--- a/internal/controller/kuik/cachedimage_controller.go
+++ b/internal/controller/kuik/cachedimage_controller.go
@@ -139,7 +139,7 @@ func (r *CachedImageReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, err
}
r.Recorder.Eventf(&cachedImage, "Normal", "CleanedUp", "Image %s successfully removed from cache", cachedImage.Spec.SourceImage)
- // imageRemovedFromCache.Inc()
+ kuikController.ImageRemovedFromCache.Inc()
log.Info("removing finalizer")
controllerutil.RemoveFinalizer(&cachedImage, cachedImageFinalizerName)
@@ -230,8 +230,6 @@ func (r *CachedImageReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
r.Recorder.Eventf(&cachedImage, "Normal", "Expired", "Image %s successfully expired", cachedImage.Spec.SourceImage)
return ctrl.Result{}, nil
- } else {
- return ctrl.Result{RequeueAfter: time.Until(expiresAt.Time)}, nil
}
}
@@ -253,7 +251,7 @@ func (r *CachedImageReconciler) Reconcile(ctx context.Context, req ctrl.Request)
} else {
log.Info("image cached")
r.Recorder.Eventf(&cachedImage, "Normal", "Cached", "Successfully cached image %s", cachedImage.Spec.SourceImage)
- // imagePutInCache.Inc()
+ kuikController.ImagePutInCache.Inc()
}
} else {
log.Info("image already present in cache, ignoring")
@@ -264,6 +262,11 @@ func (r *CachedImageReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
log.Info("cachedimage reconciled")
+
+ if !expiresAt.IsZero() {
+ return ctrl.Result{RequeueAfter: time.Until(expiresAt.Time)}, nil
+ }
+
return ctrl.Result{}, nil
}
diff --git a/internal/controller/kuik/repository_controller.go b/internal/controller/kuik/repository_controller.go
index 7b9d3300..cd451ee6 100644
--- a/internal/controller/kuik/repository_controller.go
+++ b/internal/controller/kuik/repository_controller.go
@@ -167,10 +167,16 @@ func (r *RepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
cachedImage.Annotations = map[string]string{}
}
cachedImage.Annotations[cachedImageAnnotationForceUpdateName] = "true"
- r.Patch(ctx, &cachedImage, patch)
+ err = r.Patch(ctx, &cachedImage, patch)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
}
repository.Status.LastUpdate = metav1.NewTime(time.Now())
+ if err := r.Status().Update(ctx, &repository); err != nil {
+ return ctrl.Result{}, err
+ }
}
}
diff --git a/internal/proxy/bearer.go b/internal/proxy/bearer.go
index d162c078..1f24e039 100644
--- a/internal/proxy/bearer.go
+++ b/internal/proxy/bearer.go
@@ -39,6 +39,9 @@ func (b *Bearer) GetToken() string {
func NewBearer(endpoint string, path string) (*Bearer, error) {
response, err := http.Get(endpoint + path)
+ if response != nil && response.Body != nil {
+ defer response.Body.Close()
+ }
if err != nil {
return nil, err
}
@@ -49,6 +52,9 @@ func NewBearer(endpoint string, path string) (*Bearer, error) {
url := fmt.Sprintf("%s?service=%s&scope=%s", wwwAuthenticate["realm"], wwwAuthenticate["service"], wwwAuthenticate["scope"])
response, err := http.Get(url)
+ if response != nil && response.Body != nil {
+ defer response.Body.Close()
+ }
if err != nil {
return nil, err
}
@@ -57,8 +63,6 @@ func NewBearer(endpoint string, path string) (*Bearer, error) {
if err != nil {
return nil, err
}
-
- response.Body.Close()
}
return &bearer, nil